max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
module_utils/oracle/oci_wait_utils.py | slmjy/oci-ansible-modules | 106 | 89145 |
# Copyright (c) 2019 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.oracle import oci_common_utils
try:
import oci
from oci.util import to_dict
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
LIFECYCLE_STATE_WAITER_KEY = "LIFECYCLE_STATE_WAITER"
WORK_REQUEST_WAITER_KEY = "WORK_REQUEST_WAITER"
NONE_WAITER_KEY = "NONE_WAITER_KEY"
class Waiter:
"""Interface defining wait method"""
def wait(self):
raise NotImplementedError(
"Expected to be implemented by the specific waiter classes."
)
class BaseWaiter(Waiter):
"""Base class for various waiters"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
self.client = client
self.operation_response = operation_response
self.wait_for_states = wait_for_states
self.resource_helper = resource_helper
def get_initial_response(self):
raise NotImplementedError(
"Expected to be implemented by the specific waiter classes."
)
def get_evaluate_response_lambda(self):
raise NotImplementedError(
"Expected to be implemented by the specific waiter classes."
)
def wait(self):
if not self.resource_helper.module.params.get("wait"):
return self.operation_response
wait_response = oci.wait_until(
self.client,
self.get_initial_response(),
evaluate_response=self.get_evaluate_response_lambda(),
max_wait_seconds=self.resource_helper.module.params.get(
"wait_timeout", oci_common_utils.MAX_WAIT_TIMEOUT_IN_SECONDS
),
)
return self.get_resource_from_wait_response(wait_response)
def get_resource_from_wait_response(self, wait_response):
raise NotImplementedError(
"Expected to be implemented by the specific waiter classes."
)
class LifecycleStateWaiterBase(BaseWaiter):
"""Base class for various waiters"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
self.client = client
self.operation_response = operation_response
self.wait_for_states = wait_for_states
self.resource_helper = resource_helper
def get_initial_response(self):
return self.resource_helper.get_resource()
def get_evaluate_response_lambda(self):
lowered_wait_for_states = [state.lower() for state in self.wait_for_states]
return (
lambda r: getattr(r.data, "lifecycle_state")
and getattr(r.data, "lifecycle_state").lower() in lowered_wait_for_states
)
def get_resource_from_wait_response(self, wait_response):
return wait_response.data
class LifecycleStateWaiter(LifecycleStateWaiterBase):
"""Waiter which waits on the lifecycle state of the resource"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
super(LifecycleStateWaiter, self).__init__(
client, resource_helper, operation_response, wait_for_states
)
class CreateOperationLifecycleStateWaiter(LifecycleStateWaiterBase):
"""Waiter which waits on the lifecycle state of the resource"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
super(CreateOperationLifecycleStateWaiter, self).__init__(
client, resource_helper, operation_response, wait_for_states
)
def get_initial_response(self):
identifier = self.operation_response.data.id
if not identifier:
self.resource_helper.module.fail_json(
"Error getting the resource identifier."
)
try:
id_orig = self.resource_helper.module.params[
self.resource_helper.get_module_resource_id_param()
]
except NotImplementedError:
return self.resource_helper.get_resource()
self.resource_helper.module.params[
self.resource_helper.get_module_resource_id_param()
] = identifier
get_response = self.resource_helper.get_resource()
self.resource_helper.module.params[
self.resource_helper.get_module_resource_id_param()
] = id_orig
return get_response
class WorkRequestWaiter(BaseWaiter):
"""Waiter which waits on the work request"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
self.client = client
self.resource_helper = resource_helper
self.operation_response = operation_response
self.wait_for_states = wait_for_states
def get_initial_response(self):
return self.client.get_work_request(
self.operation_response.headers["opc-work-request-id"]
)
def get_evaluate_response_lambda(self):
lowered_wait_for_states = [state.lower() for state in self.wait_for_states]
return (
lambda r: getattr(r.data, "status")
and getattr(r.data, "status").lower() in lowered_wait_for_states
)
def get_resource_from_wait_response(self, wait_response):
get_response = self.resource_helper.get_resource()
return get_response.data
class CreateOperationWorkRequestWaiter(WorkRequestWaiter):
"""Waiter which waits on the work request"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
super(CreateOperationWorkRequestWaiter, self).__init__(
client, resource_helper, operation_response, wait_for_states
)
def get_resource_from_wait_response(self, wait_response):
entity_type = oci_common_utils.get_entity_type(
self.resource_helper.resource_type
)
identifier = None
for resource in wait_response.data.resources:
if (
hasattr(resource, "entity_type")
and getattr(resource, "entity_type") == entity_type
):
identifier = resource.identifier
if not identifier:
self.resource_helper.module.fail_json(
msg="Could not get the resource identifier from work request response {0}".format(
to_dict(wait_response.data)
)
)
get_response = self.resource_helper.get_get_fn()(identifier)
return get_response.data
class NoneWaiter(Waiter):
"""Waiter which does not wait"""
def __init__(self, client, resource_helper, operation_response, wait_for_states):
self.client = client
self.resource_helper = resource_helper
self.operation_response = operation_response
self.wait_for_states = wait_for_states
def wait(self):
return self.operation_response.data
class AuditConfigurationLifecycleStateWaiter(LifecycleStateWaiter):
def __init__(self, client, resource_helper, operation_response, wait_for_states):
super(AuditConfigurationLifecycleStateWaiter, self).__init__(
client, resource_helper, operation_response, wait_for_states
)
def get_evaluate_response_lambda(self):
# The update operation currently returns a work request id but the AuditClient currently does not support
# waiting for the work request. So wait until the configuration is updated by checking the value.
return (
lambda r: r.data.retention_period_days
== self.resource_helper.module.params.get("retention_period_days")
)
# A map specifying the overrides for the default waiters.
# Key is a tuple consisting spec name, resource type and the operation and the value is the waiter class.
# For ex: ("waas", "waas_policy", oci_common_utils.UPDATE_OPERATION_KEY) -> CustomWaasWaiterClass
_WAITER_OVERRIDE_MAP = {
# The audit update operation currently returns a work request id but the AuditClient currently does not support
# waiting for the work request. So inject NoneWaiter and customize it to manually wait on the update condition.
("audit", "configuration", oci_common_utils.UPDATE_OPERATION_KEY): NoneWaiter
}
def get_waiter_override(namespace, resource_type, operation):
"""Return the custom waiter class if any for the resource and operation. Else return None."""
waiter_override_key = (namespace, resource_type, operation)
if waiter_override_key in _WAITER_OVERRIDE_MAP:
return _WAITER_OVERRIDE_MAP.get(waiter_override_key)
# check if an override exists for ANY_OPERATION_KEY. This is helpful if we need a custom waiter for all(any)
# resource operations
waiter_override_key = (namespace, resource_type, oci_common_utils.ANY_OPERATION_KEY)
if waiter_override_key in _WAITER_OVERRIDE_MAP:
return _WAITER_OVERRIDE_MAP.get(waiter_override_key)
return None
def get_waiter(
waiter_type, operation, client, resource_helper, operation_response, wait_for_states
):
"""Return appropriate waiter object based on type and the operation."""
# First check if there is any custom override for the waiter class. If exists, use it.
waiter_override_class = get_waiter_override(
resource_helper.namespace, resource_helper.resource_type, operation
)
if waiter_override_class:
return waiter_override_class(
client, resource_helper, operation_response, wait_for_states
)
if waiter_type == LIFECYCLE_STATE_WAITER_KEY:
if operation == oci_common_utils.CREATE_OPERATION_KEY:
return CreateOperationLifecycleStateWaiter(
client, resource_helper, operation_response, wait_for_states
)
return LifecycleStateWaiter(
client, resource_helper, operation_response, wait_for_states
)
elif waiter_type == WORK_REQUEST_WAITER_KEY:
if operation == oci_common_utils.CREATE_OPERATION_KEY:
return CreateOperationWorkRequestWaiter(
client, resource_helper, operation_response, wait_for_states
)
return WorkRequestWaiter(
client, resource_helper, operation_response, wait_for_states
)
return NoneWaiter(client, resource_helper, operation_response, wait_for_states)
def call_and_wait(
call_fn,
call_fn_args,
call_fn_kwargs,
waiter_type,
operation,
waiter_client,
resource_helper,
wait_for_states,
):
"""Call the given function and wait until the operation is completed and return the resource."""
operation_response = oci_common_utils.call_with_backoff(
call_fn, *call_fn_args, **call_fn_kwargs
)
waiter = get_waiter(
waiter_type,
operation,
waiter_client,
resource_helper,
operation_response=operation_response,
wait_for_states=wait_for_states,
)
return waiter.wait()
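

# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a minimal,
# self-contained re-creation of the override lookup performed by
# get_waiter_override() above, using stand-in operation keys and a dummy
# waiter class. It only demonstrates the exact-match-then-ANY fallback.
if __name__ == "__main__":
    UPDATE_KEY, ANY_KEY = "UPDATE", "ANY"  # stand-ins for oci_common_utils keys

    class DummyWaiter(object):
        pass

    overrides = {
        ("audit", "configuration", UPDATE_KEY): DummyWaiter,
        ("waas", "waas_policy", ANY_KEY): DummyWaiter,
    }

    def resolve(namespace, resource_type, operation):
        key = (namespace, resource_type, operation)
        if key in overrides:  # exact (namespace, resource_type, operation) match wins
            return overrides[key]
        # otherwise fall back to an ANY-operation override, if one is registered
        return overrides.get((namespace, resource_type, ANY_KEY))

    assert resolve("audit", "configuration", UPDATE_KEY) is DummyWaiter
    assert resolve("waas", "waas_policy", "CREATE") is DummyWaiter  # ANY fallback
    assert resolve("core", "instance", "CREATE") is None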
|
examples/project_path.py | qualichat/questionary | 851 | 89148 |
import questionary
if __name__ == "__main__":
path = questionary.path("Path to the projects version file").ask()
if path:
print(f"Found version file at {path} 🦄")
else:
print("No version file it is then!")
|
pandashells/bin/p_cdf.py | timgates42/pandashells | 878 | 89163 |
#! /usr/bin/env python
# standard library imports
import argparse
import textwrap
import warnings
import sys  # NOQA need this for mock testing
from pandashells.lib import module_checker_lib
# import required dependencies
module_checker_lib.check_for_modules([
'pandas',
'numpy',
'matplotlib',
'statsmodels'
])
from pandashells.lib import arg_lib, io_lib, plot_lib
import pandas as pd
import numpy as np
warnings.filterwarnings('ignore')
import pylab as pl
warnings.resetwarnings()
from statsmodels.distributions.empirical_distribution import ECDF
def main():
msg = textwrap.dedent(
"""
Plots the empirical cumulative distribution function (ECDF).
-----------------------------------------------------------------------
Examples:
* Plot ECDF for 10k samples from the standard normal distribution.
p.rand -t normal -n 10000 | p.cdf -c c0
* Instead of plotting, send ECDF values to stdout
p.rand -t normal -n 10000 | p.cdf -c c0 -q | head
-----------------------------------------------------------------------
"""
)
# read command line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
# specify column to use
parser.add_argument(
"-c", "--col", required=True, nargs=1,
help="Column to plot distribution")
parser.add_argument(
'-n', '--n_points', nargs=1, type=int,
help='Number of output points (default is twice input len)')
parser.add_argument(
'-q', '--quiet', action='store_true', default=False,
help='Quiet means no plots. Send numeric output to stdout instead')
# parse arguments
arg_lib.add_args(parser, 'decorating', 'io_in', 'io_out',)
args = parser.parse_args()
# get the input dataframe and extract column
df = io_lib.df_from_input(args)
x = df[args.col[0]].values
# create the output distribution
n_out = 2 * len(x) if args.n_points is None else args.n_points[0]
x_out = np.linspace(min(x), max(x), n_out)
y_out = ECDF(x)(x_out)
# send values to stdout if quiet specified
if args.quiet:
df_out = pd.DataFrame(
{'x': x_out, 'p_less': y_out, 'p_greater': 1 - y_out})
df_out = df_out[['x', 'p_less', 'p_greater']]
io_lib.df_to_output(args, df_out)
return
# set the appropriate theme and make the plot
plot_lib.set_plot_styling(args)
pl.plot(x_out, y_out, label='P({} < x)'.format(args.col[0]))
pl.plot(x_out, 1. - y_out, label='P({} > x)'.format(args.col[0]))
pl.xlabel('x')
pl.legend(loc='best')
plot_lib.refine_plot(args)
plot_lib.show(args)
if __name__ == '__main__': # pragma: no cover
main()
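

# Editor's note: a stand-alone sketch (not wired into the CLI above) of the
# computation main() performs: evaluate an ECDF on an even grid and tabulate
# P(X < x) and P(X > x), mirroring the --quiet output columns.
def _ecdf_sketch(n_samples=10000):
    x = np.random.normal(size=n_samples)  # stand-in for the selected input column
    x_out = np.linspace(x.min(), x.max(), 2 * n_samples)
    y_out = ECDF(x)(x_out)
    return pd.DataFrame({'x': x_out, 'p_less': y_out, 'p_greater': 1 - y_out})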
|
tempest/tests/lib/services/volume/v3/test_quotas_client.py | rishabh20111990/tempest | 254 | 89179 |
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v3 import quotas_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestQuotasClient(base.BaseServiceTest):
FAKE_QUOTAS = {
"quota_set": {
"id": '730a1cbd-68ca-4d68-8e09-d603f2dfa72b',
"gigabytes": 5,
"snapshots": 10,
"volumes": 20,
'backups': 10,
'groups': 10,
'per_volume_gigabytes': 1000,
'backup_gigabytes': 2000
}
}
FAKE_UPDATE_QUOTAS_RESPONSE = {
"quota_set": {
"gigabytes": 6,
"snapshots": 11,
"volumes": 21,
'backups': 11,
'groups': 11,
'per_volume_gigabytes': 1001,
'backup_gigabytes': 2001
}
}
def setUp(self):
super(TestQuotasClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = quotas_client.QuotasClient(fake_auth,
'volume',
'regionOne')
def _test_show_default_quota_set(self, bytes_body=False):
self.check_service_client_function(
self.client.show_default_quota_set,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_QUOTAS,
bytes_body, tenant_id="fake_tenant")
def _test_show_quota_set(self, bytes_body=False):
self.check_service_client_function(
self.client.show_quota_set,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_QUOTAS,
bytes_body, tenant_id="fake_tenant")
def _test_update_quota_set(self, bytes_body=False):
self.check_service_client_function(
self.client.update_quota_set,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_UPDATE_QUOTAS_RESPONSE,
bytes_body, tenant_id="fake_tenant")
def test_show_default_quota_set_with_str_body(self):
self._test_show_default_quota_set()
def test_show_default_quota_set_with_bytes_body(self):
self._test_show_default_quota_set(bytes_body=True)
def test_show_quota_set_with_str_body(self):
self._test_show_quota_set()
def test_show_quota_set_with_bytes_body(self):
self._test_show_quota_set(bytes_body=True)
def test_update_quota_set_with_str_body(self):
self._test_update_quota_set()
def test_update_quota_set_with_bytes_body(self):
self._test_update_quota_set(bytes_body=True)
def test_delete_quota_set(self):
self.check_service_client_function(
self.client.delete_quota_set,
'tempest.lib.common.rest_client.RestClient.delete',
{},
tenant_id="fake_tenant")
|
intro/language/demo.py | junghun73/Learning | 419 | 89212 |
"A demo module."
def print_b():
"Prints b."
print('b')
def print_a():
"Prints a."
print('a')
c = 2
d = 2
|
src/pandas_profiling/report/presentation/flavours/widget/variable.py | abhicantdraw/pandas-profiling | 8,107 | 89251 |
from ipywidgets import widgets
from pandas_profiling.report.presentation.core import Variable
class WidgetVariable(Variable):
def render(self) -> widgets.VBox:
items = [self.content["top"].render()]
if self.content["bottom"] is not None:
items.append(self.content["bottom"].render())
return widgets.VBox(items)
|
Contents/Code/support/scheduler.py | jippo015/Sub-Zero.bundle | 1,553 | 89261 |
# coding=utf-8
import datetime
import logging
import traceback
from config import config
def parse_frequency(s):
if s == "never" or s is None:
return None, None
kind, num, unit = s.split()
return int(num), unit
class DefaultScheduler(object):
queue_thread = None
scheduler_thread = None
running = False
registry = None
def __init__(self):
self.queue_thread = None
self.scheduler_thread = None
self.running = False
self.registry = []
self.tasks = {}
self.init_storage()
def init_storage(self):
if "tasks" not in Dict:
Dict["tasks"] = {"queue": []}
Dict.Save()
if "queue" not in Dict["tasks"]:
Dict["tasks"]["queue"] = []
def get_task_data(self, name):
if name not in Dict["tasks"]:
raise NotImplementedError("Task missing! %s" % name)
if "data" in Dict["tasks"][name]:
return Dict["tasks"][name]["data"]
def clear_task_data(self, name=None):
if name is None:
# full clean
Log.Debug("Clearing previous task data")
if Dict["tasks"]:
for task_name in Dict["tasks"].keys():
if task_name == "queue":
Dict["tasks"][task_name] = []
continue
Dict["tasks"][task_name]["data"] = {}
Dict["tasks"][task_name]["running"] = False
Dict.Save()
return
if name not in Dict["tasks"]:
raise NotImplementedError("Task missing! %s" % name)
Dict["tasks"][name]["data"] = {}
Dict["tasks"][name]["running"] = False
Dict.Save()
Log.Debug("Task data cleared: %s", name)
def register(self, task):
self.registry.append(task)
def setup_tasks(self):
# discover tasks;
self.tasks = {}
for cls in self.registry:
task = cls()
try:
task_frequency = Prefs["scheduler.tasks.%s.frequency" % task.name]
except KeyError:
task_frequency = getattr(task, "frequency", None)
self.tasks[task.name] = {"task": task, "frequency": parse_frequency(task_frequency)}
def run(self):
self.running = True
self.scheduler_thread = Thread.Create(self.scheduler_worker)
self.queue_thread = Thread.Create(self.queue_worker)
def stop(self):
self.running = False
def task(self, name):
if name not in self.tasks:
return None
return self.tasks[name]["task"]
def is_task_running(self, name):
task = self.task(name)
if task:
return task.running
def last_run(self, task):
if task not in self.tasks:
return None
return self.tasks[task]["task"].last_run
def next_run(self, task):
if task not in self.tasks or not self.tasks[task]["task"].periodic:
return None
frequency_num, frequency_key = self.tasks[task]["frequency"]
if not frequency_num:
return None
last = self.tasks[task]["task"].last_run
use_date = last
now = datetime.datetime.now()
if not use_date:
use_date = now
return max(use_date + datetime.timedelta(**{frequency_key: frequency_num}), now)
def run_task(self, name, *args, **kwargs):
task = self.tasks[name]["task"]
if task.running:
Log.Debug("Scheduler: Not running %s, as it's currently running.", name)
return False
Log.Debug("Scheduler: Running task %s", name)
try:
task.prepare(*args, **kwargs)
task.run()
except Exception:
Log.Error("Scheduler: Something went wrong when running %s: %s", name, traceback.format_exc())
finally:
try:
task.post_run(Dict["tasks"][name]["data"])
except:
Log.Error("Scheduler: task.post_run failed for %s: %s", name, traceback.format_exc())
Dict.Save()
config.sync_cache()
def dispatch_task(self, *args, **kwargs):
if "queue" not in Dict["tasks"]:
Dict["tasks"]["queue"] = []
Dict["tasks"]["queue"].append((args, kwargs))
def signal(self, name, *args, **kwargs):
for task_name in self.tasks.keys():
task = self.task(task_name)
if not task:
Log.Error("Scheduler: Task %s not found (?!)" % task_name)
continue
if not task.periodic:
continue
if task.running:
Log.Debug("Scheduler: Sending signal %s to task %s (%s, %s)", name, task_name, args, kwargs)
try:
status = task.signal(name, *args, **kwargs)
except NotImplementedError:
Log.Debug("Scheduler: Signal ignored by %s", task_name)
continue
if status:
Log.Debug("Scheduler: Signal accepted by %s", task_name)
else:
Log.Debug("Scheduler: Signal not accepted by %s", task_name)
continue
Log.Debug("Scheduler: Not sending signal %s to task %s, because: not running", name, task_name)
def queue_worker(self):
Thread.Sleep(10.0)
while 1:
if not self.running:
break
# single dispatch requested?
if Dict["tasks"]["queue"]:
# work queue off
queue = Dict["tasks"]["queue"][:]
Dict["tasks"]["queue"] = []
Dict.Save()
for args, kwargs in queue:
Log.Debug("Queue: Dispatching single task: %s, %s", args, kwargs)
Thread.Create(self.run_task, True, *args, **kwargs)
Thread.Sleep(5.0)
Thread.Sleep(1)
def scheduler_worker(self):
Thread.Sleep(10.0)
while 1:
if not self.running:
break
# scheduled tasks
for name in self.tasks.keys():
now = datetime.datetime.now()
info = self.tasks.get(name)
if not info:
Log.Error("Scheduler: Task %s not found (?!)" % name)
continue
task = info["task"]
if name not in Dict["tasks"] or not task.periodic:
continue
if task.running:
continue
frequency_num, frequency_key = info["frequency"]
if not frequency_num:
continue
# run legacy SARAM once
if name == "SearchAllRecentlyAddedMissing" and ("hasRunLSARAM" not in Dict or not Dict["hasRunLSARAM"]):
task = self.tasks["LegacySearchAllRecentlyAddedMissing"]["task"]
task.last_run = None
name = "LegacySearchAllRecentlyAddedMissing"
Dict["hasRunLSARAM"] = True
Dict.Save()
if not task.last_run or (task.last_run + datetime.timedelta(**{frequency_key: frequency_num}) <= now):
# fixme: scheduled tasks run synchronously. is this the best idea?
Thread.Create(self.run_task, True, name)
#Thread.Sleep(5.0)
#self.run_task(name)
Thread.Sleep(5.0)
Thread.Sleep(1)
scheduler = DefaultScheduler()
|
tests/test_pdp_isolate.py | antwhite/PDPbox | 675 | 89277 |
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_frame_equal
import pandas as pd
import matplotlib
from pdpbox.pdp import pdp_isolate, pdp_plot
class TestPDPIsolateBinary(object):
def test_pdp_isolate_binary_feature(
self, titanic_model, titanic_data, titanic_features
):
# feature_type: binary
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Sex",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "Sex"
assert pdp_isolate_out.feature_type == "binary"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == ["Sex_0", "Sex_1"]
assert pdp_isolate_out.hist_data is None
def test_pdp_isolate_onehot_feature(
self, titanic_model, titanic_data, titanic_features
):
# feature_type: onehot
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature=["Embarked_C", "Embarked_S", "Embarked_Q"],
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == ["Embarked_C", "Embarked_S", "Embarked_Q"]
assert pdp_isolate_out.feature_type == "onehot"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == [
"Embarked_C",
"Embarked_S",
"Embarked_Q",
]
assert pdp_isolate_out.hist_data is None
def test_pdp_isolate_numeric_feature(
self, titanic_model, titanic_data, titanic_features
):
# feature_type: numeric
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Fare",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "Fare"
assert pdp_isolate_out.feature_type == "numeric"
assert len(pdp_isolate_out.hist_data) == titanic_data.shape[0]
def test_pdp_isolate_cust_grid_points(
self, titanic_model, titanic_data, titanic_features
):
# use cust_grid_points
pdp_isolate_out = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Fare",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=range(0, 100, 5),
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 2
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "Fare"
assert pdp_isolate_out.feature_type == "numeric"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == [
"0",
"5",
"10",
"15",
"20",
"25",
"30",
"35",
"40",
"45",
"50",
"55",
"60",
"65",
"70",
"75",
"80",
"85",
"90",
"95",
]
assert len(pdp_isolate_out.hist_data) == titanic_data.shape[0]
class TestPDPIsolateRegression(object):
def test_pdp_isolate_regression(self, ross_model, ross_data, ross_features):
pdp_isolate_out = pdp_isolate(
model=ross_model,
dataset=ross_data,
model_features=ross_features,
feature="SchoolHoliday",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert pdp_isolate_out._type == "PDPIsolate_instance"
assert pdp_isolate_out.n_classes == 0
assert pdp_isolate_out.which_class is None
assert pdp_isolate_out.feature == "SchoolHoliday"
assert pdp_isolate_out.feature_type == "binary"
assert pdp_isolate_out.percentile_info == []
assert pdp_isolate_out.display_columns == ["SchoolHoliday_0", "SchoolHoliday_1"]
assert pdp_isolate_out.hist_data is None
def test_pdp_isolate_n_jobs(self, ross_model, ross_data, ross_features):
# test n_jobs > 1
_ = pdp_isolate(
model=ross_model,
dataset=ross_data,
model_features=ross_features,
feature="SchoolHoliday",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=2,
predict_kwds={},
data_transformer=None,
)
def test_pdp_isolate_multiclass(otto_model, otto_data, otto_features):
pdp_isolate_out = pdp_isolate(
model=otto_model,
dataset=otto_data,
model_features=otto_features,
feature="feat_67",
num_grid_points=10,
grid_type="percentile",
percentile_range=None,
grid_range=None,
cust_grid_points=None,
memory_limit=0.5,
n_jobs=1,
predict_kwds={},
data_transformer=None,
)
assert len(pdp_isolate_out) == 9
assert pdp_isolate_out[0]._type == "PDPIsolate_instance"
assert pdp_isolate_out[0].n_classes == 9
for i in range(9):
assert pdp_isolate_out[i].which_class == i
assert pdp_isolate_out[0].feature == "feat_67"
assert pdp_isolate_out[0].feature_type == "numeric"
class TestPDPPlotSingle(object):
@pytest.fixture
def pdp_sex(self, titanic_data, titanic_model, titanic_features):
result = pdp_isolate(
model=titanic_model,
dataset=titanic_data,
model_features=titanic_features,
feature="Sex",
)
return result
def test_pdp_plot_single_default(self, pdp_sex):
# single chart without data dist plot
fig, axes = pdp_plot(pdp_sex, "sex")
assert type(fig) == matplotlib.figure.Figure
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert type(axes["pdp_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
def test_pdp_plot_single_distplot(self, pdp_sex):
# single chart with data dist plot
fig, axes = pdp_plot(pdp_sex, "sex", plot_pts_dist=True)
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert sorted(axes["pdp_ax"].keys()) == ["_count_ax", "_pdp_ax"]
assert type(axes["pdp_ax"]["_pdp_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["pdp_ax"]["_count_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
class TestPDPPlotMulti(object):
@pytest.fixture
def pdp_feat_67_rf(self, otto_data, otto_model, otto_features):
result = pdp_isolate(
model=otto_model,
dataset=otto_data,
model_features=otto_features,
feature="feat_67",
)
return result
def test_pdp_plot_multi_default(self, pdp_feat_67_rf):
# multi charts without data dist plot
fig, axes = pdp_plot(
pdp_isolate_out=pdp_feat_67_rf,
feature_name="feat_67",
center=True,
x_quantile=True,
)
assert type(fig) == matplotlib.figure.Figure
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert len(axes["pdp_ax"]) == 9
assert type(axes["title_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["pdp_ax"][0]) == matplotlib.axes._subplots.Subplot
def test_pdp_plot_multi_which_classes(self, pdp_feat_67_rf):
# change which classes
fig, axes = pdp_plot(
pdp_feat_67_rf,
"feat_67",
center=True,
x_quantile=True,
ncols=2,
which_classes=[0, 3, 7],
)
assert len(axes["pdp_ax"]) == 3
def test_pdp_plot_multi_one_class(self, pdp_feat_67_rf):
# only keep 1 class
fig, axes = pdp_plot(
pdp_feat_67_rf,
"feat_67",
center=True,
x_quantile=True,
ncols=2,
which_classes=[5],
)
assert type(axes["pdp_ax"]) == matplotlib.axes._subplots.Subplot
def test_pdp_plot_multi_distplot(self, pdp_feat_67_rf):
# multi charts with data dist plot
fig, axes = pdp_plot(
pdp_isolate_out=pdp_feat_67_rf,
feature_name="feat_67",
center=True,
x_quantile=True,
plot_pts_dist=True,
)
assert sorted(axes.keys()) == ["pdp_ax", "title_ax"]
assert len(axes["pdp_ax"]) == 9
assert sorted(axes["pdp_ax"][0].keys()) == ["_count_ax", "_pdp_ax"]
assert type(axes["pdp_ax"][0]["_count_ax"]) == matplotlib.axes._subplots.Subplot
assert type(axes["pdp_ax"][0]["_pdp_ax"]) == matplotlib.axes._subplots.Subplot
|
brainiak/matnormal/mnrsa.py | osaaso3/brainiak | 235 | 89302 |
import tensorflow as tf
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression
from .covs import CovIdentity
from brainiak.utils.utils import cov2corr
import numpy as np
from brainiak.matnormal.matnormal_likelihoods import matnorm_logp_marginal_row
from brainiak.matnormal.utils import (
pack_trainable_vars,
unpack_trainable_vars,
make_val_and_grad,
unflatten_cholesky_unique,
flatten_cholesky_unique,
)
from scipy.optimize import minimize
__all__ = ["MNRSA"]
class MNRSA(BaseEstimator):
""" Matrix normal version of RSA.
The goal of this analysis is to find the covariance of the mapping from
some design matrix X to the fMRI signal Y. It does so by marginalizing over
the actual mapping (i.e. averaging over the uncertainty in it), which
happens to correct a bias imposed by structure in the design matrix on the
RSA estimate (see Cai et al., NIPS 2016).
This implementation makes different choices about residual covariance
relative to `brainiak.reprsimil.BRSA`: Here, the noise covariance is
assumed to be kronecker-separable. Informally, this means that all voxels
have the same temporal covariance, and all time points have the same
spatial covariance. This is in contrast to BRSA, which allows different
temporal covariance for each voxel. On the other hand, computational
efficiencies enabled by this choice allow MNRSA to support a richer class
of space and time covariances (anything in `brainiak.matnormal.covs`).
For users: in general, if you are worried about voxels each having
different temporal noise structure, you should use
`brainiak.reprsimil.BRSA`. If you are worried about between-voxel
correlations or temporal covariance structures that BRSA does not
support, you should use MNRSA.
.. math::
Y &\\sim \\mathcal{MN}(0, \\Sigma_t + XLL^TX^T + X_0X_0^T, \\Sigma_s)\\
U &= LL^T
Parameters
----------
time_cov : subclass of CovBase
Temporal noise covariance class following CovBase interface.
space_cov : subclass of CovBase
Spatial noise covariance class following CovBase interface.
optimizer : string, default: 'L-BFGS-B'
Name of scipy optimizer to use.
optCtrl : dict, default: None
Additional arguments to pass to scipy.optimize.minimize.
"""
def __init__(
self, time_cov, space_cov, n_nureg=5, optimizer="L-BFGS-B",
optCtrl=None
):
self.n_T = time_cov.size
self.n_V = space_cov.size
self.n_nureg = n_nureg
self.optMethod = optimizer
self.optCtrl = {} if optCtrl is None else optCtrl
self.X_0 = tf.Variable(
tf.random.normal([self.n_T, n_nureg], dtype=tf.float64), name="X_0"
)
self.train_variables = [self.X_0]
self.time_cov = time_cov
self.space_cov = space_cov
self.train_variables.extend(self.time_cov.get_optimize_vars())
self.train_variables.extend(self.space_cov.get_optimize_vars())
@property
def L(self):
"""
Cholesky factor of the RSA matrix.
"""
return unflatten_cholesky_unique(self.L_flat)
def fit(self, X, y, naive_init=True):
""" Estimate dimension reduction and cognitive model parameters
Parameters
----------
X: 2d array
Brain data matrix (TRs by voxels). Y in the math
y: 2d array or vector
Behavior data matrix (TRs by behavioral observations). X in the math
naive_init: bool, default=True
If True, initialize the Cholesky factor of the RSA covariance from a
naive RSA estimate (the sample covariance of ordinary least-squares
regression coefficients); otherwise initialize it randomly.
"""
# In the method signature we follow sklearn discriminative API
# where brain is X and behavior is y. Internally we are
# generative so we flip this here
X, Y = y, X
self.n_c = X.shape[1]
if naive_init:
# initialize from naive RSA
m = LinearRegression(fit_intercept=False)
m.fit(X=X, y=Y)
self.naive_U_ = np.cov(m.coef_.T)
naiveRSA_L = np.linalg.cholesky(self.naive_U_)
self.L_flat = tf.Variable(
flatten_cholesky_unique(naiveRSA_L), name="L_flat",
dtype="float64"
)
else:
chol_flat_size = (self.n_c * (self.n_c + 1)) // 2
self.L_flat = tf.Variable(
tf.random.normal([chol_flat_size], dtype="float64"),
name="L_flat",
dtype="float64",
)
self.train_variables.extend([self.L_flat])
def lossfn(theta): return -self.logp(X, Y)
val_and_grad = make_val_and_grad(lossfn, self.train_variables)
x0 = pack_trainable_vars(self.train_variables)
opt_results = minimize(fun=val_and_grad, x0=x0,
jac=True, method=self.optMethod, **self.optCtrl)
unpacked_theta = unpack_trainable_vars(
opt_results.x, self.train_variables)
for var, val in zip(self.train_variables, unpacked_theta):
var.assign(val)
self.U_ = self.L.numpy().dot(self.L.numpy().T)
self.C_ = cov2corr(self.U_)
def logp(self, X, Y):
""" MNRSA Log-likelihood"""
rsa_cov = CovIdentity(size=self.n_c + self.n_nureg)
x_stack = tf.concat([tf.matmul(X, self.L), self.X_0], 1)
return (
self.time_cov.logp
+ self.space_cov.logp
+ rsa_cov.logp
+ matnorm_logp_marginal_row(
Y,
row_cov=self.time_cov,
col_cov=self.space_cov,
marg=x_stack,
marg_cov=rsa_cov,
)
)
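

# Editor's illustrative usage sketch (not from the original source): fit MNRSA
# on random data with identity covariances. All shapes are made up; note the
# sklearn-style signature, where brain data is passed as X and the design
# matrix as y.
if __name__ == "__main__":
    n_T, n_V, n_C = 120, 50, 8                 # TRs, voxels, conditions
    brain = np.random.randn(n_T, n_V)          # Y in the math above
    design = np.random.randn(n_T, n_C)         # X in the math above
    model = MNRSA(time_cov=CovIdentity(size=n_T), space_cov=CovIdentity(size=n_V))
    model.fit(X=brain, y=design)
    print(model.C_)                            # estimated RSA correlation matrix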
|
python/fate_arch/federation/pulsar/__init__.py | hubert-he/FATE | 3,787 | 89306 |
from fate_arch.federation.pulsar._federation import Federation, MQ, PulsarManager
__all__ = ['Federation', 'MQ', 'PulsarManager']
|
guillotina/json/serialize_content.py | rboixaderg/guillotina | 173 | 89310 |
# -*- coding: utf-8 -*-
from guillotina import app_settings
from guillotina import configure
from guillotina.component import ComponentLookupError
from guillotina.component import get_multi_adapter
from guillotina.component import query_utility
from guillotina.content import get_all_behaviors
from guillotina.content import get_cached_factory
from guillotina.directives import merged_tagged_value_dict
from guillotina.directives import read_permission
from guillotina.interfaces import IAsyncBehavior
from guillotina.interfaces import IFolder
from guillotina.interfaces import IPermission
from guillotina.interfaces import IResource
from guillotina.interfaces import IResourceSerializeToJson
from guillotina.interfaces import IResourceSerializeToJsonSummary
from guillotina.json.serialize_value import json_compatible
from guillotina.profile import profilable
from guillotina.schema import get_fields
from guillotina.utils import apply_coroutine
from guillotina.utils import get_object_url
from guillotina.utils import get_security_policy
from zope.interface import Interface
import asyncio
import logging
logger = logging.getLogger("guillotina")
MAX_ALLOWED = 20
@configure.adapter(for_=(IResource, Interface), provides=IResourceSerializeToJson)
class SerializeToJson(object):
def __init__(self, context, request):
self.context = context
self.request = request
self.permission_cache = {}
@profilable
async def __call__(self, include=None, omit=None):
self.include = include or []
self.omit = omit or []
parent = self.context.__parent__
if parent is not None:
# We render the summary of the parent
try:
parent_summary = await get_multi_adapter(
(parent, self.request), IResourceSerializeToJsonSummary
)()
except ComponentLookupError:
parent_summary = {}
else:
parent_summary = {}
factory = get_cached_factory(self.context.type_name)
behaviors = []
for behavior_schema in factory.behaviors or ():
behaviors.append(behavior_schema.__identifier__)
result = {
"@id": get_object_url(self.context, self.request),
"@type": self.context.type_name,
"@name": self.context.__name__,
"@uid": self.context.uuid,
"@static_behaviors": behaviors,
"parent": parent_summary, # should be @parent
"is_folderish": IFolder.providedBy(self.context), # eek, should be @folderish?
"creation_date": json_compatible(self.context.creation_date),
"modification_date": json_compatible(self.context.modification_date),
}
main_schema = factory.schema
await self.get_schema(main_schema, self.context, result, False)
# include can be one of:
# - <field name> on content schema
# - namespace.IBehavior
# - namespace.IBehavior.field_name
included_ifaces = [name for name in self.include if "." in name]
included_ifaces.extend([name.rsplit(".", 1)[0] for name in self.include if "." in name])
for behavior_schema, behavior in await get_all_behaviors(self.context, load=False):
if "*" not in self.include:
dotted_name = behavior_schema.__identifier__
if dotted_name in self.omit or (
len(included_ifaces) > 0 and dotted_name not in included_ifaces
):
# make sure the schema isn't filtered
continue
if not getattr(behavior, "auto_serialize", True) and dotted_name not in included_ifaces:
continue
if IAsyncBehavior.implementedBy(behavior.__class__):
# providedBy not working here?
await behavior.load(create=False)
await self.get_schema(behavior_schema, behavior, result, True)
for post_serialize_processors in app_settings["post_serialize"]:
await apply_coroutine(post_serialize_processors, self.context, result)
return result
@profilable
async def get_schema(self, schema, context, result, behavior):
read_permissions = merged_tagged_value_dict(schema, read_permission.key)
schema_serial = {}
for name, field in get_fields(schema).items():
if not self.check_permission(read_permissions.get(name)):
continue
if behavior:
# omit/include for behaviors need full name
dotted_name = schema.__identifier__ + "." + name
else:
dotted_name = name
if "*" not in self.include and (
dotted_name in self.omit
or (
len(self.include) > 0
and (dotted_name not in self.include and schema.__identifier__ not in self.include)
)
):
# make sure the fields aren't filtered
continue
value = await self.serialize_field(context, field)
if not behavior:
result[name] = value
else:
schema_serial[name] = value
if behavior and len(schema_serial) > 0:
result[schema.__identifier__] = schema_serial
@profilable
async def serialize_field(self, context, field, default=None):
try:
value = await apply_coroutine(field.get, context)
except Exception:
logger.warning(
f"Could not find value for schema field" f"({field.__name__}), falling back to getattr"
)
value = getattr(context, field.__name__, default)
result = json_compatible(value)
if asyncio.iscoroutine(result):
result = await result
return result
def check_permission(self, permission_name):
if permission_name is None:
return True
if permission_name not in self.permission_cache:
permission = query_utility(IPermission, name=permission_name)
if permission is None:
self.permission_cache[permission_name] = True
else:
security = get_security_policy()
self.permission_cache[permission_name] = bool(
security.check_permission(permission.id, self.context)
)
return self.permission_cache[permission_name]
@configure.adapter(for_=(IFolder, Interface), provides=IResourceSerializeToJson)
class SerializeFolderToJson(SerializeToJson):
@profilable
async def __call__(self, include=None, omit=None):
include = include or []
omit = omit or []
result = await super(SerializeFolderToJson, self).__call__(include=include, omit=omit)
security = get_security_policy()
length = await self.context.async_len()
fullobjects = self.request.query.get("fullobjects", False) in (None, "", "true")
if (length > MAX_ALLOWED or length == 0) and not fullobjects:
result["items"] = []
else:
result["items"] = []
async for ident, member in self.context.async_items(suppress_events=True):
if not ident.startswith("_") and bool(
security.check_permission("guillotina.AccessContent", member)
):
if fullobjects:
result["items"].append(
await get_multi_adapter((member, self.request), IResourceSerializeToJson)()
)
else:
result["items"].append(
await get_multi_adapter((member, self.request), IResourceSerializeToJsonSummary)()
)
result["length"] = length
return result
@configure.adapter(for_=(IResource, Interface), provides=IResourceSerializeToJsonSummary)
class DefaultJSONSummarySerializer(object):
"""Default ISerializeToJsonSummary adapter.
Requires context to be adaptable to IContentListingObject, which is
the case for all content objects providing IResource.
"""
def __init__(self, context, request):
self.context = context
self.request = request
async def __call__(self):
summary = json_compatible(
{
"@id": get_object_url(self.context, self.request),
"@name": self.context.__name__,
"@type": self.context.type_name,
"@uid": self.context.uuid,
}
)
return summary
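

# Editor's illustrative sketch: how the `include` entries accepted by
# SerializeToJson.__call__ are interpreted. A bare name selects a schema
# field; a dotted name selects a behavior interface or a single behavior
# field, and both the full dotted name and its parent interface are
# collected. The dotted name below is only an example.
if __name__ == "__main__":
    include = ["title", "guillotina.behaviors.dublincore.IDublinCore.creators"]
    included_ifaces = [name for name in include if "." in name]
    included_ifaces.extend(name.rsplit(".", 1)[0] for name in include if "." in name)
    print(included_ifaces)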
|
configs/__init__.py | mutalisk999/bibi | 1,037 | 89323 |
# -*- coding: utf-8 -*-
from .config import BaseConfig, DevConfig, TestConfig, get_config
|
rasa/nlu/featurizers/sparse_featurizer/sparse_featurizer.py | fintzd/rasa | 9,701 | 89326 |
from abc import ABC
import scipy.sparse
from rasa.nlu.featurizers.featurizer import Featurizer
class SparseFeaturizer(Featurizer[scipy.sparse.spmatrix], ABC):
"""Base class for all sparse featurizers."""
pass
|
vit/formatter/status_long.py | kinifwyne/vit | 179 | 89345 |
from vit.formatter.status import Status
class StatusLong(Status):
pass
|
nntts/utils/plotting.py | entn-at/efficient_tts | 111 | 89396 |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import os
import logging
def plots(imvs, alphas, mel_preds, mel_gts, step, out_dir, num_plots=4):
output_dir = f"{out_dir}/images/"
os.makedirs(output_dir, exist_ok=True)
imvs = imvs.detach().cpu().numpy()
alphas = alphas.detach().cpu().numpy()
mel_preds = mel_preds.detach().cpu().numpy()
mel_gts = mel_gts.detach().cpu().numpy()
# logging.info(mel_gts.shape)
i = 1
# w, h = plt.figaspect(1.0 / len(imvs))
# fig = plt.Figure(figsize=(w * 1.3, h * 1.3))
for imv, alpha, mel_pred, mel_gt in zip(imvs, alphas, mel_preds, mel_gts):
fig, ax = plt.subplots(4)
ax[0].plot(range(len(imv)), imv)
ax[1].imshow(alpha[::-1])
ax[2].imshow(mel_pred.T)
ax[3].imshow(mel_gt.T)
fig.savefig(f"{output_dir}/step{step}_{i}.png")
i += 1
if i > num_plots:
break
def plots2(alphas, mel_preds, mel_gts, step, out_dir, num_plots=4):
output_dir = f"{out_dir}/images/"
os.makedirs(output_dir, exist_ok=True)
alphas = alphas.detach().cpu().numpy()
mel_preds = mel_preds.detach().cpu().numpy()
mel_gts = mel_gts.detach().cpu().numpy()
i = 1
for alpha, mel_pred, mel_gt in zip(alphas, mel_preds, mel_gts):
fig, ax = plt.subplots(3)
ax[0].imshow(alpha[::-1])
ax[1].imshow(mel_pred.T)
ax[2].imshow(mel_gt.T)
fig.savefig(f"{output_dir}/step{step}_{i}.png")
i += 1
if i > num_plots:
break
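

# Editor's illustrative sketch: driving plots2() above with random tensors to
# show the expected inputs -- alignment matrices and mel matrices, batched
# along the first dimension. Assumes torch is available; all sizes and the
# output directory are made up.
if __name__ == "__main__":
    import torch

    B, T_enc, T_dec, n_mels = 2, 40, 200, 80
    alphas = torch.rand(B, T_dec, T_enc)       # attention / alignment weights
    mel_preds = torch.rand(B, T_dec, n_mels)   # predicted mel spectrograms
    mel_gts = torch.rand(B, T_dec, n_mels)     # ground-truth mel spectrograms
    plots2(alphas, mel_preds, mel_gts, step=0, out_dir="/tmp/nntts_plots_demo")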
|
unsupervisedRR/models/model.py | Sebastian-Jung/unsupervisedRR | 105 | 89426 |
import torch
from torch import nn as nn
from ..utils.transformations import transform_points_Rt
from .alignment import align
from .backbones import ResNetDecoder, ResNetEncoder
from .correspondence import get_correspondences
from .model_util import get_grid, grid_to_pointcloud, points_to_ndc
from .renderer import PointsRenderer
def project_rgb(pc_0in1_X, rgb_src, renderer):
# create rgb_features
B, _, H, W = rgb_src.shape
rgb_src = rgb_src.view(B, 3, H * W)
rgb_src = rgb_src.permute(0, 2, 1).contiguous()
# Rasterize and Blend
project_0in1 = renderer(pc_0in1_X, rgb_src)
return project_0in1["feats"]
class PCReg(nn.Module):
def __init__(self, cfg):
super(PCReg, self).__init__()
# set encoder decoder
chan_in = 3
self.cfg = cfg
feat_dim = cfg.feat_dim
# No imagenet pretraining
pretrained = False
self.encode = ResNetEncoder(chan_in, feat_dim, pretrained)
self.decode = ResNetDecoder(feat_dim, 3, nn.Tanh(), pretrained)
self.renderer = PointsRenderer(cfg.renderer)
self.num_corres = cfg.alignment.num_correspodances
self.pointcloud_source = cfg.renderer.pointcloud_source
self.align_cfg = cfg.alignment
def forward(self, rgbs, K, deps, vps=None):
# Estimate Depth -- now for 1 and 2
n_views = len(rgbs)
output = {}
# Encode features
feats = [self.encode(rgbs[i]) for i in range(n_views)]
# generate pointclouds - generate grid once for efficiency
B, _, H, W = feats[0].shape
assert feats[0].shape[-1] == deps[0].shape[-1], "Same size"
grid = get_grid(B, H, W)
grid = grid.to(deps[0])
K_inv = K.inverse()
pointclouds = [
grid_to_pointcloud(K_inv, deps[i], feats[i], grid) for i in range(n_views)
]
pcs_X = [pc[0] for pc in pointclouds]
pcs_F = [pc[1] for pc in pointclouds]
if vps is not None:
# Drop first viewpoint -- assumed to be identity transformation
vps = vps[1:]
elif self.align_cfg.algorithm == "weighted_procrustes":
vps = []
cor_loss = []
for i in range(1, n_views):
corr_i = get_correspondences(
P1=pcs_F[0],
P2=pcs_F[i],
P1_X=pcs_X[0],
P2_X=pcs_X[i],
num_corres=self.num_corres,
ratio_test=(self.align_cfg.base_weight == "nn_ratio"),
)
Rt_i, cor_loss_i = align(corr_i, pcs_X[0], pcs_X[i], self.align_cfg)
vps.append(Rt_i)
cor_loss.append(cor_loss_i)
# add for visualization
output[f"corres_0{i}"] = corr_i
output[f"vp_{i}"] = Rt_i
else:
raise ValueError(f"How to align using {self.align_cfg.algorithm}?")
# add correspondance loss to output
output["corr_loss"] = sum(cor_loss)
# Rotate points into the frame of the view image
pcs_X_rot = [
transform_points_Rt(pcs_X[i + 1], vps[i], inverse=True)
for i in range(n_views - 1)
]
pcs_X = pcs_X[0:1] + pcs_X_rot
output["joint_pointcloud"] = torch.cat(pcs_X, dim=1).detach().cpu()
# Get RGB pointcloud as well for direct rendering
pcs_rgb = [rgb.view(B, 3, -1).permute(0, 2, 1).contiguous() for rgb in rgbs]
projs = []
# get joint for all values
if self.pointcloud_source == "joint":
pcs_X_joint = torch.cat(pcs_X, dim=1)
pcs_F_joint = torch.cat(pcs_F, dim=1)
pcs_RGB_joint = torch.cat(pcs_rgb, dim=1)
pcs_FRGB_joint = torch.cat((pcs_F_joint, pcs_RGB_joint), dim=2)
# Rasterize and Blend
for i in range(n_views):
if self.pointcloud_source == "other":
# get joint for all values except the one
pcs_X_joint = torch.cat(pcs_X[0:i] + pcs_X[i + 1 : n_views], dim=1)
pcs_F_joint = torch.cat(pcs_F[0:i] + pcs_F[i + 1 : n_views], dim=1)
pcs_RGB_joint = torch.cat(
pcs_rgb[0:i] + pcs_rgb[i + 1 : n_views], dim=1
)
pcs_FRGB_joint = torch.cat((pcs_F_joint, pcs_RGB_joint), dim=2)
if i > 0:
rot_joint_X = transform_points_Rt(pcs_X_joint, vps[i - 1])
rot_joint_X = points_to_ndc(rot_joint_X, K, (H, W))
else:
rot_joint_X = points_to_ndc(pcs_X_joint, K, (H, W))
projs.append(self.renderer(rot_joint_X, pcs_FRGB_joint))
# Decode
for i in range(n_views):
proj_FRGB_i = projs[i]["feats"]
proj_RGB_i = proj_FRGB_i[:, -3:]
proj_F_i = proj_FRGB_i[:, :-3]
output[f"rgb_decode_{i}"] = self.decode(proj_F_i)
output[f"rgb_render_{i}"] = proj_RGB_i
output[f"ras_depth_{i}"] = projs[i]["depth"]
output[f"cover_{i}"] = projs[i]["mask"].unsqueeze(1) # useless
return output
def forward_pcreg(self, rgbs, K, deps):
# Estimate Depth -- now for 1 and 2
n_views = len(rgbs)
output = {}
# Encode features
feats = [self.encode(rgbs[i]) for i in range(n_views)]
# generate pointclouds - generate grid once for efficiency
B, _, H, W = feats[0].shape
assert feats[0].shape[-1] == deps[0].shape[-1], "Same size"
grid = get_grid(B, H, W)
grid = grid.to(deps[0])
K_inv = K.inverse()
pointclouds = [
grid_to_pointcloud(K_inv, deps[i], feats[i], grid) for i in range(n_views)
]
pcs_X = [pc[0] for pc in pointclouds]
pcs_F = [pc[1] for pc in pointclouds]
vps = []
cor_loss = []
for i in range(1, n_views):
corr_i = get_correspondences(
P1=pcs_F[0],
P2=pcs_F[i],
P1_X=pcs_X[0],
P2_X=pcs_X[i],
num_corres=self.num_corres,
ratio_test=(self.align_cfg.base_weight == "nn_ratio"),
)
Rt_i, cor_loss_i = align(corr_i, pcs_X[0], pcs_X[i], self.align_cfg)
vps.append(Rt_i)
cor_loss.append(cor_loss_i)
# add for visualization
output[f"corres_0{i}"] = corr_i
output[f"vp_{i}"] = Rt_i
# add correspondance loss to output
output["corr_loss"] = sum(cor_loss)
# Rotate points into the frame of the view image
pcs_X_rot = [
transform_points_Rt(pcs_X[i + 1], vps[i], inverse=True)
for i in range(n_views - 1)
]
pcs_X = pcs_X[0:1] + pcs_X_rot
output["joint_pointcloud"] = torch.cat(pcs_X, dim=1).detach().cpu()
return output
def generate_pointclouds(self, K, deps, vps=None):
n_views = len(deps)
# generate pointclouds - generate grid once for efficiency
B, _, H, W = deps[0].shape
grid = get_grid(B, H, W)
grid = grid.to(deps[0])
K_inv = K.inverse()
pcs_X = [
grid_to_pointcloud(K_inv, deps[i], None, grid)[0] for i in range(n_views)
]
if vps is not None:
pcs_X_rot = [
transform_points_Rt(pcs_X[i + 1], vps[i + 1], inverse=True,)
for i in range(n_views - 1)
]
pcs_X = pcs_X[0:1] + pcs_X_rot
pcs_X = torch.cat(pcs_X, dim=1).detach().cpu()
return pcs_X
def get_feature_pcs(self, rgbs, K, deps):
# Estimate Depth -- now for 1 and 2
n_views = len(rgbs)
# Encode features
feats = [self.encode(rgbs[i]) for i in range(n_views)]
# generate pointclouds - generate grid once for efficiency
B, _, H, W = feats[0].shape
assert (
feats[0].shape[-1] == deps[0].shape[-1]
), f"Same size {feats[0].shape} - {deps[0].shape}"
grid = get_grid(B, H, W)
grid = grid.to(deps[0])
K_inv = K.inverse()
pointclouds = [
grid_to_pointcloud(K_inv, deps[i], feats[i], grid) for i in range(n_views)
]
pcs_X = [pc[0] for pc in pointclouds]
pcs_F = [pc[1] for pc in pointclouds]
return pcs_X, pcs_F, None
|
image_classification/token_labeling/tlt/models/lvvit.py | AK391/UniFormer | 367 | 89498 |
import torch
import torch.nn as nn
from timm.models.helpers import load_pretrained
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
from timm.models.resnet import resnet26d, resnet50d, resnet101d
import numpy as np
from .layers import *
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
'classifier': 'head',
**kwargs
}
default_cfgs = {
'LV_ViT_Tiny': _cfg(),
'LV_ViT': _cfg(),
'LV_ViT_Medium': _cfg(crop_pct=1.0),
'LV_ViT_Large': _cfg(crop_pct=1.0),
}
def get_block(block_type, **kargs):
if block_type=='mha':
# multi-head attention block
return MHABlock(**kargs)
elif block_type=='ffn':
# feed forward block
return FFNBlock(**kargs)
elif block_type=='tr':
# transformer block
return Block(**kargs)
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
cut_w = int(W * cut_rat)
cut_h = int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def get_dpr(drop_path_rate,depth,drop_path_decay='linear'):
if drop_path_decay=='linear':
# linear dpr decay
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
elif drop_path_decay=='fix':
# use fixed dpr
dpr= [drop_path_rate]*depth
else:
# use predefined drop_path_rate list
assert len(drop_path_rate)==depth
dpr=drop_path_rate
return dpr
class LV_ViT(nn.Module):
""" Vision Transformer with tricks
Arguments:
p_emb: different conv based position embedding (default: 4 layer conv)
skip_lam: residual scalar for skip connection (default: 1.0)
order: which order of layers will be used (default: None, will override depth if given)
mix_token: use mix token augmentation for batch of tokens (default: False)
return_dense: whether to return feature of all tokens with an additional aux_head (default: False)
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., drop_path_decay='linear', hybrid_backbone=None, norm_layer=nn.LayerNorm, p_emb='4_2', head_dim = None,
skip_lam = 1.0,order=None, mix_token=False, return_dense=False):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.output_dim = embed_dim if num_classes==0 else num_classes
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
if p_emb=='4_2':
patch_embed_fn = PatchEmbed4_2
elif p_emb=='4_2_128':
patch_embed_fn = PatchEmbed4_2_128
else:
patch_embed_fn = PatchEmbedNaive
self.patch_embed = patch_embed_fn(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
if order is None:
dpr=get_dpr(drop_path_rate, depth, drop_path_decay)
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, head_dim=head_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, skip_lam=skip_lam)
for i in range(depth)])
else:
# use given order to sequentially generate modules
dpr=get_dpr(drop_path_rate, len(order), drop_path_decay)
self.blocks = nn.ModuleList([
get_block(order[i],
dim=embed_dim, num_heads=num_heads, head_dim=head_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, skip_lam=skip_lam)
for i in range(len(order))])
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.return_dense=return_dense
self.mix_token=mix_token
if return_dense:
self.aux_head=nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if mix_token:
self.beta = 1.0
assert return_dense, "always return all features when mixtoken is enabled"
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, GroupLinear):
trunc_normal_(m.group_weight, std=.02)
if isinstance(m, GroupLinear) and m.group_bias is not None:
nn.init.constant_(m.group_bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_embeddings(self,x):
x = self.patch_embed(x)
return x
def forward_tokens(self, x):
B = x.shape[0]
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def forward_features(self,x):
# simple forward to obtain feature map (without mixtoken)
x = self.forward_embeddings(x)
x = x.flatten(2).transpose(1, 2)
x = self.forward_tokens(x)
return x
def forward(self, x):
x = self.forward_embeddings(x)
# token level mixtoken augmentation
if self.mix_token and self.training:
lam = np.random.beta(self.beta, self.beta)
patch_h, patch_w = x.shape[2],x.shape[3]
bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
temp_x = x.clone()
temp_x[:, :, bbx1:bbx2, bby1:bby2] = x.flip(0)[:, :, bbx1:bbx2, bby1:bby2]
x = temp_x
else:
bbx1, bby1, bbx2, bby2 = 0,0,0,0
x = x.flatten(2).transpose(1, 2)
x = self.forward_tokens(x)
x_cls = self.head(x[:,0])
if self.return_dense:
x_aux = self.aux_head(x[:,1:])
if not self.training:
return x_cls+0.5*x_aux.max(1)[0]
# recover the mixed part
if self.mix_token and self.training:
x_aux = x_aux.reshape(x_aux.shape[0],patch_h, patch_w,x_aux.shape[-1])
temp_x = x_aux.clone()
temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :]
x_aux = temp_x
x_aux = x_aux.reshape(x_aux.shape[0],patch_h*patch_w,x_aux.shape[-1])
return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)
return x_cls
@register_model
def vit(pretrained=False, **kwargs):
model = LV_ViT(patch_size=16, embed_dim=384, depth=16, num_heads=6, mlp_ratio=3.,
p_emb=1, **kwargs)
model.default_cfg = default_cfgs['LV_ViT']
return model
@register_model
def lvvit(pretrained=False, **kwargs):
model = LV_ViT(patch_size=16, embed_dim=384, depth=16, num_heads=6, mlp_ratio=3.,
p_emb='4_2',skip_lam=2., **kwargs)
model.default_cfg = default_cfgs['LV_ViT']
return model
@register_model
def lvvit_s(pretrained=False, **kwargs):
model = LV_ViT(patch_size=16, embed_dim=384, depth=16, num_heads=6, mlp_ratio=3.,
p_emb='4_2',skip_lam=2., return_dense=True,mix_token=True, **kwargs)
model.default_cfg = default_cfgs['LV_ViT']
return model
@register_model
def lvvit_m(pretrained=False, **kwargs):
model = LV_ViT(patch_size=16, embed_dim=512, depth=20, num_heads=8, mlp_ratio=3.,
p_emb='4_2',skip_lam=2., return_dense=True,mix_token=True, **kwargs)
model.default_cfg = default_cfgs['LV_ViT_Medium']
return model
@register_model
def lvvit_l(pretrained=False, **kwargs):
order = ['tr']*24 # this will override depth, can also be set as None
model = LV_ViT(patch_size=16, embed_dim=768,depth=24, num_heads=12, mlp_ratio=3.,
p_emb='4_2_128',skip_lam=3., return_dense=True,mix_token=True, order=order, **kwargs)
model.default_cfg = default_cfgs['LV_ViT_Large']
return model
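# Illustrative usage sketch: the batch size and 224x224 input below are arbitrary
# assumptions, and the snippet only runs when this file is executed directly.
if __name__ == '__main__':
    model = lvvit_s()                      # return_dense=True, mix_token=True
    dummy = torch.randn(1, 3, 224, 224)
    model.eval()
    with torch.no_grad():
        logits = model(dummy)              # eval: class logits fused with 0.5 * max of the aux token logits
    print(logits.shape)                    # torch.Size([1, 1000])
    model.train()
    cls_out, aux_out, bbox = model(dummy)  # train: class logits, per-token aux logits, MixToken bounding box
    print(cls_out.shape, aux_out.shape, bbox)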
|
portia_server/portia_dashboard/views.py
|
rmdes/portia-dashboard
| 223 |
89538
|
from django.conf import settings
from portia_api.jsonapi import JSONResponse
from portia_api.jsonapi.renderers import JSONRenderer
from .models import (Job, Log, Schedule,JobItem)
import time, datetime
import requests
from storage import (get_storage_class,create_project_storage)
from portia_orm.models import Project
import inspect
import uuid
import re
from django.db.models import Max
import logging
logger = logging.getLogger('portia_dashboard')
def _request_get(url):
    retry_times = 5
    res = None
    for i in range(retry_times):
        try:
            res = requests.get(url)
            if res.status_code != 200:
                continue
            break
        except requests.RequestException:
            continue
    return res
def _request_post(url):
    retry_times = 5
    res = None
    for i in range(retry_times):
        try:
            res = requests.post(url)
            if res.status_code != 200:
                continue
            break
        except requests.RequestException:
            continue
    return res
def matchDate(line):
matchThis = ""
matched = re.match(r'\d\d\d\d-\d\d-\d\d\ \d\d:\d\d:\d\d',line)
if matched:
#matches a date and adds it to matchThis
matchThis = matched.group()
else:
matchThis = "NONE"
return matchThis
def parseLine(line):
return re.findall( r'(?P<date>\d\d\d\d-\d\d-\d\d\ \d\d:\d\d:\d\d) \[(?P<source>[^\]]+)\] (?P<level>INFO|DEBUG|ERROR|WARNING|CRITICAL): (?P<text>.*)', line ) [0]
def generateDicts(log):
currentDict = {}
index = 1
for line in log.splitlines():
if line.startswith(matchDate(line)):
if currentDict:
yield currentDict
date, source, level, text = parseLine(line)
currentDict = {
"index" : index,
"date": date,
"source":source,
"level":level,
"text":text
}
index = index + 1
else:
currentDict["text"] += line
yield currentDict
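# Illustrative sketch (the log lines below are made up): for a scrapyd log such as
#   2021-01-01 10:00:00 [scrapy.core.engine] INFO: Spider opened
#   2021-01-01 10:00:05 [scrapy.core.engine] INFO: Closing spider (finished)
# list(generateDicts(log)) yields one dict per entry, e.g.
#   {'index': 1, 'date': '2021-01-01 10:00:00', 'source': 'scrapy.core.engine',
#    'level': 'INFO', 'text': 'Spider opened'}
# while continuation lines without a leading timestamp are appended to the
# previous entry's 'text'.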
def _get_log_from_scrapyd(project_id, spider_id, job_id ) :
res = _request_get("%s/logs/%s/%s/%s.log" %(settings.SCRAPYD_URL,project_id, spider_id, job_id ))
return res.text if res.status_code == 200 else ''
def _get_log(project_id, spider_id, job_id, job_status ):
log = None
try:
log = Log.objects.get(id=job_id )
except Log.DoesNotExist:
content = _get_log_from_scrapyd(project_id, spider_id, job_id )
log = Log.objects.create(id=job_id , content=content )
return log
if job_status != 'finished' :
log.content = _get_log_from_scrapyd(project_id, spider_id, job_id )
log.save()
return log
def job_log(request):
result = []
project_id = request.GET.get('project')
spider_id = request.GET.get('spider')
job_id = request.GET.get('job')
    job = Job.objects.filter(id=job_id).first()
    if job:
log = _get_log(project_id, job.spider, job.id, job.status )
if log :
result = list(generateDicts(log.content))
return JSONResponse({"project":project_id,"spider":spider_id, "job": job_id, "log":result})
def _get_log_count(project_id, spider_id, job_id, job_status ) :
    warnings, errors, criticals = 0, 0, 0
log = _get_log(project_id, spider_id, job_id, job_status )
if log :
try:
result = list(generateDicts(log.content ))
for item in result :
if item['level'] == 'WARNING' :
warnings += 1
elif item['level'] == 'ERROR' :
errors += 1
elif item['level'] == 'CRITICAL' :
criticals += 1
except KeyError:
pass
return warnings, errors, criticals
def job_cancel(request):
project_id = request.GET.get('project')
job_id = request.GET.get('job')
res = _request_post("%s/cancel.json?project=%s&job=%s" %(settings.SCRAPYD_URL,project_id, job_id ))
if res:
result = res.json()
if result.get("status", '') == 'ok' :
return JSONResponse({'status':'ok'})
return JSONResponse({'status':'error'})
def job_delete(request):
id = request.GET.get('job')
if id:
        Job.objects.filter(id=id).delete()
        Log.objects.filter(id=id).delete()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
def _get_timestamp_from_string( timestring ) :
dt = datetime.datetime.strptime(timestring,"%Y-%m-%d %H:%M:%S.%f")
ts = time.mktime (dt.timetuple()) * 1000
return ts
def _get_stub_job ():
try:
job = Job.objects.get(id='ffffffffffffffff0000000000000000')
except Job.DoesNotExist:
job = Job.objects.create(id = 'ffffffffffffffff0000000000000000', spider='', start_time = 0 , index = 0 )
return job
def _get_last_start_time ():
job = _get_stub_job()
max_start_time = job.start_time
return max_start_time
def _set_last_start_time(last_start_time) :
job = _get_stub_job()
job.start_time = last_start_time
job.save()
def _get_last_index():
job = _get_stub_job()
last_index = job.index
return last_index
def _set_last_index ( last_index ) :
job = _get_stub_job()
job.index = last_index
job.save()
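# Note: the four helpers above persist job-list bookkeeping (the last seen start
# time and a running job index) in a sentinel Job row with the fixed id
# 'ffffffffffffffff0000000000000000' instead of a dedicated table.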
def _update_jobs_model(project_id) :
#last_start_time = _get_last_start_time()
updated_count = 0
created_count = 0
res = _request_get("%s/listjobs.json?project=%s" %(settings.SCRAPYD_URL,project_id))
if res:
for status in ['pending', 'running', 'finished']:
data = res.json().get(status,[])
jobs = []
for item in data:
created = False
try:
job = Job.objects.get(id=item['id'])
except Job.DoesNotExist:
if 'start_time' in item and _get_timestamp_from_string(item['start_time']) <= _get_last_start_time() :
                        # the job was seen before and has since been removed, so skip it
continue
job = Job.objects.create(id = item['id'], spider=item['spider'], index = ( _get_last_index() + 1 ))
_set_last_index(job.index)
created = True
created_count += 1
                # the job may still change while it is not yet in 'finished' status
if job.status != 'finished' or job.start_time == 0 or job.end_time == 0 :
if 'start_time' in item :
job.start_time = _get_timestamp_from_string(item['start_time'])
if 'end_time' in item :
job.end_time = _get_timestamp_from_string(item['end_time'])
if status == 'finished' :
job.warning_count, job.error_count, job.critical_count = _get_log_count(project_id, job.spider, job.id, job.status )
job.status = status
job.save()
updated_count += 1
if created == True and job.start_time > _get_last_start_time() :
_set_last_start_time(job.start_time)
return created_count, updated_count
def _get_string_from_timestamp( timestamp) :
return datetime.datetime.fromtimestamp(timestamp / 1000 ).strftime("%Y-%m-%d %H:%M:%S")
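# Illustrative round trip (the timestamp below is made up): both helpers work in
# local time, e.g.
#   ts = _get_timestamp_from_string('2021-01-01 10:00:00.000')  # milliseconds since the epoch
#   _get_string_from_timestamp(ts)                              # '2021-01-01 10:00:00'
# and fractional seconds are dropped on the way back.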
def job_list(request) :
result = {}
project_id = request.GET.get('project')
spider = request.GET.get('spider', '')
_update_jobs_model(project_id )
for status in ['pending', 'running', 'finished']:
res_jobs = []
jobs = Job.objects.filter(status = status ).order_by('-start_time')
for job in jobs :
if (spider == '' or spider == job.spider ):
res_jobs.append({'id':job.id ,
'index' : job.index,
'project':project_id,
'spider':job.spider,
'start_time': _get_string_from_timestamp(job.start_time),
'end_time': _get_string_from_timestamp(job.end_time),
'error_count': job.error_count,
'warning_count': job.warning_count,
'critical_count': job.critical_count
})
result[status] = res_jobs
return JSONResponse(result)
def schedule_add(request):
project = request.GET.get('project')
spider = request.GET.get('spider')
interval = request.GET.get('interval')
times = request.GET.get('times')
if project and spider and interval:
schedule = Schedule(id = uuid.uuid1().hex,
project = project,
spider = spider,
start_time = int(time.time() * 1000),
interval = interval,
times = times,
date_update = int(time.time() * 1000)
)
schedule.save()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
def schedule_list(request):
result =[]
schedules = Schedule.objects.all()
for schedule in schedules:
result.append({'id':schedule.id ,
'project':schedule.project,
'spider':schedule.spider,
'start_time': _get_string_from_timestamp(schedule.start_time),
'update_time': _get_string_from_timestamp(schedule.date_update),
'interval' : schedule.interval,
'times' : schedule.times
})
return JSONResponse(result)
def schedule_del(request):
id = request.GET.get('id')
if id:
Schedule.objects.get(id=id).delete()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
def article_list(request):
result =[]
job = request.GET.get('job')
items = JobItem.objects(job=job)
for item in items:
res = { 'id': str(item.id) ,
'item-display-name' : 'item',
'job':item.job,
'spider':item.spider,
'url':item.url,
'time' : item.time.strftime("%Y-%m-%d %H:%M:%S")
}
result.append(res)
return JSONResponse(result)
def article_detail(request):
result = {}
job_item_id = request.GET.get('job_item')
job_items = JobItem.objects( id = job_item_id )
    if job_items:
for name, value in job_items[0].__dict__.iteritems():
if not name.startswith('_') and not inspect.ismethod(value):
#value = getattr(item, name )
result[name] = value
return JSONResponse(result)
def article_del(request):
spider_id = request.GET.get('spider')
job_id = request.GET.get('job')
job_item_id = request.GET.get('job_item')
if spider_id :
jobItems = JobItem.objects.filter(spider=spider_id)
for item in jobItems :
item.delete()
return JSONResponse({'status':'ok'})
elif job_id :
jobItems = JobItem.objects.filter(job=job_id)
for item in jobItems :
item.delete()
return JSONResponse({'status':'ok'})
elif job_item_id:
JobItem.objects.get(id=job_item_id).delete()
return JSONResponse({'status':'ok'})
else:
return JSONResponse({'status':'error'})
|
pyrgg/test.py
|
sepandhaghighi/pyrgg
| 164 |
89552
|
<filename>pyrgg/test.py
# -*- coding: utf-8 -*-
"""Test file."""
"""
>>> from pyrgg import *
>>> import pyrgg.params
>>> import random
>>> import os
>>> import json
>>> import yaml
>>> import pickle
>>> pyrgg.params.PYRGG_TEST_MODE = True
>>> get_precision(2)
0
>>> get_precision(2.2)
1
>>> get_precision(2.22)
2
>>> get_precision(2.223)
3
>>> convert_str_to_number("20")
20
>>> convert_str_to_number("20.2")
20.2
>>> convert_str_to_bool("1")
True
>>> convert_str_to_bool("3")
True
>>> convert_str_to_bool("0")
False
>>> is_float(10)
False
>>> is_float(10.2)
True
>>> is_float(None)
False
>>> result = input_filter({"file_name": "test","vertices": 5,"max_weight": 1000,"min_weight":455,"min_edge": -45,"max_edge": -11,"sign": False,"output_format": 19, "direct": False,"self_loop": True,"multigraph":False,"number_of_files":2})
>>> result == {'output_format': 1, 'min_weight': 455, 'min_edge': 5, 'max_edge': 5, 'file_name': 'test', 'vertices': 5, 'max_weight': 1000, 'sign': False, "direct": False,"self_loop": True,"multigraph":False,"number_of_files":2}
True
>>> result = input_filter({"file_name": "test","vertices": 5,"max_weight": 1000,"min_weight":455,"min_edge": -45,"max_edge": -11,"sign": False,"output_format": 19, "direct": False,"self_loop": False,"multigraph":False,"number_of_files":2})
>>> result == {'output_format': 1, 'min_weight': 455, 'min_edge': 4, 'max_edge': 4, 'file_name': 'test', 'vertices': 5, 'max_weight': 1000, 'sign': False, "direct": False,"self_loop": False,"multigraph":False,"number_of_files":2}
True
>>> result = input_filter({"file_name": "test","vertices": -5,"max_weight": 1000,"min_weight":455,"min_edge": -45,"max_edge": -11,"sign": False,"output_format": 19, "direct": False,"self_loop": False,"multigraph":True,"number_of_files":-1})
>>> result == {'output_format': 1, 'min_weight': 455, 'min_edge': 11, 'max_edge': 45, 'file_name': 'test', 'vertices': 5, 'max_weight': 1000, 'sign': False, "direct": False,"self_loop": False,"multigraph":True,"number_of_files":1}
True
>>> result = input_filter({"file_name": "test2","vertices": 23,"max_weight": 2,"min_weight": 80,"min_edge": 23,"max_edge": 1,"sign": True,"output_format": 1, "direct": False,"self_loop": True,"multigraph":False,"number_of_files":100})
>>> result == {'min_weight': 2, 'vertices': 23, 'file_name': 'test2', 'max_edge': 23, 'min_edge': 1, 'max_weight': 80, 'output_format': 1, 'sign': True, "direct": False,"self_loop": True,"multigraph":False,"number_of_files":100}
True
>>> logger('test',100,50,1000,10,1,0,0,1,20,1,'2min')
>>> file=open('logfile.log','r')
>>> print("\n".join(file.read().splitlines()[1:-1]))
Filename : test
Vertices : 100
Total Edges : 50
Max Edge : 1000
Min Edge : 10
Directed : True
Signed : False
Multigraph : False
Self Loop : True
Weighted : True
Max Weight : 20
Min Weight : 1
Elapsed Time : 2min
>>> convert_bytes(200)
'200.0 bytes'
>>> convert_bytes(6000)
'5.9 KB'
>>> convert_bytes(80000)
'78.1 KB'
>>> time_convert(33)
'00 days, 00 hours, 00 minutes, 33 seconds'
>>> time_convert(15000)
'00 days, 04 hours, 10 minutes, 00 seconds'
>>> time_convert('sadasdasd')
Traceback (most recent call last):
...
ValueError: could not convert string to float: 'sadasdasd'
>>> line(12,"*")
************
>>> random.seed(2)
>>> sign_gen()
1
>>> random.seed(11)
>>> sign_gen()
-1
>>> used_vertices = {k:[] for k in range(1,41)}
>>> degree_dict = {k:0 for k in range(1,41)}
>>> degree_dict_sort = {k:{} for k in range(41)}
>>> degree_dict_sort[0] = {i:i for i in range(1,41)}
>>> all_vertices = list(range(1, 41))
>>> random.seed(2)
>>> branch_gen(1,10,10,1,20,True,True,True,False,used_vertices,degree_dict,degree_dict_sort)
[[4, 25, 18, 3, 30, 34, 2, 26, 14, 11], [3, 10, 20, 14, -18, -2, -15, -14, 8, 6]]
>>> random.seed(20)
>>> branch_gen(1,10,4,1,20,False,True,True,False,used_vertices,degree_dict,degree_dict_sort)
[[], []]
>>> used_vertices = {k:[] for k in range(1,41)}
>>> degree_dict = {k:0 for k in range(1,41)}
>>> degree_dict_sort = {k:{} for k in range(41)}
>>> degree_dict_sort[0] = {i:i for i in range(1,41)}
>>> branch_gen(1,10,4,1,20,False,True,True,False,used_vertices,degree_dict,degree_dict_sort)
[[10, 7, 39, 2], [9, 11, 6, 14]]
>>> branch_gen(40,1,20,1)
Traceback (most recent call last):
...
TypeError: branch_gen() missing 8 required positional arguments: 'max_weight', 'sign', 'direct', 'self_loop', 'multigraph', 'used_vertices', 'degree_dict', and 'degree_sort_dict'
>>> random.seed(2)
>>> edge_gen(20,0,400,2,10,True,True,True,False)
[{1: [3, 7], 2: [4, 17, 20, 9, 11], 3: [14, 8, 5, 12, 16, 19, 15], 4: [15, 17, 12, 8, 14, 13], 5: [16, 9, 7, 20, 19, 18, 13, 5], 6: [6, 10], 7: [18, 10, 11], 8: [], 9: [], 10: [12, 18, 8, 1, 14], 11: [9, 11], 12: [], 13: [], 14: [19, 16, 17, 20, 15], 15: [6, 1, 19], 16: [12, 13, 8, 9, 17], 17: [], 18: [9, 12, 17, 6, 20, 19, 1], 19: [13], 20: []}, {1: [184, -128], 2: [220, -278, -257, 14, -163], 3: [286, 118, 166, 261, -263, 228, -303], 4: [-82, -335, 250, -256, -338, -179], 5: [-337, -358, -395, -155, -159, 250, -350, -371], 6: [30, -302], 7: [386, -125, 216], 8: [], 9: [], 10: [127, 42, 12, 191, 80], 11: [-301, 77], 12: [], 13: [], 14: [146, -15, -282, 135, 242], 15: [-52, -65, -249], 16: [-132, -334, 343, -17, 87], 17: [], 18: [126, -37, 302, -131, -142, 77, -209], 19: [123], 20: []}, 61]
>>> random.seed(11)
>>> edge_gen(20,0,100,2,10,False,True,True,False)
[{1: [18, 15, 19, 7, 20, 11, 2, 6, 3], 2: [17], 3: [8, 4, 5, 9, 12, 10, 14, 16], 4: [20, 13, 4, 6], 5: [12, 7, 11, 10, 14], 6: [9], 7: [19], 8: [8, 18, 11, 2, 16, 17, 10], 9: [15, 12, 18], 10: [20, 14, 13, 15, 17, 16], 11: [19, 7, 20], 12: [13], 13: [2, 16, 13], 14: [18, 19, 6, 14, 17, 15], 15: [6, 7, 16], 16: [17, 20, 12, 18], 17: [19], 18: [7, 6, 9, 12, 20], 19: [19, 11, 4], 20: []}, {1: [99, 57, 75, 23, 80, 23, 57, 18, 68], 2: [50], 3: [79, 67, 7, 24, 76, 99, 41, 75], 4: [29, 63, 84, 58], 5: [70, 90, 40, 65, 3], 6: [51], 7: [37], 8: [2, 0, 26, 60, 90, 53, 72], 9: [43, 39, 1], 10: [15, 31, 1, 59, 22, 57], 11: [98, 53, 49], 12: [53], 13: [34, 2, 23], 14: [82, 12, 18, 56, 1, 37], 15: [9, 26, 1], 16: [47, 58, 75, 73], 17: [23], 18: [39, 78, 92, 20, 49], 19: [10, 6, 13], 20: []}, 74]
>>> edge_gen(0,400,2,10,1)
Traceback (most recent call last):
...
TypeError: edge_gen() missing 4 required positional arguments: 'sign', 'direct', 'self_loop', and 'multigraph'
>>> random.seed(2)
>>> dimacs_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.gr','r')
>>> print(file.read())
c FILE :testfile.gr
c No. of vertices :10
c No. of edges :7
c Max. weight :200
c Min. weight :0
c Min. edge :0
c Max. edge :2
p sp 10 7
a 4 3 -64
a 5 6 148
a 5 9 110
a 6 10 -139
a 7 7 7
a 8 2 -97
a 9 1 60
<BLANKLINE>
>>> random.seed(4)
>>> dimacs_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.gr','r')
>>> print(file.read())
c FILE :testfile2.gr
c No. of vertices :30
c No. of edges :35
c Max. weight :50
c Min. weight :0
c Min. edge :0
c Max. edge :4
p sp 30 35
a 1 10 46
a 2 18 5
a 2 4 25
a 2 22 -48
a 4 23 -17
a 5 7 -13
a 7 15 10
a 7 17 -40
a 8 8 -42
a 8 25 11
a 9 29 -5
a 10 3 -36
a 10 27 -48
a 11 13 -27
a 11 26 -27
a 11 21 14
a 11 16 -2
a 14 20 -44
a 14 14 43
a 14 12 26
a 15 28 -11
a 16 30 -40
a 16 24 20
a 19 19 7
a 20 12 -29
a 20 1 22
a 22 24 20
a 22 23 -9
a 23 18 18
a 23 27 28
a 24 6 -24
a 25 17 23
a 27 6 -50
a 28 21 28
a 28 13 -13
<BLANKLINE>
>>> random.seed(20)
>>> dimacs_maker('testfile3',10,30,100,0,4,False,True,True,False)
137
>>> file=open('testfile3.gr','r')
>>> print(file.read())
c FILE :testfile3.gr
c No. of vertices :100
c No. of edges :137
c Max. weight :30
c Min. weight :10
c Min. edge :0
c Max. edge :4
p sp 100 137
a 1 34 30
a 3 76 15
a 3 5 23
a 4 13 13
a 4 21 20
a 4 67 28
a 5 60 16
a 5 32 20
a 5 92 20
a 6 64 12
a 6 94 26
a 7 62 12
a 7 36 28
a 7 42 11
a 8 20 12
a 9 47 19
a 10 49 15
a 10 27 10
a 11 48 17
a 11 51 11
a 13 58 14
a 13 70 29
a 14 37 30
a 14 61 27
a 14 87 15
a 15 84 13
a 16 83 28
a 17 45 17
a 17 24 29
a 17 18 26
a 18 59 15
a 19 98 12
a 21 2 30
a 21 99 20
a 22 69 26
a 22 96 11
a 22 88 15
a 24 79 20
a 24 12 12
a 24 82 13
a 26 50 30
a 26 30 19
a 29 52 26
a 31 25 26
a 32 68 14
a 33 65 13
a 33 78 13
a 33 55 17
a 34 63 13
a 35 44 27
a 35 57 14
a 37 74 10
a 37 41 16
a 37 100 30
a 38 72 13
a 38 56 16
a 39 91 19
a 39 43 13
a 41 28 22
a 41 81 19
a 42 90 13
a 42 46 28
a 42 97 16
a 45 86 10
a 45 53 18
a 46 85 13
a 46 23 11
a 47 71 29
a 48 95 12
a 48 77 19
a 48 93 11
a 49 75 22
a 50 73 18
a 50 40 24
a 50 54 28
a 51 80 17
a 51 66 19
a 51 89 20
a 52 58 29
a 52 16 21
a 52 43 12
a 53 8 13
a 53 98 17
a 54 55 10
a 56 62 26
a 56 27 10
a 57 70 26
a 58 44 22
a 59 90 27
a 59 91 19
a 59 78 29
a 60 87 12
a 60 92 25
a 61 69 14
a 61 79 17
a 62 25 21
a 63 97 27
a 63 29 30
a 65 9 26
a 65 64 21
a 66 67 27
a 66 95 19
a 66 93 30
a 68 30 18
a 70 83 12
a 70 99 15
a 71 31 17
a 71 89 20
a 73 36 18
a 75 72 12
a 76 2 26
a 76 12 25
a 76 86 22
a 78 23 19
a 78 100 27
a 79 40 24
a 80 84 26
a 80 80 14
a 81 20 16
a 82 15 16
a 82 88 22
a 83 19 19
a 84 85 13
a 84 28 16
a 85 77 16
a 85 94 23
a 86 1 21
a 87 74 15
a 87 96 19
a 90 93 22
a 92 49 14
a 95 98 26
a 95 55 11
a 97 38 28
a 99 19 29
a 99 89 24
a 100 40 11
<BLANKLINE>
>>> dimacs_maker('testfile', 0, 200, 10, 0,0,True)
Traceback (most recent call last):
...
TypeError: dimacs_maker() missing 3 required positional arguments: 'direct', 'self_loop', and 'multigraph'
>>> random.seed(2)
>>> json_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.json','r')
>>> testfile_1=json.load(file)
>>> testfile_1['graph']['nodes'][1]
{'id': 2}
>>> testfile_1['graph']['edges'][1]['source']
5
>>> testfile_1['graph']['edges'][1]['target']
6
>>> testfile_1['graph']['edges'][1]['weight']
148
>>> json_to_yaml('testfile')
>>> file=open('testfile.yaml','r')
>>> testfile_1_yaml=yaml.load(file)
>>> testfile_1_yaml['graph']['edges'][1]['source']
5
>>> testfile_1_yaml['graph']['edges'][1]['target']
6
>>> testfile_1_yaml['graph']['edges'][1]['weight']
148
>>> json_to_pickle('testfile')
>>> testfile_1_p=pickle.load( open( 'testfile.p', 'rb' ) )
>>> testfile_1_p['graph']['edges'][1]['source']
5
>>> testfile_1_p['graph']['edges'][1]['target']
6
>>> testfile_1_p['graph']['edges'][1]['weight']
148
>>> random.seed(4)
>>> json_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.json','r')
>>> testfile_2=json.load(file)
>>> testfile_2['graph']['nodes'][1]
{'id': 2}
>>> testfile_2['graph']['edges'][1]['source']
2
>>> testfile_2['graph']['edges'][1]['target']
18
>>> testfile_2['graph']['edges'][1]['weight']
5
>>> json_to_yaml('testfile2')
>>> file=open('testfile2.yaml','r')
>>> testfile_2_yaml=yaml.load(file)
>>> testfile_2_yaml['graph']['nodes'][1]
{'id': 2}
>>> testfile_2_yaml['graph']['edges'][1]['source']
2
>>> testfile_2_yaml['graph']['edges'][1]['target']
18
>>> testfile_2_yaml['graph']['edges'][1]['weight']
5
>>> json_to_pickle('testfile2')
>>> testfile_2_p=pickle.load( open( 'testfile2.p', 'rb' ) )
>>> testfile_2_p['graph']['edges'][1]['source']
2
>>> testfile_2_p['graph']['edges'][1]['target']
18
>>> testfile_2_p['graph']['edges'][1]['weight']
5
>>> random.seed(20)
>>> json_maker('testfile3',10,30,100,0,4,False,True,True,False)
137
>>> file=open('testfile3.json','r')
>>> testfile_3=json.load(file)
>>> testfile_3['graph']['nodes'][1]
{'id': 2}
>>> testfile_3['graph']['edges'][1]['source']
3
>>> testfile_3['graph']['edges'][1]['target']
76
>>> testfile_3['graph']['edges'][1]['weight']
15
>>> json_to_yaml('testfile3')
>>> file=open('testfile3.yaml','r')
>>> testfile_3_yaml=yaml.load(file)
>>> testfile_3_yaml['graph']['nodes'][1]
{'id': 2}
>>> testfile_3_yaml['graph']['edges'][1]['source']
3
>>> testfile_3_yaml['graph']['edges'][1]['target']
76
>>> testfile_3_yaml['graph']['edges'][1]['weight']
15
>>> json_to_yaml('testfile24')
[Error] Bad Input File!
>>> json_to_pickle('testfile24')
[Error] Bad Input File!
>>> json_maker('testfile', 0, 200, 10, 0, 0,True)
Traceback (most recent call last):
...
TypeError: json_maker() missing 3 required positional arguments: 'direct', 'self_loop', and 'multigraph'
>>> json_to_pickle('testfile3')
>>> testfile_3_p=pickle.load( open( 'testfile3.p', 'rb' ) )
>>> testfile_3_p['graph']['edges'][1]['source']
3
>>> testfile_3_p['graph']['edges'][1]['target']
76
>>> testfile_3_p['graph']['edges'][1]['weight']
15
>>> random.seed(2)
>>> csv_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> random.seed(2)
>>> gml_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.gml','r')
>>> print(file.read())
graph
[
multigraph 0
directed 1
node
[
id 1
label "Node 1"
]
node
[
id 2
label "Node 2"
]
node
[
id 3
label "Node 3"
]
node
[
id 4
label "Node 4"
]
node
[
id 5
label "Node 5"
]
node
[
id 6
label "Node 6"
]
node
[
id 7
label "Node 7"
]
node
[
id 8
label "Node 8"
]
node
[
id 9
label "Node 9"
]
node
[
id 10
label "Node 10"
]
edge
[
source 4
target 3
value -64
]
edge
[
source 5
target 6
value 148
]
edge
[
source 5
target 9
value 110
]
edge
[
source 6
target 10
value -139
]
edge
[
source 7
target 7
value 7
]
edge
[
source 8
target 2
value -97
]
edge
[
source 9
target 1
value 60
]
]
>>> random.seed(2)
>>> gexf_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.gexf', 'r')
>>> random.seed(2)
>>> mtx_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> random.seed(2)
>>> tsv_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.mtx','r')
>>> print(file.read())
%%MatrixMarket matrix coordinate real general
10 10 7
4 3 -64
5 6 148
5 9 110
6 10 -139
7 7 7
8 2 -97
9 1 60
<BLANKLINE>
>>> random.seed(2)
>>> gdf_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.gdf','r')
>>> print(file.read())
nodedef>name VARCHAR,label VARCHAR
1,Node1
2,Node2
3,Node3
4,Node4
5,Node5
6,Node6
7,Node7
8,Node8
9,Node9
10,Node10
edgedef>node1 VARCHAR,node2 VARCHAR,weight DOUBLE
4,3,-64
5,6,148
5,9,110
6,10,-139
7,7,7
8,2,-97
9,1,60
<BLANKLINE>
>>> random.seed(2)
>>> gl_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.gl','r')
>>> print(file.read())
4 3:-64
5 6:148 9:110
6 10:-139
7 7:7
8 2:-97
9 1:60
<BLANKLINE>
>>> file=open('testfile.csv','r')
>>> print(file.read())
4,3,-64
5,6,148
5,9,110
6,10,-139
7,7,7
8,2,-97
9,1,60
<BLANKLINE>
>>> random.seed(4)
>>> csv_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.csv','r')
>>> print(file.read())
1,10,46
2,18,5
2,4,25
2,22,-48
4,23,-17
5,7,-13
7,15,10
7,17,-40
8,8,-42
8,25,11
9,29,-5
10,3,-36
10,27,-48
11,13,-27
11,26,-27
11,21,14
11,16,-2
14,20,-44
14,14,43
14,12,26
15,28,-11
16,30,-40
16,24,20
19,19,7
20,12,-29
20,1,22
22,24,20
22,23,-9
23,18,18
23,27,28
24,6,-24
25,17,23
27,6,-50
28,21,28
28,13,-13
<BLANKLINE>
>>> random.seed(4)
>>> csv_maker('testfile4',0,50.2,30,0,4,True,True,True,False)
41
>>> file=open('testfile4.csv','r')
>>> print(file.read())
1,10,36.2
2,6,3.3
2,16,-40.2
2,29,11.1
3,17,-39.1
3,7,-10.8
3,3,-40.2
4,12,-14.5
5,9,-33.7
5,28,8.9
6,21,47.4
6,27,-0.4
6,15,-42.6
7,20,-30.1
8,23,11.7
8,18,4.1
8,25,-26.0
9,24,50.1
9,13,20.7
9,14,-13.9
10,26,-31.8
10,19,-5.1
12,22,6.1
13,30,-1.3
14,11,-36.9
14,22,16.2
15,16,-43.2
15,11,-31.0
16,19,12.6
17,21,18.2
18,18,-39.3
18,25,-28.7
19,23,-46.0
24,20,27.4
25,4,-50.1
25,1,-38.8
26,27,-10.1
26,30,-24.7
26,29,-12.5
27,28,-9.4
29,20,26.4
<BLANKLINE>
>>> random.seed(20)
>>> csv_maker('testfile3',10,30,100,0,4,False,True,True,False)
137
>>> file=open('testfile3.csv','r')
>>> print(file.read())
1,34,30
3,76,15
3,5,23
4,13,13
4,21,20
4,67,28
5,60,16
5,32,20
5,92,20
6,64,12
6,94,26
7,62,12
7,36,28
7,42,11
8,20,12
9,47,19
10,49,15
10,27,10
11,48,17
11,51,11
13,58,14
13,70,29
14,37,30
14,61,27
14,87,15
15,84,13
16,83,28
17,45,17
17,24,29
17,18,26
18,59,15
19,98,12
21,2,30
21,99,20
22,69,26
22,96,11
22,88,15
24,79,20
24,12,12
24,82,13
26,50,30
26,30,19
29,52,26
31,25,26
32,68,14
33,65,13
33,78,13
33,55,17
34,63,13
35,44,27
35,57,14
37,74,10
37,41,16
37,100,30
38,72,13
38,56,16
39,91,19
39,43,13
41,28,22
41,81,19
42,90,13
42,46,28
42,97,16
45,86,10
45,53,18
46,85,13
46,23,11
47,71,29
48,95,12
48,77,19
48,93,11
49,75,22
50,73,18
50,40,24
50,54,28
51,80,17
51,66,19
51,89,20
52,58,29
52,16,21
52,43,12
53,8,13
53,98,17
54,55,10
56,62,26
56,27,10
57,70,26
58,44,22
59,90,27
59,91,19
59,78,29
60,87,12
60,92,25
61,69,14
61,79,17
62,25,21
63,97,27
63,29,30
65,9,26
65,64,21
66,67,27
66,95,19
66,93,30
68,30,18
70,83,12
70,99,15
71,31,17
71,89,20
73,36,18
75,72,12
76,2,26
76,12,25
76,86,22
78,23,19
78,100,27
79,40,24
80,84,26
80,80,14
81,20,16
82,15,16
82,88,22
83,19,19
84,85,13
84,28,16
85,77,16
85,94,23
86,1,21
87,74,15
87,96,19
90,93,22
92,49,14
95,98,26
95,55,11
97,38,28
99,19,29
99,89,24
100,40,11
<BLANKLINE>
>>> csv_maker('testfile', 0, 200, 10, 0,0,True)
Traceback (most recent call last):
...
TypeError: csv_maker() missing 3 required positional arguments: 'direct', 'self_loop', and 'multigraph'
>>> random.seed(2)
>>> wel_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.wel','r')
>>> print(file.read())
4 3 -64
5 6 148
5 9 110
6 10 -139
7 7 7
8 2 -97
9 1 60
<BLANKLINE>
>>> random.seed(4)
>>> wel_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.wel','r')
>>> print(file.read())
1 10 46
2 18 5
2 4 25
2 22 -48
4 23 -17
5 7 -13
7 15 10
7 17 -40
8 8 -42
8 25 11
9 29 -5
10 3 -36
10 27 -48
11 13 -27
11 26 -27
11 21 14
11 16 -2
14 20 -44
14 14 43
14 12 26
15 28 -11
16 30 -40
16 24 20
19 19 7
20 12 -29
20 1 22
22 24 20
22 23 -9
23 18 18
23 27 28
24 6 -24
25 17 23
27 6 -50
28 21 28
28 13 -13
<BLANKLINE>
>>> random.seed(20)
>>> wel_maker('testfile3',10,30,100,0,4,False,True,True,False)
137
>>> file=open('testfile3.wel','r')
>>> print(file.read())
1 34 30
3 76 15
3 5 23
4 13 13
4 21 20
4 67 28
5 60 16
5 32 20
5 92 20
6 64 12
6 94 26
7 62 12
7 36 28
7 42 11
8 20 12
9 47 19
10 49 15
10 27 10
11 48 17
11 51 11
13 58 14
13 70 29
14 37 30
14 61 27
14 87 15
15 84 13
16 83 28
17 45 17
17 24 29
17 18 26
18 59 15
19 98 12
21 2 30
21 99 20
22 69 26
22 96 11
22 88 15
24 79 20
24 12 12
24 82 13
26 50 30
26 30 19
29 52 26
31 25 26
32 68 14
33 65 13
33 78 13
33 55 17
34 63 13
35 44 27
35 57 14
37 74 10
37 41 16
37 100 30
38 72 13
38 56 16
39 91 19
39 43 13
41 28 22
41 81 19
42 90 13
42 46 28
42 97 16
45 86 10
45 53 18
46 85 13
46 23 11
47 71 29
48 95 12
48 77 19
48 93 11
49 75 22
50 73 18
50 40 24
50 54 28
51 80 17
51 66 19
51 89 20
52 58 29
52 16 21
52 43 12
53 8 13
53 98 17
54 55 10
56 62 26
56 27 10
57 70 26
58 44 22
59 90 27
59 91 19
59 78 29
60 87 12
60 92 25
61 69 14
61 79 17
62 25 21
63 97 27
63 29 30
65 9 26
65 64 21
66 67 27
66 95 19
66 93 30
68 30 18
70 83 12
70 99 15
71 31 17
71 89 20
73 36 18
75 72 12
76 2 26
76 12 25
76 86 22
78 23 19
78 100 27
79 40 24
80 84 26
80 80 14
81 20 16
82 15 16
82 88 22
83 19 19
84 85 13
84 28 16
85 77 16
85 94 23
86 1 21
87 74 15
87 96 19
90 93 22
92 49 14
95 98 26
95 55 11
97 38 28
99 19 29
99 89 24
100 40 11
<BLANKLINE>
>>> wel_maker('testfile', 0, 200, 10, 0,0,True)
Traceback (most recent call last):
...
TypeError: wel_maker() missing 3 required positional arguments: 'direct', 'self_loop', and 'multigraph'
>>> random.seed(2)
>>> lp_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.lp','r')
>>> print(file.read())
node(1).
node(2).
node(3).
node(4).
node(5).
node(6).
node(7).
node(8).
node(9).
node(10).
edge(4,3,-64).
edge(5,6,148).
edge(5,9,110).
edge(6,10,-139).
edge(7,7,7).
edge(8,2,-97).
edge(9,1,60).
<BLANKLINE>
>>> random.seed(4)
>>> lp_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.lp','r')
>>> print(file.read())
node(1).
node(2).
node(3).
node(4).
node(5).
node(6).
node(7).
node(8).
node(9).
node(10).
node(11).
node(12).
node(13).
node(14).
node(15).
node(16).
node(17).
node(18).
node(19).
node(20).
node(21).
node(22).
node(23).
node(24).
node(25).
node(26).
node(27).
node(28).
node(29).
node(30).
edge(1,10,46).
edge(2,18,5).
edge(2,4,25).
edge(2,22,-48).
edge(4,23,-17).
edge(5,7,-13).
edge(7,15,10).
edge(7,17,-40).
edge(8,8,-42).
edge(8,25,11).
edge(9,29,-5).
edge(10,3,-36).
edge(10,27,-48).
edge(11,13,-27).
edge(11,26,-27).
edge(11,21,14).
edge(11,16,-2).
edge(14,20,-44).
edge(14,14,43).
edge(14,12,26).
edge(15,28,-11).
edge(16,30,-40).
edge(16,24,20).
edge(19,19,7).
edge(20,12,-29).
edge(20,1,22).
edge(22,24,20).
edge(22,23,-9).
edge(23,18,18).
edge(23,27,28).
edge(24,6,-24).
edge(25,17,23).
edge(27,6,-50).
edge(28,21,28).
edge(28,13,-13).
<BLANKLINE>
>>> input_dic=get_input(input_func=lambda x: str(len(x)))
>>> input_dic['sign']
True
>>> input_dic['vertices']
20
>>> input_dic['min_edge']
20
>>> input_dic['min_weight']
15
>>> input_dic['output_format']
1
>>> input_dic['max_weight']
15
>>> input_dic['file_name']
'14'
>>> input_dic['max_edge']
20
>>> random.seed(2)
>>> tgf_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.tgf','r')
>>> print(file.read())
1
2
3
4
5
6
7
8
9
10
#
4 3 -64
5 6 148
5 9 110
6 10 -139
7 7 7
8 2 -97
9 1 60
<BLANKLINE>
>>> random.seed(4)
>>> tgf_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.tgf','r')
>>> print(file.read())
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
#
1 10 46
2 18 5
2 4 25
2 22 -48
4 23 -17
5 7 -13
7 15 10
7 17 -40
8 8 -42
8 25 11
9 29 -5
10 3 -36
10 27 -48
11 13 -27
11 26 -27
11 21 14
11 16 -2
14 20 -44
14 14 43
14 12 26
15 28 -11
16 30 -40
16 24 20
19 19 7
20 12 -29
20 1 22
22 24 20
22 23 -9
23 18 18
23 27 28
24 6 -24
25 17 23
27 6 -50
28 21 28
28 13 -13
<BLANKLINE>
>>> random.seed(2)
>>> dl_maker('testfile', 0, 200, 10, 0, 2, True,True,True,False)
7
>>> file=open('testfile.dl','r')
>>> print(file.read())
dl
format=edgelist1
n=10
data:
4 3 -64
5 6 148
5 9 110
6 10 -139
7 7 7
8 2 -97
9 1 60
<BLANKLINE>
>>> random.seed(4)
>>> dl_maker('testfile2',0,50,30,0,4,True,True,True,False)
35
>>> file=open('testfile2.dl','r')
>>> print(file.read())
dl
format=edgelist1
n=30
data:
1 10 46
2 18 5
2 4 25
2 22 -48
4 23 -17
5 7 -13
7 15 10
7 17 -40
8 8 -42
8 25 11
9 29 -5
10 3 -36
10 27 -48
11 13 -27
11 26 -27
11 21 14
11 16 -2
14 20 -44
14 14 43
14 12 26
15 28 -11
16 30 -40
16 24 20
19 19 7
20 12 -29
20 1 22
22 24 20
22 23 -9
23 18 18
23 27 28
24 6 -24
25 17 23
27 6 -50
28 21 28
28 13 -13
<BLANKLINE>
>>> file.close()
>>> os.remove('testfile.csv')
>>> os.remove('testfile.gml')
>>> os.remove('testfile.gexf')
>>> os.remove('testfile.tsv')
>>> os.remove('testfile.dl')
>>> os.remove('testfile.gr')
>>> os.remove('testfile.json')
>>> os.remove('testfile.lp')
>>> os.remove('testfile.p')
>>> os.remove('testfile.tgf')
>>> os.remove('testfile.wel')
>>> os.remove('testfile.yaml')
>>> os.remove('testfile.mtx')
>>> os.remove('testfile.gdf')
>>> os.remove('testfile.gl')
>>> os.remove('testfile2.csv')
>>> os.remove('testfile2.dl')
>>> os.remove('testfile2.gr')
>>> os.remove('testfile2.json')
>>> os.remove('testfile2.lp')
>>> os.remove('testfile2.p')
>>> os.remove('testfile2.tgf')
>>> os.remove('testfile2.wel')
>>> os.remove('testfile2.yaml')
>>> os.remove('testfile3.csv')
>>> os.remove('testfile4.csv')
>>> os.remove('testfile3.gr')
>>> os.remove('testfile3.json')
>>> os.remove('testfile3.p')
>>> os.remove('testfile3.wel')
>>> os.remove('testfile3.yaml')
>>> os.remove('logfile.log')
"""
|
share/tools/ubi_reader/ubi/defines.py
|
zengzhen1994k/leonsioy
| 143 |
89553
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################
# Adapted in part from linux-source-3.2/drivers/mtd/ubi/ubi-media.h
# for use in Python.
# Oct. 2013 by <NAME>
#
# Original copyright notice.
# --------------------------
#
# Copyright (c) International Business Machines Corp., 2006
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: <NAME> (Битюцкий Артём)
# <NAME>
# <NAME>
# <NAME>
# <NAME>
#
#############################################################
import struct
# Initial CRC32 checksum value.
UBI_CRC32_INIT = 4294967295 #0xFFFFFFFF
# Max number of volumes allowed.
UBI_MAX_VOLUMES = 128
# Internal Volume ID start.
UBI_INTERNAL_VOL_START = 2147479551
# Erase Counter (EC) header.
UBI_EC_HDR_MAGIC = '\x55\x42\x49\x23' # UBI#
EC_HDR_FORMAT = '>4sB3sQIII32sI'
EC_HDR_FIELDS = ['magic', # Magic string UBI#
'version', # UBI version meant to accept this image.
'padding', # Reserved for future, zeros.
'ec', # Erase counter
'vid_hdr_offset', # Where the VID header starts.
'data_offset', # Where user data starts.
'image_seq', # Image sequence number
'padding2', # Reserved for future, zeros.
'hdr_crc'] # EC header crc32 checksum.
UBI_EC_HDR_SZ = struct.calcsize(EC_HDR_FORMAT) # 64
# Volume ID header.
UBI_VID_HDR_MAGIC ='\x55\x42\x49\x21' # UBI!
VID_HDR_FORMAT = '>4sBBBBII4sIIII4sQ12sI'
VID_HDR_FIELDS = ['magic', # Magic string UBI!
'version', # UBI version meant to accept this image.
'vol_type', # Volume type, Dynamic/Static
'copy_flag', # If this is a copied PEB b/c of wear leveling.
'compat', # Compatibility of this volume UBI_COMPAT_*
'vol_id', # ID of this volume.
'lnum', # LEB number.
'padding', # Reserved for future, zeros.
'data_size', # How many bytes of data this contains.
# Used for static types only.
'used_ebs', # Total num of used LEBs in this volume.
'data_pad', # How many bytes at end of LEB are not used.
'data_crc', # CRC32 checksum of data, static type only.
'padding2', # Reserved for future, zeros.
'sqnum', # Sequence number.
'padding3', # Reserved for future, zeros.
'hdr_crc'] # VID header CRC32 checksum.
UBI_VID_HDR_SZ = struct.calcsize(VID_HDR_FORMAT) # 64
# Volume table records.
VTBL_REC_FORMAT = '>IIIBBH128sB23sI'
VTBL_REC_FIELDS = ['reserved_pebs', # How many PEBs reserved for this volume.
'alignment', # Volume alignment.
'data_pad', # Number of unused bytes at end of PEB.
'vol_type', # Volume type, static/dynamic.
'upd_marker', # If vol update started but not finished.
'name_len', # Length of name.
'name', # Volume name.
'flags', # Volume flags
'padding', # Reserved for future, zeros.
'crc'] # Vol record CRC32 checksum.
UBI_VTBL_REC_SZ = struct.calcsize(VTBL_REC_FORMAT) # 172
# Volume Identifier Header
UBI_VID_DYNAMIC = 1 # Volume can be resized.
UBI_VID_STATIC = 2 # Volume can not be resized.
PRINT_VOL_TYPE_LIST = [0, 'dynamic', 'static']
# Volume table record
UBI_VTBL_AUTORESIZE_FLG = 1
UBI_COMPAT_DELETE = 1 # Delete this internal volume before anything written.
UBI_COMPAT_RO = 2 # Attach this device in read-only mode.
UBI_COMPAT_PRESERVE = 4 # Preserve this internal volume - touch nothing.
UBI_COMPAT_REJECT = 5 # Reject this UBI image
PRINT_COMPAT_LIST = [0, 'Delete', 'Read Only', 0, 'Preserve', 'Reject']
# File chunk size for reads.
FILE_CHUNK_SZ = 5 * 1024 * 1024
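# Illustrative sketch (raw_bytes below is a hypothetical buffer read from a UBI
# image): each *_FORMAT string pairs positionally with its *_FIELDS list, so a
# header can be decoded as
#   values = struct.unpack(EC_HDR_FORMAT, raw_bytes[:UBI_EC_HDR_SZ])
#   ec_header = dict(zip(EC_HDR_FIELDS, values))
# and the same pattern applies to VID_HDR_FORMAT/VID_HDR_FIELDS and
# VTBL_REC_FORMAT/VTBL_REC_FIELDS.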
|
tests/test_schema_editor_partitioning.py
|
adamchainz/django-postgres-extra
| 529 |
89574
|
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.types import PostgresPartitioningMethod
from . import db_introspection
from .fake_model import define_fake_partitioned_model
def test_schema_editor_create_delete_partitioned_model_range():
"""Tests whether creating a partitioned model and adding a list partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.RANGE
key = ["timestamp"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_range_partition(model, "pt1", "2019-01-01", "2019-02-01")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
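# Note (an assumption about the generated DDL, not asserted by the test):
# add_range_partition above corresponds to PostgreSQL declarative partitioning,
# roughly
#   CREATE TABLE <table>_pt1 PARTITION OF <table>
#       FOR VALUES FROM ('2019-01-01') TO ('2019-02-01');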
def test_schema_editor_create_delete_partitioned_model_list():
"""Tests whether creating a partitioned model and adding a range partition
to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.LIST
key = ["category"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_list_partition(model, "pt1", ["car", "boat"])
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
def test_schema_editor_create_delete_partitioned_model_default():
"""Tests whether creating a partitioned model and adding a default
partition to it using the :see:PostgresSchemaEditor works."""
method = PostgresPartitioningMethod.LIST
key = ["category"]
model = define_fake_partitioned_model(
{"name": models.TextField(), "category": models.TextField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_default_partition(model, "default")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert table.name == model._meta.db_table
assert table.method == method
assert table.key == key
assert table.partitions[0].full_name == model._meta.db_table + "_default"
schema_editor.delete_partitioned_model(model)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert not table
partitions = db_introspection.get_partitions(model._meta.db_table)
assert len(partitions) == 0
def test_schema_editor_create_partitioned_model_no_method():
"""Tests whether its possible to create a partitioned model without
explicitly setting a partitioning method.
The default is "range" so setting one explicitely should not be
needed.
"""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"key": ["timestamp"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
pt = db_introspection.get_partitioned_table(model._meta.db_table)
assert pt.method == PostgresPartitioningMethod.RANGE
assert len(pt.partitions) == 0
def test_schema_editor_create_partitioned_model_no_key():
"""Tests whether trying to create a partitioned model without a
partitioning key raises :see:ImproperlyConfigured as its not possible to
create a partitioned model without one and we cannot have a sane
default."""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": PostgresPartitioningMethod.RANGE},
)
schema_editor = PostgresSchemaEditor(connection)
with pytest.raises(ImproperlyConfigured):
schema_editor.create_partitioned_model(model)
def test_schema_editor_add_range_partition():
"""Tests whether adding a range partition works."""
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"key": ["timestamp"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_range_partition(
model,
name="mypartition",
from_values="2019-1-1",
to_values="2019-2-1",
comment="test",
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
def test_schema_editor_add_list_partition():
"""Tests whether adding a list partition works."""
model = define_fake_partitioned_model(
{"name": models.TextField()},
{"method": PostgresPartitioningMethod.LIST, "key": ["name"]},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_list_partition(
model, name="mypartition", values=["1"], comment="test"
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
@pytest.mark.parametrize(
"method,key",
[
(PostgresPartitioningMethod.RANGE, ["timestamp"]),
(PostgresPartitioningMethod.LIST, ["name"]),
],
)
def test_schema_editor_add_default_partition(method, key):
model = define_fake_partitioned_model(
{"name": models.TextField(), "timestamp": models.DateTimeField()},
{"method": method, "key": key},
)
schema_editor = PostgresSchemaEditor(connection)
schema_editor.create_partitioned_model(model)
schema_editor.add_default_partition(
model, name="mypartition", comment="test"
)
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 1
assert table.partitions[0].name == "mypartition"
assert (
table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
)
assert table.partitions[0].comment == "test"
schema_editor.delete_partition(model, "mypartition")
table = db_introspection.get_partitioned_table(model._meta.db_table)
assert len(table.partitions) == 0
|
test/test_index.py
|
Scartography/mapchete
| 161 |
89581
|
import fiona
import numpy as np
import os
import pytest
import rasterio
import mapchete
from mapchete.index import zoom_index_gen
from mapchete.io import get_boto3_bucket
@pytest.mark.remote
def test_remote_indexes(mp_s3_tmpdir, gtiff_s3):
zoom = 7
gtiff_s3.dict.update(zoom_levels=zoom)
def gen_indexes_and_check():
# generate indexes
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
geojson=True,
txt=True,
vrt=True,
)
)
# assert GeoJSON exists
with fiona.open(
os.path.join(mp.config.output.path, "%s.geojson" % zoom)
) as src:
assert len(src) == 2
# assert TXT exists
txt_index = os.path.join(mp.config.output.path, "%s.txt" % zoom)
bucket = get_boto3_bucket(txt_index.split("/")[2])
key = "/".join(txt_index.split("/")[3:])
for obj in bucket.objects.filter(Prefix=key):
if obj.key == key:
content = obj.get()["Body"].read().decode()
assert len([l + "\n" for l in content.split("\n") if l]) == 2
# assert VRT exists
with rasterio.open(os.path.join(mp.config.output.path, "%s.vrt" % zoom)) as src:
assert src.read().any()
with mapchete.open(gtiff_s3.dict) as mp:
# write output data
mp.batch_process(zoom=zoom)
# generate indexes and check
gen_indexes_and_check()
        # generate indexes again and assert nothing has changed
gen_indexes_and_check()
def test_vrt(mp_tmpdir, cleantopo_br):
zoom = 8
with mapchete.open(
dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
output_tiles = list(
mp.config.output_pyramid.tiles_from_bounds(
mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
)
)
bounds = (
min([t.left for t in output_tiles]),
min([t.bottom for t in output_tiles]),
max([t.right for t in output_tiles]),
max([t.top for t in output_tiles]),
)
# bounds = mp.config.effective_bounds
vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
with rasterio.open(vrt_index) as vrt:
assert vrt.driver == "VRT"
assert vrt.dtypes[0] == "uint16"
assert vrt.meta["dtype"] == "uint16"
assert vrt.count == 1
assert vrt.nodata == 0
assert vrt.bounds == bounds
vrt_data = vrt.read()
assert vrt_data.any()
# generate a VRT using GDAL and compare
out_dir = os.path.join(mp_tmpdir, "cleantopo_br")
temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
temp_vrt,
out_dir,
zoom,
)
os.system(gdalbuildvrt)
with rasterio.open(temp_vrt, "r") as gdal_vrt:
assert gdal_vrt.dtypes[0] == "uint16"
assert gdal_vrt.meta["dtype"] == "uint16"
assert gdal_vrt.count == 1
assert gdal_vrt.nodata == 0
assert gdal_vrt.bounds == bounds
gdal_vrt_data = gdal_vrt.read()
assert np.array_equal(vrt_data, gdal_vrt_data)
# make sure handling an existing VRT works
with mapchete.open(
dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
def test_vrt_mercator(mp_tmpdir, cleantopo_br_mercator):
zoom = 8
with mapchete.open(
dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
output_tiles = list(
mp.config.output_pyramid.tiles_from_bounds(
mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
)
)
bounds = (
min([t.left for t in output_tiles]),
min([t.bottom for t in output_tiles]),
max([t.right for t in output_tiles]),
max([t.top for t in output_tiles]),
)
# bounds = mp.config.effective_bounds
vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
with rasterio.open(vrt_index) as vrt:
assert vrt.driver == "VRT"
assert vrt.dtypes[0] == "uint16"
assert vrt.meta["dtype"] == "uint16"
assert vrt.count == 1
assert vrt.nodata == 0
for vrt_b, b in zip(vrt.bounds, bounds):
assert round(vrt_b, 6) == round(b, 6)
vrt_data = vrt.read()
assert vrt_data.any()
# generate a VRT using GDAL and compare
out_dir = os.path.join(mp_tmpdir, "cleantopo_br_mercator")
temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
temp_vrt,
out_dir,
zoom,
)
os.system(gdalbuildvrt)
with rasterio.open(temp_vrt, "r") as gdal_vrt:
assert gdal_vrt.dtypes[0] == "uint16"
assert gdal_vrt.meta["dtype"] == "uint16"
assert gdal_vrt.count == 1
assert gdal_vrt.nodata == 0
for vrt_b, b in zip(vrt.bounds, bounds):
assert round(vrt_b, 6) == round(b, 6)
gdal_vrt_data = gdal_vrt.read()
assert np.array_equal(vrt_data, gdal_vrt_data)
assert gdal_vrt_data.any()
# make sure handling an existing VRT works
with mapchete.open(
dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
) as mp:
# generate output
mp.batch_process(zoom=zoom)
# generate index
list(
zoom_index_gen(
mp=mp,
zoom=zoom,
out_dir=mp.config.output.path,
vrt=True,
)
)
|
vscode/utils.py
|
TTitcombe/vscode-ext
| 140 |
89588
|
<gh_stars>100-1000
from typing import Optional
__all__ = (
"log",
"camel_case_to_snake_case",
"snake_case_to_camel_case",
"snake_case_to_title_case",
"python_condition_to_js_condition",
)
def log(*args, **kwargs):
kwargs["flush"] = True
print(*args, **kwargs)
def camel_case_to_snake_case(text: str) -> str:
return "".join("_" + i.lower() if i.isupper() else i for i in text).lstrip("_")
def snake_case_to_camel_case(text: Optional[str]) -> Optional[str]:
if text is None:
return None
temp = text.split("_")
return temp[0] + "".join(ele.title() for ele in temp[1:])
def snake_case_to_title_case(text: Optional[str]) -> Optional[str]:
if text is None:
return None
return text.replace("_", " ").title()
def python_condition_to_js_condition(condition: Optional[str]) -> Optional[str]:
if condition is None:
return None
condition = " ".join(
i if "_" not in i else snake_case_to_camel_case(i) for i in condition.split(" ")
)
condition = condition.replace(" and ", " && ")
condition = condition.replace(" or ", " || ")
if " not " in condition:
if "(" not in condition or ")" not in condition:
raise SyntaxError(
"Use parenthesis '()' while using 'not' otherwise your conditions might not work as expected!"
)
else:
condition = condition.replace(" not ", " !")
return condition
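# Illustrative usage sketch; the identifiers and the condition string below are
# made up, and the block only runs when this module is executed directly.
if __name__ == "__main__":
    print(camel_case_to_snake_case("myVariableName"))        # my_variable_name
    print(snake_case_to_camel_case("my_variable_name"))      # myVariableName
    print(snake_case_to_title_case("my_variable_name"))      # My Variable Name
    print(python_condition_to_js_condition(
        "editor_focus and not (is_debug)"))                  # editorFocus && !(isDebug)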
|
model-optimizer/extensions/front/mxnet/squeeze_ext.py
|
monroid/openvino
| 2,406 |
89638
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from mo.front.extractor import FrontExtractorOp
from mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from mo.ops.squeeze import Squeeze
class SqueezeExtractor(FrontExtractorOp):
op = 'squeeze'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
Squeeze.update_node_stat(node, {'squeeze_dims': attrs.int("axis", None), 'keep_at_least_1d': True})
return cls.enabled
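# Illustrative note (the MXNet symbol below is hypothetical): for a layer such as
#   mx.sym.squeeze(data, axis=1)
# get_mxnet_layer_attrs exposes axis=1, so the node is rewritten into a Squeeze
# operation with squeeze_dims=1 and keep_at_least_1d=True.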
|
recipes/Python/161816_Enable_xmlrpclib_maintaJSP/recipe-161816.py
|
tdiprima/code
| 2,023 |
89657
|
"""A little Transport layer to maintain the JSESSIONID cookie that
JavaServer Pages use to maintain a session. I'd like to use this
to make xmlrpclib session-aware.
Sample usage:
server = Server("http://foobar.com/baz/servlet/xmlrpc.class")
print server.get_jsession_id();
print server.test.sayHello()
print server.get_jsession_id();
print server.test.sayGoodbye()
print server.get_jsession_id();
"""
import xmlrpclib
import Cookie
class JspAuthTransport(xmlrpclib.Transport):
def __init__(self):
self.__cookies = Cookie.SmartCookie()
def request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
self.send_request(h, handler, request_body)
self.__sendJsessionCookie(h)
self.send_host(h, host)
self.send_user_agent(h)
self.send_content(h, request_body)
errcode, errmsg, headers = h.getreply()
if errcode != 200:
raise xmlrpclib.ProtocolError(
host + handler,
errcode, errmsg,
headers
)
self.verbose = verbose
self.__processCookies(headers)
return self.parse_response(h.getfile())
def get_jsession_id(self):
if self.__cookies.has_key('JSESSIONID'):
return self.__cookies['JSESSIONID'].value
return None
def __sendJsessionCookie(self, connection):
if self.__cookies.has_key('JSESSIONID'):
connection.putheader('Cookie', '$Version="1"; JSESSIONID=%s'
% self.get_jsession_id())
def __processCookies(self, headers):
if headers.getheader('Set-Cookie'):
self.__cookies.load(headers.getheader('Set-Cookie'))
def send_content(self, connection, request_body):
connection.putheader("Content-Type", "text/xml")
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
connection.send(request_body)
class Server:
"""A little wrapper to keep the transport and serverproxy together."""
def __init__(self, uri):
self.transport = JspAuthTransport()
self.serverproxy = xmlrpclib.ServerProxy(uri, self.transport)
def __getattr__(self, attr):
return getattr(self.serverproxy, attr)
def get_jsession_id(self):
return self.transport.get_jsession_id()
def _test2():
server = Server("http://www.oreillynet.com/meerkat/xml-rpc/server.php")
print server.system.listMethods()
if __name__ == '__main__':
_test2()
|
tests/fakes/component.py
|
linshoK/pysen
| 423 |
89686
|
import enum
import pathlib
from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple
from pysen import ComponentBase
from pysen.command import CommandBase
from pysen.diagnostic import Diagnostic
from pysen.reporter import Reporter
from pysen.runner_options import PathContext, RunOptions
from pysen.setting import SettingFile
class Operation(enum.Enum):
ADD = "+"
MUL = "*"
class FakeCommand(CommandBase):
def __init__(
self, coef: int, op: Operation, ref: List[float], options: RunOptions
) -> None:
self.coef = coef
self.op = op
self.ref = ref
self.options = options
assert len(ref) == 1
@property
def name(self) -> str:
return f"{self.op.value} {self.coef}"
def __call__(self, reporter: Reporter) -> int:
value = self.ref[0]
coef = float(self.coef)
if self.op == Operation.ADD:
value += coef
elif self.op == Operation.MUL:
value *= coef
else:
raise AssertionError(f"invalid op: {self.op}")
self.ref[0] = value
if value >= 0.0:
return 0
else:
if self.options.require_diagnostics:
reporter.report_diagnostics(
[Diagnostic(pathlib.Path(".").resolve(), message="")]
)
return 1
class FakeComponent(ComponentBase):
def __init__(
self,
name: str,
ops: Dict[str, Tuple[int, Operation]],
expected_base_dir: Optional[pathlib.Path],
expected_settings_dir: Optional[pathlib.Path],
ref: List[float],
) -> None:
self._name = name
self._ops = ops
self._expected_base_dir = expected_base_dir
self._expected_settings_dir = expected_settings_dir
self._ref = ref
assert len(ref) == 1
@property
def name(self) -> str:
return self._name
def export_settings(
self,
paths: PathContext,
files: DefaultDict[str, SettingFile],
) -> None:
if self._expected_base_dir is not None:
assert paths.base_dir == self._expected_base_dir
if self._expected_settings_dir is not None:
assert paths.settings_dir == self._expected_settings_dir
for name, op in self._ops.items():
fname = f"{name}.yaml"
setting_file = files[fname]
setting_file.set_section((self.name,), {"coef": op[0], "op": op[1].value})
@property
def targets(self) -> Sequence[str]:
return list(self._ops.keys())
def create_command(
self, target: str, paths: PathContext, options: RunOptions
) -> CommandBase:
if self._expected_base_dir is not None:
assert paths.base_dir == self._expected_base_dir
if self._expected_settings_dir is not None:
assert paths.settings_dir == self._expected_settings_dir
op = self._ops[target]
return FakeCommand(op[0], op[1], self._ref, options)
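# Hedged usage sketch, not part of the original fixtures: it assumes
# RunOptions(require_diagnostics=False) is a valid constructor call. The
# reporter argument is never touched on this path because the result stays
# non-negative, so None is passed purely for illustration.
if __name__ == "__main__":
    shared_ref = [2.0]
    command = FakeCommand(3, Operation.MUL, shared_ref, RunOptions(require_diagnostics=False))
    exit_code = command(None) # type: ignore[arg-type]
    assert exit_code == 0
    assert shared_ref == [6.0] # the command mutates the shared ref in place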
|
utils/torch_utils.py
|
Wassouli/projet-prat-oceano
| 196 |
89751
|
<gh_stars>100-1000
import torch
import shutil
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import numbers
import random
import math
from torch.optim import Optimizer
def init_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def weight_parameters(module):
return [param for name, param in module.named_parameters() if 'weight' in name]
def bias_parameters(module):
return [param for name, param in module.named_parameters() if 'bias' in name]
def load_checkpoint(model_path):
weights = torch.load(model_path)
epoch = None
if 'epoch' in weights:
epoch = weights.pop('epoch')
if 'state_dict' in weights:
state_dict = (weights['state_dict'])
else:
state_dict = weights
return epoch, state_dict
def save_checkpoint(save_path, states, file_prefixes, is_best, filename='ckpt.pth.tar'):
def run_one_sample(save_path, state, prefix, is_best, filename):
torch.save(state, save_path / '{}_{}'.format(prefix, filename))
if is_best:
shutil.copyfile(save_path / '{}_{}'.format(prefix, filename),
save_path / '{}_model_best.pth.tar'.format(prefix))
if not isinstance(file_prefixes, str):
for (prefix, state) in zip(file_prefixes, states):
run_one_sample(save_path, state, prefix, is_best, filename)
else:
run_one_sample(save_path, states, file_prefixes, is_best, filename)
def restore_model(model, pretrained_file):
epoch, weights = load_checkpoint(pretrained_file)
model_keys = set(model.state_dict().keys())
weight_keys = set(weights.keys())
# load weights by name
weights_not_in_model = sorted(list(weight_keys - model_keys))
model_not_in_weights = sorted(list(model_keys - weight_keys))
if len(model_not_in_weights):
print('Warning: There are weights in model but not in pre-trained.')
for key in (model_not_in_weights):
print(key)
weights[key] = model.state_dict()[key]
if len(weights_not_in_model):
print('Warning: There are pre-trained weights not in model.')
for key in (weights_not_in_model):
print(key)
from collections import OrderedDict
new_weights = OrderedDict()
for key in model_keys:
new_weights[key] = weights[key]
weights = new_weights
model.load_state_dict(weights)
return model
class AdamW(Optimizer):
"""Implements AdamW algorithm.
It has been proposed in `Fixing Weight Decay Regularization in Adam`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    .. _Fixing Weight Decay Regularization in Adam:
        https://arxiv.org/abs/1711.05101
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'AdamW does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# according to the paper, this penalty should come after the bias correction
# if group['weight_decay'] != 0:
# grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'], p.data)
return loss
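# Hedged usage sketch, not part of the original utilities: it fits a tiny
# linear layer with the AdamW implementation above and round-trips it through
# save_checkpoint/restore_model. It assumes a torch version that still accepts
# the legacy add_/addcmul_/addcdiv_ signatures used in AdamW.step; shapes,
# hyperparameters and paths below are illustrative only.
if __name__ == '__main__':
    import tempfile
    from pathlib import Path
    init_seed(0)
    model = nn.Linear(4, 1)
    optimizer = AdamW(
        [{'params': weight_parameters(model), 'weight_decay': 1e-4},
         {'params': bias_parameters(model), 'weight_decay': 0.0}],
        lr=1e-3)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    for _ in range(10):
        optimizer.zero_grad()
        loss = F.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
    # save under <tmpdir>/demo_ckpt.pth.tar and restore into a fresh module
    ckpt_dir = Path(tempfile.mkdtemp())
    save_checkpoint(ckpt_dir, {'epoch': 10, 'state_dict': model.state_dict()},
                    'demo', is_best=False)
    restored = restore_model(nn.Linear(4, 1), ckpt_dir / 'demo_ckpt.pth.tar')
    print('restored parameters:', sum(p.numel() for p in restored.parameters()))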
|
tests/api/test_api.py
|
ludeeus/HARM
| 456 |
89777
|
import os
from homeassistant.core import HomeAssistant
import pytest
from custom_components.hacs.websocket import (
acknowledge_critical_repository,
get_critical_repositories,
hacs_config,
hacs_removed,
hacs_repositories,
hacs_repository,
hacs_repository_data,
hacs_settings,
hacs_status,
)
@pytest.mark.asyncio
async def test_check_local_path(hacs, connection, tmpdir):
hacs.hass = HomeAssistant()
os.makedirs(tmpdir, exist_ok=True)
get_critical_repositories(hacs.hass, connection, {"id": 1})
hacs_config(hacs.hass, connection, {"id": 1})
hacs_removed(hacs.hass, connection, {"id": 1})
hacs_repositories(hacs.hass, connection, {"id": 1})
hacs_repository(hacs.hass, connection, {"id": 1})
hacs_repository_data(hacs.hass, connection, {"id": 1})
hacs_settings(hacs.hass, connection, {"id": 1})
hacs_status(hacs.hass, connection, {"id": 1})
acknowledge_critical_repository(hacs.hass, connection, {"repository": "test/test", "id": 1})
|
mayan/apps/document_parsing/tests/test_parsers.py
|
eshbeata/open-paperless
| 2,743 |
89799
|
<filename>mayan/apps/document_parsing/tests/test_parsers.py
from __future__ import unicode_literals
from django.core.files.base import File
from django.test import override_settings
from common.tests import BaseTestCase
from documents.models import DocumentType
from documents.tests import TEST_DOCUMENT_PATH, TEST_DOCUMENT_TYPE_LABEL
from ..parsers import PopplerParser
@override_settings(OCR_AUTO_OCR=False)
class ParserTestCase(BaseTestCase):
def setUp(self):
super(ParserTestCase, self).setUp()
self.document_type = DocumentType.objects.create(
label=TEST_DOCUMENT_TYPE_LABEL
)
with open(TEST_DOCUMENT_PATH) as file_object:
self.document = self.document_type.new_document(
file_object=File(file_object)
)
def tearDown(self):
self.document_type.delete()
super(ParserTestCase, self).tearDown()
def test_poppler_parser(self):
parser = PopplerParser()
parser.process_document_version(self.document.latest_version)
self.assertTrue(
'Mayan EDMS Documentation' in self.document.pages.first().content.content
)
|
stackimpact/profilers/allocation_profiler.py
|
timgates42/stackimpact-python
| 742 |
89803
|
<gh_stars>100-1000
from __future__ import division
import sys
import time
import re
import threading
from ..runtime import min_version, runtime_info, read_vm_size
from ..utils import timestamp
from ..metric import Metric
from ..metric import Breakdown
if min_version(3, 4):
import tracemalloc
class AllocationProfiler(object):
MAX_TRACEBACK_SIZE = 25 # number of frames
MAX_MEMORY_OVERHEAD = 10 * 1e6 # 10MB
MAX_PROFILED_ALLOCATIONS = 25
def __init__(self, agent):
self.agent = agent
self.ready = False
self.profile = None
self.profile_lock = threading.Lock()
self.overhead_monitor = None
self.start_ts = None
def setup(self):
if self.agent.get_option('allocation_profiler_disabled'):
return
if not runtime_info.OS_LINUX and not runtime_info.OS_DARWIN:
            self.agent.log('Memory allocation profiler is only supported on Linux and OS X.')
return
if not min_version(3, 4):
self.agent.log('Memory allocation profiling is available for Python 3.4 or higher')
return
self.ready = True
def reset(self):
self.profile = Breakdown('Allocation call graph', Breakdown.TYPE_CALLGRAPH)
def start_profiler(self):
self.agent.log('Activating memory allocation profiler.')
def start():
tracemalloc.start(self.MAX_TRACEBACK_SIZE)
self.agent.run_in_main_thread(start)
self.start_ts = time.time()
def monitor_overhead():
if tracemalloc.is_tracing() and tracemalloc.get_tracemalloc_memory() > self.MAX_MEMORY_OVERHEAD:
self.agent.log('Allocation profiler memory overhead limit exceeded: {0} bytes'.format(tracemalloc.get_tracemalloc_memory()))
self.stop_profiler()
self.overhead_monitor = self.agent.schedule(0.5, 0.5, monitor_overhead)
def stop_profiler(self):
self.agent.log('Deactivating memory allocation profiler.')
with self.profile_lock:
if self.overhead_monitor:
self.overhead_monitor.cancel()
self.overhead_monitor = None
if tracemalloc.is_tracing():
snapshot = tracemalloc.take_snapshot()
self.agent.log('Allocation profiler memory overhead {0} bytes'.format(tracemalloc.get_tracemalloc_memory()))
tracemalloc.stop()
self.process_snapshot(snapshot, time.time() - self.start_ts)
def build_profile(self, duration):
with self.profile_lock:
self.profile.normalize(duration)
self.profile.propagate()
self.profile.floor()
self.profile.filter(2, 1000, float("inf"))
return [{
'category': Metric.CATEGORY_MEMORY_PROFILE,
'name': Metric.NAME_UNCOLLECTED_ALLOCATIONS,
'unit': Metric.UNIT_BYTE,
'unit_interval': 1,
'profile': self.profile
}]
def destroy(self):
pass
def process_snapshot(self, snapshot, duration):
stats = snapshot.statistics('traceback')
for stat in stats[:self.MAX_PROFILED_ALLOCATIONS]:
if stat.traceback:
skip_stack = False
for frame in stat.traceback:
if self.agent.frame_cache.is_agent_frame(frame.filename):
skip_stack = True
break
if skip_stack:
continue
current_node = self.profile
for frame in reversed(stat.traceback):
if frame.filename == '<unknown>':
continue
frame_name = '{0}:{1}'.format(frame.filename, frame.lineno)
current_node = current_node.find_or_add_child(frame_name)
current_node.set_type(Breakdown.TYPE_CALLSITE)
current_node.increment(stat.size, stat.count)
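# Illustrative note (not part of the agent, shown as comments because this
# module is only importable as part of the stackimpact package): the snapshot
# consumed by process_snapshot() comes straight from the tracemalloc API, e.g.
#
#     import tracemalloc
#     tracemalloc.start(25)                      # MAX_TRACEBACK_SIZE frames
#     data = [bytearray(1024) for _ in range(100)]
#     snapshot = tracemalloc.take_snapshot()
#     tracemalloc.stop()
#     for stat in snapshot.statistics('traceback')[:3]:
#         print(stat.size, stat.count, stat.traceback.format()[-1])
#
# Each Statistic carries size (bytes), count (blocks) and a traceback whose
# frames are folded into the Breakdown call graph above.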
|
conrad/cli.py
|
vinayak-mehta/conrad
| 244 |
89816
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import shutil
import hashlib
import inspect
import datetime as dt
import click
import requests
import sqlalchemy
import textdistance
from rich.table import Table
from rich.console import Console
try:
import bs4
import git
import pandas
import cerberus
import googleapiclient
except ImportError:
_HAS_CRAWL_REQUIREMENTS = False
else:
_HAS_CRAWL_REQUIREMENTS = True
if _HAS_CRAWL_REQUIREMENTS:
import crawlers
from crawlers import *
from . import __version__, CONRAD_HOME
from .schema import *
from .db import engine, Session
from .models import Base, Event, Reminder
from .utils import apply_schema, initialize_database, validate_events, mkdir
DATE_FMT = "%Y-%m-%dT%H:%M:%S"
def has_less():
return shutil.which("less")
def set_default_pager():
if has_less() is not None:
os.environ["LESS"] = "-SRXF"
def get_events():
click.echo("Fetching latest events!")
events_filename = eval(f"f{LATEST}")
response = requests.get(
f"https://raw.githubusercontent.com/vinayak-mehta/conrad/master/data/{events_filename}",
timeout=5,
)
with open(os.path.join(CONRAD_HOME, events_filename), "w") as f:
f.write(json.dumps(response.json()))
def rebuild_events_table():
events_filename = eval(f"f{LATEST}")
with open(os.path.join(CONRAD_HOME, events_filename), "r") as f:
events = json.load(f)
session = Session()
for event in events:
event_id = hashlib.md5(
(event["name"] + event["start_date"]).encode("utf-8")
).hexdigest()
e = Event(
id=event_id[:6],
name=event["name"],
url=event["url"],
city=event["city"],
state=event["state"],
country=event["country"],
location=event["location"],
cfp_open=event["cfp_open"],
cfp_end_date=dt.datetime.strptime(event["cfp_end_date"], "%Y-%m-%d"),
start_date=dt.datetime.strptime(event["start_date"], "%Y-%m-%d"),
end_date=dt.datetime.strptime(event["end_date"], "%Y-%m-%d"),
source=event["source"],
tags=json.dumps(event["tags"]),
kind=event["kind"],
by=event["by"],
)
session.add(e)
session.commit()
session.close()
def set_update_timestamp(overwrite=False):
updated_at = os.path.join(CONRAD_HOME, ".updated_at")
if overwrite or not os.path.exists(updated_at):
with open(updated_at, "w") as f:
f.write(dt.datetime.now().strftime(DATE_FMT))
def initialize_conrad():
set_update_timestamp()
if not os.path.exists(os.path.join(CONRAD_HOME, "conrad.db")):
get_events()
initialize_database()
rebuild_events_table()
def refresh_conrad():
get_events()
if not os.path.exists(os.path.join(CONRAD_HOME, "conrad.db")):
initialize_database()
else:
Event.__table__.drop(engine)
Base.metadata.tables["event"].create(bind=engine)
rebuild_events_table()
set_update_timestamp(overwrite=True)
def clean_old_events():
session = Session()
now = dt.datetime.now()
reminders = list(
session.query(Event, Reminder)
.filter(Event.id == Reminder.id, Event.end_date < now)
.all()
)
for r, __ in reminders:
session.query(Reminder).filter(Reminder.id == r.id).delete()
events = list(session.query(Event).filter(Event.end_date < now).all())
for e in events:
session.query(Event).filter(Event.id == e.id).delete()
session.commit()
session.close()
def auto_refresh():
try:
updated_at = os.path.join(CONRAD_HOME, ".updated_at")
with open(updated_at, "r") as f:
last_updated_at = dt.datetime.strptime(f.read().strip(), DATE_FMT)
except (IOError, FileNotFoundError):
last_updated_at = dt.datetime.strptime("1970-01-01T00:00:00", DATE_FMT)
if (dt.datetime.now() - last_updated_at) > dt.timedelta(days=7):
refresh_conrad()
clean_old_events()
# https://stackoverflow.com/a/50889894
def make_exclude_hook_command(callback):
"""for any command that is not decorated, call the callback"""
hook_attr_name = "hook_" + callback.__name__
class HookGroup(click.Group):
"""group to hook context invoke to see if the callback is needed"""
def group(self, *args, **kwargs):
"""new group decorator to make sure sub groups are also hooked"""
if "cls" not in kwargs:
kwargs["cls"] = type(self)
return super(HookGroup, self).group(*args, **kwargs)
def command(self, *args, **kwargs):
"""new command decorator to monkey patch command invoke"""
cmd = super(HookGroup, self).command(*args, **kwargs)
def hook_command_decorate(f):
# decorate the command
ret = cmd(f)
# grab the original command invoke
orig_invoke = ret.invoke
def invoke(ctx):
"""call the call back right before command invoke"""
parent = ctx.parent
sub_cmd = (
parent and parent.command.commands[parent.invoked_subcommand]
)
if (
not sub_cmd
or not isinstance(sub_cmd, click.Group)
and getattr(sub_cmd, hook_attr_name, True)
):
# invoke the callback
callback()
return orig_invoke(ctx)
# hook our command invoke to command and return cmd
ret.invoke = invoke
return ret
# return hooked command decorator
return hook_command_decorate
def decorator(func=None):
if func is None:
# if called other than as decorator, return group class
return HookGroup
setattr(func, hook_attr_name, False)
return decorator
bypass_auto_refresh = make_exclude_hook_command(auto_refresh)
@click.group(name="conrad", cls=bypass_auto_refresh())
@click.version_option(version=__version__)
@click.pass_context
def cli(ctx, *args, **kwargs):
"""conrad: Track conferences and meetups on your terminal."""
set_default_pager()
@bypass_auto_refresh
@cli.command("refresh", short_help="Refresh event database.")
@click.confirmation_option(prompt="Would you like conrad to look for new events?")
@click.pass_context
def _refresh(ctx, *args, **kwargs):
# TODO: print("10 new events found!")
refresh_conrad()
click.echo("All done! ✨ 🍰 ✨")
click.echo("Event database updated.")
@cli.command("show", short_help="Show all saved events.")
@click.option(
"--id",
"-i",
help="Show event with a particular id.",
)
@click.option(
"--kind",
"-k",
help="Show kind of event, conference or meetup.",
)
@click.option(
"--cfp",
"-c",
is_flag=True,
help="Show only events which have an open CFP (call for proposals).",
)
@click.option(
"--tag", "-t", default="", help="Look at conferences with a specific tag."
)
@click.option(
"--name",
"-n",
default="",
help="Look at conferences containing a specific word in their name.",
)
@click.option(
"--location",
"-l",
default="",
help="Look at conferences in a specific city, state or country.",
)
@click.option(
"--date",
"-d",
default=[],
multiple=True,
help='Look at conferences based on when they\'re happening. For example: conrad show --date ">= 2019-10-01" --date "<= 2020-01-01".',
)
@click.pass_context
def _show(ctx, *args, **kwargs):
# TODO: conrad show --new
initialize_conrad()
_id = kwargs["id"]
kind = kwargs["kind"]
cfp = kwargs["cfp"]
tag = kwargs["tag"]
name = kwargs["name"]
date = list(kwargs["date"])
location = kwargs["location"]
filters = []
if _id:
filters.append(Event.id == _id)
if kind:
filters.append(Event.kind == kind)
if cfp:
filters.append(Event.cfp_open.is_(cfp))
if tag:
filters.append(Event.tags.contains(tag))
if name:
filters.append(Event.name.ilike(f"%{name}%"))
if date:
        date_filters = []
        for d in date:
            cmp_op, date_value = d.split(" ")
            if not (">" in cmp_op or "<" in cmp_op):
                raise click.UsageError("Wrong comparison operator!")
            try:
                __ = dt.datetime.strptime(date_value, "%Y-%m-%d")
            except ValueError:
                raise click.UsageError("Wrong date format!")
            if ">" in cmp_op:
                date_filters.append(Event.start_date >= date_value)
            elif "<" in cmp_op:
                date_filters.append(Event.start_date <= date_value)
        filters.append(sqlalchemy.and_(*date_filters))
if location:
filters.append(
sqlalchemy.or_(
Event.name.ilike(f"%{location}%"),
Event.city.ilike(f"%{location}%"),
Event.state.ilike(f"%{location}%"),
Event.country.ilike(f"%{location}%"),
Event.location.ilike(f"%{location}%"),
)
)
session = Session()
try:
events = list(
session.query(Event).filter(*filters).order_by(Event.start_date).all()
)
except sqlalchemy.exc.OperationalError:
refresh_conrad()
events = list(
session.query(Event).filter(*filters).order_by(Event.start_date).all()
)
if len(events) > 0:
console = Console()
table = Table(show_header=True, header_style="bold magenta", show_lines=True)
table.add_column("id")
table.add_column("Name")
table.add_column("Website")
table.add_column("City")
table.add_column("Country")
table.add_column("Start Date")
table.add_column("End Date")
events_output = []
rids = [r.id for r in session.query(Reminder).all()]
for event in events:
event_output = [
event.id,
event.name,
event.url,
event.city,
event.country,
event.start_date.strftime("%Y-%m-%d"),
event.end_date.strftime("%Y-%m-%d"),
]
# highlight event which has a reminder set
if event.id in rids:
event_output = list(
map(
lambda x: f"[bold][green]{x}[/green][/bold]",
event_output,
)
)
table.add_row(*event_output)
session.close()
console_kwargs = {}
if has_less():
console_kwargs["styles"] = True
with console.pager(**console_kwargs):
console.print(table)
else:
click.echo("No events found!")
@cli.command("remind", short_help="Set and display reminders.")
@click.option("--id", "-i", default=None, help="Conference identifier.")
@click.pass_context
def _remind(ctx, *args, **kwargs):
def get_days_left(event):
start = dt.datetime.now()
cfp_days_left = (event.cfp_end_date - start).days
event_days_left = (event.start_date - start).days
if event.cfp_open and cfp_days_left >= 0:
days_left = cfp_days_left
elif event_days_left >= 0:
days_left = event_days_left
else:
days_left = -1
return days_left, event.cfp_open
initialize_conrad()
_id = kwargs["id"]
if _id is None:
session = Session()
reminders = list(
session.query(Event, Reminder)
.filter(Event.id == Reminder.id)
.order_by(Event.start_date)
.all()
)
if len(reminders) > 0:
console = Console()
table = Table(
show_header=True, header_style="bold magenta", show_lines=True
)
table.add_column("id")
table.add_column("Name")
table.add_column("Start Date")
table.add_column("Days Left")
reminders_output = []
for reminder, __ in reminders:
days_left, cfp_open = get_days_left(reminder)
if cfp_open and days_left >= 0:
days_left_output = f"{days_left} days left to cfp deadline!"
elif days_left >= 0:
days_left_output = f"{days_left} days left!"
else:
days_left_output = "Event ended."
style = "white"
if days_left >= 30:
style = "green"
elif 30 > days_left >= 10:
style = "yellow"
elif 10 > days_left >= 0:
style = "red"
days_left_output = f"[bold][{style}]{days_left_output}[/{style}][/bold]"
reminder_output = [
reminder.id,
reminder.name,
reminder.start_date.strftime("%Y-%m-%d"),
days_left_output,
]
table.add_row(*reminder_output)
session.close()
console_kwargs = {}
if has_less():
console_kwargs["styles"] = True
with console.pager(**console_kwargs):
console.print(table)
else:
click.echo("No reminders found!")
else:
try:
session = Session()
event = session.query(Event).filter(Event.id == _id).first()
if event is None:
click.echo("Event not found!")
else:
days_left, __ = get_days_left(event)
if days_left == -1:
click.echo("Event ended.")
else:
reminder = Reminder(id=event.id)
session.add(reminder)
session.commit()
session.close()
click.echo("Reminder set!")
except sqlalchemy.exc.IntegrityError:
session.rollback()
if click.confirm("Do you want to remove this reminder?"):
session = Session()
session.query(Reminder).filter(Reminder.id == _id).delete()
session.commit()
session.close()
click.echo("Reminder removed!")
@cli.command("generate", short_help="Generate skeleton crawler code.")
@click.argument("entity")
@click.argument("entity_name")
@click.pass_context
def _generate(ctx, *args, **kwargs):
SUPPORTED_ENTITIES = ["crawler"]
entity = kwargs["entity"]
if entity not in SUPPORTED_ENTITIES:
click.UsageError(f"Entity '{entity}' not supported")
entity_name = kwargs["entity_name"]
entity_name_snake_case = re.sub(r"(?<!^)(?=[A-Z])", "_", entity_name).lower()
crawler_dir = f"crawlers/{entity_name_snake_case}"
mkdir(crawler_dir)
with open(os.path.join(crawler_dir, "__init__.py"), "w") as f:
f.write("# -*- coding: utf-8 -*-\n")
crawler_content = f"""# -*- coding: utf-8 -*-
from ..base import BaseCrawler
class {entity_name}Crawler(BaseCrawler):
def get_events(self):
# Populate this list of events using your code
events = []
# YOUR CODE HERE
# Extend the self.events list with the new list
self.events.extend(events)
"""
crawler_path = os.path.join(crawler_dir, f"{entity_name_snake_case}_crawler.py")
with open(crawler_path, "w") as f:
f.write(crawler_content)
with open("crawlers/__init__.py", "a") as f:
f.write(
f"from .{entity_name_snake_case}.{entity_name_snake_case}_crawler import {entity_name}Crawler"
)
click.echo(f"\t{click.style('create', fg='green', bold=True)}\t{crawler_path}")
@cli.command("run", short_help="Run crawler code.")
@click.argument("entity")
@click.argument("entity_name")
@click.pass_context
def _run(ctx, *args, **kwargs):
if not _HAS_CRAWL_REQUIREMENTS:
raise click.UsageError(
"To run crawlers, please install the requirements with\n"
"'pip install --upgrade conference-radar[crawl]'."
)
SUPPORTED_ENTITIES = ["crawler"]
entity = kwargs["entity"]
if entity not in SUPPORTED_ENTITIES:
click.UsageError(f"Entity '{entity}' not supported")
entity_name = kwargs["entity_name"]
entity_name_snake_case = re.sub(r"(?<!^)(?=[A-Z])", "_", entity_name).lower()
crawler = [
m[0]
for m in inspect.getmembers(crawlers, inspect.isclass)
if m[1].__module__.startswith("crawlers") and m[0] == f"{entity_name}Crawler"
]
if len(crawler):
filename = crawler[0].lower().replace("crawler", "")
Crawler = eval(crawler[0])
c = Crawler()
c.get_events()
crawler_data_path = f"data/{filename}.json"
c.export(crawler_data_path)
click.echo(
f"\t{click.style('save', fg='green', bold=True)}\t{crawler_data_path}"
)
else:
print("Crawler not found!")
@bypass_auto_refresh
@cli.command("import", short_help="Import new events into conrad.")
@click.option("--file", "-f", default=None, help="JSON file to import.")
@click.pass_context
def _import(ctx, *args, **kwargs):
file = kwargs["file"]
if file is None:
raise click.UsageError("No file provided!")
if not os.path.exists(file):
raise click.UsageError("File does not exist!")
with open(file, "r") as f:
input_events = json.load(f)
failures = validate_events(input_events, version=LATEST)
if len(failures) > 0:
raise click.UsageError(
"The following validations failed!\n{}".format(
"".join(
list(map(lambda x: "- " + x + "\n", failures[:-1]))
+ list(map(lambda x: "- " + x, failures[-1:]))
)
)
)
events_path = os.path.join(os.getcwd(), "data", f"{eval(f'f{LATEST}')}")
try:
with open(events_path, "r") as f:
events = json.load(f)
except (IOError, ValueError, KeyError, FileNotFoundError):
events = []
now = dt.datetime.now()
old_events = []
for e in events:
event_end_date = dt.datetime.strptime(e["end_date"], "%Y-%m-%d")
if event_end_date < now:
print(f"Removing {e['name']}")
continue
if e["cfp_end_date"] is not None:
cfp_end_date = dt.datetime.strptime(e["cfp_end_date"], "%Y-%m-%d")
if cfp_end_date < now:
e["cfp_open"] = False
old_events.append(e)
removed = len(events) - len(old_events)
s = "s" if removed != 1 else ""
click.echo(f"Removed {removed} old event{s}!")
pattern = "[0-9]"
new_events = []
for ie in input_events:
if ie["end_date"] is None:
continue
event_end_date = dt.datetime.strptime(ie["end_date"], "%Y-%m-%d")
if event_end_date < now:
continue
if ie["cfp_end_date"] is not None:
cfp_end_date = dt.datetime.strptime(ie["cfp_end_date"], "%Y-%m-%d")
if cfp_end_date < now:
ie["cfp_open"] = False
match = False
for oe in old_events:
input_event_name = ie["name"].replace(" ", "").lower()
input_event_name = re.sub(pattern, "", input_event_name)
old_event_name = oe["name"].replace(" ", "").lower()
old_event_name = re.sub(pattern, "", old_event_name)
similarity = textdistance.levenshtein.normalized_similarity(
input_event_name, old_event_name
)
if similarity > 0.9:
click.echo(f"Updating {oe['name']}")
oe.update(ie)
match = True
if not match:
click.echo(f"Adding {ie['name']}")
new_events.append(ie)
old_events.extend(new_events)
s = "s" if len(new_events) != 1 else ""
click.echo(f"Added {len(new_events)} new event{s}!")
with open(events_path, "w") as f:
f.write(json.dumps(old_events, indent=4, sort_keys=True))
for version in reversed(range(1, int(LATEST))):
events = old_events.copy()
events = apply_schema(events, version=version)
events_path = os.path.join(os.getcwd(), "data", f"{eval(f'f{version}')}")
with open(events_path, "w") as f:
f.write(json.dumps(events, indent=4, sort_keys=True))
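# Illustrative note (not part of the CLI, shown as comments because this module
# is only importable as part of the conrad package): the fuzzy matching used by
# the import command above strips digits and whitespace before comparing names,
# so successive editions of the same event are treated as one entry, e.g.
#
#     a = re.sub("[0-9]", "", "PyCon US 2020".replace(" ", "").lower())
#     b = re.sub("[0-9]", "", "PyCon US 2021".replace(" ", "").lower())
#     textdistance.levenshtein.normalized_similarity(a, b)  # 1.0 > 0.9 -> update, not add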
|
load_MNIST.py
|
kaixinhuaihuai/ufldl_tutorial
| 595 |
89822
|
<filename>load_MNIST.py
import numpy as np
def load_MNIST_images(filename):
"""
    returns a [28 * 28, number of MNIST images] matrix containing
    the raw MNIST images, one flattened image per column
:param filename: input data file
"""
with open(filename, "r") as f:
magic = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
num_images = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
num_rows = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
num_cols = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
images = np.fromfile(f, dtype=np.ubyte)
images = images.reshape((num_images, num_rows * num_cols)).transpose()
images = images.astype(np.float64) / 255
f.close()
return images
def load_MNIST_labels(filename):
"""
returns a [number of MNIST images]x1 matrix containing
the labels for the MNIST images
:param filename: input file with labels
"""
    with open(filename, 'rb') as f:
        magic = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
        num_labels = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
        labels = np.fromfile(f, dtype=np.ubyte)
        return labels
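# Hedged usage sketch (not part of the tutorial code): the paths below follow
# the standard MNIST download names and are assumptions about the local layout.
if __name__ == '__main__':
    images = load_MNIST_images('data/train-images-idx3-ubyte')
    labels = load_MNIST_labels('data/train-labels-idx1-ubyte')
    print(images.shape)  # (784, 60000): one flattened 28x28 image per column
    print(labels.shape)  # (60000,)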
|
tests/components/spc/__init__.py
|
domwillcode/home-assistant
| 30,023 |
89832
|
"""Tests for the spc component."""
|
tests/test_configuration.py
|
TurboGears/tg2
| 812 |
89833
|
<gh_stars>100-1000
"""
Testing for TG2 Configuration
"""
from nose import SkipTest
from nose.tools import eq_, raises
import sys, os
from datetime import datetime
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine import Engine
from ming import Session
from ming.orm import ThreadLocalORMSession
from tg.configurator.base import ConfigurationComponent, Configurator, BeforeConfigConfigurationAction
from tg.configurator.components.app_globals import AppGlobalsConfigurationComponent
from tg.configurator.components.auth import SimpleAuthenticationConfigurationComponent
from tg.configurator.components.caching import CachingConfigurationComponent
from tg.configurator.components.dispatch import DispatchConfigurationComponent
from tg.configurator.components.helpers import HelpersConfigurationComponent
from tg.configurator.components.i18n import I18NConfigurationComponent
from tg.configurator.components.ming import MingConfigurationComponent
from tg.configurator.components.paths import PathsConfigurationComponent
from tg.configurator.components.registry import RegistryConfigurationComponent
from tg.configurator.components.rendering import \
TemplateRenderingConfigurationComponent
from tg.configurator.components.session import SessionConfigurationComponent
from tg.configurator.components.sqlalchemy import SQLAlchemyConfigurationComponent
from tg.configurator.components.transactions import \
TransactionManagerConfigurationComponent
from tg.configuration.tgconfig import _init_default_global_config
from tg.appwrappers.mingflush import MingApplicationWrapper
from tg.util import Bunch
from tg.configuration import config
from tg.configurator import FullStackApplicationConfigurator
from tg.configurator import ApplicationConfigurator
from tg.configuration.app_config import AppConfig
from tg.configuration.auth import _AuthenticationForgerPlugin
from tg.configuration.auth.metadata import _AuthMetadataAuthenticator
from tg.configuration.utils import coerce_config, coerce_options, TGConfigError
from tg.configuration import milestones
from tg.support.converters import asint, asbool
import tg.i18n
from tg import TGController, expose, response, request, abort, MinimalApplicationConfigurator
from tests.base import setup_session_dir, teardown_session_dir
from webtest import TestApp
from tg.renderers.base import RendererFactory
from tg.wsgiapp import TGApp
from tg._compat import PY3
def setup():
milestones._reset_all()
setup_session_dir()
def teardown():
milestones._reset_all()
teardown_session_dir()
def _reset_global_config():
milestones._reset_all()
try:
config.config_proxy.pop_thread_config()
except:
pass
try:
config.config_proxy.pop_process_config()
except:
pass
class PackageWithModel:
__name__ = 'tests'
__file__ = __file__
def __init__(self):
self.model = self.ModelClass()
self.model.DBSession = self.model.FakeDBSession()
class ModelClass:
class FakeDBSession:
def remove(self):
self.DBSESSION_REMOVED=True
def init_model(self, engine):
if isinstance(engine, Engine):
# SQLA
return self.DBSession
else:
# Ming
return dict(ming=True)
class lib:
class app_globals:
class Globals:
pass
PackageWithModel.__name__ = 'tests'
class UncopiableList(list):
"""
This is to test configuration methods that make a copy
of a list to modify it, using this we can check how it has
been modified
"""
def __copy__(self):
return self
class FakeTransaction:
def get(self):
return self
def begin(self):
self.aborted = False
self.doomed = False
return self
def abort(self):
self.aborted = True
def commit(self):
self.aborted = False
def _retryable(self, *args):
return True
note = _retryable
def isDoomed(self):
return self.doomed
def doom(self):
self.doomed = True
from tg.configuration.auth import TGAuthMetadata
class ApplicationAuthMetadata(TGAuthMetadata):
def get_user(self, identity, userid):
return {'name':'None'}
class ApplicationAuthMetadataWithAuthentication(TGAuthMetadata):
def authenticate(self, environ, identity):
return 1
def get_user(self, identity, userid):
return {'name':'None'}
class AtExitTestException(Exception):
pass
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
class TestPylonsConfigWrapper:
def setup(self):
_reset_global_config()
_init_default_global_config()
self.config = config
def tearDown(self):
_reset_global_config()
_init_default_global_config()
def test_create(self):
pass
def test_getitem(self):
expected_keys = ['debug', 'package', 'tg.app_globals', 'tg.strict_tmpl_context']
for key in expected_keys:
self.config[key]
def test_repr(self):
_reset_global_config()
assert repr(self.config) == '<TGConfig: missing>'
_init_default_global_config()
assert repr(self.config) == repr(self.config.config_proxy.current_conf())
@raises(KeyError)
def test_getitem_bad(self):
self.config['no_such_key']
def test_setitem(self):
self.config['no_such_key'] = 'something'
def test_delattr(self):
del self.config.debug
eq_(hasattr(self.config, 'debug'), False)
self.config.debug = False
@raises(AttributeError)
def test_delattr_bad(self):
del self.config.i_dont_exist
def test_keys(self):
k = self.config.keys()
assert 'tg.app_globals' in k
def test_coerce_config():
opts = {'ming.connection.max_pool_size': '5'}
conf = coerce_config(opts, 'ming.connection.', {'max_pool_size':asint})
assert conf['max_pool_size'] == 5
assert opts['ming.connection.max_pool_size'] == '5'
def test_coerce_options():
opts = {'connection': 'false'}
conf = coerce_options(opts, {'connection': asbool})
assert conf['connection'] is False
assert opts['connection'] == 'false'
class TestConfigurator:
def setup(self):
_reset_global_config()
def teardown(self):
_reset_global_config()
tg.hooks._clear() # Reset hooks
def test_repr_action(self):
act = BeforeConfigConfigurationAction()
assert repr(act) == "<BeforeConfigConfigurationAction: None>"
def test_reqlocal_configuration_dictionary(self):
cfg = FullStackApplicationConfigurator()
cfg.update_blueprint({'RANDOM_VALUE': 5})
conf = cfg.configure({}, {})
assert config['RANDOM_VALUE'] == 5
assert len(config) == len(conf)
def test_blueprint_invalid_view(self):
cfg = FullStackApplicationConfigurator()
try:
cfg.get_blueprint_view('this.that.')
except ValueError as e:
assert str(e) == 'A Blueprint key cannot end with a .'
else:
assert False, 'Should have raised'
def test_invalid_component(self):
cfg = FullStackApplicationConfigurator()
try:
cfg.register(str)
except ValueError as e:
assert str(e) == 'Configuration component must inherit ConfigurationComponent'
else:
assert False, 'Should have raised'
def test_replace_component(self):
cfg = FullStackApplicationConfigurator()
class TestComponentFirst(ConfigurationComponent):
id = 'TESTCOMPONENT'
class TestComponentSecond(ConfigurationComponent):
id = 'TESTCOMPONENT2'
cfg.register(TestComponentFirst)
try:
cfg.replace(TestComponentFirst, str)
except ValueError as e:
assert str(e) == 'Configuration component must inherit ConfigurationComponent'
else:
assert False, 'Should have raised'
cfg.replace('TESTCOMPONENT', TestComponentSecond)
comp = cfg.get_component('TESTCOMPONENT')
assert isinstance(comp, TestComponentSecond), comp
def test_component_without_id(self):
cfg = FullStackApplicationConfigurator()
class TestComponentFirst(ConfigurationComponent):
pass
try:
cfg.register(TestComponentFirst)
except ValueError as e:
assert str(e).startswith('ConfigurationComponent must provide an id class attribute')
else:
assert False, 'Should have raised'
try:
cfg.replace(TestComponentFirst, TestComponentFirst)
except ValueError as e:
assert str(e).startswith('ConfigurationComponent must provide an id class attribute')
else:
assert False, 'Should have raised'
def test_retrieve_current_configurator(self):
cfg = FullStackApplicationConfigurator()
cfg.update_blueprint({'RANDOM_VALUE': 5})
cfg.configure({}, {})
configurator = FullStackApplicationConfigurator.current()
assert configurator.get_blueprint_value('RANDOM_VALUE') == 5
def test_application_wrapper_replacement(self):
class AppWrapperTest(object):
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kw):
return tg.Response('AppWrapper #1')
class AppWrapperTestReplacement(object):
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kw):
return tg.Response('AppWrapper #2')
cfg = FullStackApplicationConfigurator()
cfg.update_blueprint({'root_controller': Bunch(index=lambda *args, **kwargs: 'HI')})
cfg.register_application_wrapper(AppWrapperTest)
app = TestApp(cfg.make_wsgi_app({'debug': True}, {}))
assert app.get('/').text == 'AppWrapper #1', app.get('/').text
cfg.replace_application_wrapper('AppWrapperTest', AppWrapperTestReplacement)
app = TestApp(cfg.make_wsgi_app({}, {}))
assert app.get('/').text == 'AppWrapper #2', app.get('/').text
def test_sa_auth_requires_app_config(self):
configurator = Configurator()
configurator.register(SimpleAuthenticationConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e) == 'Simple Authentication only works on an ApplicationConfigurator'
else:
assert False, 'Should have raised'
def test_sa_auth_authmetadata_without_authenticate(self):
cfg = FullStackApplicationConfigurator()
class FakeAuthMetadata():
pass
cfg.update_blueprint({
'root_controller': Bunch(index=lambda *args, **kwargs: 'HI'),
'auth_backend': 'authmetadata',
'sa_auth.authmetadata': FakeAuthMetadata(),
'sa_auth.cookie_secret': 'SECRET!'
})
cfg.make_wsgi_app({}, {})
def test_caching_required_app_config(self):
configurator = Configurator()
configurator.register(CachingConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e) == 'Caching only works on an ApplicationConfigurator'
else:
assert False, 'Should have raised'
def test_i18n_required_app_config(self):
configurator = Configurator()
configurator.register(I18NConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e) == 'I18N only works on an ApplicationConfigurator'
else:
assert False, 'Should have raised'
def test_ming_required_app_config(self):
configurator = Configurator()
configurator.register(MingConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e).endswith('only works on an ApplicationConfigurator')
else:
assert False, 'Should have raised'
def test_session_required_app_config(self):
configurator = Configurator()
configurator.register(SessionConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e).endswith('only work on an ApplicationConfigurator')
else:
assert False, 'Should have raised'
def test_sqlalchemy_required_app_config(self):
configurator = Configurator()
configurator.register(SQLAlchemyConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e).endswith('only works on an ApplicationConfigurator')
else:
assert False, 'Should have raised'
def test_transaction_required_app_config(self):
configurator = Configurator()
configurator.register(TransactionManagerConfigurationComponent)
try:
configurator.configure({}, {})
except TGConfigError as e:
assert str(e).endswith('only works on an ApplicationConfigurator')
else:
assert False, 'Should have raised'
def test_dispatch_without_mimetypes(self):
# This is exactly like MinimalApplicationConfigurator
# but without the mimetypes component.
apc = ApplicationConfigurator()
apc.register(PathsConfigurationComponent, after=False)
apc.register(DispatchConfigurationComponent, after=False)
apc.register(AppGlobalsConfigurationComponent)
apc.register(HelpersConfigurationComponent)
apc.register(TemplateRenderingConfigurationComponent)
apc.register(RegistryConfigurationComponent, after=True)
class MinimalController(TGController):
@expose()
def index(self):
return 'HI'
apc.update_blueprint({
'root_controller': MinimalController()
})
app = TestApp(apc.make_wsgi_app({}, {}))
assert app.get('/').text == 'HI'
def test_app_without_controller(self):
cfg = MinimalApplicationConfigurator()
app = TestApp(cfg.make_wsgi_app({}, {}))
try:
app.get('/')
except TGConfigError as e:
assert str(e) == 'Unable to load controllers, no controllers path configured!'
else:
assert False, 'Should have raised.'
def test_tgapp_caches_controller_classes(self):
class RootController(TGController):
@expose()
def index(self):
return 'HI'
tgapp = Bunch(app=None)
def save_app(app):
tgapp.app = app
return app
cfg = MinimalApplicationConfigurator()
app = TestApp(cfg.make_wsgi_app({}, {}, wrap_app=save_app))
tgapp.app.controller_classes['root'] = RootController
assert app.get('/').text == 'HI'
class TestAppConfig:
def __init__(self):
self.fake_package = PackageWithModel
def setup(self):
_reset_global_config()
def teardown(self):
_reset_global_config()
tg.hooks._clear() # Reset hooks
def test_get_value(self):
conf = AppConfig(minimal=True)
conf['existing_value'] = 5
assert conf['existing_value'] == 5
assert conf.get('non_existing_value') == None
def test_missing_attribute(self):
conf = AppConfig(minimal=True)
conf['existing_value'] = 5
assert conf['existing_value'] == 5
assert conf.existing_value == 5
try:
conf['missing_value']
except KeyError:
pass
else:
raise RuntimeError('Should have raised KeyError')
try:
conf.missing_value
except AttributeError:
pass
else:
raise RuntimeError('Should have raised AttributeError')
def test_lang_can_be_changed_by_ini(self):
conf = AppConfig(minimal=True)
conf.make_wsgi_app(**{'i18n.lang': 'ru'})
assert config['i18n.lang'] == 'ru'
def test_create_minimal_app(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
def test_create_minimal_app_with_factory(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
app_factory = conf.setup_tg_wsgi_app()
app = app_factory()
app = TestApp(app)
assert 'HI!' in app.get('/test')
def test_minimal_app_with_sqlalchemy(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
DBSession = scoped_session(sessionmaker(autoflush=True, autocommit=False))
def init_model(engine):
DBSession.configure(bind=engine)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['use_sqlalchemy'] = True
conf['sqlalchemy.url'] = 'sqlite://'
conf['model'] = Bunch(DBSession=DBSession,
init_model=init_model)
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
@raises(TGConfigError)
def test_sqlalchemy_without_models(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
conf['use_sqlalchemy'] = True
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
def test_minimal_app_with_ming(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
mainsession = Session()
DBSession = ThreadLocalORMSession(mainsession)
def init_model(engine):
mainsession.bind = engine
conf = AppConfig(minimal=True, root_controller=RootController())
conf['use_ming'] = True
conf['ming.url'] = 'mim:///dbname'
conf['model'] = Bunch(init_model=init_model, DBSession=DBSession)
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
@raises(TGConfigError)
def test_ming_without_models(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
DBSession = scoped_session(sessionmaker(autoflush=True, autocommit=False))
def init_model(engine):
DBSession.configure(bind=engine)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['use_ming'] = True
conf['ming.url'] = 'mim://'
app = conf.make_wsgi_app()
def test_setup_jinja_without_package(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
conf.renderers = ['jinja']
app = conf.make_wsgi_app()
def test_setup_sqlalchemy(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert package.model.DBSession.DBSESSION_REMOVED
def test_sqlalchemy_commit_veto(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
@expose()
def crash(self):
raise Exception('crash')
@expose()
def forbidden(self):
response.status = 403
return 'FORBIDDEN'
@expose()
def notfound(self):
response.status = 404
return 'NOTFOUND'
def custom_commit_veto(environ, status, headers):
if status.startswith('404'):
return True
return False
fake_transaction = FakeTransaction()
import transaction
prev_transaction_manager = transaction.manager
transaction.manager = fake_transaction
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf['package'] = package
conf['model'] = package.model
conf['use_sqlalchemy'] = True
conf['tm.enabled'] = True
conf['tm.commit_veto'] = custom_commit_veto
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
assert hasattr(conf, 'use_transaction_manager') is False
app.get('/test')
assert fake_transaction.aborted == False
try:
app.get('/crash')
except:
pass
assert fake_transaction.aborted == True
app.get('/forbidden', status=403)
assert fake_transaction.aborted == False
app.get('/notfound', status=404)
assert fake_transaction.aborted == True
transaction.manager = prev_transaction_manager
def test_sqlalchemy_doom(self):
fake_transaction = FakeTransaction()
import transaction
prev_transaction_manager = transaction.manager
transaction.manager = fake_transaction
class RootController(TGController):
@expose()
def test(self):
fake_transaction.doom()
return 'HI!'
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf['tm.enabled'] = True
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
assert hasattr(conf, 'use_transaction_manager') is False
app.get('/test')
assert fake_transaction.aborted == True
transaction.manager = prev_transaction_manager
def test_sqlalchemy_retry(self):
fake_transaction = FakeTransaction()
import transaction
prev_transaction_manager = transaction.manager
transaction.manager = fake_transaction
from transaction.interfaces import TransientError
class RootController(TGController):
attempts = []
@expose()
def test(self):
self.attempts.append(True)
if len(self.attempts) == 3:
return 'HI!'
raise TransientError()
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf['tm.enabled'] = True
conf['sqlalchemy.url'] = 'sqlite://'
conf['tm.attempts'] = 3
app = conf.make_wsgi_app()
app = TestApp(app)
assert hasattr(conf, 'use_transaction_manager') is False
resp = app.get('/test')
assert 'HI' in resp
transaction.manager = prev_transaction_manager
def test_setup_sqla_persistance(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
def test_setup_sqla_balanced(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.master.url'] = 'sqlite://'
conf['sqlalchemy.slaves.slave1.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
@raises(TGConfigError)
def test_setup_sqla_balanced_prevent_slave_named_master(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.master.url'] = 'sqlite://'
conf['sqlalchemy.slaves.master.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
@raises(TGConfigError)
def test_setup_sqla_balanced_no_slaves(self):
conf = AppConfig(minimal=True, root_controller=RootController())
conf['sqlalchemy.master.url'] = 'sqlite://'
conf.use_sqlalchemy = True
conf.package = PackageWithModel()
conf.make_wsgi_app()
def test_setup_ming_persistance(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://'
conf['ming.db'] = 'inmemdb'
app = conf.make_wsgi_app()
tgapp = app.application
while not isinstance(tgapp, TGApp):
tgapp = tgapp.app
ming_handler = tgapp.wrapped_dispatch
while ming_handler != tgapp._dispatch:
if isinstance(ming_handler, MingApplicationWrapper):
break
ming_handler = ming_handler.next_handler
assert isinstance(ming_handler, MingApplicationWrapper), ming_handler
class FakeMingSession(object):
actions = []
def flush_all(self):
self.actions.append('FLUSH')
def close_all(self):
self.actions.append('CLOSE')
ming_handler.ThreadLocalODMSession = FakeMingSession()
app = TestApp(app)
resp = app.get('/test')
assert 'HI' in resp
assert ming_handler.ThreadLocalODMSession.actions == ['FLUSH']
def test_setup_ming_persistance_closes_on_failure(self):
class RootController(TGController):
@expose()
def test(self):
raise Exception('CRASH!')
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://'
conf['ming.db'] = 'inmemdb'
app = conf.make_wsgi_app()
tgapp = app.application
while not isinstance(tgapp, TGApp):
tgapp = tgapp.app
ming_handler = tgapp.wrapped_dispatch
while ming_handler != tgapp._dispatch:
if isinstance(ming_handler, MingApplicationWrapper):
break
ming_handler = ming_handler.next_handler
assert isinstance(ming_handler, MingApplicationWrapper), ming_handler
class FakeMingSession(object):
actions = []
def flush_all(self):
self.actions.append('FLUSH')
def close_all(self):
self.actions.append('CLOSE')
ming_handler.ThreadLocalODMSession = FakeMingSession()
app = TestApp(app)
try:
app.get('/test', status=500)
except:
assert ming_handler.ThreadLocalODMSession.actions == ['CLOSE']
else:
assert False, 'Should have raised exception'
def test_setup_ming_persistance_with_url_alone(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://inmemdb'
app = conf.make_wsgi_app()
assert app is not None
dstore = config['tg.app_globals'].ming_datastore
dstore_name = dstore.name
# Looks like ming has empty dstore.name when using MIM.
assert dstore_name == '', dstore
def test_setup_sqla_and_ming_both(self):
package = PackageWithModel()
base_config = AppConfig(minimal=True, root_controller=None)
base_config.package = package
base_config.model = package.model
base_config.use_ming = True
base_config['ming.url'] = 'mim://inmemdb'
base_config.use_sqlalchemy = True
base_config['sqlalchemy.url'] = 'sqlite://'
app = base_config.make_wsgi_app()
assert app is not None
assert config['MingSession'], config
assert config['tg.app_globals'].ming_datastore, config['tg.app_globals']
assert config['SQLASession'], config
assert config['tg.app_globals'].sa_engine, config['tg.app_globals']
assert config['DBSession'] is config['SQLASession'], config
def test_setup_ming_persistance_with_url_and_db(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://inmemdb'
conf['ming.db'] = 'realinmemdb'
app = conf.make_wsgi_app()
assert app is not None
dstore = config['tg.app_globals'].ming_datastore
dstore_name = dstore.name
assert dstore_name == 'realinmemdb', dstore
def test_setup_ming_persistance_advanced_options(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mim://inmemdb'
conf['ming.connection.read_preference'] = 'PRIMARY'
app = conf.make_wsgi_app()
assert app is not None
def test_setup_ming_persistance_replica_set(self):
if sys.version_info[:2] == (2, 6):
raise SkipTest()
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mongodb://localhost:27017,localhost:27018/testdb?replicaSet=test'
conf['ming.db'] = ''
app = conf.make_wsgi_app()
assert app is not None
expected_url = 'mongodb://localhost:27017,localhost:27018/testdb?replicaSet=test'
expected_db = 'testdb'
dstore = config['tg.app_globals'].ming_datastore
assert expected_db == dstore.name, dstore.name
assert dstore.bind._conn_args[0] == expected_url
def test_setup_ming_persistance_replica_set_option(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.use_ming = True
conf['ming.url'] = 'mongodb://localhost:27017,localhost:27018/testdb'
conf['ming.connection.replicaSet'] = 'test'
app = conf.make_wsgi_app()
assert app is not None
expected_url = 'mongodb://localhost:27017,localhost:27018/testdb'
expected_db = 'testdb'
dstore = config['tg.app_globals'].ming_datastore
assert expected_db == dstore.name, dstore.name
assert dstore.bind._conn_args[0] == expected_url
assert 'test' == dstore.bind._conn_kwargs.get('replicaSet'), dstore.bind._conn_kwargs
def test_setup_sqla_auth_repozesqla(self):
if PY3: raise SkipTest()
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadata(),
'dbsession': None,
'user_class': None,
'cookie_secret': '12345'}
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_sqla_auth(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class': None,
'cookie_secret': '12345'}
conf['sqlalchemy.url'] = 'sqlite://'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_ming_auth_tgming(self):
if PY3: raise SkipTest()
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf.auth_backend = 'ming'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadata(),
'cookie_secret': '12345',
'user_class': None}
conf['ming.url'] = 'mim:///testdb'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_ming_auth(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_ming = True
conf.auth_backend = 'ming'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'cookie_secret': '12345',
'user_class': None}
conf['ming.url'] = 'mim:///testdb'
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert 'repoze.who.plugins' in resp, resp
def test_setup_authtkt(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.use_sqlalchemy = True
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'] = {'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class': None,
'cookie_secret': '12345',
'post_login_url': '/'}
conf['sqlalchemy.url'] = 'sqlite://'
secure_app = conf.make_wsgi_app(**{'sa_auth.authtkt.secure': True})
secure_app = TestApp(secure_app)
resp = secure_app.post('/login_handler', params={'login': 'l', 'password': 'p'})
assert 'HttpOnly' in resp.headers["Set-Cookie"], resp.headers
insecure_app = conf.make_wsgi_app(**{'sa_auth.authtkt.secure': False})
insecure_app = TestApp(insecure_app)
resp = insecure_app.post('/login_handler', params={'login': 'l', 'password': 'p'})
assert 'HttpOnly' not in resp.headers["Set-Cookie"], resp.headers
def test_sessions_enabled(self):
class RootController(TGController):
@expose('json')
def test(self):
try:
tg.session['counter'] += 1
except KeyError:
tg.session['counter'] = 0
tg.session.save()
return dict(counter=tg.session['counter'])
conf = AppConfig(minimal=True, root_controller=RootController())
conf['session.enabled'] = True
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
assert resp.json['counter'] == 0, resp
resp = app.get('/test')
assert resp.json['counter'] == 1, resp
def test_caching_enabled(self):
class RootController(TGController):
@expose('json')
def test(self):
cache = tg.cache.get_cache('test_caching_enabled')
now = cache.get_value('test_cache_key', createfunc=datetime.utcnow)
return dict(now=now)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['cache.enabled'] = True
app = conf.make_wsgi_app()
app = TestApp(app)
resp = app.get('/test')
now = resp.json['now']
for x in range(20):
resp = app.get('/test')
assert resp.json['now'] == now, (resp, now)
def test_controler_wrapper_setup(self):
from tg.configurator.components.dispatch import _call_controller
orig_caller = _call_controller
appcfg = AppConfig(minimal=True, root_controller=RootController())
conf = {}
dispatch = appcfg._configurator.get_component('dispatch')
dispatch._controller_wrappers[:] = []
dispatch._setup_controller_wrappers(conf, None)
assert conf['controller_caller'] == orig_caller
def controller_wrapper(caller):
def call(*args, **kw):
return caller(*args, **kw)
return call
conf = {}
dispatch = appcfg._configurator.get_component('dispatch')
dispatch._controller_wrappers[:] = [controller_wrapper]
dispatch._setup_controller_wrappers(conf, None)
assert conf['controller_caller'].__name__ == controller_wrapper(orig_caller).__name__
def test_global_controller_wrapper(self):
milestones._reset_all()
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
wrapper_has_been_visited = []
def controller_wrapper(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_controller_wrapper(controller_wrapper)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert wrapper_has_been_visited[0] is True
def test_multiple_global_controller_wrapper(self):
milestones._reset_all()
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
wrapper_has_been_visited = []
def controller_wrapper(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
def controller_wrapper2(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
def controller_wrapper3(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_controller_wrapper(controller_wrapper2)
conf.register_controller_wrapper(controller_wrapper3)
conf.register_controller_wrapper(controller_wrapper)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert len(wrapper_has_been_visited) == 3
def test_dedicated_controller_wrapper(self):
milestones._reset_all()
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
wrapper_has_been_visited = []
def controller_wrapper(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_controller_wrapper(controller_wrapper, controller=RootController.test)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert wrapper_has_been_visited[0] is True
def test_dedicated_controller_wrapper_old(self):
milestones._reset_all()
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
wrapper_has_been_visited = []
def controller_wrapper(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_controller_wrapper(controller_wrapper, controller=RootController.test)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert wrapper_has_been_visited[0] is True
def test_mixed_controller_wrapper(self):
milestones._reset_all()
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
app_wrapper_has_been_visited = []
def app_controller_wrapper(caller):
def call(*args, **kw):
app_wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
wrapper_has_been_visited = []
def controller_wrapper(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_controller_wrapper(app_controller_wrapper)
conf.register_controller_wrapper(controller_wrapper, controller=RootController.test)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert wrapper_has_been_visited[0] is True
assert app_wrapper_has_been_visited[0] is True
def test_controler_wrapper_after_environment_setup(self):
milestones._reset_all()
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
wrapper_has_been_visited = []
def controller_wrapper(caller):
def call(*args, **kw):
wrapper_has_been_visited.append(True)
return caller(*args, **kw)
return call
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_controller_wrapper(controller_wrapper)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert wrapper_has_been_visited[0] is True
assert len(wrapper_has_been_visited) == 1
conf.register_controller_wrapper(controller_wrapper)
app2 = conf.make_wsgi_app()
app2 = TestApp(app2)
wrapper_has_been_visited[:] = []
assert 'HI!' in app2.get('/test')
assert wrapper_has_been_visited[0] is True
assert len(wrapper_has_been_visited) == 2
def test_application_wrapper_setup(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
wrapper_has_been_visited = []
class AppWrapper(object):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def __call__(self, *args, **kw):
wrapper_has_been_visited.append(True)
return self.dispatcher(*args, **kw)
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_wrapper(AppWrapper)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert wrapper_has_been_visited[0] == True
def test_application_wrapper_ordering_after(self):
class AppWrapper1:
pass
class AppWrapper2:
pass
class AppWrapper3:
pass
class AppWrapper4:
pass
class AppWrapper5:
pass
conf = AppConfig(minimal=True)
conf.register_wrapper(AppWrapper2)
conf.register_wrapper(AppWrapper4, after=AppWrapper3)
conf.register_wrapper(AppWrapper3)
conf.register_wrapper(AppWrapper1, after=False)
conf.register_wrapper(AppWrapper5, after=AppWrapper3)
milestones.environment_loaded.reach()
app_wrappers = list(conf._configurator._application_wrappers.values())
assert app_wrappers[0] == AppWrapper1
assert app_wrappers[1] == AppWrapper2
assert app_wrappers[2] == AppWrapper3
assert app_wrappers[3] == AppWrapper4
assert app_wrappers[4] == AppWrapper5
def test_wrap_app(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
middleware_has_been_visited = []
class AppWrapper(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
middleware_has_been_visited.append(True)
return self.app(environ, start_response)
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
app = conf.make_wsgi_app(wrap_app=AppWrapper)
app = TestApp(app)
assert 'HI!' in app.get('/test')
assert middleware_has_been_visited[0] == True
@raises(TGConfigError)
def test_unsupported_renderer(self):
conf = AppConfig(root_controller=RootController())
conf['renderers'] = ['unknwon']
try:
conf.make_wsgi_app()
except TGConfigError as e:
assert 'This configuration object does not support the unknwon renderer' in str(e)
raise
@raises(TGConfigError)
def test_cookie_secret_required(self):
conf = AppConfig(root_controller=RootController())
conf['auth_backend'] = 'sqlalchemy'
conf['sa_auth'] = {}
try:
conf.make_wsgi_app()
except TGConfigError as e:
assert str(e).startswith('You must provide a value for authentication cookies secret')
raise
def test_sqla_auth_middleware(self):
if PY3: raise SkipTest()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.auth_backend = 'sqlalchemy'
conf.skip_authentication = True
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'dbsession': None,
'user_class':None,
'cookie_secret':'12345',
'authenticators':UncopiableList([('default', None)])})
conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert 'cookie' in authenticators
assert 'sqlauth' in authenticators
def test_sqla_auth_middleware_using_translations(self):
if PY3: raise SkipTest()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'dbsession': None,
'user_class':None,
'translations': {'user_name':'SomethingElse'},
'cookie_secret':'12345',
'authenticators':UncopiableList([('default', None)])})
conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert 'cookie' in authenticators
assert 'sqlauth' in authenticators
auth = None
for authname, authobj in config['sa_auth.authenticators']:
if authname == 'sqlauth':
auth = authobj
break
assert auth is not None, config['sa_auth.authenticators']
assert auth.translations['user_name'] == 'SomethingElse', auth.translations
def test_sqla_auth_middleware_default_after(self):
if PY3: raise SkipTest()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'cookie_secret':'12345',
'dbsession': None,
'user_class': None,
'authenticators':UncopiableList([('superfirst', None),
('default', None)])})
conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert authenticators[1] == 'superfirst'
assert 'cookie' in authenticators
assert 'sqlauth' in authenticators
def test_sqla_auth_middleware_no_authenticators(self):
if PY3: raise SkipTest()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.auth_backend = 'sqlalchemy'
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'dbsession': None,
'user_class': None,
'cookie_secret':'12345'})
# In this case we can just test it doesn't crash
# as the sa_auth dict doesn't have an authenticators key to check for
conf.make_wsgi_app()
def test_sqla_auth_middleware_only_mine(self):
class RootController(TGController):
@expose()
def test(self):
return str(request.environ)
@expose()
def forbidden(self):
response.status = "401"
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = package
conf.model = package.model
conf.auth_backend = 'sqlalchemy'
conf.use_sqlalchemy = True
conf['sqlalchemy.url'] = 'sqlite://'
alwaysadmin = _AuthenticationForgerPlugin(fake_user_key='FAKE_USER')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'cookie_secret':'12345',
'form_plugin':alwaysadmin,
'authenticators':UncopiableList([('alwaysadmin', alwaysadmin)]),
'identifiers':[('alwaysadmin', alwaysadmin)],
'challengers':[]})
app = conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert authenticators[0] == 'alwaysadmin'
assert 'sqlauth' not in authenticators
challengers = [x[1] for x in config['sa_auth.challengers']]
assert alwaysadmin in challengers
app = TestApp(app)
assert 'repoze.who.identity' in app.get('/test', extra_environ={'FAKE_USER':'admin'})
assert app.get('/forbidden', status=401)
def test_sqla_auth_logging_stderr(self):
package = PackageWithModel()
conf = AppConfig(minimal=True, root_controller=None)
conf.package = package
conf.model = package.model
conf.auth_backend = 'sqlalchemy'
conf.use_sqlalchemy = True
conf['sqlalchemy.url'] = 'sqlite://'
alwaysadmin = _AuthenticationForgerPlugin(fake_user_key='FAKE_USER')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'cookie_secret':'12345',
'form_plugin':alwaysadmin,
'log_level':'DEBUG',
'authenticators':UncopiableList([('alwaysadmin', alwaysadmin)]),
'identifiers':[('alwaysadmin', alwaysadmin)],
'challengers':[]})
conf['sa_auth']['log_file'] = 'stderr'
app = conf.make_wsgi_app()
conf['sa_auth']['log_file'] = 'stdout'
app = conf.make_wsgi_app()
import tempfile
f = tempfile.NamedTemporaryFile()
conf['sa_auth']['log_file'] = f.name
app = conf.make_wsgi_app()
def test_ming_auth_middleware(self):
if PY3: raise SkipTest()
conf = AppConfig(root_controller=RootController(),
auth_backend='ming')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'user_class':None,
'cookie_secret':'12345',
'authenticators': UncopiableList([('default', None)])})
conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert 'cookie' in authenticators
assert 'mingauth' in authenticators
@raises(KeyError)
def test_sqla_auth_middleware_no_backend(self):
conf = AppConfig(root_controller=RootController())
conf.auth_backend = None
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadata(),
'cookie_secret':'12345'})
conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert 'cookie' in authenticators
assert len(authenticators) == 1
def test_tgauthmetadata_auth_middleware(self):
conf = AppConfig(root_controller=RootController(),
auth_backend='sqlalchemy')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class':None,
'cookie_secret':'12345',
'authenticators':UncopiableList([('default', None)])})
conf.make_wsgi_app()
authenticators = [x[0] for x in config['sa_auth.authenticators']]
assert 'cookie' in authenticators
assert 'tgappauth' in authenticators
def test_auth_setup_default_identifier(self):
conf = AppConfig(root_controller=RootController(),
auth_backend='sqlalchemy')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class':None,
'cookie_secret':'12345',
'identifiers': UncopiableList([('default', None)])})
conf.make_wsgi_app()
identifiers = [x[0] for x in tg.config['sa_auth.identifiers']]
assert 'cookie' in identifiers
def test_auth_setup_custom_identifier(self):
conf = AppConfig(root_controller=RootController(),
auth_backend='sqlalchemy')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class':None,
'cookie_secret':'12345',
'identifiers': UncopiableList([('custom', None)])})
conf.make_wsgi_app()
identifiers = [x[0] for x in config['sa_auth.identifiers']]
assert 'custom' in identifiers
def test_auth_middleware_doesnt_touch_authenticators(self):
# Checks that the auth middleware process doesn't touch original authenticators
# list, to prevent regressions on this.
conf = AppConfig(root_controller=RootController(),
auth_backend='sqlalchemy')
conf['sa_auth'].update({'authmetadata': ApplicationAuthMetadataWithAuthentication(),
'dbsession': None,
'user_class':None,
'cookie_secret':'12345',
'authenticators':[('default', None)]})
conf.make_wsgi_app()
authenticators = [x[0] for x in conf['sa_auth.authenticators']]
assert len(authenticators) == 1
def test_tgauthmetadata_loginpwd(self):
who_authenticator = _AuthMetadataAuthenticator(ApplicationAuthMetadataWithAuthentication(), using_password=True)
assert who_authenticator.authenticate({}, {}) == None
def test_tgauthmetadata_nologinpwd(self):
who_authenticator = _AuthMetadataAuthenticator(ApplicationAuthMetadataWithAuthentication(), using_password=False)
assert who_authenticator.authenticate({}, {}) == 1
def test_error_middleware_disabled_with_optimize(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
os.environ['PYTHONOPTIMIZE'] = '2'
app = conf.make_wsgi_app()
os.environ.pop('PYTHONOPTIMIZE')
app = TestApp(app)
assert 'HI!' in app.get('/test')
def test_serve_statics(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
conf.serve_static = True
app = conf.make_wsgi_app()
assert app.__class__.__name__.startswith('Statics')
app = TestApp(app)
assert 'HI!' in app.get('/test')
def test_mount_point_with_minimal(self):
class SubController(TGController):
@expose()
def test(self):
return self.mount_point
class RootController(TGController):
sub = SubController()
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert '/sub' in app.get('/sub/test')
def test_application_test_vars(self):
class RootController(TGController):
pass
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'DONE' in app.get('/_test_vars')
assert request.path == '/_test_vars'
# This should trash away the preserved registry to avoid
# leaking memory.
app.get('/', status=404)
try:
request.path
except TypeError:
# TypeError means the request has been properly removed
pass
else:
assert False, 'There should have been no requests in place...'
def test_application_empty_controller(self):
class RootController(object):
def __call__(self, environ, start_response):
return None
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
try:
r = app.get('/something')
except Exception as e:
assert 'No content returned by controller' in str(e)
else:
assert False, 'Should have raised "No content returned by controller"'
def test_application_test_mode_detection(self):
class FakeRegistry(object):
def register(self, *args, **kw):
pass
def track_app(app):
# Save a reference to the plain TGApp before it's wrapped by middlewares.
track_app.app = app
return app
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = PackageWithModel()
conf.make_wsgi_app(wrap_app=track_app)
testmode, __, __ = track_app.app._setup_app_env({'paste.registry':FakeRegistry()})
assert testmode is False
testmode, __, __ = track_app.app._setup_app_env({'paste.registry':FakeRegistry(),
'paste.testing_variables':{}})
assert testmode is True
def test_application_no_controller_hijacking(self):
class RootController(TGController):
@expose()
def test(self):
return 'HI!'
class AppWrapper(object):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def __call__(self, controller, environ, start_response):
return self.dispatcher(None, environ, start_response)
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_wrapper(AppWrapper)
conf.package = PackageWithModel()
app = conf.make_wsgi_app()
app = TestApp(app)
app.get('/test', status=404)
def test_package_no_app_globals(self):
class RootController(TGController):
pass
conf = AppConfig(minimal=True, root_controller=RootController())
conf.package = sys.modules[__name__]
app = conf.make_wsgi_app()
def test_custom_error_document(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(403)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'ERROR!!!' in resp, resp
def test_custom_error_document_with_streamed_response(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
response.status_code = 403
def _output():
yield 'Hi'
yield 'World'
return _output()
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'ERROR!!!' in resp, resp
def test_error_document_passthrough(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
request.disable_error_pages()
abort(403, detail='Custom Detail')
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'Custom Detail' in resp, resp
def test_custom_old_error_document(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(403)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf.status_code_redirect = True
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'ERROR!!!' in resp, resp
def test_custom_old_error_document_with_streamed_response(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
response.status_code = 403
def _output():
yield 'Hi'
yield 'World'
return _output()
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf.status_code_redirect = True
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'ERROR!!!' in resp, resp
def test_custom_500_document(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(500)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['debug'] = False
conf['errorpage.handle_exceptions'] = False
conf['errorpage.status_codes'] += [500]
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500)
assert 'ERROR!!!' in resp, resp
def test_custom_500_document_on_crash(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
raise Exception('Crash!')
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['debug'] = False
conf['errorpage.handle_exceptions'] = True
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500)
assert 'ERROR!!!' in resp, resp
def test_errorpage_reraises_exceptions(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
raise Exception('Crash!')
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['debug'] = False
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=False)
app = TestApp(app)
try:
resp = app.get('/test', status=500)
except Exception as e:
assert 'Crash!' in str(e)
else:
assert False, 'Should have raised Crash! exception'
def test_old_custom_500_document(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(500)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['debug'] = False
conf.status_code_redirect = True
conf['errorpage.enabled'] = True
conf['errorpage.status_codes'] += [500]
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500)
assert 'ERROR!!!' in resp, resp
def test_skips_custom_500_document_when_debug(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(500)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['debug'] = True
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500)
assert 'ERROR!!!' not in resp, resp
def test_skips_old_custom_500_document_when_debug(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(500)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['debug'] = True
conf.status_code_redirect = True
conf['errorpage.enabled'] = True
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500)
assert 'ERROR!!!' not in resp, resp
def test_skips_custom_error_document_when_disabled(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(403)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = False
conf['errorpage.status_codes'] = (403, 404)
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'ERROR!!!' not in resp, resp
def test_skips_custom_error_document_when_disabled_and_manually_registered(self):
class ErrorController(TGController):
@expose()
def document(self, *args, **kw):
return 'ERROR!!!'
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(403)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = False
conf['errorpage.status_codes'] = (403, 404)
conf['errorpage.handle_exceptions'] = False
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=403)
assert 'ERROR!!!' not in resp, resp
def test_custom_500_json(self):
class ErrorController(TGController):
@expose(content_type="text/html")
@expose('json', content_type="application/json")
def document(self, *args, **kw):
return dict(a=5)
class RootController(TGController):
error = ErrorController()
@expose()
def test(self):
abort(500)
conf = AppConfig(minimal=True, root_controller=RootController())
conf['errorpage.enabled'] = True
conf['debug'] = False
conf['errorpage.handle_exceptions'] = False
conf['errorpage.status_codes'] += [500]
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500,
headers={'Accept': 'application/json'})
assert '{"a": 5}' in resp.text, resp
assert 'application/json' == resp.content_type
def test_errorware_configuration(self):
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
return 'HI'
conf = AppConfig(minimal=True, root_controller=RootController())
app = conf.make_wsgi_app(full_stack=True,
**{'trace_errors.error_email': '<EMAIL>'})
app = TestApp(app)
resp = app.get('/test')
assert 'HI' in resp, resp
assert config['tg.errorware']['error_email'] == '<EMAIL>'
assert config['tg.errorware']['error_subject_prefix'] == 'WebApp Error: '
assert config['tg.errorware']['error_message'] == 'An internal server error occurred'
def test_tw2_unsupported_renderer(self):
import tw2.core
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
rl = tw2.core.core.request_local()
tw2conf = rl['middleware'].config
return ','.join(tw2conf.preferred_rendering_engines)
conf = AppConfig(minimal=True, root_controller=RootController())
conf.prefer_toscawidgets2 = True
conf.renderers = ['json', 'kajiki']
conf.default_renderer = 'json'
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test')
assert 'kajiki' in resp, resp
def test_tw2_renderers_preference(self):
import tw2.core
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
rl = tw2.core.core.request_local()
tw2conf = rl['middleware'].config
return ','.join(tw2conf.preferred_rendering_engines)
conf = AppConfig(minimal=True, root_controller=RootController())
conf.prefer_toscawidgets2 = True
conf.renderers = ['kajiki']
conf.default_renderer = 'kajiki'
app = conf.make_wsgi_app(full_stack=True)
app = TestApp(app)
resp = app.get('/test')
assert 'kajiki' in resp, resp
def test_tw2_unsupported(self):
import tw2.core
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
rl = tw2.core.core.request_local()
tw2conf = rl['middleware'].config
return ','.join(tw2conf.preferred_rendering_engines)
conf = AppConfig(minimal=True, root_controller=RootController())
conf.prefer_toscawidgets2 = True
conf.renderers = ['json']
conf.default_renderer = 'json'
try:
app = conf.make_wsgi_app(full_stack=True)
assert False
except TGConfigError as e:
assert 'None of the configured rendering engines' in str(e)
assert 'is supported by ToscaWidgets2, unable to configure ToscaWidgets.' in str(e)
def test_render_factory_success(self):
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
return 'HELLO'
class FailedFactory(RendererFactory):
engines = {'broken': {'content_type': 'text/plain'}}
@classmethod
def create(cls, config, app_globals):
return {'broken': 'BROKEN'}
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_rendering_engine(FailedFactory)
conf.renderers = ['json', 'broken']
app = conf.make_wsgi_app(full_stack=True)
assert config['renderers'] == ['json', 'broken']
assert config['render_functions']['broken'] == 'BROKEN'
def test_render_factory_failure(self):
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
return 'HELLO'
class FailedFactory(RendererFactory):
engines = {'broken': {'content_type': 'text/plain'}}
@classmethod
def create(cls, config, app_globals):
return None
conf = AppConfig(minimal=True, root_controller=RootController())
conf.register_rendering_engine(FailedFactory)
conf.renderers = ['json', 'broken']
conf.make_wsgi_app(full_stack=True)
assert config['renderers'] == ['json']
def test_make_body_seekable(self):
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
request.body_file.seek(0)
return 'HELLO'
conf = AppConfig(minimal=True, root_controller=RootController())
conf['make_body_seekable'] = True
app = conf.make_wsgi_app(full_stack=False)
assert app.application.__class__.__name__ == 'SeekableRequestBodyMiddleware', \
app.application.__class__
app = TestApp(app)
assert 'HELLO' in app.get('/test')
def test_make_body_seekable_disabled(self):
class RootController(TGController):
@expose()
def test(self, *args, **kwargs):
request.body_file.seek(0)
return 'HELLO'
conf = AppConfig(minimal=True, root_controller=RootController())
conf['make_body_seekable'] = False
app = conf.make_wsgi_app(full_stack=False)
app = TestApp(app)
assert 'HELLO' in app.get('/test')
def test_debug_middleware(self):
class RootController(TGController):
@expose()
def test(self):
raise Exception('Crash!')
conf = AppConfig(root_controller=RootController())
conf['errorpage.enabled'] = True
app = conf.make_wsgi_app(debug=True, full_stack=True)
app = TestApp(app)
resp = app.get('/test', status=500, expect_errors=True)
assert 'Exception: Crash! // Backlash' in resp, resp
def test_make_app_with_custom_appglobals(self):
class RootController(TGController):
@expose('')
def test(self, *args, **kwargs):
return tg.app_globals.TEXT
class FakeGlobals(Bunch):
def __init__(self):
super(FakeGlobals, self).__init__()
self['TEXT'] = 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
conf.app_globals = FakeGlobals
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
def test_make_app_with_appglobals_submodule(self):
class RootController(TGController):
@expose('')
def test(self, *args, **kwargs):
return tg.app_globals.text
conf = AppConfig(minimal=True, root_controller=RootController())
from .fixtures import package_with_helpers_submodule
conf['package'] = package_with_helpers_submodule
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!!' in app.get('/test')
def test_make_app_with_custom_helpers(self):
class RootController(TGController):
@expose('')
def test(self, *args, **kwargs):
return config['helpers'].get_text()
class FakeHelpers(object):
@classmethod
def get_text(cls):
return 'HI!'
conf = AppConfig(minimal=True, root_controller=RootController())
conf.helpers = FakeHelpers()
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!' in app.get('/test')
def test_make_app_with_helpers_submodule(self):
class RootController(TGController):
@expose('')
def test(self, *args, **kwargs):
return config['helpers'].get_text()
conf = AppConfig(minimal=True, root_controller=RootController())
from .fixtures import package_with_helpers_submodule
conf['package'] = package_with_helpers_submodule
app = conf.make_wsgi_app()
app = TestApp(app)
assert 'HI!!' in app.get('/test')
|
sky/legacy/scraper.py
|
Asteur/sky_python_crawler
| 325 |
89848
|
<filename>sky/legacy/scraper.py
from training import *
from findLeaf import *
import re
from lxml.html import fromstring
from lxml.html import tostring
def stripReasonableWhite(x):
return re.sub(r"\s+", " ", x).strip()
def splitN(txt, outcome):
# consider splitting to get result
txt = stripReasonableWhite(txt)
outcome = stripReasonableWhite(outcome)
splitables = set(txt.replace(outcome, '', 1)) - set(' ')
options = set()
for s in splitables:
for i, x in enumerate(txt.split(s)):
if stripReasonableWhite(x) == stripReasonableWhite(outcome):
options.add((s, i))
return options
def splitSolution(how):
def solution(txt):
return txt.split(how[0])[how[1]]
return solution
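# A minimal sketch of how the helpers above combine (the literal strings are illustrative
# and not part of the original module): splitN proposes (separator, index) candidates and
# splitSolution turns one of them into an extractor callable.
def _split_demo():
    options = splitN("Total: 25", "25")                 # -> {(':', 1)}
    extract = splitSolution((':', 1))
    return stripReasonableWhite(extract("Total: 25"))   # -> "25"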
def asNumeric(x):
return re.sub("[^0-9]", "", x)
def applySolutionChain(solution, x):
for sol in solution:
if isinstance(sol, dict):
x = x.find(**sol)
else:
x = sol(x)
return x
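# Sketch of a "solution chain" as consumed by applySolutionChain (names are illustrative):
# dict entries become BeautifulSoup .find(**kwargs) lookups, everything else is called as a
# plain function, e.g.
#     applySolutionChain([{'name': 'span'}, BeautifulSoup.get_text, stripReasonableWhite], soup)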
def buildSolution(training):
res = findLeaf(training)
print("len(res)", len(res))
x = findSharedKeyValues(training, res)
print("len(shared)", len(x))
solutions = secondLevelDown(training.soups[0], training.targets[0], x)
print("len(solutions)", len(solutions))
return solutions
def tryUniqueID(c, sp):
return len(sp.findAll(c.name, attrs=c.attrs)) == 1
def buildNewSolution(tr):
childs = []
num = 0
options = []
for soup, target in zip(tr.soups, tr.targets):
print('num',num)
num+=1
for c in soup.findChildren():
try:
if c.name not in ['body', 'html']:
if target in c.text:
childs.append([c, len(c.text)])
except:
pass
tmp = []
for i,x in enumerate(childs[::-1]):
if tryUniqueID(x[0], soup):
attrs = x[0].attrs
attrs['name'] = x[0].name
attrs = {'attrs' : attrs}
if x[0].text == target:
tmp.append((attrs, BeautifulSoup.get_text))
elif stripReasonableWhite(x[0].text) == stripReasonableWhite(target):
tmp.append((attrs, BeautifulSoup.get_text, stripReasonableWhite))
elif splitN(x[0].text, target):
for splitable in splitN(x[0].text, target):
tmp.append((attrs, BeautifulSoup.get_text, splitSolution(splitable)))
else:
print(len([y for y in x[0].children]))
else:
print('not unique', len([y for y in x[0].children]))
options.append(tmp)
good_options = []
if options:
for x in options[0]:
if all(x in y for y in options[1:]):
good_options.append(x)
return good_options
#testAutoScraperSolutions(buildSolution(tr), tr, False)
tr1 = Training("marktplaats-testcase1", "/Users/pascal/egoroot/sky_package/sky/tests/").load()
# tr2 = Training("nieuwsdumper-testcase1", "/Users/pascal/egoroot/sky_package/sky/tests/").load()
tr3 = Training("nieuwsdumper-testcase2", "/Users/pascal/egoroot/sky_package/sky/tests/").load()
# tr4 = Training("bouwmaterieel-testcase1", "/Users/pascal/egoroot/sky_package/sky/tests/").load()
# tr5 = Training('betterdoctor-doctor-referalls', '/Users/pascal/egoroot/sky_package/sky/tests/').load()
tr6 = Training("pypi-author", "/Users/pascal/egoroot/sky_package/sky/tests/").load()
# It really has to be text_content here, otherwise you will miss things!
# plot twist: maybe just use two separate methods
def getMatchedNodes(tr):
matchedLeafs = []
for tree, outcome in zip(tr.trees, tr.targets):
matchedLeaf = {'text' : [], 'tail' : [], 'attrib' : [], 'content' : []}
for x in tree.iter():
if x.text and outcome in x.text:
matchedLeaf['text'].append(x)
if x.tail and outcome in x.tail:
matchedLeaf['tail'].append(x)
if x.attrib and any([outcome in y for y in x.attrib.values()]):
matchedLeaf['attrib'].append(x)
matchedLeafs.append(matchedLeaf)
return matchedLeafs
def getMatchedTextContentNodes(tree, outcome, container):
children = tree.getchildren()
if children:
for c in children:
if outcome in c.text_content():
container.append(c)
getMatchedTextContentNodes(c, outcome, container)
return container
for i in range(1000):
res = getMatchedNodes(tr3)
for tree, outcome, r in zip(tr3.trees, tr3.targets, res):
r['content'] = getMatchedTextContentNodes(tree, outcome, [])
div = fromstring('<div>I have <strong>5</strong> friends</div>')
|
examples/geojson.py
|
Yook74/dash-extensions
| 250 |
89854
|
<gh_stars>100-1000
import dash
import dash_html_components as html
import json
import dash_leaflet as dl
from examples import geojson_csf
from dash_extensions.transpile import inject_js, module_to_props
# Create geojson.
with open("assets/us-states.json", 'r') as f:
data = json.load(f)
js = module_to_props(geojson_csf) # do transcrypt to enable passing python functions as props
geojson = dl.GeoJSON(data=data, id="geojson", options=dict(style=geojson_csf.style),
hoverStyle=geojson_csf.hover_style)
# Create app.
app = dash.Dash(prevent_initial_callbacks=True)
app.layout = html.Div([dl.Map(children=[dl.TileLayer(), geojson], center=[39, -98], zoom=4, id="map")],
style={'width': '100%', 'height': '50vh', 'margin': "auto", "display": "block"})
# Inject transcrypted javascript.
inject_js(app, js)
if __name__ == '__main__':
app.run_server(port=7777, debug=True)
|
examples/magic_app_example.py
|
Jan-Zeiseweis/pycaw
| 234 |
89897
|
"""
Note
----
'import pycaw.magic' should generally be placed at the very top.
To be more specific:
it needs to be imported before any other pycaw or comtypes import.
Reserved Attributes
-------------------
Note that certain methods and attributes are reserved for the magic module.
Please look into the source code of MagicApp for more information.
But to avoid conflicts now and in the future, I recommend using
a prefix for each of your custom methods and attributes.
Features
--------
Instantiate a new MagicApp with one or more app executables:
magic = MagicApp({"msedge.exe", "another.exe"})
--------
you could also inherit from MagicApp and create customized callbacks:
class MyCustomApp(MagicApp):
def __init__(self, app_execs):
super().__init__(app_execs,
volume_callback=self.custom_volume_callback,
mute_callback=self...,
state_callback=self...,
session_callback=self...)
def custom_volume_callback(self, volume):
print(volume)
print(self.mute)
self.mute = True
print(self.mute)
mega_magic = MyCustomApp({"msedge.exe"})
"""
import time
from contextlib import suppress
from pycaw.magic import MagicApp
def handle_all(*args):
print("callback")
print(args)
magic = MagicApp({"msedge.exe"},
volume_callback=handle_all,
mute_callback=handle_all,
state_callback=handle_all,
session_callback=handle_all)
def main():
with suppress(KeyboardInterrupt):
for _ in range(5):
"""
open and close your MagicApp app_exec (msedge.exe)
and see how it will change the volume as long as
the app is open. When you close app_exec it won't change
the volume and None is printed.
If you change, for example, the volume in the Windows sound mixer,
handle_all() is fired.
"""
if magic.state is None:
print(f"No session active for: {magic}")
time.sleep(2)
continue
print("Volume:")
magic.volume = 0.1
print(magic.volume)
time.sleep(1)
magic.volume = 0.9
print(magic.volume)
time.sleep(1)
print(f"{str(magic.state)} {magic.app_execs}")
print("Mute:")
magic.mute = True
print(magic.mute)
time.sleep(1)
magic.mute = False
print(magic.mute)
print("\nTschüss")
if __name__ == '__main__':
main()
|
RecoBTag/PerformanceDB/python/BTagPerformanceDB1012.py
|
ckamtsikis/cmssw
| 852 |
89919
|
<filename>RecoBTag/PerformanceDB/python/BTagPerformanceDB1012.py
from RecoBTag.PerformanceDB.measure.Btag_mistag101220 import *
|
guillotina/json/serialize_value.py
|
rboixaderg/guillotina
| 173 |
89936
|
<reponame>rboixaderg/guillotina
# -*- coding: utf-8 -*-
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from decimal import Decimal
from guillotina import configure
from guillotina.component import query_adapter
from guillotina.i18n import Message
from guillotina.interfaces import IValueToJson
from guillotina.profile import profilable
from guillotina.schema.vocabulary import SimpleVocabulary
_MISSING = object()
@profilable
def json_compatible(value):
if value is None:
return value
type_ = type(value)
if type_ in (str, bool, int, float):
return value
result_value = query_adapter(value, IValueToJson, default=_MISSING)
if result_value is _MISSING:
raise TypeError("No converter for making" " {0!r} ({1}) JSON compatible.".format(value, type(value)))
else:
return result_value
@configure.value_serializer(SimpleVocabulary)
def vocabulary_converter(value):
return [x.token for x in value]
@configure.value_serializer(str)
def string_converter(value):
return str(value)
@configure.value_serializer(bytes)
def bytes_converter(value):
return str(value, encoding="utf-8")
@configure.value_serializer(list)
def list_converter(value):
return list(map(json_compatible, value))
@configure.value_serializer(tuple)
def tuple_converter(value):
return list(map(json_compatible, value))
@configure.value_serializer(frozenset)
def frozenset_converter(value):
return list(map(json_compatible, value))
@configure.value_serializer(set)
def set_converter(value):
return list(map(json_compatible, value))
@configure.value_serializer(dict)
def dict_converter(value):
if value == {}:
return {}
keys, values = zip(*value.items())
keys = map(json_compatible, keys)
values = map(json_compatible, values)
return dict(zip(keys, values))
@configure.value_serializer(datetime)
def python_datetime_converter(value):
try:
return value.isoformat()
except AttributeError: # handle date problems
return None
@configure.value_serializer(date)
def date_converter(value):
return value.isoformat()
@configure.value_serializer(time)
def time_converter(value):
return value.isoformat()
@configure.value_serializer(timedelta)
def timedelta_converter(value):
return value.total_seconds()
@configure.value_serializer(Message)
def i18n_message_converter(value):
# TODO:
# value = translate(value, context=getRequest())
return value
@configure.value_serializer(Decimal)
def decimal_converter(value):
return str(value)
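# A minimal sketch with hypothetical data. It assumes the @configure.value_serializer
# registrations above have been scanned by guillotina at startup; without them,
# query_adapter() finds no IValueToJson adapter and json_compatible() raises TypeError.
def _json_compatible_demo():
    sample = {"when": datetime(2020, 1, 1), "tags": frozenset({"a"}), "price": Decimal("9.90")}
    # dict_converter recurses into keys and values, which in turn hit
    # python_datetime_converter, frozenset_converter and decimal_converter,
    # yielding {"when": "2020-01-01T00:00:00", "tags": ["a"], "price": "9.90"}.
    return json_compatible(sample)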
|
test/hummingbot/client/config/test_config_security.py
|
BGTCapital/hummingbot
| 3,027 |
89949
|
#!/usr/bin/env python
import unittest
from hummingbot.client.config.security import Security
from hummingbot.client import settings
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.config.config_crypt import encrypt_n_save_config_value
import os
import shutil
import asyncio
temp_folder = "conf_testing_temp/"
class ConfigSecurityNewPasswordUnitTest(unittest.TestCase):
def setUp(self):
settings.CONF_FILE_PATH = temp_folder
global_config_map["key_file_path"].value = temp_folder
os.makedirs(settings.CONF_FILE_PATH, exist_ok=False)
def tearDown(self):
shutil.rmtree(temp_folder)
def test_new_password_process(self):
# empty folder, new password is required
self.assertFalse(Security.any_encryped_files())
self.assertTrue(Security.new_password_required())
# login will pass with any password
result = Security.login("a")
self.assertTrue(result)
Security.update_secure_config("new_key", "new_value")
self.assertTrue(os.path.exists(f"{temp_folder}encrypted_new_key.json"))
self.assertTrue(Security.encrypted_file_exists("new_key"))
class ConfigSecurityExistingPasswordUnitTest(unittest.TestCase):
def setUp(self):
settings.CONF_FILE_PATH = temp_folder
global_config_map["key_file_path"].value = temp_folder
os.makedirs(settings.CONF_FILE_PATH, exist_ok=False)
encrypt_n_save_config_value("test_key_1", "test_value_1", "a")
encrypt_n_save_config_value("test_key_2", "test_value_2", "a")
def tearDown(self):
shutil.rmtree(temp_folder)
async def _test_existing_password(self):
# check the 2 encrypted files exist
self.assertTrue(os.path.exists(f"{temp_folder}encrypted_test_key_1.json"))
self.assertTrue(os.path.exists(f"{temp_folder}encrypted_test_key_2.json"))
self.assertTrue(Security.any_encryped_files())
self.assertFalse(Security.new_password_required())
# login fails with incorrect password
result = Security.login("b")
self.assertFalse(result)
# login passes with correct password
result = Security.login("a")
self.assertTrue(result)
# right after logging in, the decryption shouldn't be finished yet
self.assertFalse(Security.is_decryption_done())
await Security.wait_til_decryption_done()
self.assertEqual(len(Security.all_decrypted_values()), 2)
config_value = Security.decrypted_value("test_key_1")
self.assertEqual("test_value_1", config_value)
Security.update_secure_config("test_key_1", "new_value")
self.assertEqual("new_value", Security.decrypted_value("test_key_1"))
def test_existing_password(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_existing_password())
|
Awesome-face-operations/Face Clustering/Script/constants.py
|
swapnilgarg7/Face-X
| 175 |
89968
|
import os
# 'face_cluster' (directory) is the path component joined to the current working directory.
FACE_DATA_PATH = os.path.join(os.getcwd(),'face_cluster')
ENCODINGS_PATH = os.path.join(os.getcwd(),'encodings.pickle')
CLUSTERING_RESULT_PATH = os.getcwd()
|
encoder/esim.py
|
zhufz/nlp_research
| 160 |
89971
|
<reponame>zhufz/nlp_research<gh_stars>100-1000
#-*- coding:utf-8 -*-
import keras
import tensorflow as tf
from keras.layers import *
from keras.activations import softmax
from keras.models import Model
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.utils import multi_gpu_model
from encoder import EncoderBase
#refer:https://arxiv.org/abs/1609.06038
class ESIM(EncoderBase):
def __init__(self, **kwargs):
super(ESIM, self).__init__(**kwargs)
self.embedding_size = kwargs['embedding_size']
self.recurrent_units = 300
self.dense_units = 300
def update_features(self, features):
pass
def __call__(self, x_query, x_sample, reuse = tf.AUTO_REUSE, **kwargs):
#embedding_sequence_q1 = BatchNormalization(axis=2)(x_query)
#embedding_sequence_q2 = BatchNormalization(axis=2)(x_sample)
#final_embedding_sequence_q1 = SpatialDropout1D(0.25)(embedding_sequence_q1)
#final_embedding_sequence_q2 = SpatialDropout1D(0.25)(embedding_sequence_q2)
#################### input encoding #######################
# bidirectionally encode the query and the sample separately
rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_query)
rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_sample)
#rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q1)
#rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q2)
############## local inference modeling ###################
# compute dot-product attention between the two encoded sequences
attention = Dot(axes=-1)([rnn_layer_q1, rnn_layer_q2])
# compute the attention-aligned representations for the query and the sample
w_attn_1 = Lambda(lambda x: softmax(x, axis=1))(attention)
w_attn_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2))(attention))
align_layer_1 = Dot(axes=1)([w_attn_1, rnn_layer_q1])
align_layer_2 = Dot(axes=1)([w_attn_2, rnn_layer_q2])
############# inference composition #######################
subtract_layer_1 = subtract([rnn_layer_q1, align_layer_1])
subtract_layer_2 = subtract([rnn_layer_q2, align_layer_2])
multiply_layer_1 = multiply([rnn_layer_q1, align_layer_1])
multiply_layer_2 = multiply([rnn_layer_q2, align_layer_2])
m_q1 = concatenate([rnn_layer_q1, align_layer_1, subtract_layer_1, multiply_layer_1])
m_q2 = concatenate([rnn_layer_q2, align_layer_2, subtract_layer_2, multiply_layer_2])
############### composition encoding + pooling #######################
v_q1_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q1)
v_q2_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q2)
avgpool_q1 = GlobalAveragePooling1D()(v_q1_i)
avgpool_q2 = GlobalAveragePooling1D()(v_q2_i)
maxpool_q1 = GlobalMaxPooling1D()(v_q1_i)
maxpool_q2 = GlobalMaxPooling1D()(v_q2_i)
merged_q1 = concatenate([avgpool_q1, maxpool_q1])
merged_q2 = concatenate([avgpool_q2, maxpool_q2])
final_v = BatchNormalization()(concatenate([merged_q1, merged_q2]))
#output = Dense(units=self.dense_units, activation='relu')(final_v)
output = Dense(units=self.num_output, activation=None)(final_v)
#output = BatchNormalization()(output)
#output = Dropout(self.dropout_rate)(output)
#output = tf.nn.dropout(output, self.keep_prob)
# the high-level APIs tf.layers.dropout and Keras Dropout take a dropout rate,
# while tf.nn.dropout takes keep_prob
#output = Dense(units=self.num_output, activation='sigmoid')(output)
#output = Dense(units=self.num_output, activation=None)(output)
#output = tf.squeeze(output, -1)
return output
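# Minimal usage sketch (shapes and extra kwargs are assumptions; num_output and similar
# fields are expected to be provided through EncoderBase, which is not shown here):
#     esim = ESIM(embedding_size=300, num_output=2, ...)
#     logits = esim(query_embeddings, sample_embeddings)
# where both inputs are [batch_size, seq_len, embedding_size] tensors of embedded tokens.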
|
molotov/tests/test_sharedconsole.py
|
jldiaz-uniovi/molotov
| 401 |
89981
|
<reponame>jldiaz-uniovi/molotov<filename>molotov/tests/test_sharedconsole.py
import unittest
import asyncio
import sys
import os
import re
import io
from molotov.util import multiprocessing
from molotov.sharedconsole import SharedConsole
from molotov.tests.support import dedicatedloop, catch_output
OUTPUT = """\
one
two
3
TypeError\\("unsupported operand type(.*)?
TypeError\\("unsupported operand type.*"""
# pre-forked variable
_CONSOLE = SharedConsole(interval=0.0)
_PROC = []
def run_worker(input):
if os.getpid() not in _PROC:
_PROC.append(os.getpid())
_CONSOLE.print("hello")
try:
3 + ""
except Exception:
_CONSOLE.print_error("meh")
with catch_output() as (stdout, stderr):
loop = asyncio.new_event_loop()
fut = asyncio.ensure_future(_CONSOLE.display(), loop=loop)
loop.run_until_complete(fut)
loop.close()
stdout = stdout.read()
assert stdout == "", stdout
class TestSharedConsole(unittest.TestCase):
@dedicatedloop
def test_simple_usage(self):
test_loop = asyncio.get_event_loop()
stream = io.StringIO()
console = SharedConsole(interval=0.0, stream=stream)
async def add_lines():
console.print("one")
console.print("two")
console.print("3")
try:
1 + "e"
except Exception as e:
console.print_error(e)
console.print_error(e, sys.exc_info()[2])
await asyncio.sleep(0.2)
await console.stop()
with catch_output() as (stdout, stderr):
adder = asyncio.ensure_future(add_lines())
displayer = asyncio.ensure_future(console.display())
test_loop.run_until_complete(asyncio.gather(adder, displayer))
stream.seek(0)
output = stream.read()
test_loop.close()
self.assertTrue(re.match(OUTPUT, output, re.S | re.M) is not None, output)
@unittest.skipIf(os.name == "nt", "win32")
@dedicatedloop
def test_multiprocess(self):
test_loop = asyncio.get_event_loop()
# now let's try with several processes
pool = multiprocessing.Pool(3)
try:
inputs = [1] * 3
pool.map(run_worker, inputs)
finally:
pool.close()
async def stop():
await asyncio.sleep(1)
await _CONSOLE.stop()
with catch_output() as (stdout, stderr):
stop = asyncio.ensure_future(stop())
display = asyncio.ensure_future(_CONSOLE.display())
test_loop.run_until_complete(asyncio.gather(stop, display))
output = stdout.read()
for pid in _PROC:
self.assertTrue("[%d]" % pid in output)
test_loop.close()
|
Linux/etc/decript.py
|
Dave360-crypto/Oblivion
| 339 |
89988
|
#!/usr/bin/python
import os
import pathlib
from cryptography.fernet import Fernet
# Global variables.
path_atual_dc = str(pathlib.Path(__file__).parent.absolute())
path_dc_final = path_atual_dc.replace('/etc','')
def decript_file(arquivo, chave=None):
"""
Decrypt a file.
:param arquivo: Path to the file.
:param chave: Decryption key.
"""
if chave == None:
with open(f'{path_dc_final}/etc/key_crypt.txt', 'r') as pegar_key:
key = pegar_key.read()
input_file = arquivo #+ '.encrypted'
output_file = arquivo
with open(input_file, 'rb') as f:
data = f.read()
fernet = Fernet(key)
decrypted = fernet.decrypt(data)
with open(output_file, 'wb') as f:
f.write(decrypted)
arquivo_f = str(arquivo)
arquivo_f = arquivo_f.replace('.encrypted', '')
os.rename(arquivo, arquivo_f)
else:
try:
key = str(chave)
input_file = arquivo
output_file = arquivo
with open(input_file, 'rb') as f:
data = f.read()
fernet = Fernet(key)
try:
decrypted = fernet.decrypt(data)
with open(output_file, 'wb') as f:
f.write(decrypted)
arquivo_f = str(arquivo)
arquivo_f = arquivo_f.replace('.encrypted', '')
os.rename(arquivo, arquivo_f)
except:
pass
except:
pass
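# Minimal usage sketch (hypothetical path; `chave` must be the same Fernet key, passed as a
# str, that was used to encrypt the file):
#     decript_file('/tmp/secret.txt.encrypted', chave=my_key_str)
# With chave=None the key is read instead from <project>/etc/key_crypt.txt, and the
# '.encrypted' suffix is dropped from the file name after decryption.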
|
venv/lib/python3.9/site-packages/pendulum/parser.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
| 224 |
90036
|
# -*- coding: utf-8 -*-
from __future__ import division
from .parsing import Parser as BaseParser
from .tz import UTC
from .pendulum import Pendulum
from .date import Date
from .time import Time
from ._global import Global
class Parser(BaseParser):
"""
Parser that returns known types (Pendulum, Date, Time)
"""
def parse(self, text):
"""
Parses a string with the given options.
:param text: The string to parse.
:type text: str
:rtype: mixed
"""
# Handling special cases
if text == 'now':
return Pendulum.now()
parsed = super(Parser, self).parse(text)
if not self.is_exact():
return self._create_pendulum_object(parsed)
# Checking for date
if 'year' in parsed:
# Checking for time
if 'hour' in parsed:
return self._create_pendulum_object(parsed)
else:
return self._create_date_object(parsed)
return self._create_time_object(parsed)
def _create_pendulum_object(self, parsed):
if parsed['offset'] is None:
tz = self._options.get('tz', UTC)
else:
tz = parsed['offset'] / 3600
return Pendulum(
parsed['year'], parsed['month'], parsed['day'],
parsed['hour'], parsed['minute'], parsed['second'],
parsed['subsecond'],
tzinfo=tz
)
def _create_date_object(self, parsed):
return Date(
parsed['year'], parsed['month'], parsed['day']
)
def _create_time_object(self, parsed):
return Time(
parsed['hour'], parsed['minute'], parsed['second'],
parsed['subsecond']
)
def parse(text, **options):
# Use the mock now value if it exists
options['now'] = options.get('now', Global.get_test_now())
return Parser(**options).parse(text)
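# Minimal usage sketch (values are illustrative):
#     parse('2016-10-05T14:30:00+02:00')  # -> Pendulum at a +02:00 offset
#     parse('14:30:15')                   # -> Time (no 'year' in the parsed result)
#     parse('now')                        # -> Pendulum.now()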
|
source/python-vsmclient/vsmclient/v1/mdses.py
|
ramkrsna/virtual-storage-manager
| 172 |
90071
|
# Copyright 2014 Intel Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
MDSes interface.
"""
import urllib
from vsmclient import base
class Mds(base.Resource):
"""A mds stores metadata on behalf of the Ceph Filesystem."""
def __repr__(self):
return "<MDS: %s>" % self.id
def delete(self):
"""Delete this mds."""
self.manager.delete(self)
class MdsesManager(base.ManagerWithFind):
"""
Manage :class:`MDS` resources.
"""
resource_class = Mds
def get(self, mds_id):
"""
Get a mds.
:param mds_id: The ID of the mds.
:rtype: :class:`MDS`
"""
return self._get("/mdses/%s" % mds_id, "mds")
def list(self, detailed=False, search_opts=None):
"""
Get a list of all mdses.
:rtype: list of :class:`MDS`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in search_opts.iteritems():
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
ret = self._list("/mdses%s%s" % (detail, query_string),
"mdses")
return ret
def restart(self, mds):
self._action('restart', mds)
def remove(self, mds):
self._action('remove', mds)
def delete(self, mds):
self._delete("/mdses/%s" % base.getid(mds))
def restore(self, mds):
self._action('restore', mds)
def summary(self):
"""
summary
"""
url = "/mdses/summary"
return self._get(url, 'mds-summary')
def _action(self, action, mds, info=None, **kwargs):
"""
Perform a mds "action."
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/mdses/%s/action' % base.getid(mds)
return self.api.client.post(url, body=body)
|
src/job-exporter/test/test_amd.py
|
luxius-luminus/pai
| 1,417 |
90103
|
<reponame>luxius-luminus/pai
import os
import sys
import unittest
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../src"))
import amd
PACKAGE_DIRECTORY_COM = os.path.dirname(os.path.abspath(__file__))
class TestAmd(unittest.TestCase):
def setUp(self):
try:
os.chdir(PACKAGE_DIRECTORY_COM)
except OSError:
pass
def test_parse_rocm_smi_result(self):
sample_path = "data/rocm_smi.json"
with open(sample_path, "r") as f:
rocm_smi_result = f.read()
rocm_smi_parse_result = amd.parse_smi_json_result(rocm_smi_result)
expect = [{
"pci_addr": "0000:03:00.0",
"temperature": 31
}, {
"pci_addr": "0000:06:00.0",
"temperature": 25
}]
for e, v in zip(expect, rocm_smi_parse_result.values()):
self.assertEqual(e["pci_addr"], v.pci_addr)
self.assertEqual(e["temperature"], v.temperature)
if __name__ == '__main__':
unittest.main()
|
chariot/resource/data_file.py
|
Y-Kuro-u/chariot
| 134 |
90108
|
<reponame>Y-Kuro-u/chariot
import os
import mmap
from chariot.util import xtqdm
class DataFile():
def __init__(self, path, encoding="utf-8"):
self.path = path
self.encoding = encoding
file_name = os.path.basename(path)
base_name, ext = os.path.splitext(file_name)
self.base_name = base_name
self.ext = ext
def exists(self):
return os.path.exists(self.path)
def convert(self, data_dir_to="", add_attribute="",
attribute_to=(), ext_to=""):
_dir = os.path.dirname(self.path)
elements = self._elements()
ext = self.ext
if data_dir_to:
_dir = os.path.join(_dir, "../" + data_dir_to)
if add_attribute:
elements.append(add_attribute)
elif len(attribute_to) > 0:
# File name format is name + "__".join(attributes)
# So attribute is elements[1:]
for a in attribute_to:
if a in elements[1:]:
index = elements[1:].index(a)
elements[1 + index] = attribute_to[a]
if ext_to:
ext = ext_to
base_name = "__".join(elements)
new_path = os.path.join(_dir, base_name + ext)
return self.__class__(new_path)
    @property
    def name(self):
        return self._elements()[0]
    @property
    def attributes(self):
        return self._elements()[1:]
def _elements(self):
elements = self.base_name.split("__")
return elements
def get_line_count(self):
count = 0
with open(self.path, "r+") as f:
buf = mmap.mmap(f.fileno(), 0)
while buf.readline():
count += 1
return count
def fetch(self, progress=False):
total_count = 0
if progress:
total_count = self.get_line_count()
with open(self.path, encoding=self.encoding) as f:
iterator = f
if progress:
iterator = xtqdm(f, total=total_count)
for line in iterator:
yield line.strip()
def to_array(self):
lines = []
with open(self.path, encoding=self.encoding) as f:
lines = f.readlines()
lines = [ln.strip() for ln in lines]
return lines
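# Usage sketch (illustrative; the path below is a hypothetical placeholder).
# File names follow the ``name__attribute1__attribute2.ext`` convention:
#
#     f = DataFile('data/corpus__train.txt')
#     f.name                                     # 'corpus'
#     f.attributes                               # ['train']
#     f.convert(attribute_to={'train': 'test'})  # DataFile for data/corpus__test.txt
#     f.convert(add_attribute='tokenized')       # DataFile for data/corpus__train__tokenized.txt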
|
ipysheet/easy.py
|
kdop-dev/ipysheet
| 495 |
90132
|
"""Easy context-based interface for generating a sheet and cells.
Comparable to matplotlib pylab interface, this interface keeps track of the current
sheet. Using the ``cell`` function, ``Cell`` widgets are added to the current sheet.
"""
__all__ = ['sheet', 'current', 'cell', 'calculation', 'row', 'column', 'cell_range', 'hold_cells', 'renderer']
import numbers
import six
from contextlib import contextmanager
import ipywidgets as widgets
from .sheet import Cell, Sheet, Renderer
from .utils import transpose as default_transpose
from .utils import adapt_value
from .docutils import doc_subst
_last_sheet = None
_sheets = {} # maps from key to Sheet instance
_hold_cells = False  # when True (inside hold_cells()) cells are not added to the sheet directly
_cells = ()  # cells held back while _hold_cells is True
_common_doc = {
'args': """
    type (string): Type of cell, options are: text, numeric, checkbox, dropdown, date, widget.
If type is None, the type is inferred from the type of the value being passed,
numeric (float or int type), boolean (bool type), widget (any widget object), or else text.
When choice is given the type will be assumed to be dropdown.
The types refer (currently) to the handsontable types: https://handsontable.com/docs/6.2.2/demo-custom-renderers.html
color (string): The text color in the cell
background_color (string): The background color in the cell
read_only (bool): Whether the cell is editable or not
numeric_format (string): Numbers format
date_format (string): Dates format
time_format (string): Time format
renderer (string): Renderer name to use for the cell
"""
}
def sheet(key=None, rows=5, columns=5, column_width=None, row_headers=True, column_headers=True,
stretch_headers='all', cls=Sheet, **kwargs):
"""Creates a new ``Sheet`` instance or retrieves one registered with key, and sets this as the 'current'.
If the key argument is given, and no sheet is created before with this key, it will be registered under
this key. If this function is called again with the same key argument, that ``Sheet`` instance
will be returned.
Args:
key (string): If not used before, register the sheet under this key. If used before, return the
previous ``Sheet`` instance registered with this key.
rows (int): The number of rows in the sheet
columns (int): The number of columns in the sheet
row_headers (bool, list): Either a boolean specifying if row headers should be displayed or not,
or a list of strings containing the row headers
column_headers (bool, list): Either a boolean specifying if column headers should be displayed or not,
or a list of strings containing the column headers
Returns:
The new ``Sheet`` widget, or if key is given, the previously created sheet registered with this key.
Example:
>>> from ipysheet import sheet, current
>>>
>>> s1 = sheet('key1')
>>> s2 = sheet('key2')
>>>
>>> assert s2 is current()
>>> assert s1 is sheet('key1')
>>> assert s1 is current()
"""
global _last_sheet
if isinstance(key, Sheet):
_last_sheet = key
elif key is None or key not in _sheets:
_last_sheet = cls(rows=rows, columns=columns, column_width=column_width,
row_headers=row_headers, column_headers=column_headers,
stretch_headers=stretch_headers, **kwargs)
if key is not None:
_sheets[key] = _last_sheet
else:
_last_sheet = _sheets[key]
return _last_sheet
def current():
"""
Returns:
the current ``Sheet`` instance
"""
return _last_sheet
@doc_subst(_common_doc)
def cell(row, column, value=0., type=None, color=None, background_color=None,
font_style=None, font_weight=None, style=None, label_left=None, choice=None,
read_only=False, numeric_format='0.000', date_format='YYYY/MM/DD', renderer=None, **kwargs):
"""Adds a new ``Cell`` widget to the current ``Sheet``
Args:
row (int): Zero based row index where to put the cell in the sheet
column (int): Zero based column index where to put the cell in the sheet
value (int, float, string, bool, Widget): The value of the cell
{args}
Returns:
The new ``Cell`` widget.
Example:
>>> from ipysheet import sheet, cell
>>>
>>> s1 = sheet()
>>> cell(0, 0, 36.) # The Cell type will be 'numeric'
>>> cell(1, 0, True) # The Cell type will be 'checkbox'
>>> cell(0, 1, 'Hello World!') # The Cell type will be 'text'
>>> c = cell(1, 1, True)
>>> c.value = False # Dynamically changing the cell value at row=1, column=1
"""
global _cells
if type is None:
if isinstance(value, bool):
type = 'checkbox'
elif isinstance(value, numbers.Number):
type = 'numeric'
elif isinstance(value, widgets.Widget):
type = 'widget'
else:
type = 'text'
if choice is not None:
type = 'dropdown'
style = style or {}
if color is not None:
style['color'] = color
if background_color is not None:
style['backgroundColor'] = background_color
if font_style is not None:
style['fontStyle'] = font_style
if font_weight is not None:
style['fontWeight'] = font_weight
c = Cell(value=value, row_start=row, column_start=column, row_end=row, column_end=column,
squeeze_row=True, squeeze_column=True, type=type, style=style, choice=choice,
read_only=read_only, numeric_format=numeric_format, date_format=date_format,
renderer=renderer, **kwargs)
if _hold_cells:
_cells += (c,)
else:
_last_sheet.cells = _last_sheet.cells+(c,)
if label_left:
if column-1 < 0:
raise IndexError("cannot put label to the left of column 0")
cell(row, column-1, value=label_left, font_weight='bold')
return c
@doc_subst(_common_doc)
def row(row, value, column_start=0, column_end=None, type=None, color=None, background_color=None,
font_style=None, font_weight=None, style=None, choice=None,
read_only=False, numeric_format='0.000', date_format='YYYY/MM/DD', renderer=None, **kwargs):
"""Create a ``Cell`` widget, representing multiple cells in a sheet, in a horizontal row
Args:
row (int): Zero based row index where to put the row in the sheet
value (list): The list of cell values representing the row
column_start (int): Which column the row will start, default 0.
column_end (int): Which column the row will end, default is the last.
{args}
Returns:
The new ``Cell`` widget.
Example:
>>> from ipysheet import sheet, row
>>>
>>> s1 = sheet()
>>> row(0, [1, 2, 3, 34, 5]) # The Cell type will be 'numeric'
>>> row(1, [True, False, True], column_start=2) # The Cell type will be 'checkbox'
"""
return cell_range(value, column_start=column_start, column_end=column_end, row_start=row, row_end=row,
squeeze_row=True, squeeze_column=False,
color=color, background_color=background_color,
font_style=font_style, font_weight=font_weight, style=style, type=type, choice=choice,
read_only=read_only, numeric_format=numeric_format, date_format=date_format, renderer=renderer, **kwargs)
@doc_subst(_common_doc)
def column(column, value, row_start=0, row_end=None, type=None, color=None, background_color=None,
font_style=None, font_weight=None, style=None, choice=None,
read_only=False, numeric_format='0.000', date_format='YYYY/MM/DD', renderer=None, **kwargs):
"""Create a ``Cell`` widget, representing multiple cells in a sheet, in a vertical column
Args:
column (int): Zero based column index where to put the column in the sheet
value (list): The list of cell values representing the column
row_start (int): Which row the column will start, default 0.
row_end (int): Which row the column will end, default is the last.
{args}
Returns:
The new ``Cell`` widget.
Example:
>>> from ipysheet import sheet, column
>>>
>>> s1 = sheet()
>>> column(0, [1, 2, 3, 34, 5]) # The Cell type will be 'numeric'
>>> column(1, [True, False, True], row_start=2) # The Cell type will be 'checkbox'
"""
return cell_range(value, column_start=column, column_end=column, row_start=row_start, row_end=row_end,
squeeze_row=False, squeeze_column=True, style=style, choice=choice,
read_only=read_only, numeric_format=numeric_format, date_format=date_format, renderer=renderer,
color=color, background_color=background_color, type=type,
font_style=font_style, font_weight=font_weight, **kwargs)
@doc_subst(_common_doc)
def cell_range(value,
row_start=0, column_start=0, row_end=None, column_end=None, transpose=False,
squeeze_row=False, squeeze_column=False, type=None, color=None, background_color=None,
font_style=None, font_weight=None, style=None, choice=None,
read_only=False, numeric_format='0.000', date_format='YYYY/MM/DD', renderer=None, **kwargs):
"""Create a ``Cell`` widget, representing multiple cells in a sheet
Args:
value (list): The list of cell values representing the range
row_start (int): Which row the range will start, default 0.
column_start (int): Which column the range will start, default 0.
row_end (int): Which row the range will end, default is the last.
column_end (int): Which column the range will end, default is the last.
transpose (bool): Whether to interpret the value array as value[column_index][row_index] or not.
squeeze_row (bool): Take out the row dimensions, meaning only value[column_index] is used.
squeeze_column (bool): Take out the column dimensions, meaning only value[row_index] is used.
{args}
Returns:
The new ``Cell`` widget.
Example:
>>> from ipysheet import sheet, cell_range
>>>
>>> s1 = sheet()
>>> cell_range([[1, 2, 3, 34, 5], [6, 7, 8, 89, 10]])
"""
global _cells
value_original = value
value = adapt_value(value)
# instead of an if statements, we just use T to transpose or not when needed
T = (lambda x: x) if not transpose else default_transpose
# we work with the optionally transposed values for simplicity
value = T(value)
if squeeze_row:
value = [value]
if squeeze_column:
value = [[k] for k in value]
if row_end is None:
row_end = row_start + len(value) - 1
row_length = row_end - row_start + 1
if row_length != len(value):
raise ValueError("length or array doesn't match number of rows")
if row_length == 0:
raise ValueError("0 rows not supported")
if column_end is None:
column_end = column_start + len(value[0]) - 1
column_length = column_end - column_start + 1
if column_length == 0:
raise ValueError("0 columns not supported")
for row in value:
if column_length != len(row):
raise ValueError("not a regular matrix, columns lengths differ")
if row_start + row_length > _last_sheet.rows:
raise ValueError("array will go outside of sheet, too many rows")
if column_start + column_length > _last_sheet.columns:
raise ValueError("array will go outside of sheet, too many columns")
    # see if we can infer a type from the data, otherwise leave it None
if type is None:
type_check_map = [('checkbox', lambda x: isinstance(x, bool)),
('numeric', lambda x: isinstance(x, numbers.Number)),
('text', lambda x: isinstance(x, six.string_types)),
('widget', lambda x: isinstance(x, widgets.Widget)),
]
for type_check, check in type_check_map:
checks = True # ok until proven wrong
for i in range(row_length):
for j in range(column_length):
if not check(value[i][j]):
checks = False
if checks: # we found a matching type
type = type_check
break
style = style or {}
if color is not None:
style['color'] = color
if background_color is not None:
style['backgroundColor'] = background_color
if font_style is not None:
style['fontStyle'] = font_style
if font_weight is not None:
style['fontWeight'] = font_weight
c = Cell(value=value_original, row_start=row_start, column_start=column_start, row_end=row_end, column_end=column_end,
squeeze_row=squeeze_row, squeeze_column=squeeze_column, transpose=transpose, type=type,
read_only=read_only, choice=choice, renderer=renderer, numeric_format=numeric_format, date_format=date_format,
style=style, **kwargs)
if _hold_cells:
_cells += (c,)
else:
_last_sheet.cells = _last_sheet.cells+(c,)
return c
def renderer(code, name):
"""Create a ``Renderer`` widget
Args:
code (string or code or function object): If a string object, it is assumed to be a JavaScript
snippet, else it is assumed to be a function or code object and will be transpiled to
javascript using flexxui/pscript.
name (string): Name of the renderer
Returns:
The new ``Renderer`` widget.
Example:
>>> from ipysheet import sheet, renderer, cell
>>>
>>> s1 = sheet()
>>>
>>> def renderer_negative(instance, td, row, col, prop, value, cellProperties):
>>> Handsontable.renderers.TextRenderer.apply(this, arguments);
>>> if value < 0:
>>> td.style.backgroundColor = 'orange'
>>> else:
>>> td.style.backgroundColor = ''
>>>
>>> renderer(code=renderer_negative, name='negative');
>>> cell(0, 0, 36, renderer='negative') # Will be white
>>> cell(1, 0, -36, renderer='negative') # Will be orange
"""
if not isinstance(code, six.string_types):
from pscript import py2js
code_transpiled = py2js(code, new_name='the_renderer', indent=4)
code = '''
function() {
%s
return the_renderer
}()
''' % code_transpiled
renderer = Renderer(code=code, name=name)
return renderer
def _assign(object, value):
if isinstance(object, widgets.Widget):
object, trait = object, 'value'
else:
object, trait = object
setattr(object, trait, value)
def calculation(inputs, output, initial_calculation=True):
"""A decorator that assigns to output cell a calculation depending on the inputs
Args:
inputs (list of widgets, or (widget, 'traitname') pairs): List of all widget, whose
values (default 'value', otherwise specified by 'traitname') are input of the function
that is decorated
output (widget or (widget, 'traitname')): The output of the decorator function will be
assigned to output.value or output.<traitname>.
initial_calculation (bool): When True the calculation will be done
directly for the first time.
Example:
>>> from ipywidgets import IntSlider
>>> from ipysheet import cell, calculation
>>>
>>> a = cell(0, 0, value=1)
>>> b = cell(1, 0, value=IntSlider(value=2))
>>> c = IntSlider(max=56)
>>> d = cell(3, 0, value=1)
>>>
>>> @calculation(inputs=[a, (b, 'value'), (c, 'max')], output=d)
>>> def add(a, b, c):
>>> return a + b + c
"""
def decorator(f):
def get_value(input):
if isinstance(input, widgets.Widget):
object, trait = input, 'value'
else:
                object, trait = input  # assume it's a tuple
if isinstance(object, Cell) and isinstance(object.value, widgets.Widget):
object = object.value
return getattr(object, trait)
def calculate(*ignore_args):
values = map(get_value, inputs)
result = f(*values)
_assign(output, result)
for input in inputs:
if isinstance(input, widgets.Widget):
object, trait = input, 'value'
else:
object, trait = input # assume it's a tuple
if isinstance(object, Cell) and isinstance(object.value, widgets.Widget):
# when it is a cell which holds a widget, we actually want the widgets' value
object.value.observe(calculate, trait)
else:
object.observe(calculate, trait)
def handle_possible_widget_change(change, trait=trait):
if isinstance(change['old'], widgets.Widget):
change['old'].unobserve(calculate, trait)
if isinstance(change['new'], widgets.Widget):
change['new'].observe(calculate, trait)
calculate()
object.observe(handle_possible_widget_change, 'value')
if initial_calculation:
calculate()
return decorator
@contextmanager
def hold_cells():
"""Hold adding any cell widgets until leaving this context.
This may give a better performance when adding many cells.
Example:
>>> from ipysheet import sheet, cell, hold_cells
>>>
>>> sheet(rows=10,columns=10)
    >>> with hold_cells():
>>> for i in range(10):
>>> for j in range(10):
>>> cell(i, j, value=i * 10 + j)
>>> # at this line, the Cell widgets are added
"""
global _hold_cells
global _cells
if _hold_cells is True:
yield
else:
try:
_hold_cells = True
yield
finally:
_hold_cells = False
# print(_cells, _last_sheet.cells)
_last_sheet.cells = tuple(_last_sheet.cells) + tuple(_cells)
_cells = ()
|
Lib/fontbakery/profiles/hhea.py
|
paullinnerud/fontbakery
| 351 |
90138
|
<filename>Lib/fontbakery/profiles/hhea.py
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS, SKIP, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('glyph_metrics_stats', 'is_ttf'))
]
@check(
id = 'com.google.fonts/check/linegaps',
proposal = 'legacy:check/041'
)
def com_google_fonts_check_linegaps(ttFont):
"""Checking Vertical Metric Linegaps."""
if ttFont["hhea"].lineGap != 0:
yield WARN,\
Message("hhea",
"hhea lineGap is not equal to 0.")
elif ttFont["OS/2"].sTypoLineGap != 0:
yield WARN,\
Message("OS/2",
"OS/2 sTypoLineGap is not equal to 0.")
else:
yield PASS, "OS/2 sTypoLineGap and hhea lineGap are both 0."
@check(
id = 'com.google.fonts/check/maxadvancewidth',
proposal = 'legacy:check/073'
)
def com_google_fonts_check_maxadvancewidth(ttFont):
"""MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?"""
hhea_advance_width_max = ttFont['hhea'].advanceWidthMax
hmtx_advance_width_max = None
for g in ttFont['hmtx'].metrics.values():
if hmtx_advance_width_max is None:
hmtx_advance_width_max = max(0, g[0])
else:
hmtx_advance_width_max = max(g[0], hmtx_advance_width_max)
if hmtx_advance_width_max != hhea_advance_width_max:
yield FAIL,\
Message("mismatch",
f"AdvanceWidthMax mismatch:"
f" expected {hmtx_advance_width_max} (from hmtx);"
f" got {hhea_advance_width_max} (from hhea)")
else:
yield PASS, ("MaxAdvanceWidth is consistent"
" with values in the Hmtx and Hhea tables.")
|
models/losses.py
|
wanghaisheng/LiveSpeechPortraits
| 375 |
90139
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
class GMMLogLoss(nn.Module):
''' compute the GMM loss between model output and the groundtruth data.
Args:
ncenter: numbers of gaussian distribution
ndim: dimension of each gaussian distribution
        sigma_min: lower bound applied to the predicted sigmas.
'''
def __init__(self, ncenter, ndim, sigma_min=0.03):
super(GMMLogLoss,self).__init__()
self.ncenter = ncenter
self.ndim = ndim
self.sigma_min = sigma_min
def forward(self, output, target):
'''
Args:
output: [b, T, ncenter + ncenter * ndim * 2]:
[:, :, : ncenter] shows each gaussian probability
[:, :, ncenter : ncenter + ndim * ncenter] shows the average values of each dimension of each gaussian
[: ,:, ncenter + ndim * ncenter : ncenter + ndim * 2 * ncenter] show the negative log sigma of each dimension of each gaussian
target: [b, T, ndim], the ground truth target landmark data is shown here
To maximize the log-likelihood equals to minimize the negative log-likelihood.
        NOTE: Predicting sigma directly is numerically unstable, because the raw network output would have to be
        clipped to stay positive before taking its log. Hence the network predicts the negative log sigma instead:
        `` sigma = 1/exp(predict), predict = -ln(sigma) ``
        This predicted value is exactly the 'B' term below.
        Currently only a single gaussian component is implemented, so the first ncenter values (the mixture weights) are unused.
For single gaussian distribution:
L(mu, sigma) = -n/2 * ln(2pi * sigma^2) - 1 / (2 x sigma^2) * sum^n (x_i - mu)^2 (n for prediction times, n=1 for one frame, x_i for gt)
= -1/2 * ln(2pi) - 1/2 * ln(sigma^2) - 1/(2 x sigma^2) * (x - mu)^2
                == min -L(mu, sigma) = 0.5 x ln(2pi) + 0.5 x ln(sigma^2) + 1/(2 x sigma^2) * (x - mu)^2
= 0.5 x ln_2PI + ln(sigma) + 0.5 x (MU_DIFF/sigma)^2
= A - B + C
In batch and Time sample, b and T are summed and averaged.
'''
b, T, _ = target.shape
# read prediction paras
mus = output[:, :, self.ncenter : (self.ncenter + self.ncenter * self.ndim)].view(b, T, self.ncenter, self.ndim) # [b, T, ncenter, ndim]
# apply min sigma
neg_log_sigmas_out = output[:, :, (self.ncenter + self.ncenter * self.ndim):].view(b, T, self.ncenter, self.ndim) # [b, T, ncenter, ndim]
inv_sigmas_min = torch.ones(neg_log_sigmas_out.size()).cuda() * (1. / self.sigma_min)
inv_sigmas_min_log = torch.log(inv_sigmas_min)
neg_log_sigmas = torch.min(neg_log_sigmas_out, inv_sigmas_min_log)
inv_sigmas = torch.exp(neg_log_sigmas)
        # replicate the target across the ncenter components so we can subtract mu
target_rep = target.unsqueeze(2).expand(b, T, self.ncenter, self.ndim) # [b, T, ncenter, ndim]
MU_DIFF = target_rep - mus # [b, T, ncenter, ndim]
# sigma process
A = 0.5 * math.log(2 * math.pi) # 0.9189385332046727
B = neg_log_sigmas # [b, T, ncenter, ndim]
C = 0.5 * (MU_DIFF * inv_sigmas)**2 # [b, T, ncenter, ndim]
negative_loglikelihood = A - B + C # [b, T, ncenter, ndim]
return negative_loglikelihood.mean()
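# Shape sketch (illustrative; `model`, `landmarks` and the dimension 73 are
# placeholders, and CUDA is assumed to be available since the loss moves
# tensors to the GPU). With ncenter=1 component of dimension ndim=73 the
# network output per frame packs [weights | means | negative log sigmas]:
#
#     criterion = GMMLogLoss(ncenter=1, ndim=73)
#     output = model(audio_features)      # [b, T, 1 + 73 + 73] = [b, T, 147]
#     target = landmarks                  # [b, T, 73]
#     loss = criterion(output, target)    # scalar negative log-likelihood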
def Sample_GMM(gmm_params, ncenter, ndim, weight_smooth = 0.0, sigma_scale = 0.0):
''' Sample values from a given a GMM distribution.
Args:
gmm_params: [b, target_length, (2 * ndim + 1) * ncenter], including the
distribution weights, average and sigma
ncenter: numbers of gaussian distribution
ndim: dimension of each gaussian distribution
weight_smooth: float, smooth the gaussian distribution weights
sigma_scale: float, adjust the gaussian scale, larger for sharper prediction,
            0 for zero sigma, which always returns the average values
    Returns:
        current_sample: [b, T, ndim] sampled values (returned on the GPU)
'''
# reshape as [b*T, (2 * ndim + 1) * ncenter]
b, T, _ = gmm_params.shape
gmm_params_cpu = gmm_params.cpu().view(-1, (2 * ndim + 1) * ncenter)
    # compute each distribution probability
prob = nn.functional.softmax(gmm_params_cpu[:, : ncenter] * (1 + weight_smooth), dim=1)
# select the gaussian distribution according to their weights
selected_idx = torch.multinomial(prob, num_samples=1, replacement=True)
mu = gmm_params_cpu[:, ncenter : ncenter + ncenter * ndim]
# please note that we use -logsigma as output, hence here we need to take the negative
sigma = torch.exp(-gmm_params_cpu[:, ncenter + ncenter * ndim:]) * sigma_scale
# print('sigma average:', sigma.mean())
selected_sigma = torch.empty(b*T, ndim).float()
selected_mu = torch.empty(b*T, ndim).float()
current_sample = torch.randn(b*T, ndim).float()
# current_sample = test_sample
for i in range(b*T):
idx = selected_idx[i, 0]
selected_sigma[i, :] = sigma[i, idx * ndim:(idx + 1) * ndim]
selected_mu[i, :] = mu[i, idx * ndim:(idx + 1) * ndim]
# sample with sel sigma and sel mean
current_sample = current_sample * selected_sigma + selected_mu
# cur_sample = sel_mu
# return current_sample.unsqueeze(1).cuda()
return current_sample.reshape(b, T, -1).cuda()
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
gpu_id = input.get_device()
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).cuda(gpu_id).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).cuda(gpu_id).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
class VGGLoss(nn.Module):
def __init__(self, model=None):
super(VGGLoss, self).__init__()
if model is None:
self.vgg = Vgg19()
else:
self.vgg = model
self.vgg.cuda()
# self.vgg.eval()
self.criterion = nn.L1Loss()
self.style_criterion = StyleLoss()
self.weights = [1.0, 1.0, 1.0, 1.0, 1.0]
self.style_weights = [1.0, 1.0, 1.0, 1.0, 1.0]
# self.weights = [5.0, 1.0, 0.5, 0.4, 0.8]
# self.style_weights = [10e4, 1000, 50, 15, 50]
def forward(self, x, y, style=False):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
if style:
# return both perceptual loss and style loss.
style_loss = 0
for i in range(len(x_vgg)):
this_loss = (self.weights[i] *
self.criterion(x_vgg[i], y_vgg[i].detach()))
this_style_loss = (self.style_weights[i] *
self.style_criterion(x_vgg[i], y_vgg[i].detach()))
loss += this_loss
style_loss += this_style_loss
return loss, style_loss
for i in range(len(x_vgg)):
this_loss = (self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()))
loss += this_loss
return loss
def gram_matrix(input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
    features = input.view(a * b, c * d)  # reshape F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
    # by dividing by the number of elements in each feature map.
return G.div(a * b * c * d)
class StyleLoss(nn.Module):
def __init__(self):
super(StyleLoss, self).__init__()
def forward(self, x, y):
Gx = gram_matrix(x)
Gy = gram_matrix(y)
return F.mse_loss(Gx, Gy) * 30000000
class MaskedL1Loss(nn.Module):
def __init__(self):
super(MaskedL1Loss, self).__init__()
self.criterion = nn.L1Loss()
def forward(self, input, target, mask):
mask = mask.expand(-1, input.size()[1], -1, -1)
loss = self.criterion(input * mask, target * mask)
return loss
from torchvision import models
class Vgg19(nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
|
cloudtunes-server/cloudtunes/handlers.py
|
skymemoryGit/cloudtunes
| 529 |
90146
|
from tornado.web import StaticFileHandler
from cloudtunes import settings
from cloudtunes.base.handlers import BaseHandler
class MainHandler(BaseHandler):
def get(self):
webapp_dir = self.settings['static_path']
homepage_dir = settings.HOMEPAGE_SITE_DIR
if self.current_user:
app_dir = webapp_dir
else:
if self.request.path != '/':
return self.redirect('/')
app_dir = homepage_dir
with open(app_dir + '/index.html') as f:
self.write(f.read())
class NoCacheStaticFileHandler(StaticFileHandler):
def set_extra_headers(self, path):
self.set_header('Cache-control', 'no-cache')
|
extrafiles/tooling/ValidateYAML.py
|
angriman/network
| 366 |
90147
|
<gh_stars>100-1000
#!/usr/bin/env python3
"""
This tool validates NetworKit YAML configuration files.
"""
import nktooling as nkt
import os
import yaml
nkt.setup()
os.chdir(nkt.getNetworKitRoot())
for configFile in [".clang-format", ".clang-tidy", "CITATION.cff"]:
try:
with open(configFile, "r") as f:
yaml.load(f, yaml.SafeLoader)
except yaml.YAMLError as err:
nkt.failIfMalformed(configFile, err)
|
tests/test_zenbelly.py
|
mathiazom/recipe-scrapers
| 811 |
90160
|
<filename>tests/test_zenbelly.py
from recipe_scrapers.zenbelly import ZenBelly
from tests import ScraperTest
class TestZenBellyScraper(ScraperTest):
scraper_class = ZenBelly
def test_host(self):
self.assertEqual("zenbelly.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.zenbelly.com/gingerbread/",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Paleo Gingerbread")
def test_yields(self):
self.assertEqual("20 serving(s)", self.harvester_class.yields())
def test_total_time(self):
self.assertEqual(40, self.harvester_class.total_time())
def test_image(self):
self.assertEqual(
"https://www.zenbelly.com/wp-content/uploads/2019/01/gingerbread-3-225x225.jpeg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"butter (ghee, or shortening for greasing the pan)",
"3 cups almond flour",
"1 1/2 cups tapioca starch (plus more for flouring pan)",
"1/2 cup coconut flour",
"1 1/2 teaspoons baking soda",
"2 teaspoons ground ginger",
"1 teaspoons ground cinnamon",
"1/4 teaspoon ground allspice",
"1/4 teaspoon ground cardamom",
"1/2 teaspoon finely ground sea salt",
"1 cup coconut sugar",
"1 cup molasses (use true molasses if you can find it)",
"2 teaspoons fresh ginger",
"1 cup boiling water",
"4 eggs",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"\n".join(
[
"Preheat the oven to 350ºF. Grease a 9×13-inch cake pan.",
"In a large bowl, whisk together the almond flour, tapioca starch, coconut flour, baking soda, ground ginger, cinnamon, allspice, cardamom, and salt.",
"In a medium heat proof bowl, whisk together the coconut sugar, molasses, fresh ginger, and boiling water. Once it’s lukewarm (the molasses and coconut sugar should take the temperature down enough), whisk in the eggs.",
"Pour the wet ingredients into the dry ingredients and whisk until there are no lumps.",
"Pour into the prepared pan and bake for 28-35 minutes*",
]
),
self.harvester_class.instructions(),
)
|
streaming/python/runtime/remote_call.py
|
firebolt55439/ray
| 21,382 |
90177
|
import logging
import os
import ray
import time
from enum import Enum
from ray.actor import ActorHandle
from ray.streaming.generated import remote_call_pb2
from ray.streaming.runtime.command\
import WorkerCommitReport, WorkerRollbackRequest
logger = logging.getLogger(__name__)
class CallResult:
"""
Call Result
"""
def __init__(self, success, result_code, result_msg, result_obj):
self.success = success
self.result_code = result_code
self.result_msg = result_msg
self.result_obj = result_obj
@staticmethod
def success(payload=None):
return CallResult(True, CallResultEnum.SUCCESS, None, payload)
@staticmethod
def fail(payload=None):
return CallResult(False, CallResultEnum.FAILED, None, payload)
@staticmethod
def skipped(msg=None):
return CallResult(True, CallResultEnum.SKIPPED, msg, None)
def is_success(self):
if self.result_code is CallResultEnum.SUCCESS:
return True
return False
class CallResultEnum(Enum):
"""
call result enum
"""
SUCCESS = 0
FAILED = 1
SKIPPED = 2
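# Usage sketch (illustrative; the payload below is a hypothetical value):
#
#     res = CallResult.success({'checkpoint_id': 3})
#     res.is_success()                                                            # True
#     CallResult.skipped('already done').result_code is CallResultEnum.SKIPPED    # True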
class RemoteCallMst:
"""
remote call job master
"""
@staticmethod
def request_job_worker_rollback(master: ActorHandle,
request: WorkerRollbackRequest):
logger.info("Remote call mst: request job worker rollback start.")
request_pb = remote_call_pb2.BaseWorkerCmd()
request_pb.actor_id = request.from_actor_id
request_pb.timestamp = int(time.time() * 1000.0)
rollback_request_pb = remote_call_pb2.WorkerRollbackRequest()
rollback_request_pb.exception_msg = request.exception_msg()
rollback_request_pb.worker_hostname = os.uname()[1]
rollback_request_pb.worker_pid = str(os.getpid())
request_pb.detail.Pack(rollback_request_pb)
return_ids = master.requestJobWorkerRollback\
.remote(request_pb.SerializeToString())
result = remote_call_pb2.BoolResult()
result.ParseFromString(ray.get(return_ids))
logger.info("Remote call mst: request job worker rollback finish.")
return result.boolRes
@staticmethod
def report_job_worker_commit(master: ActorHandle,
report: WorkerCommitReport):
logger.info("Remote call mst: report job worker commit start.")
report_pb = remote_call_pb2.BaseWorkerCmd()
report_pb.actor_id = report.from_actor_id
report_pb.timestamp = int(time.time() * 1000.0)
wk_commit = remote_call_pb2.WorkerCommitReport()
wk_commit.commit_checkpoint_id = report.commit_checkpoint_id
report_pb.detail.Pack(wk_commit)
return_id = master.reportJobWorkerCommit\
.remote(report_pb.SerializeToString())
result = remote_call_pb2.BoolResult()
result.ParseFromString(ray.get(return_id))
logger.info("Remote call mst: report job worker commit finish.")
return result.boolRes
|
nsot/management/commands/start.py
|
comerford/nsot
| 387 |
90207
|
<reponame>comerford/nsot
from __future__ import absolute_import, print_function
"""
Command to start the NSoT server process.
"""
from django.conf import settings
from django.core.management import call_command
import sys
from nsot.services import http
from nsot.util.commands import NsotCommand, CommandError
class Command(NsotCommand):
help = 'Start the NSoT server process.'
def add_arguments(self, parser):
parser.add_argument(
'service',
nargs='?',
default='http',
help='Starts the specified service.',
)
parser.add_argument(
'--debug',
action='store_true',
default=False,
help='Toggle debug output.',
)
parser.add_argument(
'--max-requests',
type=int,
default=settings.NSOT_MAX_REQUESTS,
help=(
'The maximum number of requests a worker will process before '
'restarting.'
),
)
parser.add_argument(
'--max-requests-jitter',
type=int,
default=settings.NSOT_MAX_REQUESTS_JITTER,
help=(
'The maximum jitter to add to the max_requests setting.'
),
)
parser.add_argument(
'--noinput',
action='store_true',
default=False,
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--no-collectstatic',
action='store_false',
dest='collectstatic',
default=True,
help='Do not automatically collect static files into STATIC_ROOT.',
)
parser.add_argument(
'--no-upgrade',
action='store_false',
dest='upgrade',
default=True,
help='Do not automatically perform any database upgrades.',
)
parser.add_argument(
'--preload',
action='store_true',
default=settings.NSOT_PRELOAD,
help=(
'Load application code before the worker processes are '
'forked.'
),
)
parser.add_argument(
'-a', '--address',
type=str,
default='%s:%s' % (settings.NSOT_HOST, settings.NSOT_PORT),
help='Host:port to listen on.',
)
parser.add_argument(
'-k', '--worker-class',
type=str,
default=settings.NSOT_WORKER_CLASS,
help='The type of gunicorn workers to use.',
)
parser.add_argument(
'-t', '--timeout',
type=int,
default=settings.NSOT_WORKER_TIMEOUT,
help='Timeout before gunicorn workers are killed/restarted.',
)
parser.add_argument(
'-w', '--workers',
type=int,
default=settings.NSOT_NUM_WORKERS,
help=(
'The number of gunicorn worker processes for handling '
'requests.'
),
)
def handle(self, **options):
address = options.get('address')
# Break address into host:port
if address:
if ':' in address:
host, port = address.split(':', 1)
port = int(port)
else:
host = address
port = None
else:
host, port = None, None
services = {
'http': http.NsotHTTPServer,
}
# Ensure we perform an upgrade before starting any service.
if options.get('upgrade'):
print("Performing upgrade before service startup...")
call_command(
'upgrade', verbosity=0, noinput=options.get('noinput')
)
# Ensure we collect static before starting any service, but only if
# SERVE_STATIC_FILES=True.
if options.get('collectstatic') and settings.SERVE_STATIC_FILES:
print("Performing collectstatic before service startup...")
call_command('collectstatic', interactive=False, ignore=['src'])
service_name = options.get('service')
try:
service_class = services[service_name]
except KeyError:
raise CommandError('%r is not a valid service' % service_name)
service = service_class(
debug=options.get('debug'),
host=host,
port=port,
workers=options.get('workers'),
worker_class=options.get('worker_class'),
timeout=options.get('timeout'),
max_requests=options.get('max_requests'),
max_requests_jitter=options.get('max_requests_jitter'),
preload=options.get('preload'),
)
        # Remove command line arguments to avoid optparse failures: the service
        # code calls call_command, which re-parses the command line, and a parse
        # error is thrown if --no-upgrade was supplied.
sys.argv = sys.argv[:1]
service.run()
|
examples/account_media.py
|
smaeda-ks/twitter-python-ads-sdk
| 162 |
90216
|
<reponame>smaeda-ks/twitter-python-ads-sdk
# Copyright (C) 2015-2016 Twitter, Inc.
# Note: All account_media/media_creatives must be uploaded via the media-upload endpoints
# See: https://dev.twitter.com/rest/media/uploading-media
from twitter_ads.client import Client
from twitter_ads.http import Request
from twitter_ads.enums import CREATIVE_TYPE
from twitter_ads.creative import AccountMedia, MediaCreative
CONSUMER_KEY = 'your consumer key'
CONSUMER_SECRET = 'your consumer secret'
ACCESS_TOKEN = 'access token'
ACCESS_TOKEN_SECRET = 'access token secret'
ACCOUNT_ID = 'account id'
# initialize the client
client = Client(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# load the advertiser account instance
account = client.accounts(ACCOUNT_ID)
# grab the first line_item on the account
line_item_id = account.line_items().first.id
# retrieve the `id` of the media creative associated with a line item
print(account.media_creatives().first.id)
# retrieve the `id` of the first account media associated with the account
account_media_id = account.account_media().first.id
# create a new account media
account_media = AccountMedia(account)
account_media.media_id = 'your-media-id'
# OR account_media.video_id OR account_media.vast_url
# see the media_upload.py example for more details
account_media.creative_type = CREATIVE_TYPE.BANNER
account_media.save()
# create a new media creative
media_creative = MediaCreative(account)
media_creative.line_item_id = line_item_id
media_creative.account_media_id = account_media_id
media_creative.landing_url = "https://my-landing-url"
media_creative.save()
# delete the media creative
media_creative.delete()
|
ansible/roles/lib_git/build/src/git_rebase.py
|
fahlmant/openshift-tools
| 164 |
90223
|
<filename>ansible/roles/lib_git/build/src/git_rebase.py
# pylint: skip-file
class GitRebase(GitCLI):
    ''' Class to wrap the git rebase command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
path,
branch,
rebase_branch,
ssh_key=None):
        ''' Constructor for GitRebase '''
super(GitRebase, self).__init__(path, ssh_key=ssh_key)
self.path = path
self.branch = branch
self.rebase_branch = rebase_branch
self.debug = []
os.chdir(path)
def checkout_branch(self):
''' check out the desired branch '''
current_branch_results = self._get_current_branch()
if current_branch_results['results'] == self.branch:
return True
current_branch_results = self._checkout(self.branch)
self.debug.append(current_branch_results)
if current_branch_results['returncode'] == 0:
return True
return False
def remote_update(self):
''' update the git remotes '''
remote_update_results = self._remote_update()
self.debug.append(remote_update_results)
if remote_update_results['returncode'] == 0:
return True
return False
def need_rebase(self):
''' checks to see if rebase is needed '''
git_diff_results = self._diff(self.rebase_branch)
self.debug.append(git_diff_results)
if git_diff_results['results']:
return True
return False
def rebase(self):
        '''perform a git rebase '''
if self.checkout_branch():
if self.remote_update():
if self.need_rebase():
rebase_results = self._rebase(self.rebase_branch)
rebase_results['debug'] = self.debug
return rebase_results
else:
return {'returncode': 0,
'results': {},
'no_rebase_needed': True
}
return {'returncode': 1,
'results': {},
'debug': self.debug
}
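# Usage sketch (illustrative; the path and branch names are hypothetical, and the
# GitCLI base class from this role is assumed to be importable):
#
#     rebaser = GitRebase('/path/to/clone', 'feature-branch', 'origin/master')
#     result = rebaser.rebase()
#     # result['returncode'] == 0 on success or when no rebase was needed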
|
venv/lib/python3.7/site-packages/allauth/socialaccount/providers/angellist/urls.py
|
vikram0207/django-rest
| 6,342 |
90234
|
<gh_stars>1000+
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import AngelListProvider
urlpatterns = default_urlpatterns(AngelListProvider)
|
text/symbols.py
|
highmaru-public/multi-speaker-tacotron-tensorflow
| 183 |
90246
|
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run
through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
'''
from jamo import h2j, j2h
from jamo.jamo import _jamo_char_to_hcj
from .korean import ALL_SYMBOLS, PAD, EOS
#symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!\'(),-.:;? '
symbols = ALL_SYMBOLS
|
photon__crawler_spider__examples/main.py
|
DazEB2/SimplePyScripts
| 117 |
90257
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/s0md3v/Photon/wiki/Photon-Library
# pip install photon
import photon
photon.crawl('https://github.com/s0md3v/Photon/wiki/Photon-Library')
result = photon.result()
print(result)
with open('result.json', 'w', encoding='utf-8') as f:
import json
json.dump(result, f, indent=4, ensure_ascii=False)
|
twistedcaldav/test/test_database.py
|
backwardn/ccs-calendarserver
| 462 |
90270
|
##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twistedcaldav.database import AbstractADBAPIDatabase, ADBAPISqliteMixin
import twistedcaldav.test.util
from twisted.internet.defer import inlineCallbacks
import os
import time
class Database (twistedcaldav.test.util.TestCase):
"""
Test abstract SQL DB class
"""
class TestDB(ADBAPISqliteMixin, AbstractADBAPIDatabase):
def __init__(self, path, persistent=False, version="1"):
self.version = version
self.dbpath = path
super(Database.TestDB, self).__init__("sqlite", "sqlite3", (path,), persistent, cp_min=3, cp_max=3)
def _db_version(self):
"""
@return: the schema version assigned to this index.
"""
return self.version
def _db_type(self):
"""
@return: the collection type assigned to this index.
"""
return "TESTTYPE"
def _db_init_data_tables(self):
"""
Initialise the underlying database tables.
@param q: a database cursor to use.
"""
#
# TESTTYPE table
#
return self._db_execute(
"""
create table TESTTYPE (
KEY text unique,
VALUE text
)
"""
)
def _db_remove_data_tables(self):
return self._db_execute("drop table TESTTYPE")
class TestDBRecreateUpgrade(TestDB):
class RecreateDBException(Exception):
pass
class UpgradeDBException(Exception):
pass
def __init__(self, path, persistent=False):
super(Database.TestDBRecreateUpgrade, self).__init__(path, persistent, version="2")
def _db_recreate(self):
raise self.RecreateDBException()
class TestDBCreateIndexOnUpgrade(TestDB):
def __init__(self, path, persistent=False):
super(Database.TestDBCreateIndexOnUpgrade, self).__init__(path, persistent, version="2")
def _db_upgrade_data_tables(self, old_version):
return self._db_execute(
"""
create index TESTING on TESTTYPE (VALUE)
"""
)
class TestDBPauseInInit(TestDB):
def _db_init(self):
time.sleep(1)
super(Database.TestDBPauseInInit, self)._db_init()
@inlineCallbacks
def inlineCallbackRaises(self, exc, f, *args, **kwargs):
try:
yield f(*args, **kwargs)
except exc:
pass
        except Exception as e:
self.fail("Wrong exception raised: %s" % (e,))
else:
self.fail("%s not raised" % (exc,))
@inlineCallbacks
def test_connect(self):
"""
Connect to database and create table
"""
db = Database.TestDB(self.mktemp())
self.assertFalse(db.initialized)
yield db.open()
self.assertTrue(db.initialized)
db.close()
@inlineCallbacks
def test_connectFailure(self):
"""
Failure to connect cleans up the pool
"""
db = Database.TestDB(self.mktemp())
# Make _db_init fail
db._db_init = lambda: 1 / 0
self.assertFalse(db.initialized)
try:
yield db.open()
except:
pass
self.assertFalse(db.initialized)
self.assertEquals(db.pool, None)
@inlineCallbacks
def test_readwrite(self):
"""
Add a record, search for it
"""
db = Database.TestDB(self.mktemp())
yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",))
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, (("FOO", "BAR"),))
items = (yield db.queryList("SELECT * from TESTTYPE"))
self.assertEqual(items, ("FOO",))
db.close()
@inlineCallbacks
def test_close(self):
"""
Close database
"""
db = Database.TestDB(self.mktemp())
self.assertFalse(db.initialized)
yield db.open()
db.close()
self.assertFalse(db.initialized)
db.close()
@inlineCallbacks
def test_version_upgrade_nonpersistent(self):
"""
Connect to database and create table
"""
db_file = self.mktemp()
db = Database.TestDB(db_file)
yield db.open()
yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",))
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, (("FOO", "BAR"),))
db.close()
db = None
db = Database.TestDBRecreateUpgrade(db_file)
yield self.inlineCallbackRaises(Database.TestDBRecreateUpgrade.RecreateDBException, db.open)
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, ())
db.close()
@inlineCallbacks
def test_version_upgrade_persistent(self):
"""
Connect to database and create table
"""
db_file = self.mktemp()
db = Database.TestDB(db_file, persistent=True)
yield db.open()
yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",))
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, (("FOO", "BAR"),))
db.close()
db = None
db = Database.TestDBRecreateUpgrade(db_file, persistent=True)
yield self.inlineCallbackRaises(NotImplementedError, db.open)
self.assertTrue(os.path.exists(db_file))
db.close()
db = None
db = Database.TestDB(db_file, persistent=True)
yield db.open()
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, (("FOO", "BAR"),))
db.close()
@inlineCallbacks
def test_version_upgrade_persistent_add_index(self):
"""
Connect to database and create table
"""
db_file = self.mktemp()
db = Database.TestDB(db_file, persistent=True)
yield db.open()
yield db.execute("INSERT into TESTTYPE (KEY, VALUE) values (:1, :2)", ("FOO", "BAR",))
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, (("FOO", "BAR"),))
db.close()
db = None
db = Database.TestDBCreateIndexOnUpgrade(db_file, persistent=True)
yield db.open()
items = (yield db.query("SELECT * from TESTTYPE"))
self.assertEqual(items, (("FOO", "BAR"),))
db.close()
|
RecoEcal/EgammaClusterProducers/python/multi5x5SuperClustersWithPreshower_cfi.py
|
ckamtsikis/cmssw
| 852 |
90392
|
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# Preshower cluster producer
multi5x5SuperClustersWithPreshower = cms.EDProducer("PreshowerPhiClusterProducer",
esStripEnergyCut = cms.double(0.0),
esPhiClusterDeltaEta = cms.double(0.15),
esPhiClusterDeltaPhi = cms.double(0.12),
preshClusterCollectionY = cms.string('preshowerYClusters'),
# building endcap association
assocSClusterCollection = cms.string(''),
etThresh = cms.double(0.0),
# building preshower clusters
preshRecHitProducer = cms.InputTag("ecalPreshowerRecHit","EcalRecHitsES"),
preshClusterCollectionX = cms.string('preshowerXClusters'),
endcapSClusterProducer = cms.InputTag("multi5x5SuperClusters","multi5x5EndcapSuperClusters")
)
uncleanedOnlyMulti5x5SuperClustersWithPreshower = multi5x5SuperClustersWithPreshower.clone(
endcapSClusterProducer = "multi5x5SuperClusters:uncleanOnlyMulti5x5EndcapSuperClusters"
)
|
plugins/quetz_runexports/quetz_runexports/api.py
|
maresb/quetz
| 108 |
90409
|
<gh_stars>100-1000
import json
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm.session import Session
from quetz.db_models import PackageVersion
from quetz.deps import get_db
router = APIRouter()
@router.get(
"/api/channels/{channel_name}/packages/{package_name}/versions/"
"{platform}/{filename}/run_exports"
)
def get_run_exports(
channel_name: str,
package_name: str,
platform: str,
filename: str,
db: Session = Depends(get_db),
):
package_version = (
db.query(PackageVersion)
.filter(PackageVersion.channel_name == channel_name)
.filter(PackageVersion.platform == platform)
.filter(PackageVersion.filename == filename)
.first()
)
    if package_version is None or not package_version.runexports:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=(
f"run_exports for package {channel_name}/{platform}/{filename}"
"not found"
),
)
run_exports = json.loads(package_version.runexports.data)
return run_exports
|
navec/train/quantiles.py
|
FreedomSlow/navec
| 115 |
90410
|
SHARES = [
0.5, 0.6, 0.7, 0.8, 0.9,
0.91, 0.92, 0.93, 0.94,
0.95, 0.96, 0.97, 0.98,
0.99, 1.0
]
def pop(items):
return items[0], items[1:]
def get_quantiles(records, shares=SHARES):
if not shares:
return
counts = [count for _, count in records]
total = sum(counts)
accumulator = 0
shares = sorted(shares)
share, shares = pop(shares)
counts = sorted(counts, reverse=True)
for index, count in enumerate(counts):
accumulator += count
if accumulator / total >= share:
yield share, index
if not shares:
break
share, shares = pop(shares)
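# Worked example (illustrative): for each share, the generator yields the
# smallest index i such that the i+1 most frequent items cover at least that
# share of all occurrences:
#
#     records = [('the', 50), ('cat', 30), ('sat', 20)]   # total count = 100
#     list(get_quantiles(records, shares=[0.5, 0.8, 1.0]))
#     # -> [(0.5, 0), (0.8, 1), (1.0, 2)]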
|
python_modules/libraries/dagstermill/dagstermill_tests/test_serialization.py
|
dbatten5/dagster
| 4,606 |
90412
|
import pytest
from dagster import Any, String, usable_as_dagster_type
from dagster.check import CheckError
from dagster.core.types.dagster_type import resolve_dagster_type
from dagster.utils import safe_tempfile_path
from dagstermill.serialize import read_value, write_value
def test_scalar():
with safe_tempfile_path() as tempfile_path:
assert (
read_value(
resolve_dagster_type(String),
write_value(resolve_dagster_type(String), "foo", tempfile_path),
)
== "foo"
)
def test_scalar_any():
with safe_tempfile_path() as tempfile_path:
assert (
read_value(
resolve_dagster_type(Any),
write_value(resolve_dagster_type(Any), "foo", tempfile_path),
)
== "foo"
)
@usable_as_dagster_type
class EvenType:
def __init__(self, num):
        assert num % 2 == 0
self.num = num
def test_custom_dagster_type():
with safe_tempfile_path() as tempfile_path:
assert (
read_value(
resolve_dagster_type(EvenType),
write_value(resolve_dagster_type(EvenType), 4, tempfile_path),
)
== 4
)
def test_read_bad_value():
with pytest.raises(CheckError, match="Malformed value"):
read_value(resolve_dagster_type(Any), {"value": "foo", "file": "bar"})
with pytest.raises(CheckError, match="Malformed value"):
read_value(resolve_dagster_type(Any), {"quux": "buzz"})
|
output/python37/Lib/idlelib/idle_test/test_outwin.py
|
cy15196/FastCAE
| 117 |
90477
|
""" Test idlelib.outwin.
"""
import unittest
from tkinter import Tk, Text
from idlelib.idle_test.mock_tk import Mbox_func
from idlelib.idle_test.mock_idle import Func
from idlelib import outwin
from test.support import requires
from unittest import mock
class OutputWindowTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
root = cls.root = Tk()
root.withdraw()
w = cls.window = outwin.OutputWindow(None, None, None, root)
cls.text = w.text = Text(root)
@classmethod
def tearDownClass(cls):
cls.window.close()
del cls.text, cls.window
cls.root.destroy()
del cls.root
def setUp(self):
self.text.delete('1.0', 'end')
def test_ispythonsource(self):
# OutputWindow overrides ispythonsource to always return False.
w = self.window
self.assertFalse(w.ispythonsource('test.txt'))
self.assertFalse(w.ispythonsource(__file__))
def test_window_title(self):
self.assertEqual(self.window.top.title(), 'Output')
def test_maybesave(self):
w = self.window
eq = self.assertEqual
w.get_saved = Func()
w.get_saved.result = False
eq(w.maybesave(), 'no')
eq(w.get_saved.called, 1)
w.get_saved.result = True
eq(w.maybesave(), 'yes')
eq(w.get_saved.called, 2)
del w.get_saved
def test_write(self):
eq = self.assertEqual
delete = self.text.delete
get = self.text.get
write = self.window.write
# Test bytes.
b = b'Test bytes.'
eq(write(b), len(b))
eq(get('1.0', '1.end'), b.decode())
# No new line - insert stays on same line.
delete('1.0', 'end')
test_text = 'test text'
eq(write(test_text), len(test_text))
eq(get('1.0', '1.end'), 'test text')
eq(get('insert linestart', 'insert lineend'), 'test text')
# New line - insert moves to next line.
delete('1.0', 'end')
test_text = 'test text\n'
eq(write(test_text), len(test_text))
eq(get('1.0', '1.end'), 'test text')
eq(get('insert linestart', 'insert lineend'), '')
# Text after new line is tagged for second line of Text widget.
delete('1.0', 'end')
test_text = 'test text\nLine 2'
eq(write(test_text), len(test_text))
eq(get('1.0', '1.end'), 'test text')
eq(get('2.0', '2.end'), 'Line 2')
eq(get('insert linestart', 'insert lineend'), 'Line 2')
# Test tags.
delete('1.0', 'end')
test_text = 'test text\n'
test_text2 = 'Line 2\n'
eq(write(test_text, tags='mytag'), len(test_text))
eq(write(test_text2, tags='secondtag'), len(test_text2))
eq(get('mytag.first', 'mytag.last'), test_text)
eq(get('secondtag.first', 'secondtag.last'), test_text2)
eq(get('1.0', '1.end'), test_text.rstrip('\n'))
eq(get('2.0', '2.end'), test_text2.rstrip('\n'))
def test_writelines(self):
eq = self.assertEqual
get = self.text.get
writelines = self.window.writelines
writelines(('Line 1\n', 'Line 2\n', 'Line 3\n'))
eq(get('1.0', '1.end'), 'Line 1')
eq(get('2.0', '2.end'), 'Line 2')
eq(get('3.0', '3.end'), 'Line 3')
eq(get('insert linestart', 'insert lineend'), '')
def test_goto_file_line(self):
eq = self.assertEqual
w = self.window
text = self.text
w.flist = mock.Mock()
gfl = w.flist.gotofileline = Func()
showerror = w.showerror = Mbox_func()
# No file/line number.
w.write('Not a file line')
self.assertIsNone(w.goto_file_line())
eq(gfl.called, 0)
eq(showerror.title, 'No special line')
# Current file/line number.
w.write(f'{str(__file__)}: 42: spam\n')
w.write(f'{str(__file__)}: 21: spam')
self.assertIsNone(w.goto_file_line())
eq(gfl.args, (str(__file__), 21))
# Previous line has file/line number.
text.delete('1.0', 'end')
w.write(f'{str(__file__)}: 42: spam\n')
w.write('Not a file line')
self.assertIsNone(w.goto_file_line())
eq(gfl.args, (str(__file__), 42))
del w.flist.gotofileline, w.showerror
class ModuleFunctionTest(unittest.TestCase):
@classmethod
def setUp(cls):
outwin.file_line_progs = None
def test_compile_progs(self):
outwin.compile_progs()
for pat, regex in zip(outwin.file_line_pats, outwin.file_line_progs):
self.assertEqual(regex.pattern, pat)
@mock.patch('builtins.open')
def test_file_line_helper(self, mock_open):
flh = outwin.file_line_helper
test_lines = (
(r'foo file "testfile1", line 42, bar', ('testfile1', 42)),
(r'foo testfile2(21) bar', ('testfile2', 21)),
(r' testfile3 : 42: foo bar\n', (' testfile3 ', 42)),
(r'foo testfile4.py :1: ', ('foo testfile4.py ', 1)),
('testfile5: \u19D4\u19D2: ', ('testfile5', 42)),
(r'testfile6: 42', None), # only one `:`
(r'testfile7 42 text', None) # no separators
)
for line, expected_output in test_lines:
self.assertEqual(flh(line), expected_output)
if expected_output:
mock_open.assert_called_with(expected_output[0], 'r')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
gpymusic/view.py
|
academo/gpymusic
| 136 |
90510
|
class View(dict):
"""A View contains the content displayed in the main window."""
def __init__(self, d=None):
"""
View constructor.
Keyword arguments:
d=None: Initial keys and values to initialize the view with.
Regardless of the value of d, keys 'songs', 'artists' and
'albums' are created with empty lists as default values.
"""
self['songs'], self['artists'], self['albums'] = [], [], []
if d is not None:
if isinstance(d, dict):
for k in d:
self[k] = d[k]
else:
raise TypeError('Initializing View with invalid argument')
def __setitem__(self, key, val):
"""Restrict values to lists only."""
if not isinstance(val, list):
raise TypeError('View can only hold lists as values.')
super().__setitem__(key, val)
def __len__(self):
"""Return the sum of each list's length."""
return sum(len(self[k]) for k in self)
def replace(self, other):
"""Replace the view's contents with some other dict."""
        self.__init__(other)
def clear(self):
"""Clear elements without removing keys."""
for k in self.keys():
del self[k][:]
def is_empty(self):
"""Returns whether or not the view is empty."""
return all(not self[k] for k in self)
def copy(self):
"""Return a deep copy of the view's contents."""
return {k: self[k][:] for k in self}
|
asyncpg/protocol/__init__.py
|
baltitenger/asyncpg
| 5,714 |
90563
|
<gh_stars>1000+
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
# flake8: NOQA
from .protocol import Protocol, Record, NO_TIMEOUT, BUILTIN_TYPE_NAME_MAP
|
jmilkfansblog/controllers/v1/__init__.py
|
xiaoyh121/program
| 176 |
90605
|
<gh_stars>100-1000
from pecan import rest
from wsme import types as wtypes
from jmilkfansblog.api.expose import expose as wsexpose
from jmilkfansblog.controllers.v1 import users
from jmilkfansblog.controllers.v1 import posts
class V1(wtypes.Base):
id = wtypes.text
"""The ID of the version, also acts as the release number"""
@staticmethod
def convert():
v1 = V1()
v1.id = 'v1'
return v1
class Controller(rest.RestController):
"""Version 1 API controller root."""
users = users.UsersController()
posts = posts.PostsController()
@wsexpose(V1)
def get(self):
return V1.convert()
|
Chapter 09/seq2seq_translation.py
|
bharlow058/Packt-TF-cook-book
| 587 |
90642
|
# -*- coding: utf-8 -*-
#
# Creating Sequence to Sequence Models
#-------------------------------------
# Here we show how to implement sequence to sequence models.
# Specifically, we will build an English to German translation model.
#
import os
import re
import string
import requests
import io
import numpy as np
import collections
import random
import pickle
import string
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile
from collections import Counter
from tensorflow.models.rnn.translate import data_utils
from tensorflow.models.rnn.translate import seq2seq_model
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a session
sess = tf.Session()
# Model Parameters
learning_rate = 0.1
lr_decay_rate = 0.99
lr_decay_every = 100
max_gradient = 5.0
batch_size = 50
num_layers = 3
rnn_size = 500
layer_size = 512
generations = 10000
vocab_size = 10000
save_every = 1000
eval_every = 500
output_every = 50
punct = string.punctuation
# Data Parameters
data_dir = 'temp'
data_file = 'eng_ger.txt'
model_path = 'seq2seq_model'
full_model_dir = os.path.join(data_dir, model_path)
# Test Translation from English (lowercase, no punct)
test_english = ['hello where is my computer',
'the quick brown fox jumped over the lazy dog',
'is it going to rain tomorrow']
# Make Model Directory
if not os.path.exists(full_model_dir):
os.makedirs(full_model_dir)
# Make data directory
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print('Loading English-German Data')
# Check for data, if it doesn't exist, download it and save it
if not os.path.isfile(os.path.join(data_dir, data_file)):
print('Data not found, downloading Eng-Ger sentences from www.manythings.org')
sentence_url = 'http://www.manythings.org/anki/deu-eng.zip'
r = requests.get(sentence_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('deu.txt')
# Format Data
eng_ger_data = file.decode()
eng_ger_data = eng_ger_data.encode('ascii',errors='ignore')
eng_ger_data = eng_ger_data.decode().split('\n')
# Write to file
with open(os.path.join(data_dir, data_file), 'w') as out_conn:
for sentence in eng_ger_data:
out_conn.write(sentence + '\n')
else:
eng_ger_data = []
with open(os.path.join(data_dir, data_file), 'r') as in_conn:
for row in in_conn:
eng_ger_data.append(row[:-1])
# Remove punctuation
eng_ger_data = [''.join(char for char in sent if char not in punct) for sent in eng_ger_data]
# Split each sentence by tabs
eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x)>=1]
[english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]
english_sentence = [x.lower().split() for x in english_sentence]
german_sentence = [x.lower().split() for x in german_sentence]
print('Processing the vocabularies.')
# Process the English Vocabulary
all_english_words = [word for sentence in english_sentence for word in sentence]
all_english_counts = Counter(all_english_words)
eng_word_keys = [x[0] for x in all_english_counts.most_common(vocab_size-1)] #-1 because 0=unknown is also in there
eng_vocab2ix = dict(zip(eng_word_keys, range(1,vocab_size)))
eng_ix2vocab = {val:key for key, val in eng_vocab2ix.items()}
english_processed = []
for sent in english_sentence:
temp_sentence = []
for word in sent:
try:
temp_sentence.append(eng_vocab2ix[word])
except:
temp_sentence.append(0)
english_processed.append(temp_sentence)
# Process the German Vocabulary
all_german_words = [word for sentence in german_sentence for word in sentence]
all_german_counts = Counter(all_german_words)
ger_word_keys = [x[0] for x in all_german_counts.most_common(vocab_size-1)]
ger_vocab2ix = dict(zip(ger_word_keys, range(1,vocab_size)))
ger_ix2vocab = {val:key for key, val in ger_vocab2ix.items()}
german_processed = []
for sent in german_sentence:
temp_sentence = []
for word in sent:
try:
temp_sentence.append(ger_vocab2ix[word])
except:
temp_sentence.append(0)
german_processed.append(temp_sentence)
# Process the test english sentences, use '0' if word not in our vocab
test_data = []
for sentence in test_english:
temp_sentence = []
for word in sentence.split(' '):
try:
temp_sentence.append(eng_vocab2ix[word])
except:
# Use '0' if the word isn't in our vocabulary
temp_sentence.append(0)
test_data.append(temp_sentence)
# Define Buckets for sequence lengths
# We will split data into the corresponding buckets:
# (x1, y1), (x2, y2), ...
# Where all entries in bucket 1: len(x)<x1 and len(y)<y1 and so on.
x_maxs = [5, 7, 11, 50]
y_maxs = [10, 12, 17, 60]
buckets = [x for x in zip(x_maxs, y_maxs)]
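# Illustrative example of the bucketing rule above (sentence lengths are
# hypothetical): a pair with len(eng)=6 and len(ger)=11 skips bucket 0
# (6 > 5) and lands in bucket 1, since 6 <= 7 and 11 <= 12.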
bucketed_data = [[] for _ in range(len(x_maxs))]
for eng, ger in zip(english_processed, german_processed):
for ix, (x_max, y_max) in enumerate(zip(x_maxs, y_maxs)):
if (len(eng) <= x_max) and (len(ger) <= y_max):
bucketed_data[ix].append([eng, ger])
break
# Print summaries of buckets
train_bucket_sizes = [len(bucketed_data[b]) for b in range(len(buckets))]
train_total_size = float(sum(train_bucket_sizes))
for ix, bucket in enumerate(bucketed_data):
print('Data pts in bucket {}: {}'.format(ix, len(bucket)))
# Create sequence to sequence model
def translation_model(sess, input_vocab_size, output_vocab_size,
buckets, rnn_size, num_layers, max_gradient,
learning_rate, lr_decay_rate, forward_only):
model = seq2seq_model.Seq2SeqModel(
input_vocab_size,
output_vocab_size,
buckets,
rnn_size,
num_layers,
max_gradient,
batch_size,
learning_rate,
lr_decay_rate,
forward_only=forward_only,
dtype=tf.float32)
return(model)
print('Creating Translation Model')
input_vocab_size = vocab_size
output_vocab_size = vocab_size
with tf.variable_scope('translate_model') as scope:
translate_model = translation_model(sess, vocab_size, vocab_size,
buckets, rnn_size, num_layers,
max_gradient, learning_rate,
lr_decay_rate, False)
#Reuse the variables for the test model
scope.reuse_variables()
test_model = translation_model(sess, vocab_size, vocab_size,
buckets, rnn_size, num_layers,
max_gradient, learning_rate,
lr_decay_rate, True)
test_model.batch_size = 1
# Initialize all model variables
init = tf.global_variables_initializer()
sess.run(init)
# Start training
train_loss = []
for i in range(generations):
rand_bucket_ix = np.random.choice(len(bucketed_data))
model_outputs = translate_model.get_batch(bucketed_data, rand_bucket_ix)
encoder_inputs, decoder_inputs, target_weights = model_outputs
# Get the (gradient norm, loss, and outputs)
_, step_loss, _ = translate_model.step(sess, encoder_inputs, decoder_inputs,
target_weights, rand_bucket_ix, False)
# Output status
if (i+1) % output_every == 0:
train_loss.append(step_loss)
print('Gen #{} out of {}. Loss: {:.4}'.format(i+1, generations, step_loss))
# Check if we should decay the learning rate
if (i+1) % lr_decay_every == 0:
sess.run(translate_model.learning_rate_decay_op)
# Save model
if (i+1) % save_every == 0:
print('Saving model to {}.'.format(full_model_dir))
model_save_path = os.path.join(full_model_dir, "eng_ger_translation.ckpt")
translate_model.saver.save(sess, model_save_path, global_step=i)
# Eval on test set
if (i+1) % eval_every == 0:
for ix, sentence in enumerate(test_data):
# Find which bucket sentence goes in
bucket_id = next(index for index, val in enumerate(x_maxs) if val>=len(sentence))
# Get RNN model outputs
encoder_inputs, decoder_inputs, target_weights = test_model.get_batch(
{bucket_id: [(sentence, [])]}, bucket_id)
# Get logits
_, test_loss, output_logits = test_model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
ix_output = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is a 0 symbol in outputs end the output there.
ix_output = ix_output[0:[ix for ix, x in enumerate(ix_output+[0]) if x==0][0]]
# Get german words from indices
test_german = [ger_ix2vocab[x] for x in ix_output]
print('English: {}'.format(test_english[ix]))
print('German: {}'.format(test_german))
# Plot train loss
loss_generations = [i for i in range(generations) if i%output_every==0]
plt.plot(loss_generations, train_loss, 'k-')
plt.title('Sequence to Sequence Loss')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
|
cumulusci/tasks/robotframework/debugger/DebugListener.py
|
davisagli/CumulusCI
| 163 |
90682
|
"""
Robot Debugger
"""
from cumulusci.tasks.robotframework.debugger import (
Breakpoint,
DebuggerCli,
Keyword,
Suite,
Testcase,
)
class DebugListener(object):
"""A robot framework listener for debugging test cases
This acts as the controller for the debugger. It is responsible
for managing breakpoints, and pausing execution of a test when a
breakpoint is hit.
The listener is also responsible for instantiating the debugger UI
(class DebuggerCli).
"""
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, *breakpoints):
self.show_initial_help = True
self.current_file = None
self.stack = []
self.rdb = DebuggerCli(listener=self)
if breakpoints:
self.breakpoints = list(breakpoints)
else:
self.breakpoints = [
Breakpoint(Keyword, "*::cumulusci.robotframework.Salesforce.Breakpoint")
]
def start_suite(self, name, attrs):
self.stack.append(Suite(name, attrs))
def start_test(self, name, attrs):
self.stack.append(Testcase(name, attrs))
def start_keyword(self, name, attrs):
context = Keyword(name, attrs)
self.stack.append(context)
self.break_if_breakpoint()
def end_keyword(self, name, attrs):
self.stack.pop()
def end_test(self, name, attrs):
self.stack.pop()
def end_suite(self, name, attrs):
self.stack.pop()
def do_step(self):
"""Single-step through the code
This will set a temporary breakpoint on the next keyword in
the current context before continuing. Once the breakpoint
is hit, it will be removed from the list of breakpoints.
"""
breakpoint = Breakpoint(
Keyword, "{}::*".format(self.stack[-2].longname), temporary=True
)
self.breakpoints.append(breakpoint)
def break_if_breakpoint(self):
"""Pause test execution and issue a prompt if we are at a breakpoint"""
# filter breakpoints to only those that match the current context
# (eg: Suite, Testcase, Keyword), and iterate over them looking
# for a match.
for breakpoint in [
bp
for bp in self.breakpoints
if isinstance(self.stack[-1], bp.breakpoint_type)
]:
statement = "{}::{}".format(self.stack[-2].longname, self.stack[-1].name)
if breakpoint.match(statement):
if breakpoint.temporary:
self.breakpoints.remove(breakpoint)
intro = "\n"
if self.show_initial_help:
self.show_initial_help = False
intro += self.rdb.intro
intro += "\n> {}\n-> {}".format(
self.stack[-2].longname, str(self.stack[-1])
)
# Note: this call won't return until a debugger command
# has been issued which returns True (eg: 'continue' or 'step')
self.rdb.cmdloop(intro)
return
|
webwaybooks/tests/utils/test_log.py
|
bysorry/telegram_media_downloader
| 401 |
90708
|
<filename>webwaybooks/tests/utils/test_log.py
"""Unittest module for log handlers."""
import os
import sys
import unittest
import mock
sys.path.append("..") # Adds higher directory to python modules path.
from utils.log import LogFilter
class MockLog:
"""
Mock logs.
"""
def __init__(self, **kwargs):
self.funcName = kwargs["funcName"]
class MetaTestCase(unittest.TestCase):
def test_log_filter(self):
result = LogFilter().filter(MockLog(funcName="send"))
self.assertEqual(result, False)
result1 = LogFilter().filter(MockLog(funcName="get_file"))
self.assertEqual(result1, False)
result2 = LogFilter().filter(MockLog(funcName="Synced"))
self.assertEqual(result2, True)
|
.modules/.recon-ng/modules/recon/domains-credentials/pwnedlist/domain_creds.py
|
termux-one/EasY_HaCk
| 1,103 |
90715
|
from recon.core.module import BaseModule
from recon.utils.crypto import aes_decrypt
class Module(BaseModule):
meta = {
'name': 'PwnedList - Pwned Domain Credentials Fetcher',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Queries the PwnedList API to fetch all credentials for a domain. Updates the \'credentials\' table with the results.',
'required_keys': ['pwnedlist_api', 'pwnedlist_secret', 'pwnedlist_iv'],
'comments': (
'API Query Cost: 10,000 queries per request, 1 query for each account returned, and 1 query per unique leak.',
),
'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
}
def module_run(self, domains):
key = self.keys.get('pwnedlist_api')
secret = self.keys.get('pwnedlist_secret')
decrypt_key = secret[:16]
iv = self.keys.get('pwnedlist_iv')
# setup the API call
url = 'https://api.pwnedlist.com/api/1/domains/query'
for domain in domains:
self.heading(domain, level=0)
payload = {'domain_identifier': domain, 'daysAgo': 0}
while True:
# build the payload
pwnedlist_payload = self.build_pwnedlist_payload(payload, 'domains.query', key, secret)
# make the request
resp = self.request(url, payload=pwnedlist_payload)
if resp.json: jsonobj = resp.json
else:
self.error('Invalid JSON response for \'%s\'.\n%s' % (domain, resp.text))
break
if len(jsonobj['accounts']) == 0:
self.output('No results returned for \'%s\'.' % (domain))
break
# extract the credentials
for cred in jsonobj['accounts']:
username = cred['plain']
password = aes_decrypt(cred['password'], decrypt_key, iv)
leak = cred['leak_id']
self.add_credentials(username=username, password=password, leak=leak)
self.add_leaks(mute=True, **self.get_pwnedlist_leak(leak))
# paginate
if jsonobj['token']:
payload['token'] = jsonobj['token']
continue
break
|
components/isceobj/RtcProc/runVerifyDEM.py
|
vincentschut/isce2
| 1,133 |
90716
|
#
# Author: <NAME>
# Copyright 2016
#
import logging
import isceobj
import mroipac
import os
import numpy as np
from isceobj.Util.decorators import use_api
logger = logging.getLogger('isce.insar.VerifyDEM')
def runVerifyDEM(self):
'''
Make sure that a DEM is available for processing the given data.
'''
self.demStitcher.noFilling = False
###If provided in the input XML file
if self.demFilename not in ['',None]:
demimg = isceobj.createDemImage()
demimg.load(self.demFilename + '.xml')
if not os.path.exists(self.demFilename + '.vrt'):
demimg.renderVRT()
if demimg.reference.upper() == 'EGM96':
wgsdemname = self.demFilename + '.wgs84'
if os.path.exists(wgsdemname) and os.path.exists(wgsdemname + '.xml'):
demimg = isceobj.createDemImage()
demimg.load(wgsdemname + '.xml')
if demimg.reference.upper() == 'EGM96':
raise Exception('WGS84 version of dem found by reference set to EGM96')
else:
demimg = self.demStitcher.correct(demimg)
elif demimg.reference.upper() != 'WGS84':
raise Exception('Unknown reference system for DEM: {0}'.format(demimg.reference))
else:
refPol = self._grd.polarizations[0]
reference = self._grd.loadProduct( os.path.join(self._grd.outputFolder, 'beta_{0}.xml'.format(refPol)))
bbox = reference.getBbox()
####Truncate to integers
tbox = [np.floor(bbox[0]), np.ceil(bbox[1]),
np.floor(bbox[2]), np.ceil(bbox[3])]
filename = self.demStitcher.defaultName(tbox)
wgsfilename = filename + '.wgs84'
####Check if WGS84 file exists
if os.path.exists(wgsfilename) and os.path.exists(wgsfilename + '.xml'):
demimg = isceobj.createDemImage()
demimg.load(wgsfilename + '.xml')
if not os.path.exists(wgsfilename + '.vrt'):
demimg.renderVRT()
####Check if EGM96 file exists
elif os.path.exists(filename) and os.path.exists(filename + '.xml'):
inimg = isceobj.createDemImage()
inimg.load(filename + '.xml')
            if not os.path.exists(filename + '.vrt'):
inimg.renderVRT()
demimg = self.demStitcher.correct(inimg)
else:
stitchOk = self.demStitcher.stitch(tbox[0:2], tbox[2:4])
if not stitchOk:
logger.error("Cannot form the DEM for the region of interest. If you have one, set the appropriate DEM component in the input file.")
raise Exception
inimg = isceobj.createDemImage()
inimg.load(filename + '.xml')
            if not os.path.exists(filename + '.vrt'):
inimg.renderVRT()
demimg = self.demStitcher.correct(inimg)
#get water mask
# self.runCreateWbdMask(info)
return demimg.filename
|
Configuration/Generator/python/SingleTaupt_50_cfi.py
|
ckamtsikis/cmssw
| 852 |
90730
|
<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
generator = cms.EDProducer("Pythia6PtGun",
PGunParameters = cms.PSet(
ParticleID = cms.vint32(-15),
AddAntiParticle = cms.bool(False),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinPt = cms.double(50.0),
MaxPt = cms.double(50.0001),
MinEta = cms.double(-2.4),
MaxEta = cms.double(2.4)
),
PythiaParameters = cms.PSet(
pythiaTauJets = cms.vstring(
'MDME(89,1)=0 ! no tau->electron',
'MDME(90,1)=0 ! no tau->muon'
),
pythiaUESettings = cms.vstring(
'MSTJ(11)=3 ! Choice of the fragmentation function',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(51)=7 ! structure function chosen',
'MSTP(81)=1 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model',
'MSTU(21)=1 ! Check on possible errors during program execution',
'PARP(82)=1.9409 ! pt cutoff for multiparton interactions',
'PARP(89)=1960. ! sqrts for which PARP82 is set',
'PARP(83)=0.5 ! Multiple interactions: matter distrbn parameter',
'PARP(84)=0.4 ! Multiple interactions: matter distribution parameter',
'PARP(90)=0.16 ! Multiple interactions: rescaling power',
'PARP(67)=2.5 ! amount of initial-state radiation',
'PARP(85)=1.0 ! gluon prod. mechanism in MI',
'PARP(86)=1.0 ! gluon prod. mechanism in MI',
'PARP(62)=1.25 ! ',
'PARP(64)=0.2 ! ',
'MSTP(91)=1 !',
'PARP(91)=2.1 ! kt distribution',
'PARP(93)=15.0 ! '
),
parameterSets = cms.vstring(
'pythiaUESettings',
'pythiaTauJets'
)
)
)
|
api/urls.py
|
siarheipuhach/tutorialdb
| 109 |
90745
|
<gh_stars>100-1000
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='api-home'),
path('tutorials/', views.tutorials),
path('tutorials/<str:tags>/', views.tutorial_tag),
path('tutorials/<str:tags>/<str:category>/', views.tutorial_tag_category),
path('tags/', views.tags),
path('latest/', views.latest),
]
|
scripts/artifacts/smanagerCrash.py
|
deagler4n6/ALEAPP
| 187 |
90751
|
<filename>scripts/artifacts/smanagerCrash.py
import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_smanagerCrash(files_found, report_folder, seeker, wrap_text):
file_found = str(files_found[0])
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
datetime(crash_time / 1000, "unixepoch"),
package_name
from crash_info
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Samsung Smart Manager - Crash')
report.start_artifact_report(report_folder, 'Samsung Smart Manager - Crash')
report.add_script()
data_headers = ('Timestamp','Package Name')
data_list = []
for row in all_rows:
data_list.append((row[0],row[1]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'samsung smart manager - crash'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'Samsung Smart Manager - Crash'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No Samsung Smart Manager - Crash data available')
db.close()
return
|
fortytwo/s02_reduce_clutter.py
|
rpharoah/42-workshop
| 221 |
90768
|
<filename>fortytwo/s02_reduce_clutter.py
"""02: Reduce Clutter
Reduce Clutter by Disabling Tools.
- We are an IDE, but we can also do a lean-and-mean UI
- Hide the Toolbar, Tool Window Bars, Navigation Bar
Repo: https://github.com/pauleveritt/42-workshop
Playlist: https://www.jetbrains.com/pycharm/guide/playlists/42/
"""
from fortytwo import App, Greeter
def main():
site = App()
with site as container:
greeter = container.get(Greeter)
greeting = greeter('Larry')
return greeting
if __name__ == '__main__':
print(main())
|
openstackclient/tests/unit/volume/v3/test_volume_group_snapshot.py
|
mydevice/python-openstackclient
| 262 |
90771
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import api_versions
from osc_lib import exceptions
from openstackclient.tests.unit.volume.v3 import fakes as volume_fakes
from openstackclient.volume.v3 import volume_group_snapshot
class TestVolumeGroupSnapshot(volume_fakes.TestVolume):
def setUp(self):
super().setUp()
self.volume_groups_mock = self.app.client_manager.volume.groups
self.volume_groups_mock.reset_mock()
self.volume_group_snapshots_mock = \
self.app.client_manager.volume.group_snapshots
self.volume_group_snapshots_mock.reset_mock()
class TestVolumeGroupSnapshotCreate(TestVolumeGroupSnapshot):
fake_volume_group = volume_fakes.FakeVolumeGroup.create_one_volume_group()
fake_volume_group_snapshot = \
volume_fakes.FakeVolumeGroupSnapshot.create_one_volume_group_snapshot()
columns = (
'ID',
'Status',
'Name',
'Description',
'Group',
'Group Type',
)
data = (
fake_volume_group_snapshot.id,
fake_volume_group_snapshot.status,
fake_volume_group_snapshot.name,
fake_volume_group_snapshot.description,
fake_volume_group_snapshot.group_id,
fake_volume_group_snapshot.group_type_id,
)
def setUp(self):
super().setUp()
self.volume_groups_mock.get.return_value = self.fake_volume_group
self.volume_group_snapshots_mock.create.return_value = \
self.fake_volume_group_snapshot
self.volume_group_snapshots_mock.get.return_value = \
self.fake_volume_group_snapshot
self.cmd = volume_group_snapshot.CreateVolumeGroupSnapshot(
self.app, None)
def test_volume_group_snapshot_create(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.14')
arglist = [
self.fake_volume_group.id,
]
verifylist = [
('volume_group', self.fake_volume_group.id),
('name', None),
('description', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volume_groups_mock.get.assert_called_once_with(
self.fake_volume_group.id)
self.volume_group_snapshots_mock.create.assert_called_once_with(
self.fake_volume_group.id, None, None,
)
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
def test_volume_group_snapshot_create_with_options(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.14')
arglist = [
self.fake_volume_group.id,
'--name', 'foo',
'--description', 'hello, world',
]
verifylist = [
('volume_group', self.fake_volume_group.id),
('name', 'foo'),
('description', 'hello, world'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volume_groups_mock.get.assert_called_once_with(
self.fake_volume_group.id)
self.volume_group_snapshots_mock.create.assert_called_once_with(
self.fake_volume_group.id, 'foo', 'hello, world',
)
self.assertEqual(self.columns, columns)
self.assertCountEqual(self.data, data)
def test_volume_group_snapshot_create_pre_v314(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.13')
arglist = [
self.fake_volume_group.id,
]
verifylist = [
('volume_group', self.fake_volume_group.id),
('name', None),
('description', None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
exc = self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
parsed_args)
self.assertIn(
'--os-volume-api-version 3.14 or greater is required',
str(exc))
class TestVolumeGroupSnapshotDelete(TestVolumeGroupSnapshot):
fake_volume_group_snapshot = \
volume_fakes.FakeVolumeGroupSnapshot.create_one_volume_group_snapshot()
def setUp(self):
super().setUp()
self.volume_group_snapshots_mock.get.return_value = \
self.fake_volume_group_snapshot
self.volume_group_snapshots_mock.delete.return_value = None
self.cmd = volume_group_snapshot.DeleteVolumeGroupSnapshot(
self.app, None)
def test_volume_group_snapshot_delete(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.14')
arglist = [
self.fake_volume_group_snapshot.id,
]
verifylist = [
('snapshot', self.fake_volume_group_snapshot.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.volume_group_snapshots_mock.delete.assert_called_once_with(
self.fake_volume_group_snapshot.id,
)
self.assertIsNone(result)
def test_volume_group_snapshot_delete_pre_v314(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.13')
arglist = [
self.fake_volume_group_snapshot.id,
]
verifylist = [
('snapshot', self.fake_volume_group_snapshot.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
exc = self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
parsed_args)
self.assertIn(
'--os-volume-api-version 3.14 or greater is required',
str(exc))
class TestVolumeGroupSnapshotList(TestVolumeGroupSnapshot):
fake_volume_group_snapshots = \
volume_fakes.FakeVolumeGroupSnapshot.create_volume_group_snapshots()
columns = (
'ID',
'Status',
'Name',
)
data = [
(
fake_volume_group_snapshot.id,
fake_volume_group_snapshot.status,
fake_volume_group_snapshot.name,
) for fake_volume_group_snapshot in fake_volume_group_snapshots
]
def setUp(self):
super().setUp()
self.volume_group_snapshots_mock.list.return_value = \
self.fake_volume_group_snapshots
self.cmd = volume_group_snapshot.ListVolumeGroupSnapshot(
self.app, None)
def test_volume_group_snapshot_list(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.14')
arglist = [
'--all-projects',
]
verifylist = [
('all_projects', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.volume_group_snapshots_mock.list.assert_called_once_with(
search_opts={
'all_tenants': True,
},
)
self.assertEqual(self.columns, columns)
self.assertCountEqual(tuple(self.data), data)
def test_volume_group_snapshot_list_pre_v314(self):
self.app.client_manager.volume.api_version = \
api_versions.APIVersion('3.13')
arglist = [
]
verifylist = [
('all_projects', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
exc = self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
parsed_args)
self.assertIn(
'--os-volume-api-version 3.14 or greater is required',
str(exc))
|
bitex/formatters/ccex.py
|
ligggooo/quant2018
| 312 |
90777
|
<filename>bitex/formatters/ccex.py
# Import Built-Ins
import logging
# Import Third-Party
# Import Homebrew
from bitex.formatters.base import Formatter
# Init Logging Facilities
log = logging.getLogger(__name__)
class CcexFormatter(Formatter):
@staticmethod
def ticker(data, *args, **kwargs):
return (data['buy'], data['sell'], data['high'], data['low'], None,
None, data['lastprice'], None, data['updated'])
|
lib-src/lv2/suil/waflib/extras/softlink_libs.py
|
joshrose/audacity
| 7,892 |
90816
|
#! /usr/bin/env python
# per rosengren 2011
from waflib.TaskGen import feature, after_method
from waflib.Task import Task, always_run
from os.path import basename, isabs
from os import tmpfile, linesep
def options(opt):
grp = opt.add_option_group('Softlink Libraries Options')
grp.add_option('--exclude', default='/usr/lib,/lib', help='No symbolic links are created for libs within [%default]')
def configure(cnf):
cnf.find_program('ldd')
if not cnf.env.SOFTLINK_EXCLUDE:
cnf.env.SOFTLINK_EXCLUDE = cnf.options.exclude.split(',')
@feature('softlink_libs')
@after_method('process_rule')
def add_finder(self):
tgt = self.path.find_or_declare(self.target)
self.create_task('sll_finder', tgt=tgt)
self.create_task('sll_installer', tgt=tgt)
always_run(sll_installer)
class sll_finder(Task):
ext_out = 'softlink_libs'
def run(self):
bld = self.generator.bld
linked=[]
target_paths = []
for g in bld.groups:
for tgen in g:
# FIXME it might be better to check if there is a link_task (getattr?)
target_paths += [tgen.path.get_bld().bldpath()]
linked += [t.outputs[0].bldpath()
for t in getattr(tgen, 'tasks', [])
if t.__class__.__name__ in
['cprogram', 'cshlib', 'cxxprogram', 'cxxshlib']]
lib_list = []
if len(linked):
cmd = [self.env.LDD] + linked
# FIXME add DYLD_LIBRARY_PATH+PATH for osx+win32
ldd_env = {'LD_LIBRARY_PATH': ':'.join(target_paths + self.env.LIBPATH)}
# FIXME the with syntax will not work in python 2
with tmpfile() as result:
self.exec_command(cmd, env=ldd_env, stdout=result)
result.seek(0)
for line in result.readlines():
words = line.split()
if len(words) < 3 or words[1] != '=>':
continue
lib = words[2]
if lib == 'not':
continue
if any([lib.startswith(p) for p in
[bld.bldnode.abspath(), '('] +
self.env.SOFTLINK_EXCLUDE]):
continue
if not isabs(lib):
continue
lib_list.append(lib)
lib_list = sorted(set(lib_list))
self.outputs[0].write(linesep.join(lib_list + self.env.DYNAMIC_LIBS))
return 0
class sll_installer(Task):
ext_in = 'softlink_libs'
def run(self):
tgt = self.outputs[0]
self.generator.bld.install_files('${LIBDIR}', tgt, postpone=False)
lib_list=tgt.read().split()
for lib in lib_list:
self.generator.bld.symlink_as('${LIBDIR}/'+basename(lib), lib, postpone=False)
return 0
|
tools/go_generics/defs.bzl
|
Exhorder6/gvisor
| 12,536 |
90835
|
<reponame>Exhorder6/gvisor<gh_stars>1000+
"""Generics support via go_generics.
A Go template is similar to a go library, except that it has certain types that
can be replaced before usage. For example, one could define a templatized List
struct, whose elements are of type T, then instantiate that template for
T=segment, where "segment" is the concrete type.
"""
TemplateInfo = provider(
"Information about a go_generics template.",
fields = {
"unsafe": "whether the template requires unsafe code",
"types": "required types",
"opt_types": "optional types",
"consts": "required consts",
"opt_consts": "optional consts",
"deps": "package dependencies",
"template": "merged template source file",
},
)
def _go_template_impl(ctx):
srcs = ctx.files.srcs
template = ctx.actions.declare_file(ctx.label.name + "_template.go")
args = ["-o=%s" % template.path] + [f.path for f in srcs]
ctx.actions.run(
inputs = srcs,
outputs = [template],
mnemonic = "GoGenericsTemplate",
progress_message = "Building Go template %s" % ctx.label,
arguments = args,
executable = ctx.executable._tool,
)
return [TemplateInfo(
types = ctx.attr.types,
opt_types = ctx.attr.opt_types,
consts = ctx.attr.consts,
opt_consts = ctx.attr.opt_consts,
deps = ctx.attr.deps,
template = template,
)]
go_template = rule(
implementation = _go_template_impl,
attrs = {
"srcs": attr.label_list(doc = "the list of source files that comprise the template", mandatory = True, allow_files = True),
"deps": attr.label_list(doc = "the standard dependency list", allow_files = True, cfg = "target"),
"types": attr.string_list(doc = "the list of generic types in the template that are required to be specified"),
"opt_types": attr.string_list(doc = "the list of generic types in the template that can but aren't required to be specified"),
"consts": attr.string_list(doc = "the list of constants in the template that are required to be specified"),
"opt_consts": attr.string_list(doc = "the list of constants in the template that can but aren't required to be specified"),
"_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics/go_merge")),
},
)
def _go_template_instance_impl(ctx):
info = ctx.attr.template[TemplateInfo]
output = ctx.outputs.out
# Check that all required types are defined.
for t in info.types:
if t not in ctx.attr.types:
fail("Missing value for type %s in %s" % (t, ctx.attr.template.label))
# Check that all defined types are expected by the template.
for t in ctx.attr.types:
if (t not in info.types) and (t not in info.opt_types):
fail("Type %s is not a parameter to %s" % (t, ctx.attr.template.label))
# Check that all required consts are defined.
for t in info.consts:
if t not in ctx.attr.consts:
fail("Missing value for constant %s in %s" % (t, ctx.attr.template.label))
# Check that all defined consts are expected by the template.
for t in ctx.attr.consts:
if (t not in info.consts) and (t not in info.opt_consts):
fail("Const %s is not a parameter to %s" % (t, ctx.attr.template.label))
# Build the argument list.
args = ["-i=%s" % info.template.path, "-o=%s" % output.path]
if ctx.attr.package:
args.append("-p=%s" % ctx.attr.package)
if len(ctx.attr.prefix) > 0:
args.append("-prefix=%s" % ctx.attr.prefix)
if len(ctx.attr.suffix) > 0:
args.append("-suffix=%s" % ctx.attr.suffix)
args += [("-t=%s=%s" % (p[0], p[1])) for p in ctx.attr.types.items()]
args += [("-c=%s=%s" % (p[0], p[1])) for p in ctx.attr.consts.items()]
args += [("-import=%s=%s" % (p[0], p[1])) for p in ctx.attr.imports.items()]
if ctx.attr.anon:
args.append("-anon")
ctx.actions.run(
inputs = [info.template],
outputs = [output],
mnemonic = "GoGenericsInstance",
progress_message = "Building Go template instance %s" % ctx.label,
arguments = args,
executable = ctx.executable._tool,
)
return [DefaultInfo(
files = depset([output]),
)]
go_template_instance = rule(
implementation = _go_template_instance_impl,
attrs = {
"template": attr.label(doc = "the label of the template to be instantiated", mandatory = True),
"prefix": attr.string(doc = "a prefix to be added to globals in the template"),
"suffix": attr.string(doc = "a suffix to be added to globals in the template"),
"types": attr.string_dict(doc = "the map from generic type names to concrete ones"),
"consts": attr.string_dict(doc = "the map from constant names to their values"),
"imports": attr.string_dict(doc = "the map from imports used in types/consts to their import paths"),
"anon": attr.bool(doc = "whether anoymous fields should be processed", mandatory = False, default = False),
"package": attr.string(doc = "the package for the generated source file", mandatory = False),
"out": attr.output(doc = "output file", mandatory = True),
"_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics")),
},
)
|
modules/kobo.py
|
hwiorn/orger
| 241 |
90867
|
#!/usr/bin/env python3
from orger import Mirror
from orger.inorganic import node, link, OrgNode
from orger.common import dt_heading
from my.kobo import get_books_with_highlights, Highlight
class KoboView(Mirror):
def get_items(self) -> Mirror.Results:
def render_highlight(h: Highlight) -> OrgNode:
# TODO FIXME could use bookmark page??
heading = 'bookmark' if h.kind == 'bookmark' else (h.text or '')
body = h.annotation # TODO check if empty
return node(
heading=dt_heading(h.dt, heading),
body=body,
)
for page in get_books_with_highlights():
yield str(page.book), node(
heading=dt_heading(page.dt, str(page.book)),
children=[render_highlight(h) for h in page.highlights],
)
# TODO maybe fixture instead?
test = KoboView.make_test(
heading='Unsong',
contains='Singer',
)
if __name__ == '__main__':
KoboView.main()
|
matrix-python-project/archive_tools/auto_split_clip_by_scene/auto_clip.py
|
hokaso/hocassian-media-matrix
| 141 |
90876
|
# Before use, please install the required dependencies via "pip install scenedetect[opencv,progress_bar,scenedetect]"~
from __future__ import print_function
import os
# Standard PySceneDetect imports:
from scenedetect.video_splitter import split_video_ffmpeg
from scenedetect.video_manager import VideoManager
from scenedetect.scene_manager import SceneManager
# For caching detection metrics and saving/loading to a stats file
from scenedetect.stats_manager import StatsManager
# For content-aware scene detection:
from scenedetect.detectors.content_detector import ContentDetector
class AutoClip(object):
def __init__(self):
self.input_dir = './input'
self.output_dir = 'output'
self.current_clip_output_path = os.getcwd() + "/" + self.output_dir
if not os.path.exists(self.current_clip_output_path):
os.makedirs(self.current_clip_output_path)
self.threshold_default = 27.5
self.crf_default = 20
def run(self):
print("请确保运行前素材已经放置在同一目录下的input文件夹中~\n")
_file_list = self.file_prepare()
        threshold = input("Enter the scene threshold (range [10, 90]; press Enter for the default 27.5): ")
        if not threshold:
            threshold = self.threshold_default
        else:
            # input() returns a string; cast it so ContentDetector gets a numeric threshold
            threshold = float(threshold)
        crf = input("Enter the crf value (range [1, 51]; press Enter for the default 20): ")
if not crf:
crf = self.crf_default
for key in _file_list:
scenes = self.find_scenes(key, threshold)
print(scenes)
file_path, full_name = os.path.split(key)
f_name, ext = os.path.splitext(full_name)
split_video_ffmpeg(
[key],
scenes,
"$VIDEO_NAME - Scene $SCENE_NUMBER.mp4",
self.output_dir + "/" + f_name,
arg_override='-c:v libx264 -preset slow -crf ' + str(crf) + ' -c:a aac',
hide_progress=False,
suppress_output=False
)
print("处理完毕~")
@staticmethod
def find_scenes(video_path, threshold):
video_manager = VideoManager([video_path])
stats_manager = StatsManager()
# Construct our SceneManager and pass it our StatsManager.
scene_manager = SceneManager(stats_manager)
# Add ContentDetector algorithm (each detector's constructor
# takes detector options, e.g. threshold).
scene_manager.add_detector(ContentDetector(threshold=threshold))
base_timecode = video_manager.get_base_timecode()
try:
# Set downscale factor to improve processing speed.
video_manager.set_downscale_factor()
# Start video_manager.
video_manager.start()
# Perform scene detection on video_manager.
scene_manager.detect_scenes(frame_source=video_manager)
# Obtain list of detected scenes.
scene_list = scene_manager.get_scene_list(base_timecode)
# Each scene is a tuple of (start, end) FrameTimecodes.
print('List of scenes obtained:')
final_scene_list = []
for i, scene in enumerate(scene_list):
temp = list(scene)
# print(temp)
temp[0] = temp[0] + 1
temp[1] = temp[1] - 1
scene = tuple(temp)
final_scene_list.append(scene)
finally:
video_manager.release()
return final_scene_list
def file_prepare(self):
file_list = []
for root, dirs, files in os.walk(self.input_dir):
file_list = [self.input_dir + "/" + i for i in files]
return file_list
if __name__ == '__main__':
print("如果出现错误,请通过「pip install scenedetect[opencv,progress_bar,scenedetect]」安装部分所需要的依赖~")
AutoClip().run()
|
build_tools/install_cexts.py
|
MBKayro/kolibri
| 545 |
90879
|
"""
This module defines functions to install c extensions for all the platforms into
Kolibri.
It is required to have pip version greater than 19.3.1 to run this script.
Usage:
> python build_tools/install_cexts.py --file "requirements/cext.txt" --cache-path "/cext_cache"
It reads the package name and version from requirements/cext.txt file and
installs the package and its dependencies using `pip install` with cache_path as
the cache directory. It installs from PyPi for platforms such as manylinux,
Windows and Piwheels for platforms such as ARM Linux. Please check the
description of the function `parse_package_page` to see the platforms we skip
downloading.
When Kolibri starts, the function `prepend_cext_path` in `env.py` will calculate
the c extension path based on system information and add it to sys.path so
Kolibri can import the c extension.
The cache directory is mainly used to stabilize the installation of c extensions
from Piwheels website for the builds on Buildkite. If the directory of the cache_path
passed into the function is not writable, a folder named `cext_cache` will be
created under the directory where the script runs to store the cache data.
"""
import argparse
import os
import shutil
import subprocess
import sys
import requests
from bs4 import BeautifulSoup
DIST_CEXT = os.path.join(
os.path.dirname(os.path.realpath(os.path.dirname(__file__))),
"kolibri",
"dist",
"cext",
)
PYPI_DOWNLOAD = "https://pypi.python.org/simple/"
PIWHEEL_DOWNLOAD = "https://www.piwheels.org/simple/"
def get_path_with_arch(platform, path, abi, implementation, python_version):
"""
Calculate package path according to the platform.
"""
# Split the platform into two parts.
# For example: manylinux1_x86_64 to Linux, x86_64
platform_split = (
platform.replace("manylinux1", "Linux").replace("linux", "Linux").split("_", 1)
)
# Windows 32-bit's machine name is x86.
if platform_split[0] == "win32":
return os.path.join(path, "Windows", "x86")
# Windows 64-bit
elif platform_split[0] == "win":
return os.path.join(path, "Windows", "AMD64")
# Prior to CPython 3.3, there were two ABI-incompatible ways of building CPython
# There could be abi tag 'm' for narrow-unicode and abi tag 'mu' for wide-unicode
if implementation == "cp" and int(python_version) < 33:
return os.path.join(path, platform_split[0], abi, platform_split[1])
return os.path.join(path, platform_split[0], platform_split[1])
def run_pip_install(
path,
platform,
version,
implementation,
abi,
name,
pk_version,
index_url,
cache_path,
):
"""
Install the package and its dependencies according to platform,
python version, implementation and abi using `pip install` with cache_path as
the cache directory.
"""
return_code = subprocess.call(
[
"pip",
"install",
"-q",
"-t",
path,
"--platform",
platform,
"--python-version",
version,
"--implementation",
implementation,
"--abi",
abi,
"-i",
index_url,
"--cache-dir",
cache_path,
"--only-binary=:all:",
"--no-deps",
"{}=={}".format(name, pk_version),
]
)
return return_code
def install_package(package_name, package_version, index_url, info, cache_path):
"""
Install packages based on the information we gather from the index_url page
"""
for item in info:
platform = item["platform"]
implementation = item["implementation"]
python_version = item["version"]
abi = item["abi"]
filename = "-".join([package_name, package_version, abi, platform])
# Calculate the path that the package will be installed into
version_path = os.path.join(DIST_CEXT, implementation + python_version)
package_path = get_path_with_arch(
platform, version_path, abi, implementation, python_version
)
print("Installing package {}...".format(filename))
# Install the package using pip with cache_path as the cache directory
install_return = run_pip_install(
package_path,
platform,
python_version,
implementation,
abi,
package_name,
package_version,
index_url,
cache_path,
)
# Ignore Piwheels installation failure because the website is not always stable
if install_return == 1 and index_url == PYPI_DOWNLOAD:
sys.exit("\nInstallation failed for package {}.\n".format(filename))
else:
# Clean up .dist-info folders
dist_info_folders = os.listdir(package_path)
for folder in dist_info_folders:
if folder.endswith(".dist-info"):
shutil.rmtree(os.path.join(package_path, folder))
def parse_package_page(files, pk_version, index_url, cache_path):
"""
Parse the PYPI and Piwheels links for the package information.
    We skip a download link if it is:
        * not a whl file
        * not the package version specified in requirements.txt
        * a python version that kolibri does not support
        * a macosx build
        * a win_amd64 build for a python version other than 3.6
"""
result = []
for file in files.find_all("a"):
# Skip if not a whl file
if not file.string.endswith("whl"):
continue
file_name_chunks = file.string.split("-")
package_version = file_name_chunks[1]
package_name = file_name_chunks[0]
python_version = file_name_chunks[2][2:]
platform = file_name_chunks[4].split(".")[0]
implementation = file_name_chunks[2][:2]
abi = file_name_chunks[3]
if package_version != pk_version:
continue
if python_version == "26":
continue
if "macosx" in platform:
continue
if "win_amd64" in platform and python_version != "36":
continue
# Cryptography builds for Linux target Python 3.4+ but the only existing
# build is labeled 3.4 (the lowest version supported).
# Expand the abi3 tag here. e.g. cp34 abi3 is expanded to cp34m, cp35m, cp36m, cp37m
# https://cryptography.io/en/latest/faq/#why-are-there-no-wheels-for-python-3-6-on-linux-or-macos
if abi == "abi3":
for actual_version in range(int(python_version), 38):
actual_version = str(actual_version)
actual_abi = "".join([implementation, actual_version, "m"])
info = {
"platform": platform,
"implementation": implementation,
"version": actual_version,
"abi": actual_abi,
}
result.append(info)
else:
info = {
"platform": platform,
"implementation": implementation,
"version": python_version,
"abi": abi,
}
result.append(info)
install_package(package_name, pk_version, index_url, result, cache_path)
def parse_pypi_and_piwheels(name, pk_version, cache_path):
"""
Start installing from the pypi and piwheels pages of the package.
"""
links = [PYPI_DOWNLOAD, PIWHEEL_DOWNLOAD]
for link in links:
r = requests.get(link + name)
if r.status_code == 200:
files = BeautifulSoup(r.content, "html.parser")
parse_package_page(files, pk_version, link, cache_path)
else:
sys.exit("\nUnable to find package {} on {}.\n".format(name, link))
def check_cache_path_writable(cache_path):
"""
If the defined cache path is not writable, change it to a folder named
cext_cache under the current directory where the script runs.
"""
try:
check_file = os.path.join(cache_path, "check.txt")
with open(check_file, "w") as f:
f.write("check")
os.remove(check_file)
return cache_path
except (OSError, IOError):
new_path = os.path.realpath("cext_cache")
print(
"The cache directory {old_path} is not writable. Changing to directory {new_path}.".format(
old_path=cache_path, new_path=new_path
)
)
return new_path
def parse_requirements(args):
"""
Parse the requirements.txt to get packages' names and versions,
then install them.
"""
# pip version needs to be greater than 19.3.1 to run this script
# see https://github.com/pypa/pip/issues/6070
pip_version = str(subprocess.check_output(["pip", "--version"]))
pip_version_major = int(str(pip_version).split(".")[0].split("pip")[1].strip())
if pip_version_major < 20:
sys.exit(
"pip version is lower or equal to 19.3.1. Please upgrade the pip version to run this script."
)
with open(args.file) as f:
cache_path = os.path.realpath(args.cache_path)
cache_path = check_cache_path_writable(cache_path)
for line in f:
char_list = line.split("==")
if len(char_list) == 2:
# Parse PyPi and Piwheels pages to install package according to
# its name and version
parse_pypi_and_piwheels(
char_list[0].strip(), char_list[1].strip(), cache_path
)
# Ignore comments
elif not line.startswith("#"):
sys.exit(
"\nName format in cext.txt is incorrect. Should be 'packageName==packageVersion'.\n"
)
if __name__ == "__main__":
# Parsing the requirement.txt file argument
parser = argparse.ArgumentParser(
description="Downloading and installing Python C extensions tool."
)
parser.add_argument(
"--file", required=True, help="The name of the requirements.txt"
)
parser.add_argument(
"--cache-path",
default="/cext_cache",
help="The path in which pip cache data is stored",
)
args = parser.parse_args()
parse_requirements(args)
|
tests/2d/numpy/methods.py
|
oojBuffalo/micropython-ulab
| 232 |
90890
|
<gh_stars>100-1000
try:
from ulab import numpy as np
except ImportError:
import numpy as np
a = np.array([1, 2, 3, 4], dtype=np.int8)
b = a.copy()
print(b)
a = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=np.int16)
b = a.copy()
print(b)
a = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=np.float)
b = a.copy()
print(b)
print(a.dtype)
print(a.flatten())
print(np.array([1,2,3], dtype=np.uint8).itemsize)
print(np.array([1,2,3], dtype=np.uint16).itemsize)
print(np.array([1,2,3], dtype=np.int8).itemsize)
print(np.array([1,2,3], dtype=np.int16).itemsize)
print(np.array([1,2,3], dtype=np.float).itemsize)
print(np.array([1,2,3], dtype=np.float).shape)
print(np.array([[1],[2],[3]], dtype=np.float).shape)
print(np.array([[1],[2],[3]], dtype=np.float).reshape((1,3)))
print(np.array([[1],[2],[3]]).size)
print(np.array([1,2,3], dtype=np.float).size)
print(np.array([1,2,3], dtype=np.uint8).tobytes())
print(np.array([1,2,3], dtype=np.int8).tobytes())
print(np.array([1,2,3], dtype=np.float).transpose().shape)
print(np.array([[1],[2],[3]], dtype=np.float).transpose().shape)
a = np.array([1, 2, 3, 4, 5, 6], dtype=np.uint8)
b = a.byteswap(inplace=False)
print(a)
print(b)
c = a.byteswap(inplace=True)
print(a)
print(c)
a = np.array([1, 2, 3, 4, 5, 6], dtype=np.uint16)
b = a.byteswap(inplace=False)
print(a)
print(b)
c = a.byteswap(inplace=True)
print(a)
print(c)
a = np.array([1, 2, 3, 4, 5, 6], dtype=np.float)
b = a.byteswap(inplace=False)
print(a)
print(b)
c = a.byteswap(inplace=True)
print(a)
print(c)
|
python-sdk/tutorials/automl-with-azureml/forecasting-bike-share/run_forecast.py
|
0mza987/azureml-examples
| 331 |
90899
|
<filename>python-sdk/tutorials/automl-with-azureml/forecasting-bike-share/run_forecast.py
from azureml.core import ScriptRunConfig
def run_rolling_forecast(
test_experiment,
compute_target,
train_run,
test_dataset,
target_column_name,
inference_folder="./forecast",
):
train_run.download_file("outputs/model.pkl", inference_folder + "/model.pkl")
inference_env = train_run.get_environment()
config = ScriptRunConfig(
source_directory=inference_folder,
script="forecasting_script.py",
arguments=[
"--target_column_name",
target_column_name,
"--test_dataset",
test_dataset.as_named_input(test_dataset.name),
],
compute_target=compute_target,
environment=inference_env,
)
run = test_experiment.submit(
config,
tags={
"training_run_id": train_run.id,
"run_algorithm": train_run.properties["run_algorithm"],
"valid_score": train_run.properties["score"],
"primary_metric": train_run.properties["primary_metric"],
},
)
run.log("run_algorithm", run.tags["run_algorithm"])
return run
|
lang/tags/comment_block.py
|
quadfather85/knausj_talon
| 298 |
90926
|
<gh_stars>100-1000
from talon import Context, Module
ctx = Context()
mod = Module()
mod.tag("code_comment_block", desc="Tag for enabling generic block comment commands")
@mod.action_class
class Actions:
def code_comment_block():
"""Block comment"""
def code_comment_block_prefix():
"""Block comment start syntax"""
def code_comment_block_suffix():
"""Block comment end syntax"""
|
tests/test_memdatasource.py
|
toni-moreno/loudml
| 245 |
90930
|
<reponame>toni-moreno/loudml
from loudml.misc import (
nan_to_none,
)
from loudml.donut import DonutModel
import logging
import unittest
from loudml.membucket import MemBucket
logging.getLogger('tensorflow').disabled = True
FEATURES = [
{
'name': 'avg_foo',
'metric': 'avg',
'field': 'foo',
'default': 0,
},
]
class TestMemBucket(unittest.TestCase):
def setUp(self):
self.source = MemBucket()
self.model = DonutModel(dict(
name='test',
offset=30,
span=300,
bucket_interval=3,
interval=60,
features=FEATURES,
max_threshold=70,
min_threshold=60,
))
data = [
# (foo, timestamp)
(1, 0), # excluded
(2, 1), (3, 2),
# empty
(4, 8),
(5, 10), # excluded
]
for entry in data:
self.source.insert_times_data({
'foo': entry[0],
'timestamp': entry[1],
})
self.source.commit()
def test_get_times_buckets(self):
res = self.source.get_times_buckets(
from_date=1,
to_date=9,
bucket_interval=3,
)
self.assertEqual(
[[entry.data['foo'] for entry in bucket.data] for bucket in res],
[[2, 3], [], [4]],
)
def test_get_times_data(self):
res = self.source.get_times_data(
bucket_interval=self.model.bucket_interval,
features=self.model.features,
from_date=1,
to_date=9,
)
foo_avg = []
for line in res:
foo_avg.append(nan_to_none(line[1][0]))
self.assertEqual(foo_avg, [2.5, None, 4.0])
|
input_pipeline.py
|
zmskye/unsupervised_captioning
| 229 |
90940
|
<filename>input_pipeline.py
import tensorflow as tf
from misc_fn import controlled_shuffle
from misc_fn import random_drop
FLAGS = tf.flags.FLAGS
AUTOTUNE = tf.data.experimental.AUTOTUNE
def batching_func(x, batch_size):
"""Forms a batch with dynamic padding."""
return x.padded_batch(
batch_size,
padded_shapes=((
tf.TensorShape([299, 299, 3]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([])),
(tf.TensorShape([None]),
tf.TensorShape([]),
tf.TensorShape([None]),
tf.TensorShape([]))),
drop_remainder=True)
def preprocess_image(encoded_image, classes, scores):
"""Decodes an image."""
image = tf.image.decode_jpeg(encoded_image, 3)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize_images(image, [346, 346])
image = tf.random_crop(image, [299, 299, 3])
image = image * 2 - 1
return image, classes, scores, tf.shape(classes)[0]
def parse_image(serialized):
"""Parses a tensorflow.SequenceExample into an image and detected objects.
Args:
serialized: A scalar string Tensor; a single serialized SequenceExample.
Returns:
encoded_image: A scalar string Tensor containing a JPEG encoded image.
classes: A 1-D int64 Tensor containing the detected objects.
scores: A 1-D float32 Tensor containing the detection scores.
"""
context, sequence = tf.parse_single_sequence_example(
serialized,
context_features={
'image/data': tf.FixedLenFeature([], dtype=tf.string)
},
sequence_features={
'classes': tf.FixedLenSequenceFeature([], dtype=tf.int64),
'scores': tf.FixedLenSequenceFeature([], dtype=tf.float32),
})
encoded_image = context['image/data']
classes = tf.to_int32(sequence['classes'])
scores = sequence['scores']
return encoded_image, classes, scores
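# A minimal sketch of how a matching SequenceExample could be serialized for a
# unit test (illustrative only; this helper is not used by the pipeline below):
def make_image_example(jpeg_bytes, classes, scores):
  """Builds a serialized SequenceExample in the format expected by parse_image."""
  context = tf.train.Features(feature={
      'image/data': tf.train.Feature(
          bytes_list=tf.train.BytesList(value=[jpeg_bytes])),
  })
  feature_lists = tf.train.FeatureLists(feature_list={
      'classes': tf.train.FeatureList(feature=[
          tf.train.Feature(int64_list=tf.train.Int64List(value=[int(c)]))
          for c in classes]),
      'scores': tf.train.FeatureList(feature=[
          tf.train.Feature(float_list=tf.train.FloatList(value=[float(s)]))
          for s in scores]),
  })
  return tf.train.SequenceExample(
      context=context, feature_lists=feature_lists).SerializeToString()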
def parse_sentence(serialized):
"""Parses a tensorflow.SequenceExample into an caption.
Args:
serialized: A scalar string Tensor; a single serialized SequenceExample.
Returns:
key: The keywords in a sentence.
num_key: The number of keywords.
sentence: A description.
sentence_length: The length of the description.
"""
context, sequence = tf.parse_single_sequence_example(
serialized,
context_features={},
sequence_features={
'sentence': tf.FixedLenSequenceFeature([], dtype=tf.int64),
})
sentence = tf.to_int32(sequence['sentence'])
key = controlled_shuffle(sentence[1:-1])
key = random_drop(key)
key = tf.concat([key, [FLAGS.end_id]], axis=0)
return key, tf.shape(key)[0], sentence, tf.shape(sentence)[0]
def input_fn(batch_size):
"""Input function."""
image_ds = tf.data.TFRecordDataset('data/image_train.tfrec')
image_ds = image_ds.map(parse_image, num_parallel_calls=AUTOTUNE)
image_ds = image_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
image_ds = image_ds.shuffle(8192).repeat()
sentence_ds = tf.data.TFRecordDataset('data/sentence.tfrec')
sentence_ds = sentence_ds.map(parse_sentence, num_parallel_calls=AUTOTUNE)
sentence_ds = sentence_ds.shuffle(65536).repeat()
dataset = tf.data.Dataset.zip((image_ds, sentence_ds))
dataset = batching_func(dataset, batch_size)
dataset = dataset.prefetch(AUTOTUNE)
iterator = dataset.make_one_shot_iterator()
image, sentence = iterator.get_next()
im, classes, scores, num = image
key, lk, sentence, ls = sentence
return {'im': im, 'classes': classes, 'scores': scores, 'num': num,
'key': key, 'lk': lk}, {'sentence': sentence, 'len': ls}
|
release_task_lock.py
|
bopopescu/redis-ctl
| 109 |
90979
|
import config
import models.base
from models.task import TaskLock
def main():
app = config.App(config)
with app.app_context():
for lock in models.base.db.session.query(TaskLock).all():
if lock.step is not None:
lock.step.complete('Force release lock')
models.base.db.session.delete(lock)
models.base.db.session.commit()
if __name__ == '__main__':
main()
|
lib/tests/py_test/test_protobuf.py
|
nccgroup/blackboxprotobuf
| 261 |
90983
|
from hypothesis import given, example, note
import hypothesis.strategies as st
import hypothesis
import strategies
import warnings
import base64
import json
import six
import blackboxprotobuf
warnings.filterwarnings(
"ignore",
"Call to deprecated create function.*",
)
try:
import Test_pb2
except ImportError:
import os
os.system(
"cd tests/payloads; protoc --python_out ../py_test/ Test.proto; cd ../../"
)
import Test_pb2
# TODO: need to find a different way to generate protobuf messages off of this
testMessage_typedef = {
"1": {"type": "double", "name": "testDouble"},
"2": {"type": "float", "name": "testFloat"},
# "4": {"type": "int", "name": "testInt32"},
"8": {"type": "int", "name": "testInt64"},
# "16": {"type": "uint", "name": "testUInt32"},
"32": {"type": "uint", "name": "testUInt64"},
# "64": {"type": "sint", "name": "testSInt32"},
"128": {"type": "sint", "name": "testSInt64"},
"256": {"type": "fixed32", "name": "testFixed32"},
"512": {"type": "fixed64", "name": "testFixed64"},
"1024": {"type": "sfixed32", "name": "testSFixed32"},
"2048": {"type": "sfixed64", "name": "testSFixed64"},
# "4096": {"type": "int", "name": "testBool"},
"8192": {"type": "string", "name": "testString"},
"16384": {"type": "bytes", "name": "testBytes"},
# "32768": {"type": "message", "name": "testEmbed",
# "message_typedef": {
# "3": {"type": "double", "name": "embedDouble"},
# "2": {"type": "bytes", "name": "embedString"}}
# },
# "65536": {"type": "packed_int", "name": "testRepeatedInt32"}
}
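# For reference, a direct round trip with this typedef looks like the tests
# below (sketch; `encoded_bytes` stands for any serialized TestMessage):
#   data, typedef = blackboxprotobuf.decode_message(encoded_bytes, testMessage_typedef)
#   re_encoded = blackboxprotobuf.encode_message(data, testMessage_typedef)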
# Test decoding from blackboxprotobuf
@given(x=strategies.gen_message_data(testMessage_typedef))
def test_decode(x):
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded, typedef = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
hypothesis.note("Decoded: %r" % decoded)
for key in decoded.keys():
assert x[key] == decoded[key]
# Test encoding with blackboxprotobuf
@given(x=strategies.gen_message_data(testMessage_typedef))
def test_encode(x):
encoded = blackboxprotobuf.encode_message(x, testMessage_typedef)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
for key in x.keys():
assert getattr(message, key) == x[key]
# Try to modify a random key with blackbox and re-encode
# TODO: In the future do more random modifications, like swap the whole value
@given(
x=strategies.gen_message_data(testMessage_typedef),
modify_num=st.sampled_from(sorted(testMessage_typedef.keys())),
)
def test_modify(x, modify_num):
modify_key = testMessage_typedef[modify_num]["name"]
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded, typedef = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
# eliminate any cases where protobuf defaults out a field
hypothesis.assume(modify_key in decoded)
if isinstance(decoded[modify_key], str):
mod_func = lambda x: "test"
elif six.PY2 and isinstance(decoded[modify_key], unicode):
mod_func = lambda x: six.u("test")
elif isinstance(decoded[modify_key], bytes):
mod_func = lambda x: b"test"
elif isinstance(decoded[modify_key], six.integer_types):
mod_func = lambda x: 10
elif isinstance(decoded[modify_key], float):
mod_func = lambda x: 10
else:
hypothesis.note(
"Failed to modify key: %s (%r)" % (modify_key, type(decoded[modify_key]))
)
assert False
decoded[modify_key] = mod_func(decoded[modify_key])
x[modify_key] = mod_func(x[modify_key])
encoded = blackboxprotobuf.encode_message(decoded, testMessage_typedef)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
for key in decoded.keys():
assert getattr(message, key) == x[key]
## Second copies of the above methods that use the protobuf to/from json functions
@given(x=strategies.gen_message_data(testMessage_typedef))
@example(x={"testBytes": b"test123"})
@example(x={"testBytes": b"\x80"})
def test_decode_json(x):
# Test with JSON payload
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded_json, typedef_json = blackboxprotobuf.protobuf_to_json(
encoded, testMessage_typedef
)
hypothesis.note("Encoded JSON:")
hypothesis.note(decoded_json)
decoded = json.loads(decoded_json)
hypothesis.note("Original value:")
hypothesis.note(x)
hypothesis.note("Decoded valuec:")
hypothesis.note(decoded)
for key in decoded.keys():
if key == "testBytes":
decoded[key] = six.ensure_binary(decoded[key], encoding="latin1")
assert x[key] == decoded[key]
@given(x=strategies.gen_message_data(testMessage_typedef))
@example(x={"testBytes": b"\x80"})
def test_encode_json(x):
# Test with JSON payload
if "testBytes" in x:
x["testBytes"] = x["testBytes"].decode("latin1")
json_str = json.dumps(x)
hypothesis.note("JSON Str Input:")
hypothesis.note(json_str)
hypothesis.note(json.loads(json_str))
encoded = blackboxprotobuf.protobuf_from_json(json_str, testMessage_typedef)
hypothesis.note("BBP decoding:")
test_decode, _ = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
hypothesis.note(test_decode)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
hypothesis.note("Message:")
hypothesis.note(message)
for key in x.keys():
hypothesis.note("Message value")
hypothesis.note(type(getattr(message, key)))
hypothesis.note("Original value")
hypothesis.note(type(x[key]))
if key == "testBytes":
x[key] = six.ensure_binary(x[key], encoding="latin1")
assert getattr(message, key) == x[key]
@given(
x=strategies.gen_message_data(testMessage_typedef),
modify_num=st.sampled_from(sorted(testMessage_typedef.keys())),
)
def test_modify_json(x, modify_num):
modify_key = testMessage_typedef[modify_num]["name"]
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded_json, typedef = blackboxprotobuf.protobuf_to_json(
encoded, testMessage_typedef
)
decoded = json.loads(decoded_json)
# eliminate any cases where protobuf defaults out a field
hypothesis.assume(modify_key in decoded)
if isinstance(decoded[modify_key], str):
mod_func = lambda x: "test"
elif six.PY2 and isinstance(decoded[modify_key], unicode):
mod_func = lambda x: six.u("test")
elif isinstance(decoded[modify_key], bytes):
mod_func = lambda x: b"test"
elif isinstance(decoded[modify_key], six.integer_types):
mod_func = lambda x: 10
elif isinstance(decoded[modify_key], float):
mod_func = lambda x: 10
else:
hypothesis.note(
"Failed to modify key: %s (%r)" % (modify_key, type(decoded[modify_key]))
)
assert False
decoded[modify_key] = mod_func(decoded[modify_key])
x[modify_key] = mod_func(x[modify_key])
encoded = blackboxprotobuf.protobuf_from_json(
json.dumps(decoded), testMessage_typedef
)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
for key in decoded.keys():
hypothesis.note("Message value:")
hypothesis.note(type(getattr(message, key)))
hypothesis.note("Orig value:")
hypothesis.note((x[key]))
if key == "testBytes":
x[key] = six.ensure_binary(x[key], encoding="latin1")
assert getattr(message, key) == x[key]
|
bazel/container_push.bzl
|
Wyverald/core
| 161 |
90985
|
<gh_stars>100-1000
load("@io_bazel_rules_docker//container:container.bzl", _container_push = "container_push")
def container_push(*args, **kwargs):
"""Creates a script to push a container image to a Docker registry. The
target name must be specified when invoking the push script."""
if "registry" in kwargs:
fail(
"Cannot set 'registry' attribute on container_push",
attr = "registry",
)
if "repository" in kwargs:
fail(
"Cannot set 'repository' attribute on container_push",
attr = "repository",
)
kwargs["registry"] = "IGNORE"
kwargs["repository"] = "IGNORE"
_container_push(*args, **kwargs)
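# Illustrative BUILD usage (the target and image labels below are assumptions):
#
#   container_push(
#       name = "push_app",
#       format = "Docker",
#       image = ":app_image",
#       tag = "latest",
#   )
#
# Since 'registry' and 'repository' are forced to "IGNORE" above, the push
# destination is expected to be supplied when the generated push script is
# invoked (see the docstring above).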
|
geometric_registration/evaluate.py
|
HOUYONGKUO/D3Feat
| 214 |
90987
|
import sys
import open3d
import numpy as np
import time
import os
from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog
import cv2
from functools import partial
def build_correspondence(source_desc, target_desc):
"""
Find the mutually closest point pairs in feature space.
    source_desc and target_desc are the descriptors of the key points of two point clouds, each of shape [5000, 32].
"""
distance = np.sqrt(2 - 2 * (source_desc @ target_desc.T))
source_idx = np.argmin(distance, axis=1)
source_dis = np.min(distance, axis=1)
target_idx = np.argmin(distance, axis=0)
target_dis = np.min(distance, axis=0)
result = []
for i in range(len(source_idx)):
if target_idx[source_idx[i]] == i:
result.append([i, source_idx[i]])
return np.array(result)
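# The mutual-nearest-neighbour check above can also be vectorized with NumPy;
# the function below is an illustrative sketch and is not called by the
# evaluation code in this file.
def build_correspondence_vectorized(source_desc, target_desc):
    distance = np.sqrt(2 - 2 * (source_desc @ target_desc.T))
    source_idx = np.argmin(distance, axis=1)
    target_idx = np.argmin(distance, axis=0)
    # keep pair (i, j) only when i and j are each other's nearest neighbour
    mutual = target_idx[source_idx] == np.arange(len(source_idx))
    return np.stack([np.nonzero(mutual)[0], source_idx[mutual]], axis=1)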
def register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold):
"""
    Register point clouds {id1} and {id2} using the keypoint locations and descriptors.
"""
cloud_bin_s = f'cloud_bin_{id1}'
cloud_bin_t = f'cloud_bin_{id2}'
write_file = f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'
if os.path.exists(os.path.join(resultpath, write_file)):
return 0, 0, 0
source_keypts = get_keypts(keyptspath, cloud_bin_s)
target_keypts = get_keypts(keyptspath, cloud_bin_t)
source_desc = get_desc(descpath, cloud_bin_s, desc_name)
target_desc = get_desc(descpath, cloud_bin_t, desc_name)
source_desc = np.nan_to_num(source_desc)
target_desc = np.nan_to_num(target_desc)
# Select {num_keypts} points based on the scores. The descriptors and keypts are already sorted based on the detection score.
num_keypts = 250
source_keypts = source_keypts[-num_keypts:, :]
source_desc = source_desc[-num_keypts:, :]
target_keypts = target_keypts[-num_keypts:, :]
target_desc = target_desc[-num_keypts:, :]
# Select {num_keypts} points randomly.
# num_keypts = 250
# source_indices = np.random.choice(range(source_keypts.shape[0]), num_keypts)
# target_indices = np.random.choice(range(target_keypts.shape[0]), num_keypts)
# source_keypts = source_keypts[source_indices, :]
# source_desc = source_desc[source_indices, :]
# target_keypts = target_keypts[target_indices, :]
# target_desc = target_desc[target_indices, :]
key = f'{cloud_bin_s.split("_")[-1]}_{cloud_bin_t.split("_")[-1]}'
if key not in gtLog.keys():
# skip the pairs that have less than 30% overlap.
num_inliers = 0
inlier_ratio = 0
gt_flag = 0
else:
# build correspondence set in feature space.
corr = build_correspondence(source_desc, target_desc)
# calculate the inlier ratio, this is for Feature Matching Recall.
gt_trans = gtLog[key]
frag1 = source_keypts[corr[:, 0]]
frag2_pc = open3d.PointCloud()
frag2_pc.points = open3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
frag2_pc.transform(gt_trans)
frag2 = np.asarray(frag2_pc.points)
distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
num_inliers = np.sum(distance < distance_threshold)
if num_inliers / len(distance) < inlier_ratio:
print(key)
print("num_corr:", len(corr), "inlier_ratio:", num_inliers / len(distance))
inlier_ratio = num_inliers / len(distance)
gt_flag = 1
# calculate the transformation matrix using RANSAC, this is for Registration Recall.
source_pcd = open3d.PointCloud()
source_pcd.points = open3d.utility.Vector3dVector(source_keypts)
target_pcd = open3d.PointCloud()
target_pcd.points = open3d.utility.Vector3dVector(target_keypts)
s_desc = open3d.registration.Feature()
s_desc.data = source_desc.T
t_desc = open3d.registration.Feature()
t_desc.data = target_desc.T
result = open3d.registration_ransac_based_on_feature_matching(
source_pcd, target_pcd, s_desc, t_desc,
0.05,
open3d.TransformationEstimationPointToPoint(False), 3,
[open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
open3d.CorrespondenceCheckerBasedOnDistance(0.05)],
open3d.RANSACConvergenceCriteria(50000, 1000))
# write the transformation matrix into .log file for evaluation.
with open(os.path.join(logpath, f'{desc_name}_{timestr}.log'), 'a+') as f:
trans = result.transformation
trans = np.linalg.inv(trans)
s1 = f'{id1}\t {id2}\t 37\n'
f.write(s1)
f.write(f"{trans[0,0]}\t {trans[0,1]}\t {trans[0,2]}\t {trans[0,3]}\t \n")
f.write(f"{trans[1,0]}\t {trans[1,1]}\t {trans[1,2]}\t {trans[1,3]}\t \n")
f.write(f"{trans[2,0]}\t {trans[2,1]}\t {trans[2,2]}\t {trans[2,3]}\t \n")
f.write(f"{trans[3,0]}\t {trans[3,1]}\t {trans[3,2]}\t {trans[3,3]}\t \n")
# write the result into resultpath so that it can be re-shown.
s = f"{cloud_bin_s}\t{cloud_bin_t}\t{num_inliers}\t{inlier_ratio:.8f}\t{gt_flag}"
with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'w+') as f:
f.write(s)
return num_inliers, inlier_ratio, gt_flag
def read_register_result(resultpath, id1, id2):
"""
    Read the registration result of {id1} & {id2} from resultpath.
    The return value contains the inlier_number, inlier_ratio and a flag indicating whether this pair is a ground-truth match.
"""
cloud_bin_s = f'cloud_bin_{id1}'
cloud_bin_t = f'cloud_bin_{id2}'
with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'r') as f:
content = f.readlines()
nums = content[0].replace("\n", "").split("\t")[2:5]
return nums
def deal_with_one_scene(inlier_ratio, distance_threshold, scene):
"""
Function to register all the fragments pairs in one scene.
"""
logpath = f"log_result/{scene}-evaluation"
pcdpath = f"../data/3DMatch/fragments/{scene}/"
keyptspath = f"{desc_name}_{timestr}/keypoints/{scene}"
descpath = f"{desc_name}_{timestr}/descriptors/{scene}"
gtpath = f'gt_result/{scene}-evaluation/'
gtLog = loadlog(gtpath)
resultpath = f"pred_result/{scene}/{desc_name}_result_{timestr}"
if not os.path.exists(f"pred_result/{scene}/"):
os.mkdir(f"pred_result/{scene}/")
if not os.path.exists(resultpath):
os.mkdir(resultpath)
if not os.path.exists(logpath):
os.mkdir(logpath)
# register each pair
num_frag = len([filename for filename in os.listdir(pcdpath) if filename.endswith('ply')])
print(f"Start Evaluate Descriptor {desc_name} for {scene}")
start_time = time.time()
for id1 in range(num_frag):
for id2 in range(id1 + 1, num_frag):
register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold)
print(f"Finish Evaluation, time: {time.time() - start_time:.2f}s")
if __name__ == '__main__':
scene_list = [
'7-scenes-redkitchen',
'sun3d-home_at-home_at_scan1_2013_jan_1',
'sun3d-home_md-home_md_scan9_2012_sep_30',
'sun3d-hotel_uc-scan3',
'sun3d-hotel_umd-maryland_hotel1',
'sun3d-hotel_umd-maryland_hotel3',
'sun3d-mit_76_studyroom-76-1studyroom2',
'sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika'
]
# will evaluate the descriptor in `{desc_name}_{timestr}` folder.
desc_name = sys.argv[1]
timestr = sys.argv[2]
# inlier_ratio = float(sys.argv[3])
# distance_threshold = float(sys.argv[4])
inlier_ratio = 0.05 # 5%
distance_threshold = 0.10 # 10cm
# multiprocessing to register each pair in each scene.
# this part is time-consuming
from multiprocessing import Pool
pool = Pool(len(scene_list))
func = partial(deal_with_one_scene, inlier_ratio, distance_threshold)
pool.map(func, scene_list)
pool.close()
pool.join()
# collect all the data and print the results.
inliers_list = []
recall_list = []
inliers_ratio_list = []
pred_match = 0
gt_match = 0
for scene in scene_list:
# evaluate
pcdpath = f"../data/3DMatch/fragments/{scene}/"
resultpath = f"pred_result/{scene}/{desc_name}_result_{timestr}"
num_frag = len([filename for filename in os.listdir(pcdpath) if filename.endswith('ply')])
result = []
for id1 in range(num_frag):
for id2 in range(id1 + 1, num_frag):
line = read_register_result(resultpath, id1, id2)
result.append([int(line[0]), float(line[1]), int(line[2])]) # inlier_number, inlier_ratio, flag.
result = np.array(result)
gt_results = np.sum(result[:, 2] == 1)
pred_results = np.sum(result[:, 1] > inlier_ratio)
pred_match += pred_results
gt_match += gt_results
recall = float(pred_results / gt_results) * 100
print(f"Correct Match {pred_results}, ground truth Match {gt_results}")
print(f"Recall {recall}%")
ave_num_inliers = np.sum(np.where(result[:, 2] == 1, result[:, 0], np.zeros(result.shape[0]))) / pred_results
print(f"Average Num Inliners: {ave_num_inliers}")
ave_inlier_ratio = np.sum(np.where(result[:, 2] == 1, result[:, 1], np.zeros(result.shape[0]))) / pred_results
print(f"Average Num Inliner Ratio: {ave_inlier_ratio}")
recall_list.append(recall)
inliers_list.append(ave_num_inliers)
inliers_ratio_list.append(ave_inlier_ratio)
print("*" * 40)
print(recall_list)
# print(f"True Avarage Recall: {pred_match / gt_match * 100}%")
print(f"Matching Recall Std: {np.std(recall_list)}")
average_recall = sum(recall_list) / len(recall_list)
print(f"All 8 scene, average recall: {average_recall}%")
average_inliers = sum(inliers_list) / len(inliers_list)
print(f"All 8 scene, average num inliers: {average_inliers}")
average_inliers_ratio = sum(inliers_ratio_list) / len(inliers_list)
print(f"All 8 scene, average num inliers ratio: {average_inliers_ratio}")
|
pyorient/messages/base.py
|
mogui/pyorient
| 138 |
90991
|
<filename>pyorient/messages/base.py
__author__ = 'Ostico <<EMAIL>>'
import struct
import sys
from ..exceptions import PyOrientBadMethodCallException, \
PyOrientCommandException, PyOrientNullRecordException
from ..otypes import OrientRecord, OrientRecordLink, OrientNode
from ..hexdump import hexdump
from ..constants import BOOLEAN, BYTE, BYTES, CHAR, FIELD_BOOLEAN, FIELD_BYTE, \
FIELD_INT, FIELD_RECORD, FIELD_SHORT, FIELD_STRING, FIELD_TYPE_LINK, INT, \
LINK, LONG, RECORD, SHORT, STRING, STRINGS
from ..utils import is_debug_active
from ..orient import OrientSocket
from ..serializations import OrientSerialization
class BaseMessage(object):
def __init__(self, sock=OrientSocket):
"""
:type sock: OrientSocket
"""
sock.get_connection()
self._orientSocket = sock
self._protocol = self._orientSocket.protocol
self._session_id = self._orientSocket.session_id
# handles token auth
self._auth_token = self._orientSocket.auth_token
self._request_token = False
self._header = []
""":type : list of [str]"""
self._body = []
""":type : list of [str]"""
self._fields_definition = []
""":type : list of [object]"""
self._command = chr(0)
self._db_opened = self._orientSocket.db_opened
self._connected = self._orientSocket.connected
self._node_list = []
""":type : list of [OrientNode]"""
self._serializer = None
self._output_buffer = b''
self._input_buffer = b''
# callback function for async queries
self._callback = None
# callback for push received from the server
self._push_callback = None
self._need_token = True
global in_transaction
in_transaction = False
def get_serializer(self):
"""
        Lazily return the serializer; the serialization type is retrieved from the :class:`OrientSocket <pyorient.orient.OrientSocket>` object.
:return: an Instance of the serializer suitable for decoding or encoding
"""
if self._orientSocket.serialization_type==OrientSerialization.Binary:
return OrientSerialization.get_impl(self._orientSocket.serialization_type,
self._orientSocket._props)
else:
return OrientSerialization.get_impl(self._orientSocket.serialization_type)
def get_orient_socket_instance(self):
return self._orientSocket
def is_connected(self):
return self._connected is True
def database_opened(self):
return self._db_opened
def get_cluster_map(self):
""":type : list of [OrientNode]"""
return self._node_list
def set_session_token( self, token='' ):
"""
        :param token: Set to True to enable token authentication, or pass an existing token (str/bytes) to reuse it
:type token: bool|string
:return: self
"""
if token != '' and token is not None:
if type(token) is bool:
self._request_token = token
elif type(token) is str or type(token) is bytes:
self._request_token = True
self._auth_token = token
self._db_opened = True
self._connected = True
self._update_socket_token()
return self
def get_session_token( self ):
"""
        Retrieve the session token so it can be reused later
:return:
"""
return self._auth_token
def _update_socket_id(self):
"""Force update of socket id from inside the class"""
self._orientSocket.session_id = self._session_id
return self
def _update_socket_token(self):
"""Force update of socket token from inside the class"""
self._orientSocket.auth_token = self._auth_token
return self
def _reset_fields_definition(self):
self._fields_definition = []
def prepare(self, *args):
# session_id
self._fields_definition.insert( 1, ( FIELD_INT, self._session_id ) )
"""
# Token authentication handling
# we must recognize ConnectMessage and DbOpenMessage messages
"""
if self._need_token and self._request_token is True:
self._fields_definition.insert(
2, ( FIELD_STRING, self._auth_token )
)
self._output_buffer = b''.join(
self._encode_field( x ) for x in self._fields_definition
)
return self
def get_protocol(self):
if self._protocol < 0:
self._protocol = self._orientSocket.protocol
return self._protocol
def _decode_header(self):
# read header's information
self._header = [ self._decode_field( FIELD_BYTE ),
self._decode_field( FIELD_INT ) ]
# decode message errors and raise an exception
if self._header[0] == 1:
# Parse the error
exception_class = b''
exception_message = b''
more = self._decode_field( FIELD_BOOLEAN )
while more:
# read num bytes by the field definition
exception_class += self._decode_field( FIELD_STRING )
exception_message += self._decode_field( FIELD_STRING )
more = self._decode_field( FIELD_BOOLEAN )
if self.get_protocol() > 18: # > 18 1.6-snapshot
# read serialized version of exception thrown on server side
# useful only for java clients
serialized_exception = self._decode_field( FIELD_STRING )
# trash
del serialized_exception
raise PyOrientCommandException(
exception_class.decode( 'utf8' ),
[ exception_message.decode( 'utf8' ) ]
)
elif self._header[0] == 3:
# Push notification, Node cluster changed
# TODO: UNTESTED CODE!!!
# FIELD_BYTE (OChannelBinaryProtocol.PUSH_DATA); # WRITE 3
# FIELD_INT (Integer.MIN_VALUE); # SESSION ID = 2^-31
# 80: \x50 Request Push 1 byte: Push command id
push_command_id = self._decode_field(FIELD_BYTE)
push_message = self._decode_field( FIELD_STRING )
_, payload = self.get_serializer().decode(push_message)
if self._push_callback:
self._push_callback(push_command_id, payload)
end_flag = self._decode_field( FIELD_BYTE )
# this flag can be set more than once
while end_flag == 3:
self._decode_field( FIELD_INT ) # FAKE SESSION ID = 2^-31
op_code = self._decode_field( FIELD_BYTE ) # 80: 0x50 Request Push
# REQUEST_PUSH_RECORD 79
# REQUEST_PUSH_DISTRIB_CONFIG 80
# REQUEST_PUSH_LIVE_QUERY 81
if op_code == 80:
# for node in
payload = self.get_serializer().decode(
self._decode_field( FIELD_STRING )
) # JSON WITH THE NEW CLUSTER CFG
# reset the nodelist
self._node_list = []
for node in payload['members']:
self._node_list.append( OrientNode( node ) )
end_flag = self._decode_field( FIELD_BYTE )
# Try to set the new session id???
self._header[1] = self._decode_field( FIELD_INT ) # REAL SESSION ID
pass
from .connection import ConnectMessage
from .database import DbOpenMessage
"""
# Token authentication handling
# we must recognize ConnectMessage and DbOpenMessage messages
TODO: change this check avoiding cross import,
importing a subclass in a super class is bad
"""
if not isinstance(self, (ConnectMessage, DbOpenMessage)) \
and self._request_token is True:
token_refresh = self._decode_field(FIELD_STRING)
if token_refresh != b'':
self._auth_token = token_refresh
self._update_socket_token()
def _decode_body(self):
# read body
for field in self._fields_definition:
self._body.append( self._decode_field( field ) )
# clear field stack
self._reset_fields_definition()
return self
def _decode_all(self):
self._decode_header()
self._decode_body()
def fetch_response(self, *_continue):
"""
        # Decode the header and the body
        # If the continue flag is set (the header has already been read), only the body is decoded
:param _continue:
:return:
"""
        if len(_continue) != 0:
self._body = []
self._decode_body()
self.dump_streams()
# already fetched, get last results as cache info
        elif len(self._body) == 0:
self._decode_all()
self.dump_streams()
return self._body
def dump_streams(self):
if is_debug_active():
if len( self._output_buffer ):
print("\nRequest :")
hexdump( self._output_buffer )
# print(repr(self._output_buffer))
if len( self._input_buffer ):
print("\nResponse:")
hexdump( self._input_buffer )
# print(repr(self._input_buffer))
def _append(self, field):
"""
        :rtype: BaseMessage
@type field: object
"""
self._fields_definition.append( field )
return self
def __str__(self):
return "\n_output_buffer: \n" + hexdump( self._output_buffer, 'return' ) \
+ "\n\n_input_buffer: \n" + hexdump( self._input_buffer, 'return' )
def send(self):
if self._orientSocket.in_transaction is False:
self._orientSocket.write( self._output_buffer )
self._reset_fields_definition()
if is_debug_active():
self.dump_streams()
# reset output buffer
self._output_buffer = b""
return self
def close(self):
self._orientSocket.close()
@staticmethod
def _encode_field(field):
# tuple with type
t, v = field
_content = None
if t['type'] == INT:
_content = struct.pack("!i", v)
elif t['type'] == SHORT:
_content = struct.pack("!h", v)
elif t['type'] == LONG:
_content = struct.pack("!q", v)
elif t['type'] == BOOLEAN:
if sys.version_info[0] < 3:
_content = chr(1) if v else chr(0)
else:
_content = bytes([1]) if v else bytes([0])
elif t['type'] == BYTE:
if sys.version_info[0] < 3:
_content = v
else:
_content = bytes([ord(v)])
elif t['type'] == BYTES:
_content = struct.pack("!i", len(v)) + v
elif t['type'] == STRING:
if sys.version_info[0] >= 3:
if isinstance(v, str):
v = v.encode('utf-8')
else:
if isinstance(v, unicode):
v = v.encode('utf-8')
_content = struct.pack("!i", len(v)) + v
elif t['type'] == STRINGS:
_content = b''
for s in v:
if sys.version_info[0] >= 3:
if isinstance(s, str):
s = s.encode('utf-8')
else:
if isinstance(s, unicode):
s = s.encode('utf-8')
_content += struct.pack("!i", len(s)) + s
return _content
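    # For example, an integer field is packed big-endian into 4 bytes,
    # e.g. _encode_field((FIELD_INT, 5)) -> b'\x00\x00\x00\x05', while string
    # and bytes fields are prefixed with their 4-byte big-endian length.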
def _decode_field(self, _type):
_value = b""
# read buffer length and decode value by field definition
if _type['bytes'] is not None:
_value = self._orientSocket.read( _type['bytes'] )
# if it is a string decode first 4 Bytes as INT
# and try to read the buffer
if _type['type'] == STRING or _type['type'] == BYTES:
_len = struct.unpack('!i', _value)[0]
if _len == -1 or _len == 0:
_decoded_string = b''
else:
_decoded_string = self._orientSocket.read( _len )
self._input_buffer += _value
self._input_buffer += _decoded_string
return _decoded_string
elif _type['type'] == RECORD:
# record_type
record_type = self._decode_field( _type['struct'][0] )
rid = "#" + str( self._decode_field( _type['struct'][1] ) )
rid += ":" + str( self._decode_field( _type['struct'][2] ) )
version = self._decode_field( _type['struct'][3] )
content = self._decode_field( _type['struct'][4] )
return {'rid': rid, 'record_type': record_type,
'content': content, 'version': version}
elif _type['type'] == LINK:
rid = "#" + str( self._decode_field( _type['struct'][0] ) )
rid += ":" + str( self._decode_field( _type['struct'][1] ) )
return rid
else:
self._input_buffer += _value
if _type['type'] == BOOLEAN:
return ord(_value) == 1
elif _type['type'] == BYTE:
return ord(_value)
elif _type['type'] == CHAR:
return _value
elif _type['type'] == SHORT:
return struct.unpack('!h', _value)[0]
elif _type['type'] == INT:
return struct.unpack('!i', _value)[0]
elif _type['type'] == LONG:
return struct.unpack('!q', _value)[0]
def _read_async_records(self):
"""
# async-result-type byte as trailing byte of a record can be:
# 0: no records remain to be fetched
# 1: a record is returned as a result set
# 2: a record is returned as pre-fetched to be loaded in client's
# cache only. It's not part of the result set but the client
# knows that it's available for later access
"""
_status = self._decode_field( FIELD_BYTE ) # status
while _status != 0:
try:
# if a callback for the cache is not set, raise exception
if not hasattr(self._callback, '__call__'):
raise AttributeError()
_record = self._read_record()
if _status == 1: # async record type
# async_records.append( _record ) # save in async
self._callback( _record ) # save in async
elif _status == 2: # cache
# cached_records.append( _record ) # save in cache
self._callback( _record ) # save in cache
except AttributeError:
# AttributeError: 'RecordLoadMessage' object has
# no attribute '_command_type'
raise PyOrientBadMethodCallException(
str(self._callback) + " is not a callable function", [])
finally:
# read new status and flush the debug buffer
_status = self._decode_field( FIELD_BYTE ) # status
def _read_record(self):
"""
        # The format depends on whether a RID is passed or an entire
        record with its content.
        # In case of a null record, -2 is passed as a short.
        # In case of a RID, -3 is passed as a short and then the RID:
(-3:short)(cluster-id:short)(cluster-position:long).
# In case of record:
(0:short)(record-type:byte)(cluster-id:short)
(cluster-position:long)(record-version:int)(record-content:bytes)
:raise: PyOrientNullRecordException
:return: OrientRecordLink,OrientRecord
"""
marker = self._decode_field( FIELD_SHORT ) # marker
        if marker == -2:
raise PyOrientNullRecordException('NULL Record', [])
        elif marker == -3:
res = OrientRecordLink( self._decode_field( FIELD_TYPE_LINK ) )
else:
# read record
__res = self._decode_field( FIELD_RECORD )
if self._orientSocket.serialization_type==OrientSerialization.Binary:
class_name, data = self.get_serializer().decode(__res['content'])
else:
# bug in orientdb csv serialization in snapshot 2.0
class_name, data = self.get_serializer().decode(__res['content'].rstrip())
res = OrientRecord(
dict(
__o_storage=data,
__o_class=class_name,
__version=__res['version'],
__rid=__res['rid']
)
)
self.dump_streams() # debug log
self._output_buffer = b''
self._input_buffer = b''
return res
|
Python/cp/staircase_problem.py
|
zhcet19/NeoAlgo-1
| 897 |
91023
|
<filename>Python/cp/staircase_problem.py
"""This program finds the total number of possible combinations that can be used to
climb statirs . EG : for 3 stairs ,combination and output will be 1,1,1 , 1,2 , 2,1 i.e 3 . """
def counting_stairs(stair_number):
result = stair_number
# This function uses Recursion.
    if stair_number <= 1:
result = 1
else:
result = (counting_stairs(stair_number-1) + counting_stairs(stair_number-2))
return result
if __name__ == '__main__':
count_stair = int(input("Enter total number of stairs: "))
print(f"Total Number of possible Combinations = {counting_stairs(count_stair)}")
"""Output
Total Number of possible Combinations = 8
Enter total number of stairs: 5
Time Complexity : O(2^n)
Space Complexity :O(1)
Created by <NAME> on 16-12-2020 on WoC
"""
|
tests/test_providers/test_food.py
|
chinghwayu/mimesis
| 2,619 |
91026
|
# -*- coding: utf-8 -*-
import re
import pytest
from mimesis import Food
from . import patterns
class TestFood(object):
def test_str(self, food):
assert re.match(patterns.DATA_PROVIDER_STR_REGEX, str(food))
def test_vegetable(self, food):
result = food.vegetable()
assert result in food._data["vegetables"]
def test_fruit(self, food):
result = food.fruit()
assert result in food._data["fruits"]
def test_dish(self, food):
result = food.dish()
assert result in food._data["dishes"]
def test_drink(self, food):
result = food.drink()
assert result in food._data["drinks"]
def test_spices(self, food):
result = food.spices()
assert result in food._data["spices"]
class TestSeededFood(object):
@pytest.fixture
def fd1(self, seed):
return Food(seed=seed)
@pytest.fixture
def fd2(self, seed):
return Food(seed=seed)
def test_vegetable(self, fd1, fd2):
assert fd1.vegetable() == fd2.vegetable()
def test_fruit(self, fd1, fd2):
assert fd1.fruit() == fd2.fruit()
def test_dish(self, fd1, fd2):
assert fd1.dish() == fd2.dish()
def test_drink(self, fd1, fd2):
assert fd1.drink() == fd2.drink()
def test_spices(self, fd1, fd2):
assert fd1.spices() == fd2.spices()
|
Packs/GoogleChronicleBackstory/Scripts/ExtractDomainFromIOCDomainMatchRes/ExtractDomainFromIOCDomainMatchRes_test.py
|
diCagri/content
| 799 |
91049
|
<reponame>diCagri/content
from unittest.mock import patch
import demistomock as demisto
import ExtractDomainFromIOCDomainMatchRes
ARGS = {'json_response': "{\"Artifact\": \"e9428.b.akamaiedge.net\", \"IocIngestTime\": \"2020-07-17T20:00:00Z\", "
"\"FirstAccessedTime\": \"2018-11-05T12:01:29Z\", \"LastAccessedTime\": "
"\"2018-11-09T11:51:03Z\", \"Sources\": [{ \"Category\": \"Observed serving executable\", "
"\"IntRawConfidenceScore\": 0, \"NormalizedConfidenceScore\": \"Low\", \"RawSeverity\": "
"\"Low\", \"Source\": \"ET Intelligence Rep List\"}]}"}
def test_main_success(mocker):
"""
When main function is called, get_entry_context should be called.
"""
mocker.patch.object(demisto, 'args', return_value=ARGS)
mocker.patch.object(ExtractDomainFromIOCDomainMatchRes, 'get_entry_context',
return_value={})
ExtractDomainFromIOCDomainMatchRes.main()
assert ExtractDomainFromIOCDomainMatchRes.get_entry_context.called
@patch('ExtractDomainFromIOCDomainMatchRes.return_error')
def test_main_failure(mock_return_error, capfd, mocker):
"""
When main function gets some exception then valid message should be printed.
"""
mocker.patch.object(demisto, 'args', return_value=ARGS)
mocker.patch.object(ExtractDomainFromIOCDomainMatchRes, 'get_entry_context', side_effect=Exception)
with capfd.disabled():
ExtractDomainFromIOCDomainMatchRes.main()
mock_return_error.assert_called_once_with('Error occurred while extracting Domain from IOC Domain Matches '
'response:\n')
|
pyquil/quantum_processor/transformers/__init__.py
|
stjordanis/pyquil
| 677 |
91060
|
<reponame>stjordanis/pyquil
from pyquil.quantum_processor.transformers.qcs_isa_to_compiler_isa import (
qcs_isa_to_compiler_isa,
QCSISAParseError,
)
from pyquil.quantum_processor.transformers.qcs_isa_to_graph import qcs_isa_to_graph
from pyquil.quantum_processor.transformers.compiler_isa_to_graph import compiler_isa_to_graph
from pyquil.quantum_processor.transformers.graph_to_compiler_isa import graph_to_compiler_isa, GraphGateError
|