from time import *
from tkinter import *
from random import *
import webbrowser
from modules.affichage import *
from modules.mouvements import *
from modules.recherche import *
from modules.menu import *
from modules.gestionnaire import *
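# Added note (assumption based on the code below): the cube is represented as a list of
# six faces, each a list of nine colour strings ("green", "red", "blue", "orange",
# "white", "yellow"), indexed 0-8 with 4 as the centre sticker (see the solved-state
# check for etape 8 in verificateur). Move letters passed to rotative() -- "F", "D",
# "L", "R", with "'" for the inverse turn -- follow standard Singmaster notation,
# interpreted relative to the face argument; rotative itself comes from one of the
# modules.* star-imports above and is not shown here.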
def dec(cube,can,faceT):
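"""Normalise the sticker index of an edge located by findArrete (assumed behaviour).
faceT is a [face, index] pair; for faces 0, 2 and 4 the index is remapped (a quarter-
or half-turn of the 3x3 grid, as the tables below show) so that the calling code can
treat every side face with a single orientation convention. For example, on face 4
the mapping is a half-turn: index 8 becomes 0, 7 becomes 1, and so on."""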
if faceT[0]==0:
if faceT[1]==2: faceT[1]=0
elif faceT[1]==5: faceT[1]=1
elif faceT[1]==8: faceT[1]=2
elif faceT[1]==1: faceT[1]=3
elif faceT[1]==7: faceT[1]=5
elif faceT[1]==0: faceT[1]=6
elif faceT[1]==3: faceT[1]=7
elif faceT[1]==6: faceT[1]=8
if faceT[0]==2:
if faceT[1]==6: faceT[1]=0
elif faceT[1]==3: faceT[1]=1
elif faceT[1]==0: faceT[1]=2
elif faceT[1]==7: faceT[1]=3
elif faceT[1]==1: faceT[1]=5
elif faceT[1]==8: faceT[1]=6
elif faceT[1]==5: faceT[1]=7
elif faceT[1]==2: faceT[1]=8
if faceT[0]==4:
if faceT[1]==8: faceT[1]=0
elif faceT[1]==7: faceT[1]=1
elif faceT[1]==6: faceT[1]=2
elif faceT[1]==5: faceT[1]=3
elif faceT[1]==3: faceT[1]=5
elif faceT[1]==2: faceT[1]=6
elif faceT[1]==1: faceT[1]=7
elif faceT[1]==0: faceT[1]=8
return faceT
def verificateur(cube,etape):
i=0
if etape==1:
if cube[1][1]=="red":
i=i+1
if cube[1][3]=="red":
i=i+1
if cube[1][5]=="red":
i=i+1
if cube[1][7]=="red":
i=i+1
if cube[0][5]=="green":
i=i+1
if cube[5][1]=="yellow":
i=i+1
if cube[2][3]=="blue":
i=i+1
if cube[4][7]=="white":
i=i+1
if i==8:
return 1
else:
return 0
if etape==2:
if cube[1][0]=="red":
i=i+1
if cube[1][6]=="red":
i=i+1
if cube[1][8]=="red":
i=i+1
if cube[1][2]=="red":
i=i+1
if cube[0][2]=="green":
i=i+1
if cube[0][8]=="green":
i=i+1
if cube[2][0]=="blue":
i=i+1
if cube[2][6]=="blue":
i=i+1
if i==8:
return 2
else:
return 0
if etape==3:
if cube[0][1]=="green":
i=i+1
if cube[0][7]=="green":
i=i+1
if cube[2][1]=="blue":
i=i+1
if cube[2][7]=="blue":
i=i+1
if cube[4][3]=="white":
i=i+1
if cube[4][5]=="white":
i=i+1
if cube[5][3]=="yellow":
i=i+1
if cube[5][5]=="yellow":
i=i+1
if i==8:
return 3
else:
return 0
if etape==4:
if cube[3][1]=="orange":
i=i+1
if cube[3][3]=="orange":
i=i+1
if cube[3][5]=="orange":
i=i+1
if cube[3][7]=="orange":
i=i+1
if i==4:
return 4
else:
return 0
if etape==5:
if cube[0][1]=="green":
i=i+1
if cube[2][5]=="blue":
i=i+1
if cube[4][1]=="white":
i=i+1
if cube[5][7]=="yellow":
i=i+1
if i==4:
return 5
else:
return 0
if etape==6:
if cube[3][1]=="orange":
i=i+1
if cube[3][3]=="orange":
i=i+1
if cube[3][5]=="orange":
i=i+1
if cube[3][7]=="orange":
i=i+1
if i==4:
return 6
else:
return 0
if etape==7:
if cube[4][0]=="white":
i=i+1
elif cube[4][0]=="green":
i=i+1
elif cube[4][0]=="orange":
i=i+1
if cube[0][0]=="white":
i=i+1
elif cube[0][0]=="green":
i=i+1
elif cube[0][0]=="orange":
i=i+1
if cube[3][2]=="white":
i=i+1
elif cube[3][2]=="green":
i=i+1
elif cube[3][2]=="orange":
i=i+1
if cube[2][2]=="white":
i=i+1
elif cube[2][2]=="blue":
i=i+1
elif cube[2][2]=="orange":
i=i+1
if cube[3][0]=="white":
i=i+1
elif cube[3][0]=="blue":
i=i+1
elif cube[3][0]=="orange":
i=i+1
if cube[4][2]=="white":
i=i+1
elif cube[4][2]=="blue":
i=i+1
elif cube[4][2]=="orange":
i=i+1
if i==6:
return i
else:
return 0
if etape==8:
if cube==[["green"]*9,["red"]*9,["blue"]*9,["orange"]*9,["white"]*9,["yellow"]*9]:
return 8
else:
return 0
def test(cube,can):
print(verificateur(cube,8))
def step1m(cube,can,face,indice):
if indice==1:
rotative (cube,can,face,1,"F")
rotative (cube,can,face,1,"F")
elif indice==2:
rotative (cube,can,face,1,"D")
if face==2:
face=4
elif face==4:
face=0
elif face==0:
face=5
elif face==5:
face=2
rotative (cube,can,face,1,"F")
rotative (cube,can,face,1,"L'")
rotative (cube,can,face,1,"F'")
def step2m(cube,can,face,indice):
if indice==1: # below, on the left
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"L")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"L'")
elif indice==12: # below, on the right
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"R'")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"R")
elif indice==2: # above, on the left
rotative (cube,can,face,1,"L")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"L'")
step2m(cube,can,face,1)
elif indice==21: # above, on the right
rotative (cube,can,face,1,"R'")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"R")
step2m(cube,can,face,12) # reduce to the "below, on the right" case; 11 is not a handled index
elif indice==3: # below, on the left
rotative (cube,can,face,1,"L")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"L'")
rotative (cube,can,face,1,"F'")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"F")
elif indice==31: # below, on the right
rotative (cube,can,face,1,"R'")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"R")
rotative (cube,can,face,1,"F")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"F'")
def step3m(cube,can,face,indice):
if indice==1: # "belge" insertion algorithm, left-hand version
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"L")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"L'")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"F'")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"F")
elif indice==2: # "belge" insertion algorithm, right-hand version
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"R'")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"R")
rotative (cube,can,face,1,"D")
rotative (cube,can,face,1,"F")
rotative (cube,can,face,1,"D'")
rotative (cube,can,face,1,"F'")
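# step1 appears to build the first-layer cross around the red centre (face 1): for each
# of the four edge colours it locates the red/<colour> edge with findArrete, normalises
# its index with dec, and inserts it with step1m, looping until verificateur(cube,1)
# reports success.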
def step1(cube,can):
i=0
while verificateur(cube,1)!=1:
print("1")
i=i+1
if i==1:
coulR="yellow"
coulR1=5
if i==2:
coulR="blue"
coulR1=2
if i==3:
coulR="white"
coulR1=4
if i==4:
coulR="green"
coulR1=0
ct=dec(cube,can,findArrete(cube,can,coulR,"red"))
print(ct)
if ct[0]==1:
if ct[1]==1: facei=4
if ct[1]==3: facei=0
if ct[1]==5: facei=2
if ct[1]==7: facei=5
step1m(cube,can,facei,1)
elif ct[0]==3:
if i==1: posfin=5
if i==2: posfin=7
if i==3: posfin=3
if i==4: posfin=1
if posfin==1: facei=4
if posfin==3: facei=0
if posfin==5: facei=2
if posfin==7: facei=5
print(posfin)
while ct[1]!=posfin:
rotative (cube,can,0,1,"D")
step1m(cube,can,facei,1)
if ct[1]==7 and ct[0] !=1:
while ct[0]!=coulR1:
rotative (cube,can,0,1,"D")
step1m(cube,can,coulR1,2)
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import os
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, Mapping, Sequence
from pants.base.build_environment import get_default_pants_config_file, pants_version
from pants.base.exceptions import BuildConfigurationError
from pants.option.alias import CliAlias
from pants.option.config import Config
from pants.option.custom_types import ListValueComponent
from pants.option.global_options import BootstrapOptions, GlobalOptions
from pants.option.option_types import collect_options_info
from pants.option.options import Options
from pants.option.scope import GLOBAL_SCOPE, ScopeInfo
from pants.option.subsystem import Subsystem
from pants.util.dirutil import read_file
from pants.util.eval import parse_expression
from pants.util.memo import memoized_method, memoized_property
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import ensure_text
if TYPE_CHECKING:
from pants.build_graph.build_configuration import BuildConfiguration
@dataclass(frozen=True)
class OptionsBootstrapper:
"""Holds the result of the first stage of options parsing, and assists with parsing full
options."""
env_tuples: tuple[tuple[str, str], ...]
bootstrap_args: tuple[str, ...]
args: tuple[str, ...]
config: Config
alias: CliAlias
def __repr__(self) -> str:
env = {pair[0]: pair[1] for pair in self.env_tuples}
# Bootstrap args are included in `args`. We also drop the first argument, which is the path
# to `pants_loader.py`.
args = list(self.args[1:])
return f"OptionsBootstrapper(args={args}, env={env}, config={self.config})"
@staticmethod
def get_config_file_paths(env, args) -> list[str]:
"""Get the location of the config files.
The locations are specified by the --pants-config-files option. However we need to load the
config in order to process the options. This method special-cases --pants-config-files
in order to solve this chicken-and-egg problem.
Note that, obviously, it's not possible to set the location of config files in a config file.
Doing so will have no effect.
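For example (illustrative): with PANTS_CONFIG_FILES=pants.ci.toml in the environment
and --pants-config-files=pants.local.toml on the command line, both values are merged
into the returned list, alongside the default pants.toml if it exists on disk.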
"""
# This exactly mirrors the logic applied in Option to all regular options. Note that we'll
# also parse --pants-config as a regular option later, but there's no harm in that. In fact,
# it's preferable, so that any code that happens to want to know where we read config from
# can inspect the option.
flag = "--pants-config-files="
evars = [
"PANTS_GLOBAL_PANTS_CONFIG_FILES",
"PANTS_PANTS_CONFIG_FILES",
"PANTS_CONFIG_FILES",
]
path_list_values = []
default = get_default_pants_config_file()
if Path(default).is_file():
path_list_values.append(ListValueComponent.create(default))
for var in evars:
if var in env:
path_list_values.append(ListValueComponent.create(env[var]))
break
for arg in args:
# Technically this is very slightly incorrect, as we don't check scope. But it's
# very unlikely that any task or subsystem will have an option named --pants-config-files.
# TODO: Enforce a ban on options with a --pants- prefix outside our global options?
if arg.startswith(flag):
path_list_values.append(ListValueComponent.create(arg[len(flag) :]))
return ListValueComponent.merge(path_list_values).val
@staticmethod
def parse_bootstrap_options(
env: Mapping[str, str], args: Sequence[str], config: Config
) -> Options:
bootstrap_options = Options.create(
env=env,
config=config,
known_scope_infos=[GlobalOptions.get_scope_info()],
args=args,
)
for options_info in collect_options_info(BootstrapOptions):
# Only use of Options.register?
bootstrap_options.register(
GLOBAL_SCOPE, *options_info.flag_names, **options_info.flag_options
)
return bootstrap_options
@classmethod
def create(
cls, env: Mapping[str, str], args: Sequence[str], *, allow_pantsrc: bool
) -> OptionsBootstrapper:
"""Parses the minimum amount of configuration necessary to create an OptionsBootstrapper.
:param env: An environment dictionary.
:param args: An args array.
:param allow_pantsrc: True to allow pantsrc files to be used. Unless tests are expecting to
consume pantsrc files, they should pass False in order to avoid reading files from
absolute paths. Production use cases should pass True to allow options values to make the
decision of whether to respect pantsrc files.
"""
with warnings.catch_warnings(record=True):
# We can't use pants.engine.fs.FileContent here because it would cause a circular dep.
@dataclass(frozen=True)
class FileContent:
path: str
content: bytes
def filecontent_for(path: str) -> FileContent:
return FileContent(
ensure_text(path),
read_file(path, binary_mode=True),
)
env = {k: v for k, v in env.items() if k.startswith("PANTS_")}
bargs = cls._get_bootstrap_args(args)
config_file_paths = cls.get_config_file_paths(env=env, args=args)
config_files_products = [filecontent_for(p) for p in config_file_paths]
pre_bootstrap_config = Config.load(config_files_products)
initial_bootstrap_options = cls.parse_bootstrap_options(
env, bargs, pre_bootstrap_config
)
bootstrap_option_values = initial_bootstrap_options.for_global_scope()
# Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
# from (typically pants.toml), then config override, then rcfiles.
full_config_paths = pre_bootstrap_config.sources()
if allow_pantsrc and bootstrap_option_values.pantsrc:
rcfiles = [
os.path.expanduser(str(rcfile))
for rcfile in bootstrap_option_values.pantsrc_files
]
existing_rcfiles = list(filter(os.path.exists, rcfiles))
full_config_paths.extend(existing_rcfiles)
full_config_files_products = [filecontent_for(p) for p in full_config_paths]
post_bootstrap_config = Config.load(
full_config_files_products,
seed_values=bootstrap_option_values.as_dict(),
)
env_tuples = tuple(sorted(env.items(), key=lambda x: x[0]))
# Finally, we expand any aliases and re-populate the bootstrap args, in case there
# were any from aliases.
# stuhood: This could potentially break the rust client when aliases are used:
# https://github.com/pantsbuild/pants/pull/13228#discussion_r728223889
alias_dict = parse_expression(
name="cli.alias",
val=post_bootstrap_config.get("cli", "alias") or "{}",
acceptable_types=dict,
)
alias = CliAlias.from_dict(alias_dict)
args = alias.expand_args(tuple(args))
bargs = cls._get_bootstrap_args(args)
# We need to set this env var to allow various static help strings to reference the
# right name (via `pants.util.docutil`), and we need to do it as early as possible to
# avoid needing to lazily import code to avoid chicken-and-egg-problems. This is the
# earliest place it makes sense to do so and is generically used by both the local and
# remote pants runners.
os.environ["PANTS_BIN_NAME"] = bootstrap_option_values.pants_bin_name
return cls(
env_tuples=env_tuples,
bootstrap_args=bargs,
args=args,
config=post_bootstrap_config,
alias=alias,
)
@classmethod
def _get_bootstrap_args(cls, args: Sequence[str]) -> tuple[str, ...]:
# TODO(13244): there is a typing issue with `memoized_classmethod`.
options = GlobalOptions.get_options_flags() # type: ignore[call-arg]
def is_bootstrap_option(arg: str) -> bool:
components = arg.split("=", 1)
if components[0] in options.flags:
return True
for flag in options.short_flags:
if arg.startswith(flag):
return True
return False
# Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
# Stop before '--' since args after that are pass-through and may have duplicate names to our
# bootstrap options.
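# Illustrative example: given args like ("--pants-config-files=ci.toml", "test", "--",
# "--passthrough-flag"), only the recognised bootstrap flag before "--" survives,
# prefixed with the "<ignored>" placeholder below.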
bargs = ("<ignored>",) + tuple(
filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != "--", args))
)
return bargs
@memoized_property
def env(self) -> dict[str, str]:
return dict(self.env_tuples)
@memoized_property
def bootstrap_options(self) -> Options:
"""The post-bootstrap options, computed from the env, args, and fully discovered Config.
Re-computing options after Config has been fully expanded allows us to pick up bootstrap values
(such as backends) from a config override file, for example.
Because this can be computed from the in-memory representation of these values, it is not part
of the object's identity.
"""
return self.parse_bootstrap_options(self.env, self.bootstrap_args, self.config)
def get_bootstrap_options(self) -> Options:
"""Returns an Options instance that only knows about the bootstrap options."""
return self.bootstrap_options
@memoized_method
def _full_options(
self, known_scope_infos: FrozenOrderedSet[ScopeInfo], allow_unknown_options: bool = False
) -> Options:
bootstrap_option_values = self.get_bootstrap_options().for_global_scope()
options = Options.create(
self.env,
self.config,
known_scope_infos,
args=self.args,
bootstrap_option_values=bootstrap_option_values,
allow_unknown_options=allow_unknown_options,
)
distinct_subsystem_classes: set[type[Subsystem]] = set()
for ksi in known_scope_infos:
if not ksi.subsystem_cls or ksi.subsystem_cls in distinct_subsystem_classes:
continue
distinct_subsystem_classes.add(ksi.subsystem_cls)
ksi.subsystem_cls.register_options_on_scope(options)
return options
def full_options_for_scopes(
self, known_scope_infos: Iterable[ScopeInfo], allow_unknown_options: bool = False
) -> Options:
"""Get the full Options instance bootstrapped by this object for the given known scopes.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:returns: A bootstrapped Options instance that also carries options for all the supplied known
scopes.
"""
return self._full_options(
FrozenOrderedSet(sorted(known_scope_infos, key=lambda si: si.scope)),
allow_unknown_options=allow_unknown_options,
)
def full_options(self, build_configuration: BuildConfiguration) -> Options:
global_bootstrap_options = self.get_bootstrap_options().for_global_scope()
if global_bootstrap_options.pants_version != pants_version():
raise BuildConfigurationError(
f"Version mismatch: Requested version was {global_bootstrap_options.pants_version}, "
f"our version is {pants_version()}."
)
# Parse and register options.
known_scope_infos = [
subsystem.get_scope_info() for subsystem in build_configuration.all_subsystems
]
options = self.full_options_for_scopes(
known_scope_infos, allow_unknown_options=build_configuration.allow_unknown_options
)
GlobalOptions.validate_instance(options.for_global_scope())
self.alias.check_name_conflicts(options.known_scope_to_info)
return options
from __future__ import absolute_import
import responses
import six
from django.core.urlresolvers import reverse
from sentry.models import Environment, Integration, Rule, RuleActivity, RuleActivityType, RuleStatus
from sentry.testutils import APITestCase
from sentry.utils import json
class ProjectRuleDetailsTest(APITestCase):
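"""Exercises GET requests against the sentry-api-0-project-rule-details endpoint."""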
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo", fire_project_created=True)
self.create_project(teams=[team], name="bar", fire_project_created=True)
rule = project1.rule_set.all()[0]
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] is None
def test_non_existing_rule(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo", fire_project_created=True)
self.create_project(teams=[team], name="bar", fire_project_created=True)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": 12345,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 404
def test_with_environment(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo", fire_project_created=True)
self.create_project(teams=[team], name="bar", fire_project_created=True)
rule = project1.rule_set.all()[0]
rule.update(environment_id=Environment.get_or_create(rule.project, "production").id)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] == "production"
def test_with_null_environment(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo", fire_project_created=True)
self.create_project(teams=[team], name="bar", fire_project_created=True)
rule = project1.rule_set.all()[0]
rule.update(environment_id=None)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] is None
def test_with_filters(self):
self.login_as(user=self.user)
project = self.create_project()
conditions = [
{"id": "sentry.rules.conditions.every_event.EveryEventCondition"},
{"id": "sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter", "value": 10},
]
actions = [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}]
data = {
"conditions": conditions,
"actions": actions,
"filter_match": "all",
"action_match": "all",
"frequency": 30,
}
rule = Rule.objects.create(project=project, label="foo", data=data)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
# ensure that conditions and filters are split up correctly
assert len(response.data["conditions"]) == 1
assert response.data["conditions"][0]["id"] == conditions[0]["id"]
assert len(response.data["filters"]) == 1
assert response.data["filters"][0]["id"] == conditions[1]["id"]
class UpdateProjectRuleTest(APITestCase):
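"""Exercises PUT requests (rule updates) against the sentry-api-0-project-rule-details endpoint."""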
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": conditions,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id is None
assert rule.data["action_match"] == "any"
assert rule.data["filter_match"] == "any"
assert rule.data["actions"] == [
{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}
]
assert rule.data["conditions"] == conditions
assert RuleActivity.objects.filter(rule=rule, type=RuleActivityType.UPDATED.value).exists()
def test_update_name(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"environment": None,
"actionMatch": "all",
"filterMatch": "all",
"frequency": 30,
"name": "test",
"conditions": [
{
"interval": "1h",
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"value": 666,
"name": "The issue is seen more than 30 times in 1m",
}
],
"id": rule.id,
"actions": [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
}
],
"dateCreated": "2018-04-24T23:37:21.246Z",
},
format="json",
)
assert response.status_code == 200, response.content
assert (
response.data["conditions"][0]["name"] == "The issue is seen more than 666 times in 1h"
)
assert RuleActivity.objects.filter(rule=rule, type=RuleActivityType.UPDATED.value).exists()
def test_with_environment(self):
self.login_as(user=self.user)
project = self.create_project()
Environment.get_or_create(project, "production")
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"environment": "production",
"actionMatch": "any",
"filterMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": [
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] == "production"
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id == Environment.get_or_create(rule.project, "production").id
def test_with_null_environment(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(
project=project,
environment_id=Environment.get_or_create(project, "production").id,
label="foo",
)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"environment": None,
"actionMatch": "any",
"filterMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": [
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] is None
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id is None
@responses.activate
def test_update_channel_slack(self):
self.login_as(user=self.user)
project = self.create_project()
integration = Integration.objects.create(
provider="slack",
name="Awesome Team",
external_id="TXXXXXXX1",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
integration.add_organization(project.organization, self.user)
conditions = [{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}]
actions = [
{
"channel_id": "old_channel_id",
"workspace": integration.id,
"id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
"channel": "#old_channel_name",
}
]
rule = Rule.objects.create(
project=project, data={"conditions": [conditions], "actions": [actions]},
)
actions[0]["channel"] = "#new_channel_name"
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
channels = {
"ok": "true",
"channels": [
{"name": "old_channel_name", "id": "old_channel_id"},
{"name": "new_channel_name", "id": "new_channel_id"},
],
}
responses.add(
method=responses.GET,
url="https://slack.com/api/channels.list",
status=200,
content_type="application/json",
body=json.dumps(channels),
)
response = self.client.put(
url,
data={
"name": "#new_channel_name",
"actionMatch": "any",
"filterMatch": "any",
"actions": actions,
"conditions": conditions,
"frequency": 30,
},
format="json",
)
assert response.status_code == 200, response.content
rule = Rule.objects.get(id=response.data["id"])
assert rule.label == "#new_channel_name"
assert rule.data["actions"][0]["channel_id"] == "new_channel_id"
def test_slack_channel_id_saved(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(
project=project,
environment_id=Environment.get_or_create(project, "production").id,
label="foo",
)
integration = Integration.objects.create(
provider="slack",
name="Awesome Team",
external_id="TXXXXXXX1",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
integration.add_organization(project.organization, self.user)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"environment": None,
"actionMatch": "any",
"actions": [
{
"id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
"name": "Send a notification to the funinthesun Slack workspace to #team-team-team and show tags [] in notification",
"workspace": integration.id,
"channel": "#team-team-team",
"input_channel_id": "CSVK0921",
}
],
"conditions": [
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["actions"][0]["channel_id"] == "CSVK0921"
def test_invalid_rule_node_type(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"conditions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"actions": [],
},
format="json",
)
assert response.status_code == 400, response.content
def test_invalid_rule_node(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"conditions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"actions": [{"id": "foo"}],
},
format="json",
)
assert response.status_code == 400, response.content
def test_rule_form_not_valid(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"conditions": [{"id": "sentry.rules.conditions.tagged_event.TaggedEventCondition"}],
"actions": [],
},
format="json",
)
assert response.status_code == 400, response.content
def test_rule_form_missing_condition(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"conditions": [],
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
},
format="json",
)
assert response.status_code == 400, response.content
def test_rule_form_missing_action(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"action": [],
"conditions": [
{"id": "sentry.rules.conditions.tagged_event.TaggedEventCondition"}
],
},
format="json",
)
assert response.status_code == 400, response.content
def test_update_filters(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
conditions = [{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}]
filters = [
{"id": "sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter", "value": 10}
]
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"filterMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": conditions,
"filters": filters,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id is None
assert rule.data["action_match"] == "any"
assert rule.data["filter_match"] == "any"
assert rule.data["actions"] == [
{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}
]
assert rule.data["conditions"] == conditions + filters
assert RuleActivity.objects.filter(rule=rule, type=RuleActivityType.UPDATED.value).exists()
class DeleteProjectRuleTest(APITestCase):
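"""Exercises DELETE requests; a deleted rule is marked PENDING_DELETION rather than removed."""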
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.delete(url)
assert response.status_code == 202, response.content
rule = Rule.objects.get(id=rule.id)
assert rule.status == RuleStatus.PENDING_DELETION
assert RuleActivity.objects.filter(rule=rule, type=RuleActivityType.DELETED.value).exists()
#! /usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import urllib2
from swift.common.utils import SWIFT_CONF_FILE
from swift.common.ring import Ring
from urlparse import urlparse
from hashlib import md5
import eventlet
import json
import optparse
import time
import sys
import os
def seconds2timeunit(seconds):
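"""Convert a duration in seconds into a coarser (value, unit) pair.
For example, seconds2timeunit(7200) returns (2.0, 'hours') and
seconds2timeunit(172800) returns (2.0, 'days').
"""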
elapsed = seconds
unit = 'seconds'
if elapsed >= 60:
elapsed = elapsed / 60.0
unit = 'minutes'
if elapsed >= 60:
elapsed = elapsed / 60.0
unit = 'hours'
if elapsed >= 24:
elapsed = elapsed / 24.0
unit = 'days'
return elapsed, unit
def size_suffix(size):
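"""Return a human-readable size string using decimal (1000-based) units.
For example, size_suffix(999) returns '999 bytes' and size_suffix(1234000)
returns '1 MB' (the integer division truncates).
"""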
suffixes = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
for suffix in suffixes:
if size < 1000:
return "%s %s" % (size, suffix)
size = size // 1000
return "%s %s" % (size, suffix)
class Scout(object):
"""
Obtain swift recon information
"""
def __init__(self, recon_type, verbose=False, suppress_errors=False,
timeout=5):
self.recon_type = recon_type
self.verbose = verbose
self.suppress_errors = suppress_errors
self.timeout = timeout
def scout_host(self, base_url, recon_type):
"""
Perform the actual HTTP request to obtain swift recon telemetry.
:param base_url: the base url of the host you wish to check. str of the
format 'http://127.0.0.1:6000/recon/'
:param recon_type: the swift recon check to request.
:returns: tuple of (recon url used, response body, and status)
"""
url = base_url + recon_type
try:
body = urllib2.urlopen(url, timeout=self.timeout).read()
content = json.loads(body)
if self.verbose:
print("-> %s: %s" % (url, content))
status = 200
except urllib2.HTTPError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = err.code
except urllib2.URLError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = -1
return url, content, status
def scout(self, host):
"""
Obtain telemetry from a host running the swift recon middleware.
:param host: host to check
:returns: tuple of (recon url used, response body, status, time start
and time end)
"""
base_url = "http://%s:%s/recon/" % (host[0], host[1])
ts_start = time.time()
url, content, status = self.scout_host(base_url, self.recon_type)
ts_end = time.time()
return url, content, status, ts_start, ts_end
def scout_server_type(self, host):
"""
Obtain Server header by calling OPTIONS.
:param host: host to check
:returns: Server type, status
"""
try:
url = "http://%s:%s/" % (host[0], host[1])
req = urllib2.Request(url)
req.get_method = lambda: 'OPTIONS'
conn = urllib2.urlopen(req)
header = conn.info().getheader('Server')
server_header = header.split('/')
content = server_header[0]
status = 200
except urllib2.HTTPError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = err.code
except urllib2.URLError as err:
if not self.suppress_errors or self.verbose:
print("-> %s: %s" % (url, err))
content = err
status = -1
return url, content, status
class SwiftRecon(object):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
def __init__(self):
self.verbose = False
self.suppress_errors = False
self.timeout = 5
self.pool_size = 30
self.pool = eventlet.GreenPool(self.pool_size)
self.check_types = ['account', 'container', 'object']
self.server_type = 'object'
def _gen_stats(self, stats, name=None):
"""Compute various stats from a list of values."""
cstats = [x for x in stats if x is not None]
if len(cstats) > 0:
ret_dict = {'low': min(cstats), 'high': max(cstats),
'total': sum(cstats), 'reported': len(cstats),
'number_none': len(stats) - len(cstats), 'name': name}
ret_dict['average'] = \
ret_dict['total'] / float(len(cstats))
ret_dict['perc_none'] = \
ret_dict['number_none'] * 100.0 / len(stats)
else:
ret_dict = {'reported': 0}
return ret_dict
def _print_stats(self, stats):
"""
print out formatted stats to console
:param stats: dict of stats generated by _gen_stats
"""
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def _md5_file(self, path):
"""
Get the MD5 checksum of a file.
:param path: path to file
:returns: MD5 checksum, hex encoded
"""
md5sum = md5()
with open(path, 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
return md5sum.hexdigest()
def get_devices(self, region_filter, zone_filter, swift_dir, ring_name):
"""
Get a list of hosts in the ring
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_name: Name of the ring, such as 'object'
:returns: a set of tuples containing the ip and port of hosts
"""
ring_data = Ring(swift_dir, ring_name=ring_name)
devs = [d for d in ring_data.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
devs = [d for d in devs if d['zone'] == zone_filter]
return set((d['ip'], d['port']) for d in devs)
def get_ringmd5(self, hosts, swift_dir):
"""
Compare ring md5sum's with those on remote host
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param swift_dir: The local directory with the ring files.
"""
matches = 0
errors = 0
ring_names = set()
for server_type in ('account', 'container'):
ring_name = '%s.ring.gz' % server_type
ring_names.add(ring_name)
# include any other object ring files
for ring_name in os.listdir(swift_dir):
if ring_name.startswith('object') and \
ring_name.endswith('ring.gz'):
ring_names.add(ring_name)
rings = {}
for ring_name in ring_names:
md5sum = md5()
with open(os.path.join(swift_dir, ring_name), 'rb') as f:
block = f.read(4096)
while block:
md5sum.update(block)
block = f.read(4096)
ring_sum = md5sum.hexdigest()
rings[ring_name] = ring_sum
recon = Scout("ringmd5", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking ring md5sums" % self._ptime())
if self.verbose:
for ring_file, ring_sum in rings.items():
print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
success = True
for remote_ring_file, remote_ring_sum in response.items():
remote_ring_name = os.path.basename(remote_ring_file)
ring_sum = rings.get(remote_ring_name, None)
if remote_ring_sum != ring_sum:
success = False
print("!! %s (%s => %s) doesn't match on disk md5sum" % (
url, remote_ring_name, remote_ring_sum))
if not success:
errors += 1
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def get_swiftconfmd5(self, hosts, printfn=print):
"""
Compare swift.conf md5sum with that on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param printfn: function to print text; defaults to print()
"""
matches = 0
errors = 0
conf_sum = self._md5_file(SWIFT_CONF_FILE)
recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
self.timeout)
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %
(url, response[SWIFT_CONF_FILE]))
else:
matches = matches + 1
if self.verbose:
printfn("-> %s matches." % url)
else:
errors = errors + 1
printfn("%s/%s hosts matched, %s error[s] while checking hosts."
% (matches, len(hosts), errors))
printfn("=" * 79)
def async_check(self, hosts):
"""
Obtain and print async pending statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
scan = {}
recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking async pendings" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['async_pending']
stats = self._gen_stats(scan.values(), 'async_pending')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def driveaudit_check(self, hosts):
"""
Obtain and print drive audit error statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
scan = {}
recon = Scout("driveaudit", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking drive-audit errors" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['drive_audit_errors']
stats = self._gen_stats(scan.values(), 'drive_audit_errors')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[drive_audit_errors] - No hosts returned valid data.")
print("=" * 79)
def umount_check(self, hosts):
"""
Check for and print unmounted drives
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
unmounted = {}
errors = {}
recon = Scout("unmounted", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Getting unmounted drives from %s hosts..." %
(self._ptime(), len(hosts)))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
unmounted[url] = []
errors[url] = []
for i in response:
if not isinstance(i['mounted'], bool):
errors[url].append(i['device'])
else:
unmounted[url].append(i['device'])
for host in unmounted:
node = urlparse(host).netloc
for entry in unmounted[host]:
print("Not mounted: %s on %s" % (entry, node))
for host in errors:
node = urlparse(host).netloc
for entry in errors[host]:
print("Device errors: %s on %s" % (entry, node))
print("=" * 79)
def server_type_check(self, hosts):
"""
Check for server types on the ring
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
errors = {}
recon = Scout("server_type_check", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Validating server type '%s' on %s hosts..." %
(self._ptime(), self.server_type, len(hosts)))
for url, response, status in self.pool.imap(
recon.scout_server_type, hosts):
if status == 200:
if response != self.server_type + '-server':
errors[url] = response
print("%s/%s hosts ok, %s error[s] while checking hosts." % (
len(hosts) - len(errors), len(hosts), len(errors)))
for host in errors:
print("Invalid: %s is %s" % (host, errors[host]))
print("=" * 79)
def expirer_check(self, hosts):
"""
Obtain and print expirer statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {'object_expiration_pass': [], 'expired_last_pass': []}
recon = Scout("expirer/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on expirers" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats['object_expiration_pass'].append(
response.get('object_expiration_pass'))
stats['expired_last_pass'].append(
response.get('expired_last_pass'))
for k in stats:
if stats[k]:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
print("=" * 79)
def replication_check(self, hosts):
"""
Obtain and print replication statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {'replication_time': [], 'failure': [], 'success': [],
'attempted': []}
recon = Scout("replication/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on replication" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
most_recent_url = None
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats['replication_time'].append(
response.get('replication_time',
response.get('object_replication_time', 0)))
repl_stats = response.get('replication_stats')
if repl_stats:
for stat_key in ['attempted', 'failure', 'success']:
stats[stat_key].append(repl_stats.get(stat_key))
last = response.get('replication_last',
response.get('object_replication_last', 0))
if last < least_recent_time:
least_recent_time = last
least_recent_url = url
if last > most_recent_time:
most_recent_time = last
most_recent_url = url
for k in stats:
if stats[k]:
if k != 'replication_time':
computed = self._gen_stats(stats[k],
name='replication_%s' % k)
else:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
elapsed, elapsed_unit, host))
print("=" * 79)
def updater_check(self, hosts):
"""
Obtain and print updater statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = []
recon = Scout("updater/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking updater times" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response['%s_updater_sweep' % self.server_type]:
stats.append(response['%s_updater_sweep' %
self.server_type])
if len(stats) > 0:
computed = self._gen_stats(stats, name='updater_last_sweep')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[updater_last_sweep] - No hosts returned valid data.")
else:
print("[updater_last_sweep] - No hosts returned valid data.")
print("=" * 79)
def auditor_check(self, hosts):
"""
Obtain and print obj auditor statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
scan = {}
adone = '%s_auditor_pass_completed' % self.server_type
afail = '%s_audits_failed' % self.server_type
apass = '%s_audits_passed' % self.server_type
asince = '%s_audits_since' % self.server_type
recon = Scout("auditor/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking auditor stats" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response
if len(scan) < 1:
print("Error: No hosts available")
return
stats = {}
stats[adone] = [scan[i][adone] for i in scan
if scan[i][adone] is not None]
stats[afail] = [scan[i][afail] for i in scan
if scan[i][afail] is not None]
stats[apass] = [scan[i][apass] for i in scan
if scan[i][apass] is not None]
stats[asince] = [scan[i][asince] for i in scan
if scan[i][asince] is not None]
for k in stats:
if len(stats[k]) < 1:
print("[%s] - No hosts returned valid data." % k)
else:
if k != asince:
computed = self._gen_stats(stats[k], k)
if computed['reported'] > 0:
self._print_stats(computed)
if len(stats[asince]) >= 1:
low = min(stats[asince])
high = max(stats[asince])
total = sum(stats[asince])
average = total / len(stats[asince])
print('[last_pass] oldest: %s, newest: %s, avg: %s' %
(self._ptime(low), self._ptime(high), self._ptime(average)))
print("=" * 79)
def nested_get_value(self, key, recon_entry):
"""
Generator that yields all values for given key in a recon cache entry.
This is for use with object auditor recon cache entries. If the
object auditor has run in parallel, the recon cache will have entries
of the form: {'object_auditor_stats_ALL': { 'disk1': {..},
'disk2': {..},
'disk3': {..},
...}}
If the object auditor hasn't run in parallel, the recon cache will have
entries of the form: {'object_auditor_stats_ALL': {...}}.
The ZBF auditor doesn't run in parallel. However, if a subset of
devices is selected for auditing, the recon cache will have an entry
of the form: {'object_auditor_stats_ZBF': {'disk1disk2..diskN': {}}}
We use this generator to find all instances of a particular key in
these multi-level dictionaries.
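Illustrative example (dictionary ordering may vary):
list(self.nested_get_value('passes', {'disk1': {'passes': 1}, 'disk2': {'passes': 2}}))
yields [1, 2].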
"""
for k, v in recon_entry.items():
if isinstance(v, dict):
for value in self.nested_get_value(key, v):
yield value
if k == key:
yield v
def object_auditor_check(self, hosts):
"""
Obtain and print obj auditor statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
all_scan = {}
zbf_scan = {}
atime = 'audit_time'
bprocessed = 'bytes_processed'
passes = 'passes'
errors = 'errors'
quarantined = 'quarantined'
recon = Scout("auditor/object", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking auditor stats " % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response['object_auditor_stats_ALL']:
all_scan[url] = response['object_auditor_stats_ALL']
if response['object_auditor_stats_ZBF']:
zbf_scan[url] = response['object_auditor_stats_ZBF']
if len(all_scan) > 0:
stats = {}
stats[atime] = [sum(self.nested_get_value(atime, all_scan[i]))
for i in all_scan]
stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
all_scan[i])) for i in all_scan]
stats[passes] = [sum(self.nested_get_value(passes, all_scan[i]))
for i in all_scan]
stats[errors] = [sum(self.nested_get_value(errors, all_scan[i]))
for i in all_scan]
stats[quarantined] = [sum(self.nested_get_value(quarantined,
all_scan[i])) for i in all_scan]
for k in stats:
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ALL_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[ALL_auditor] - No hosts returned valid data.")
else:
print("[ALL_auditor] - No hosts returned valid data.")
if len(zbf_scan) > 0:
stats = {}
stats[atime] = [sum(self.nested_get_value(atime, zbf_scan[i]))
for i in zbf_scan]
stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
zbf_scan[i])) for i in zbf_scan]
stats[errors] = [sum(self.nested_get_value(errors, zbf_scan[i]))
for i in zbf_scan]
stats[quarantined] = [sum(self.nested_get_value(quarantined,
zbf_scan[i])) for i in zbf_scan]
for k in stats:
if None in stats[k]:
stats[k] = [x for x in stats[k] if x is not None]
if len(stats[k]) < 1:
print("[Auditor %s] - No hosts returned valid data." % k)
else:
computed = self._gen_stats(stats[k],
name='ZBF_%s_last_path' % k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[ZBF_auditor] - No hosts returned valid data.")
else:
print("[ZBF_auditor] - No hosts returned valid data.")
print("=" * 79)
def load_check(self, hosts):
"""
Obtain and print load average statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
load1 = {}
load5 = {}
load15 = {}
recon = Scout("load", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking load averages" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
load1[url] = response['1m']
load5[url] = response['5m']
load15[url] = response['15m']
stats = {"1m": load1, "5m": load5, "15m": load15}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='%s_load_avg' % item)
self._print_stats(computed)
else:
print("[%s_load_avg] - No hosts returned valid data." % item)
print("=" * 79)
def quarantine_check(self, hosts):
"""
Obtain and print quarantine statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
objq = {}
conq = {}
acctq = {}
stats = {}
recon = Scout("quarantined", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking quarantine" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
objq[url] = response['objects']
conq[url] = response['containers']
acctq[url] = response['accounts']
for key in response.get('policies', {}):
pkey = "objects_%s" % key
stats.setdefault(pkey, {})
stats[pkey][url] = response['policies'][key]['objects']
stats.update({"objects": objq, "containers": conq, "accounts": acctq})
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='quarantined_%s' % item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def socket_usage(self, hosts):
"""
Obtain and print /proc/net/sockstat statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
inuse4 = {}
mem = {}
inuse6 = {}
timewait = {}
orphan = {}
recon = Scout("sockstat", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking socket usage" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
inuse4[url] = response['tcp_in_use']
mem[url] = response['tcp_mem_allocated_bytes']
inuse6[url] = response.get('tcp6_in_use', 0)
timewait[url] = response['time_wait']
orphan[url] = response['orphan']
stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem,
"tcp6_in_use": inuse6, "time_wait": timewait,
"orphan": orphan}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(), item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def disk_usage(self, hosts, top=0, lowest=0, human_readable=False):
"""
Obtain and print disk usage statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {}
highs = []
lows = []
raw_total_used = []
raw_total_avail = []
percents = {}
top_percents = [(None, 0)] * top
low_percents = [(None, 100)] * lowest
recon = Scout("diskusage", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking disk usage now" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
hostusage = []
for entry in response:
if not isinstance(entry['mounted'], bool):
print("-> %s/%s: Error: %s" % (url, entry['device'],
entry['mounted']))
elif entry['mounted']:
used = float(entry['used']) / float(entry['size']) \
* 100.0
raw_total_used.append(entry['used'])
raw_total_avail.append(entry['avail'])
hostusage.append(round(used, 2))
for ident, oused in top_percents:
if oused < used:
top_percents.append(
(url + ' ' + entry['device'], used))
top_percents.sort(key=lambda x: -x[1])
top_percents.pop()
break
for ident, oused in low_percents:
if oused > used:
low_percents.append(
(url + ' ' + entry['device'], used))
low_percents.sort(key=lambda x: x[1])
low_percents.pop()
break
stats[url] = hostusage
for url in stats:
if len(stats[url]) > 0:
# get per host hi/los for another day
low = min(stats[url])
high = max(stats[url])
highs.append(high)
lows.append(low)
for percent in stats[url]:
percents[int(percent)] = percents.get(int(percent), 0) + 1
else:
print("-> %s: Error. No drive info available." % url)
if len(lows) > 0:
low = min(lows)
high = max(highs)
# dist graph shamelessly stolen from https://github.com/gholt/tcod
print("Distribution Graph:")
mul = 69.0 / max(percents.values())
for percent in sorted(percents):
print('% 3d%%%5d %s' % (percent, percents[percent],
'*' * int(percents[percent] * mul)))
raw_used = sum(raw_total_used)
raw_avail = sum(raw_total_avail)
raw_total = raw_used + raw_avail
avg_used = 100.0 * raw_used / raw_total
if human_readable:
raw_used = size_suffix(raw_used)
raw_avail = size_suffix(raw_avail)
raw_total = size_suffix(raw_total)
print("Disk usage: space used: %s of %s" % (raw_used, raw_total))
print("Disk usage: space free: %s of %s" % (raw_avail, raw_total))
print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" %
(low, high, avg_used))
else:
print("No hosts returned valid data.")
print("=" * 79)
if top_percents:
print('TOP %s' % top)
for ident, used in top_percents:
if ident:
url, device = ident.split()
host = urlparse(url).netloc.split(':')[0]
print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
if low_percents:
print('LOWEST %s' % lowest)
for ident, used in low_percents:
if ident:
url, device = ident.split()
host = urlparse(url).netloc.split(':')[0]
print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
def time_check(self, hosts):
"""
        Check time synchronization of hosts against the current local time
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
matches = 0
errors = 0
recon = Scout("time", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking time-sync" % self._ptime())
for url, ts_remote, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
if (ts_remote < ts_start or ts_remote > ts_end):
diff = abs(ts_end - ts_remote)
ts_end_f = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(ts_end))
ts_remote_f = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(ts_remote))
print("!! %s current time is %s, but remote is %s, "
"differs by %.2f sec" % (
url,
ts_end_f,
ts_remote_f,
diff))
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def main(self):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
print("=" * 79)
usage = '''
usage: %prog <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
[--human-readable]
<server_type>\taccount|container|object
Defaults to object server.
ex: %prog container -l --auditor
'''
args = optparse.OptionParser(usage)
args.add_option('--verbose', '-v', action="store_true",
help="Print verbose info")
args.add_option('--suppress', action="store_true",
help="Suppress most connection related errors")
args.add_option('--async', '-a', action="store_true",
help="Get async stats")
args.add_option('--replication', '-r', action="store_true",
help="Get replication stats")
args.add_option('--auditor', action="store_true",
help="Get auditor stats")
args.add_option('--updater', action="store_true",
help="Get updater stats")
args.add_option('--expirer', action="store_true",
help="Get expirer stats")
args.add_option('--unmounted', '-u', action="store_true",
help="Check cluster for unmounted devices")
args.add_option('--diskusage', '-d', action="store_true",
help="Get disk usage stats")
args.add_option('--human-readable', action="store_true",
help="Use human readable suffix for disk usage stats")
args.add_option('--loadstats', '-l', action="store_true",
help="Get cluster load average stats")
args.add_option('--quarantined', '-q', action="store_true",
help="Get cluster quarantine stats")
args.add_option('--validate-servers', action="store_true",
help="Validate servers on the ring")
args.add_option('--md5', action="store_true",
help="Get md5sum of servers ring and compare to "
"local copy")
args.add_option('--sockstat', action="store_true",
help="Get cluster socket usage stats")
args.add_option('--driveaudit', action="store_true",
help="Get drive audit error stats")
args.add_option('--time', '-T', action="store_true",
help="Check time synchronization")
args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.')
args.add_option('--lowest', type='int', metavar='COUNT', default=0,
help='Also show the lowest COUNT entries in rank \
order.')
args.add_option('--all', action="store_true",
help="Perform all checks. Equal to \t\t\t-arudlqT "
"--md5 --sockstat --auditor --updater --expirer")
args.add_option('--region', type="int",
help="Only query servers in specified region")
args.add_option('--zone', '-z', type="int",
help="Only query servers in specified zone")
args.add_option('--timeout', '-t', type="int", metavar="SECONDS",
help="Time to wait for a response from a server",
default=5)
args.add_option('--swiftdir', default="/etc/swift",
help="Default = /etc/swift")
options, arguments = args.parse_args()
if len(sys.argv) <= 1 or len(arguments) > 1:
args.print_help()
sys.exit(0)
if arguments:
if arguments[0] in self.check_types:
self.server_type = arguments[0]
else:
print("Invalid Server Type")
args.print_help()
sys.exit(1)
else:
self.server_type = 'object'
swift_dir = options.swiftdir
self.verbose = options.verbose
self.suppress_errors = options.suppress
self.timeout = options.timeout
hosts = self.get_devices(options.region, options.zone,
swift_dir, self.server_type)
print("--> Starting reconnaissance on %s hosts" % len(hosts))
print("=" * 79)
if options.all:
if self.server_type == 'object':
self.async_check(hosts)
self.replication_check(hosts)
self.object_auditor_check(hosts)
self.updater_check(hosts)
self.expirer_check(hosts)
elif self.server_type == 'container':
self.replication_check(hosts)
self.auditor_check(hosts)
self.updater_check(hosts)
elif self.server_type == 'account':
self.replication_check(hosts)
self.auditor_check(hosts)
self.umount_check(hosts)
self.load_check(hosts)
self.disk_usage(hosts, options.top, options.lowest,
options.human_readable)
self.get_ringmd5(hosts, swift_dir)
self.quarantine_check(hosts)
self.socket_usage(hosts)
self.server_type_check(hosts)
self.driveaudit_check(hosts)
self.time_check(hosts)
else:
if options.async:
if self.server_type == 'object':
self.async_check(hosts)
else:
print("Error: Can't check asyncs on non object servers.")
if options.unmounted:
self.umount_check(hosts)
if options.replication:
self.replication_check(hosts)
if options.auditor:
if self.server_type == 'object':
self.object_auditor_check(hosts)
else:
self.auditor_check(hosts)
if options.updater:
if self.server_type == 'account':
print("Error: Can't check updaters on account servers.")
else:
self.updater_check(hosts)
if options.expirer:
if self.server_type == 'object':
self.expirer_check(hosts)
else:
print("Error: Can't check expired on non object servers.")
if options.validate_servers:
self.server_type_check(hosts)
if options.loadstats:
self.load_check(hosts)
if options.diskusage:
self.disk_usage(hosts, options.top, options.lowest,
options.human_readable)
if options.md5:
self.get_ringmd5(hosts, swift_dir)
self.get_swiftconfmd5(hosts)
if options.quarantined:
self.quarantine_check(hosts)
if options.sockstat:
self.socket_usage(hosts)
if options.driveaudit:
self.driveaudit_check(hosts)
if options.time:
self.time_check(hosts)
def main():
try:
reconnoiter = SwiftRecon()
reconnoiter.main()
except KeyboardInterrupt:
print('\n')
if __name__ == '__main__':
main()
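# Typical invocations, shown here only as a sketch (the installed
# console-script name may differ from this source file):
#   swift-recon object -d --top 5     # disk usage, plus the five fullest drives
#   swift-recon container --md5       # compare ring md5sums across hosts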
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Segment pixel-wise street/not street for a single image with a lasagne model.
"""
import logging
import sys
import time
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
import scipy
import numpy as np
import pickle
# sst modules
from . import utils
def main(image_path, output_path, model_path_trained, stride,
hard_classification=True):
with Timer() as t:
nn, parameters = utils.deserialize_model(model_path_trained)
assert stride <= parameters['patch_size']
logging.info("Patch size: %i", parameters['patch_size'])
logging.info("Fully: %s", str(parameters['fully']))
logging.info("Stride: %i", stride)
logging.info("=> elasped deserialize model: %s s", t.secs)
with Timer() as t:
result = eval_net(trained=nn,
photo_path=image_path,
parameters=parameters,
stride=stride,
hard_classification=hard_classification)
logging.info("=> elasped evaluating model: %s s", t.secs)
scipy.misc.imsave(output_path, result)
utils.overlay_images(image_path, result, output_path,
hard_classification=hard_classification)
def eval_net(trained,
photo_path,
parameters=None,
stride=10,
hard_classification=True,
verbose=False):
"""
Parameters
----------
trained : theano expression
A trained neural network
photo_path : string
Path to the photo which will get classified
parameters : dict
Parameters relevant for the model such as patch_size
stride : int
hard_classification : bool
If True, the image will only show either street or no street.
If False, the image will show probabilities.
verbose : bool
"""
patch_size = parameters['patch_size']
fully = parameters['fully']
# read images
feats = utils.load_color_image_features(photo_path)
orig_dimensions = feats.shape
patches = []
px_left_patchcenter = (patch_size - 1) / 2
height, width = feats.shape[0], feats.shape[1]
if fully:
to_pad_width = (patch_size - width) % stride
to_pad_height = (patch_size - height) % stride
# Order of to_pad_height / to_pad_width tested with scipy.misc.imsave
feats = np.pad(feats,
[(to_pad_height, 0),
(to_pad_width / 2, to_pad_width - (to_pad_width / 2)),
(0, 0)],
mode='edge')
else:
feats = np.pad(feats,
[(px_left_patchcenter, px_left_patchcenter),
(px_left_patchcenter, px_left_patchcenter),
(0, 0)],
mode='edge')
start_x = px_left_patchcenter
end_x = feats.shape[0] - px_left_patchcenter
start_y = start_x
end_y = feats.shape[1] - px_left_patchcenter
new_height, new_width = 0, 0
for patch_center_x in range(start_x, end_x, stride):
new_height += 1
for patch_center_y in range(start_y, end_y, stride):
if new_height == 1:
new_width += 1
# Get patch from original image
new_patch = feats[patch_center_x - px_left_patchcenter:
patch_center_x + px_left_patchcenter + 1,
patch_center_y - px_left_patchcenter:
patch_center_y + px_left_patchcenter + 1,
:]
patches.append(new_patch)
if verbose:
logging.info("stride: %s", stride)
logging.info("patch_size: %i", patch_size)
logging.info("fully: %s", str(fully))
logging.info("Generated %i patches for evaluation", len(patches))
to_classify = np.array(patches, dtype=np.float32)
x_new = []
for ac in to_classify:
c = []
c.append(ac[:, :, 0])
c.append(ac[:, :, 1])
c.append(ac[:, :, 2])
x_new.append(c)
to_classify = np.array(x_new, dtype=np.float32)
if hard_classification:
result = trained.predict(to_classify)
else:
result = trained.predict_proba(to_classify)
if not fully:
result_vec = np.zeros(result.shape[0])
for i, el in enumerate(result):
result_vec[i] = el[1]
result = result_vec
# Compute combined segmentation of image
if fully:
result = result.reshape(result.shape[0], patch_size, patch_size)
result = result.reshape(new_height, new_width, patch_size, patch_size)
# Merge patch classifications into a single image (result2)
result2 = np.zeros((height, width))
left_px = (patch_size - stride) / 2
right_px = left_px + stride # avoid rounding problems with even stride
offset = {'h': 0, 'w': 0}
if verbose:
logging.info("new_height=%i, new_width=%i", new_height, new_width)
logging.info("result.shape = %s", str(result.shape))
for j in range(0, new_height):
for i in range(0, new_width):
if i == 0:
left_margin_px = to_pad_width / 2
right_margin_px = right_px
elif i == new_width - 1:
left_margin_px = left_px
# TODO (TOTHINK): -1: it's a kind of magic magic...
# seems to do the right thing...
right_margin_px = patch_size - (to_pad_width -
(to_pad_width / 2)) - 1
else:
left_margin_px = left_px
right_margin_px = right_px
if j == 0:
top_px = to_pad_height
bottom_px = right_px
elif j == new_height - 1:
top_px = left_px
bottom_px = patch_size
else:
top_px = left_px
bottom_px = right_px
# TOTHINK: no +1?
to_write = result[j, i,
top_px:(bottom_px),
left_margin_px:(right_margin_px)]
if i == 0 and j == 0:
offset['h'] = to_write.shape[0]
offset['w'] = to_write.shape[1]
start_h = (offset['h'] + (j - 1) * stride) * (j != 0)
start_w = (offset['w'] + (i - 1) * stride) * (i != 0)
result2[start_h:start_h + to_write.shape[0],
start_w:start_w + to_write.shape[1]] = to_write
if hard_classification:
result2 = np.round((result2 - np.amin(result2)) /
(np.amax(result2) - np.amin(result2)))
result2 = result2 * 255
return result2
else:
result = result.reshape((new_height, new_width)) * 255
# Scale image to correct size
result = scale_output(result, orig_dimensions)
return result
def eval_pickle(trained, parameters, test_pickle_path, stride=1):
"""
Parameters
----------
trained : theano expression
A trained neural network
parameters : dict
parameters relevant for the model (e.g. patch size)
test_pickle_path : str
Path to a pickle file
"""
with open(test_pickle_path, 'rb') as f:
list_tuples = pickle.load(f)
total_results = {'tp': 0,
'tn': 0,
'fp': 0,
'fn': 0}
relative_results = {'tp': 0.0,
'tn': 0.0,
'fp': 0.0,
'fn': 0.0}
for i, (data_image_path, gt_image_path) in enumerate(list_tuples):
logging.info("Processing image: %s of %s", i + 1, len(list_tuples))
result = eval_net(trained,
photo_path=data_image_path,
parameters=parameters,
stride=stride)
tmp = get_error_matrix(result, gt_image_path)
for key, val in tmp.items():
total_results[key] += val
relative_results['tp'] = (float(total_results['tp']) /
(total_results['tp'] + total_results['fn']))
relative_results['fn'] = (float(total_results['fn']) /
(total_results['tp'] + total_results['fn']))
relative_results['fp'] = (float(total_results['fp']) /
(total_results['fp'] + total_results['tn']))
relative_results['tn'] = (float(total_results['tn']) /
(total_results['fp'] + total_results['tn']))
logging.info("Eval results: %s", total_results)
logging.info("Eval results relativ: %s", relative_results)
logging.info("Positive Examples: %s ", total_results['tp'] +
total_results['fn'])
logging.info("Negative Examples: %s ", total_results['fp'] +
total_results['tn'])
logging.info("Accurity: %s ", float((total_results['tp']
+ total_results['tn'])) /
(total_results['tp'] + total_results['fn']
+ total_results['fp'] + total_results['tn']))
logging.info("%i images evaluated.", len(list_tuples))
def get_error_matrix(result, gt_image_path):
"""
Get true positive, false positive, true negative, false negative.
Parameters
----------
result : numpy array
gt_image_path : str
Path to an image file with the labeled data.
Returns
-------
dict
with keys tp, tn, fp, fn
"""
total_results = {'tp': 0,
'tn': 0,
'fp': 0,
'fn': 0}
img = scipy.misc.imread(gt_image_path)
new_img = np.zeros(img.shape)
for i, row in enumerate(img):
for j, pixel in enumerate(row):
new_img[i][j] = (105 == pixel)
for gt, predict in zip(new_img.flatten(), result.flatten()):
if gt == 0:
if predict == 0:
total_results['tn'] += 1
else:
total_results['fp'] += 1
else:
if predict == 0:
total_results['fn'] += 1
else:
total_results['tp'] += 1
return total_results
def scale_output(classify_image, new_shape):
"""Scale `classify_image` to `new_shape`.
Parameters
----------
classify_image : numpy array
new_shape : tuple
Returns
-------
numpy array
"""
return scipy.misc.imresize(classify_image, new_shape, interp='nearest')
def get_parser():
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input',
dest='image_path',
type=lambda x: utils.is_valid_file(parser, x),
                        help='load IMAGE for pixel-wise street segmentation',
default=utils.get_default_data_image_path(),
metavar='IMAGE')
parser.add_argument('-o', '--output',
dest='output_path',
help='store semantic segmentation here',
default="out.png",
metavar='IMAGE')
parser.add_argument('-m', '--model',
dest='model_path_trained',
                        help='path to the trained model file',
default=utils.get_model_path(),
metavar='MODEL')
parser.add_argument("--stride",
dest="stride",
default=10,
type=int,
help=("the higher this value, the longer the "
"evaluation takes, but the more accurate it is"))
return parser
class Timer(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print('elapsed time: %f ms' % self.msecs)
if __name__ == '__main__':
args = get_parser().parse_args()
main(image_path=args.image_path,
output_path=args.output_path,
model_path_trained=args.model_path_trained,
stride=args.stride)
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run a group of subprocesses and then finish."""
from __future__ import print_function
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
# cpu cost measurement
measure_cpu_costs = False
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
_MAX_RESULT_SIZE = 8192
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
return ''.join(c for c in s if ord(c) < 128)
def sanitized_environment(env):
sanitized = {}
for key, value in env.items():
sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
return sanitized
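# Example of the sanitisation (the variable name is made up):
#   sanitized_environment({'PR_TITLE': u'fix \u201csmart quotes\u201d'})
#   returns {'PR_TITLE': 'fix smart quotes'}; the non-ASCII quote characters
#   are dropped entirely, which is what the reviewable.io note above refers to.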
def platform_string():
if platform.system() == 'Windows':
return 'windows'
elif platform.system()[:7] == 'MSYS_NT':
return 'windows'
elif platform.system() == 'Darwin':
return 'mac'
elif platform.system() == 'Linux':
return 'linux'
else:
return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
pass
else:
have_alarm = False
def alarm_handler(unused_signum, unused_frame):
global have_alarm
have_alarm = False
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [ 31, 0 ],
'green': [ 32, 0 ],
'yellow': [ 33, 0 ],
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
'purple': [ 35, 0 ],
'cyan': [ 36, 0 ]
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
'TIMEOUT_FLAKE': 'purple',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
message.old_tag = tag
message.old_msg = msg
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
logging.info(explanatory_text)
logging.info('%s: %s', tag, msg)
else:
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
_CLEAR_LINE,
'\n%s' % explanatory_text if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1],
_COLORS[_TAG_COLOR[tag]][0],
tag,
msg,
'\n' if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
except:
pass
message.old_tag = ''
message.old_msg = ''
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self, cmdline, shortname=None, environ=None,
cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
timeout_retries=0, kill_handler=None, cpu_cost=1.0,
verbose_success=False):
"""
Arguments:
cmdline: a list of arguments to pass as the command line
environ: a dictionary of environment variables to set in the child process
kill_handler: a handler that will be called whenever job.kill() is invoked
cpu_cost: number of cores per second this job needs
"""
if environ is None:
environ = {}
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
self.flake_retries = flake_retries
self.timeout_retries = timeout_retries
self.kill_handler = kill_handler
self.cpu_cost = cpu_cost
self.verbose_success = verbose_success
def identity(self):
return '%r %r' % (self.cmdline, self.environ)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
def __repr__(self):
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
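# A minimal JobSpec, purely illustrative (the command and names are made up):
#   spec = JobSpec(cmdline=['python', 'tests/smoke_test.py'],
#                  shortname='smoke_test',
#                  environ={'CONFIG': 'opt'},
#                  timeout_seconds=300,
#                  flake_retries=1)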
class JobResult(object):
def __init__(self):
self.state = 'UNKNOWN'
self.returncode = -1
self.elapsed_time = 0
self.num_failures = 0
self.retries = 0
self.message = ''
class Job(object):
"""Manages one job."""
def __init__(self, spec, newline_on_success, travis, add_env,
quiet_success=False):
self._spec = spec
self._newline_on_success = newline_on_success
self._travis = travis
self._add_env = add_env.copy()
self._retries = 0
self._timeout_retries = 0
self._suppress_failure_message = False
self._quiet_success = quiet_success
if not self._quiet_success:
message('START', spec.shortname, do_newline=self._travis)
self.result = JobResult()
self.start()
def GetSpec(self):
return self._spec
def start(self):
self._tempfile = tempfile.TemporaryFile()
env = dict(os.environ)
env.update(self._spec.environ)
env.update(self._add_env)
env = sanitized_environment(env)
self._start = time.time()
cmdline = self._spec.cmdline
if measure_cpu_costs:
cmdline = ['time', '--portability'] + cmdline
try_start = lambda: subprocess.Popen(args=cmdline,
stderr=subprocess.STDOUT,
stdout=self._tempfile,
cwd=self._spec.cwd,
shell=self._spec.shell,
env=env)
delay = 0.3
for i in range(0, 4):
try:
self._process = try_start()
break
except OSError:
message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
self._process = try_start()
self._state = _RUNNING
def state(self):
"""Poll current state of the job. Prints messages at completion."""
def stdout(self=self):
self._tempfile.seek(0)
stdout = self._tempfile.read()
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message('FLAKE', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout(), do_newline=True)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
self.start()
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message('FAILED', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout(), do_newline=True)
self.result.state = 'FAILED'
self.result.num_failures += 1
self.result.returncode = self._process.returncode
else:
self._state = _SUCCESS
measurement = ''
if measure_cpu_costs:
m = re.search(r'real ([0-9.]+)\nuser ([0-9.]+)\nsys ([0-9.]+)', stdout())
real = float(m.group(1))
user = float(m.group(2))
sys = float(m.group(3))
if real > 0.5:
cores = (user + sys) / real
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
if not self._quiet_success:
message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
elif (self._state == _RUNNING and
self._spec.timeout_seconds is not None and
time.time() - self._start > self._spec.timeout_seconds):
if self._timeout_retries < self._spec.timeout_retries:
message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
self.start()
else:
message('TIMEOUT', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
self.kill()
self.result.state = 'TIMEOUT'
self.result.num_failures += 1
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
def suppress_failure_message(self):
self._suppress_failure_message = True
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
stop_on_failure, add_env, quiet_success):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._newline_on_success = newline_on_success
self._travis = travis
self._stop_on_failure = stop_on_failure
self._add_env = add_env
self._quiet_success = quiet_success
self.resultset = {}
self._remaining = None
self._start_time = time.time()
def set_remaining(self, remaining):
self._remaining = remaining
def get_num_failures(self):
return self._failures
def cpu_cost(self):
c = 0
for job in self._running:
c += job._spec.cpu_cost
return c
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while True:
if self.cancelled(): return False
current_cpu_cost = self.cpu_cost()
if current_cpu_cost == 0: break
if current_cpu_cost + spec.cpu_cost <= self._maxjobs: break
self.reap()
if self.cancelled(): return False
job = Job(spec,
self._newline_on_success,
self._travis,
self._add_env,
self._quiet_success)
self._running.add(job)
if job.GetSpec().shortname not in self.resultset:
self.resultset[job.GetSpec().shortname] = []
return True
def reap(self):
"""Collect the dead jobs."""
while self._running:
dead = set()
for job in self._running:
st = job.state()
if st == _RUNNING: continue
if st == _FAILURE or st == _KILLED:
self._failures += 1
if self._stop_on_failure:
self._cancelled = True
for job in self._running:
job.kill()
dead.add(job)
break
for job in dead:
self._completed += 1
if not self._quiet_success or job.result.state != 'PASSED':
self.resultset[job.GetSpec().shortname].append(job.result)
self._running.remove(job)
if dead: return
if not self._travis and platform_string() != 'windows':
rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
if self._remaining is not None and self._completed > 0:
now = time.time()
sofar = now - self._start_time
remaining = sofar / self._completed * (self._remaining + len(self._running))
rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
message('WAITING', '%s%d jobs running, %d complete, %d failed' % (
rstr, len(self._running), self._completed, self._failures))
if platform_string() == 'windows':
time.sleep(0.1)
else:
global have_alarm
if not have_alarm:
have_alarm = True
signal.alarm(10)
signal.pause()
def cancelled(self):
"""Poll for cancellation."""
if self._cancelled: return True
if not self._check_cancelled(): return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled(): pass # poll cancellation
self.reap()
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
staging = []
for x in xs:
staging.append(x)
if len(staging) > 5000:
yield (staging.pop(0), None)
n = len(staging)
for i, x in enumerate(staging):
yield (x, n - i - 1)
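# Behaviour of tag_remaining for a small input (well under the 5000-item
# buffer): list(tag_remaining(['a', 'b', 'c'])) yields
# [('a', 2), ('b', 1), ('c', 0)], i.e. each item is paired with the number of
# items still to come. For very long inputs the earliest items are paired with
# None instead, because the total is not yet known at that point.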
def run(cmdlines,
check_cancelled=_never_cancelled,
maxjobs=None,
newline_on_success=False,
travis=False,
infinite_runs=False,
stop_on_failure=False,
add_env={},
skip_jobs=False,
quiet_success=False):
if skip_jobs:
resultset = {}
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
for job in cmdlines:
message('SKIPPED', job.shortname, do_newline=True)
resultset[job.shortname] = [skipped_job_result]
return 0, resultset
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success)
for cmdline, remaining in tag_remaining(cmdlines):
if not js.start(cmdline):
break
if remaining is not None:
js.set_remaining(remaining)
js.finish()
return js.get_num_failures(), js.resultset
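# Sketch of a typical call (the JobSpec list here is hypothetical):
#   num_failures, resultset = run([spec_a, spec_b],
#                                 maxjobs=4,
#                                 newline_on_success=True,
#                                 stop_on_failure=True)
# resultset maps each shortname to a list of JobResult objects.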
|
|
import numpy as np
import re
from writeOnFiles import writeFile
from taxoTree import TaxoTree,printTree
from misc import mem,isInDatabase,partitionSampleByMetadatumValue,setOperations
from graph import Graph
from kruskal import kruskal
from comparisonFunctions import printComparison,f
from distanceFunctions import printDistance,applyFctD
#@dataArray = [samplesInfoList,infoList,samplesOccList,speciesList,paths,n,nodesList,taxoTree,sampleIDList]
integer = re.compile("[0-9]+")
#Parsing functions
def parseList(string):
if not (len(string.split(",")) == 1):
print "\n/!\ ERROR: Do not use ',' as a separator: rather use ';'."
raise ValueError
elif not (len(string.split(":")) == 1):
print "\n/!\ ERROR: Do not use ':' as a separator: rather use ';'."
raise ValueError
return string.split(";")
def parseListNode(string):
if not (len(string.split(":")) == 1):
print "\n/!\ ERROR: Do not use ':' as a separator: rather use ';'."
raise ValueError
ls = string.split(";")
res = []
for node in ls:
nodeSplit = node.split(",")
if not (len(nodeSplit) == 2):
print "\n/!\ ERROR: Please use ',' as a separator for name,rank of a bacteria."
raise ValueError
nodeSplitName = nodeSplit[0].split("(")
if not (len(nodeSplitName) == 2):
print "\n/!\ ERROR: Please use the syntax '([name],[rank])' for each bacteria."
raise ValueError
nodeSplitRank = nodeSplit[-1].split(")")
if not (len(nodeSplitRank) == 2):
print "\n/!\ ERROR: Please use the syntax '([name],[rank])' for each bacteria."
raise ValueError
name,rank = nodeSplitName[-1],nodeSplitRank[0]
res.append((name,rank))
return res
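#Example (the taxon names below are placeholders only):
#   parseListNode("(Escherichia,genus);(Bacteroides,genus)")
#   returns [("Escherichia", "genus"), ("Bacteroides", "genus")]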
def parseIntList(string):
if not (len(string.split(",")) == 1):
print "\n/!\ ERROR: Do not use ',' as a separator: rather use ';'."
raise ValueError
elif not (len(string.split(":")) == 1):
print "\n/!\ ERROR: Do not use ':' as a separator: rather use ';'."
raise ValueError
l = string.split(";")
resultList = []
for s in l:
if integer.match(s):
resultList.append(int(s))
elif s == "+inf" or s == "-inf":
resultList.append(s)
else:
print "\n/!\ ERROR: Here you can only use integers or '+inf' or '-inf'."
raise ValueError
return resultList
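#Example: parseIntList("1;5;+inf") returns [1, 5, "+inf"];
#anything that is neither an integer nor '+inf'/'-inf' raises a ValueError.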
#___________________________________________________________________________
#Macros for formatting
#Printing pretty lists of nodes
def listNodes(nodeList):
string = ""
for l in nodeList[:-1]:
string += str(l) + ", "
string += str(nodeList[-1])
return string
#@stringNode is assumed to be a (name,rank) pair, with name and rank being strings
#@sanitizeNode allows it to be printed "(name,rank)" and not "('name','rank')"
def sanitizeNode(stringNode):
return "(" + stringNode[0] + "," + stringNode[1] + ")"
#Printing pretty lists of metadata with their default values
def listSampleInvolved(metadataList,interval1List,interval2List,sampleNameList):
string = ""
if not metadataList and not interval1List and not interval2List and not sampleNameList:
print "\n/!\ ERROR: You have selected no sample."
raise ValueError
#If samples were selected one by one
elif sampleNameList:
string += "\ndepending on the group of samples: "
for sl in sampleNameList[:-1]:
string += str(sl) + ", "
string += str(sampleNameList[-1])
#If samples were selected according to metadata values (len(metadataList) = len(interval1List) = len(interval2List))
if metadataList:
string += "\nselected on metadata (for each line): "
n = len(metadataList)
for i in range(n-1):
if (interval1List[i] == interval2List[i]):
string += metadataList[i] + " (value equal to " + str(interval1List[i]) + "), "
else:
string += metadataList[i] + " (value between " + str(interval1List[i]) + " and " + str(interval2List[i]) + "), "
if (interval1List[-1] == interval2List[-1]):
string += metadataList[-1] + " (value equal to " + str(interval1List[-1]) + ")"
else:
string += metadataList[-1] + " (value between " + str(interval1List[-1]) + " and " + str(interval2List[-1]) + ")"
return string
#Selecting samples in two ways: either choose each of them one by one, or select them according to the values of certain metadata
def createSampleNameList(dataArray):
metadataList = []
interval1List = []
interval2List = []
sampleIDList = dataArray[8]
i = raw_input("/!\ How many different lists of samples do you want?\n")
if not integer.match(i):
print "\n/!\ ERROR: You need to enter a integer here!"
raise ValueError
numberList = int(i)
sampleNameList = []
if (numberList < 1):
print "\n/!\ ERROR: Empty set of lists of samples!"
raise ValueError
while numberList:
answer = raw_input("Do you want to select samples one by one, or to select samples matching requirements on metadata? one/matching \n")
if (answer == "one"):
if (len(sampleIDList) < 2):
print "\n/!\ ERROR: List of samples is empty or only of length one!..."
raise ValueError
print sampleIDList
sampleNameList11 = parseList(raw_input("Input the list of samples using the ID printed above. [e.g. " + sampleIDList[0] + ";"+ sampleIDList[1] + " ]\n"))
elif (answer == "matching"):
print dataArray[1]
metadataList = parseList(raw_input("Input the list of metadata you want to consider among those written above. [ e.g. " + dataArray[1][0] + ";" + dataArray[1][-1] + " ]\n"))
isInDatabase(metadataList,dataArray[1])
interval1List = parseIntList(raw_input("Input the list of lower interval bounds corresponding to metadatum/metadata above. [ Please refer to README for more details. e.g. 1;2 ]\n"))
if not (len(interval1List) == len(metadataList)):
print "\n/!\ ERROR: You need to enter the same number of lower bounds than of metadata!"
raise ValueError
interval2List = parseIntList(raw_input("Input the list of upper interval bounds corresponding to metadatum/metadata above. [ Please refer to README for more details. e.g. 3;2 ]\n"))
if not (len(interval2List) == len(metadataList)):
print "\n/!\ ERROR: You need to enter the same number of upper bounds than of metadata!"
raise ValueError
sampleNameList11 = computeSamplesInGroup(dataArray[0],dataArray[1],metadataList,interval1List,interval2List)[0]
else:
print "\n/!\ ERROR: You need to answer either 'one' or 'matching' and not: \"",answer,"\"."
raise ValueError
isInDatabase(sampleNameList11,sampleIDList)
sampleNameList.append(sampleNameList11)
numberList -= 1
return sampleNameList,metadataList,interval1List,interval2List
#____________________________________________________________________________
#Actions
def runAct(dataArray):
print "Choosing the list of samples."
#or use partition by metadatum values
sampleNameList,metadataList,interval1List,interval2List = createSampleNameList(dataArray)
n = len(sampleNameList)
print "\nAVAILABLE COMPARISON FUNCTION(S):"
fctF = printComparison()
f = raw_input("\nChoose your comparison function above those printed above.\n")
isInDatabase([f],fctF)
completeGraph = Graph(n).constructComplete(sampleNameList,dataArray[7],f)
superTree,w = kruskal(completeGraph)
#Constructing distance matrix
matrix = np.zeros((n,n))
print "\nAVAILABLE DISTANCE FUNCTION(S):"
fctD = printDistance()
d = raw_input("\nChoose your distance function above those printed above.\n")
isInDatabase([d],fctD)
valueArray = []
print "\nSUPERTREE of weight:",w
print superTree.vertices
print superTree.edges
for i in range(n):
for j in range(i,n):
#matrix is symmetric (distance)
s = applyFctD(d,superTree,i,j)
matrix[i][j] = s
matrix[j][i] = s
valueArray.append(s)
valueArray = sorted(valueArray)
valueNumber = n*n/2
quartile3 = valueNumber*3/4
valueQuartile = valueArray[quartile3]
mostDifferent = []
#Distance is symmetric
for i in range(n):
for j in range(i+1,n):
if matrix[i][j] >= valueQuartile:
mostDifferent.append((sampleNameList[i],sampleNameList[j]))
print "\nRESULTING MATRIX:"
print matrix
print "\n---\nMost different samples groups from:\n"
for sampleGroup in sampleNameList:
print sampleGroup
print "\nare:\n"
print mostDifferent
print "\n--- END OF DISPLAY\n"
#____________________________________________________________________________
def printTreeAct(dataArray):
answer = raw_input("Do you want to print sample hit lists? Y/N\n")
if not ((answer == "Y") or (answer == "N")):
print "\n/!\ ERROR: You need to answer 'Y' or 'N'."
raise ValueError
printTree(dataArray[7],(answer == "Y"))
|
|
# Computes the t-statistics on Gini. Not very elegant, but faster.
import math
import csv
from scipy import stats
import numpy
def tstat(x1, x2, s1, s2, n):
'''
(float, float, float, float, int) => float
Computes the t-statistic for two datasets.
'''
    t = (x1 - x2) / math.sqrt((s1**2 + s2**2) / n)
return t
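# Illustrative check with made-up numbers (not taken from any model output):
#   tstat(0.52, 0.48, 0.10, 0.11, 46)
#   = (0.52 - 0.48) / sqrt((0.10**2 + 0.11**2) / 46) ~= 1.82,
# which falls below the |t| > 2 cut-off used as the significance threshold
# further down.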
def readFile(filename):
'''
(str) => list of dicts
loads file filename into a list. Each item is a dict encoding one run in the model.
'''
with open (filename, 'r') as csvFile:
csvReader = csv.DictReader (csvFile, delimiter = ',', quotechar = '"')
runs = []
for row in csvReader:
runs.append(row)
return runs
def tstats_gini_by_policy(dataset):
'''
(list of dicts) => list of dicts
Accepts the output of readFile as an input. Computes t-statistics by policy, for each value of the parameter vector
found in dataset.
    Returns a summary table.
'''
# the output table!
resultTable = []
chattiness_values = [".1", ".2", ".4"]
intimacy_values = ["1", "5", "11"]
policies = ['engage', 'both']
priorities = ['newer', 'more active']
randomisedchattiness =['true', 'false']
listOfTests = []
nc_neg_significant = 0
nc_pos_significant = 0
ms_neg_significant = 0
ms_pos_significant = 0
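    # The 36 used in the summary prints below is the number of parameter
    # combinations examined: 3 chattiness values * 3 intimacy values
    # * 2 randomisedchattiness values * 2 priorities.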
for c in chattiness_values:
for i in intimacy_values:
for rand in randomisedchattiness:
for pri in priorities:
row = {} # the row of the output table
to_compare = []
checkpoint = 0
for ob in dataset:
if (ob['globalchattiness'] == c and ob['intimacystrength'] == i and ob['randomisedchattiness'] == rand and
ob['priority'] == pri and ob['nc_inblockmean'] != checkpoint):
# I do not have this particular unique value of nc_inblockmean, so I append the ob and replace the value
to_compare.append(ob)
checkpoint = ob['nc_inblockmean']
# print checkpoint
# print 'Global chattiness = ' + c
# print 'Intimacystrength = ' + i
# print 'Randomised chattiness = ' + rand
# print 'Priority = ' + pri
# now the T-stats proper.
# print 'T-stat of H0: nc(' + to_compare[0]['policy'] + ' vs. ' + to_compare[1]['policy'] + ')'
nc_tstat = tstat(float(to_compare[0]['nc_inblockmean']), float(to_compare[1]['nc_inblockmean']),
float(to_compare[0]['nc_inblockse']), float(to_compare[1]['nc_inblockse']), 46)
if nc_tstat > 2:
nc_pos_significant += 1
elif nc_tstat < -2:
nc_neg_significant += 1
# print nc_tstat
# print 'T-stat of H0: ms(' + to_compare[0]['policy'] + ' vs. ' + to_compare[1]['policy'] + ')'
ms_tstat = tstat(float(to_compare[0]['ms_inblockmean']), float(to_compare[1]['ms_inblockmean']),
float(to_compare[0]['ms_inblockse']), float(to_compare[1]['ms_inblockse']), 46)
if ms_tstat > 2:
ms_pos_significant += 1
elif ms_tstat < -2:
ms_neg_significant += 1
# print ms_tstat
# print '\n'
# add the record to the output table
# parameters
row['globalchattiness'] = c
row['intimacystrength'] = i
row['randomisedchattiness'] = rand
row['priority'] = pri
row['ms_t_engage_vs_both'] = ms_tstat
row['nc_t_engage_vs_both'] = nc_tstat
resultTable.append(row)
print 'Policies: engage vs. both'
print 'nc, positive and significant: ' + str (nc_pos_significant)
print 'nc, negative and significant: ' + str (nc_neg_significant)
print 'nc, nonsignificant:' + str (36 - nc_pos_significant - nc_neg_significant)
print 'ms, positive and significant: ' + str (ms_pos_significant)
print 'ms, negative and significant: ' + str (ms_neg_significant)
print 'ms, nonsignificant:' + str (36 - ms_pos_significant - ms_neg_significant)
return resultTable
def tstats_gini_by_priority(dataset, resultTable):
'''
(list of dicts, list of dicts) => list of dicts
Accepts the output of readFile as an input. Computes t-statistics by priority, for each value of the parameter vector
found in dataset.
The computed values are added to outputTable.
'''
chattiness_values = [".1", ".2", ".4"]
intimacy_values = ["1", "5", "11"]
policies = ['engage', 'both']
priorities = ['newer', 'more active']
randomisedchattiness =['true', 'false']
listOfTests = []
nc_neg_significant = 0
nc_pos_significant = 0
ms_neg_significant = 0
ms_pos_significant = 0
for c in chattiness_values:
for i in intimacy_values:
for rand in randomisedchattiness:
for p in policies:
row = {}
to_compare = []
checkpoint = 0
for ob in dataset:
if (ob['globalchattiness'] == c and ob['intimacystrength'] == i and ob['randomisedchattiness'] == rand and
ob['policy'] == p and ob['nc_inblockmean'] != checkpoint):
# I do not have this particular unique value of nc_inblockmean, so I append the ob and replace the value
to_compare.append(ob)
checkpoint = ob['nc_inblockmean']
# print checkpoint
# print 'Global chattiness = ' + c
# print 'Intimacystrength = ' + i
# print 'Randomised chattiness = ' + rand
# print 'Policy = ' + p
# now the T-stats proper.
# print 'T-stat of H0: nc(' + to_compare[0]['priority'] + ' vs. ' + to_compare[1]['priority'] + ')'
nc_tstat = tstat(float(to_compare[0]['nc_inblockmean']), float(to_compare[1]['nc_inblockmean']),
float(to_compare[0]['nc_inblockse']), float(to_compare[1]['nc_inblockse']), 46)
if nc_tstat > 2:
nc_pos_significant += 1
elif nc_tstat < -2:
nc_neg_significant += 1
# print nc_tstat
# print 'T-stat of H0: ms(' + to_compare[0]['priority'] + ' vs. ' + to_compare[1]['priority'] + ')'
ms_tstat = tstat(float(to_compare[0]['ms_inblockmean']), float(to_compare[1]['ms_inblockmean']),
float(to_compare[0]['ms_inblockse']), float(to_compare[1]['ms_inblockse']), 46)
if ms_tstat > 2:
ms_pos_significant += 1
elif ms_tstat < -2:
ms_neg_significant += 1
#print ms_tstat
# print '\n'
# add the record to the output table
# parameters
row['globalchattiness'] = c
row['intimacystrength'] = i
row['randomisedchattiness'] = rand
row['policy'] = p
row['ms_t_more_active_vs_newer'] = ms_tstat
row['nc_t_more_active_vs_newer'] = nc_tstat
                    resultTable.append(row) # I do not initialise a resultTable, I append to the one passed as an argument to the function
print 'Priorities: more active vs. newer'
print 'nc, positive and significant: ' + str (nc_pos_significant)
print 'nc, negative and significant: ' + str (nc_neg_significant)
print 'nc, nonsignificant:' + str (36 - nc_pos_significant - nc_neg_significant)
print 'ms, positive and significant: ' + str (ms_pos_significant)
print 'ms, negative and significant: ' + str (ms_neg_significant)
print 'ms, nonsignificant:' + str (36 - ms_pos_significant - ms_neg_significant)
return resultTable
def tstats_others_by_policy(dataset, resultTable):
'''
(list of dicts, list of dicts) => list of dicts
Accepts the output of readFile as an input. Computes t-statistics by priority, for each value of the parameter vector
found in dataset.
The computed values are added to outputTable.
'''
chattiness_values = [".1", ".2", ".4"]
intimacy_values = ["1", "5", "11"]
policies = ['engage', 'both']
priorities = ['newer', 'more active']
randomisedchattiness =['true', 'false']
targets = ['mgmteffort', 'dropouts', 'totalmembershipstrength', 'totalcomments']
listOfTests = []
for c in chattiness_values:
for i in intimacy_values:
for rand in randomisedchattiness:
for pri in priorities:
row = {}
for target in targets:
array_engage = [] # values when policy == engage
array_both = [] # values when policy == both
for ob in dataset:
if (ob['globalchattiness'] == c and ob['intimacystrength'] == i and ob['randomisedchattiness'] == rand and
ob['priority'] == pri and ob['policy'] == 'engage'):
# store the value of target contained in ob into the first array
array_engage.append(float(ob[target]))
elif (ob['globalchattiness'] == c and ob['intimacystrength'] == i and ob['randomisedchattiness'] == rand and
ob['priority'] == pri and ob['policy'] == 'both'):
# store the value of target contained in ob into the second array
array_both.append(float(ob[target]))
tstat = numpy.asscalar(stats.ttest_ind(array_both, array_engage, equal_var = False)[0])
# tstat = float(stats.ttest_ind(array_engage, array_both, equal_var = False)[0])
                        # Adding the value to the result table is a pain, because I need to iterate and dig out the right row.
for result in resultTable:
if result['globalchattiness'] == c and result['intimacystrength'] == i and result['randomisedchattiness'] == rand and result['priority'] == pri:
# no condition for policy!
result[target + '_both_vs_engage'] = tstat
return resultTable
def tstats_others_by_priority(dataset, resultTable):
'''
(list of dicts, list of dicts) => list of dicts
Accepts the output of readFile as an input. Computes t-statistics by priority, for each value of the parameter vector
found in dataset.
The computed values are added to outputTable.
'''
chattiness_values = [".1", ".2", ".4"]
intimacy_values = ["1", "5", "11"]
policies = ['engage', 'both']
priorities = ['newer', 'more active']
randomisedchattiness =['true', 'false']
targets = ['mgmteffort', 'dropouts', 'totalmembershipstrength', 'totalcomments']
listOfTests = []
for c in chattiness_values:
for i in intimacy_values:
for rand in randomisedchattiness:
for p in policies:
row = {}
for target in targets:
                        array_more_active = [] # values when priority == more active
                        array_newer = [] # values when priority == newer
for ob in dataset:
if (ob['globalchattiness'] == c and ob['intimacystrength'] == i and ob['randomisedchattiness'] == rand and
ob['policy'] == p and ob['priority'] == 'more active'):
# store the value of target contained in ob into the first array
array_more_active.append(float(ob[target]))
elif (ob['globalchattiness'] == c and ob['intimacystrength'] == i and ob['randomisedchattiness'] == rand and
ob['policy'] == p and ob['priority'] == 'newer'):
# store the value of target contained in ob into the second array
array_newer.append(float(ob[target]))
tstat = float(stats.ttest_ind(array_more_active, array_newer, equal_var = False)[0])
                        # Adding the value to the result table is a pain, because I need to iterate and dig out the right row.
for result in resultTable:
if (result['globalchattiness'] == c and result['intimacystrength'] == i and result['randomisedchattiness'] == rand and
result['policy'] == p):
# no condition for priority!
result[target + '_more_active_vs_newer'] = tstat
return resultTable
def normalize_rows(table):
'''
(list of dicts) => list of dicts
All rows of the table need to have the same fields. If not, the function adds them.
'''
check = []
for key in table[0]:
check.append(key)
# first pass: store all keys of all rows into checks
for row in table:
for key in row:
if key not in check:
check.append(key)
# second pass: add to each row all keys as needed
for row in table:
for key in check:
if key not in row:
row[key] = ''
return table
def probe(table):
'''
(list of dicts) => bool
making sure the table is well formed
'''
success = True
check = len (table[0])
for row in table:
if len(row) != check:
print "This table has a problem"
quit ()
return success
def find_value(dataset):
for ob in dataset:
if ob['nc_inblockmean'] == '.3548942':
print ob['runnumber']
def writeFile(listOfDicts, filename):
'''
(listOfDicts, str) => noneType
write the file to csv
'''
fieldnames = []
for key in listOfDicts[0]:
fieldnames.append(key)
for key in listOfDicts[-1]:
if key not in fieldnames:
fieldnames.append(key)
with open(filename, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for run in listOfDicts:
writer.writerow(run)
if __name__ == '__main__':
dirPath = '/Users/albertocottica/github/local/community-management-simulator/Data/'
data = readFile(dirPath + 'data-w-gini-retry_replica_merge_batches.csv')
stage1 = tstats_gini_by_policy(data)
stage2 = tstats_gini_by_priority(data, stage1)
stage3 = normalize_rows(stage2)
formed = probe(stage3)
stage4 = tstats_others_by_policy(data, stage3)
final = tstats_others_by_priority(data, stage4)
print final[0]
success = writeFile(final, dirPath + 'MOAT_stage2.csv')
|
|
"""Tests for go.apps.sequential_send.vumi_app"""
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import Clock, LoopingCall
from vumi.message import TransportUserMessage
from vumi.tests.helpers import VumiTestCase
from vumi.tests.utils import LogCatcher
from go.apps.sequential_send.vumi_app import SequentialSendApplication
from go.apps.sequential_send import vumi_app as sequential_send_module
from go.apps.tests.helpers import AppWorkerHelper
class TestSequentialSendApplication(VumiTestCase):
transport_type = u'sms'
@inlineCallbacks
def setUp(self):
self.app_helper = self.add_helper(
AppWorkerHelper(SequentialSendApplication))
self.clock = Clock()
self.patch(sequential_send_module, 'LoopingCall', self.looping_call)
self.app = yield self.app_helper.get_app_worker({})
def looping_call(self, *args, **kwargs):
looping_call = LoopingCall(*args, **kwargs)
looping_call.clock = self.clock
return looping_call
@inlineCallbacks
def reply_to(self, msg, content, continue_session=True, **kw):
session_event = (None if continue_session
else TransportUserMessage.SESSION_CLOSE)
reply = TransportUserMessage(
to_addr=msg['from_addr'],
from_addr=msg['to_addr'],
group=msg['group'],
in_reply_to=msg['message_id'],
content=content,
session_event=session_event,
transport_name=msg['transport_name'],
transport_type=msg['transport_type'],
transport_metadata=msg['transport_metadata'],
helper_metadata=msg['helper_metadata'],
**kw)
yield self.dispatch(reply)
@inlineCallbacks
def wait_for_messages(self, nr_of_messages, total_length):
msgs = yield self.wait_for_dispatched_messages(total_length)
returnValue(msgs[-1 * nr_of_messages:])
@inlineCallbacks
def _stub_out_async(self, *convs):
"""Stub out async components.
NOTE: Riak stuff takes a while and messes up fake clock timing, so we
stub it out. It gets tested in other test methods. Also, we replace the
redis manager for the same reason.
"""
# Avoid hitting Riak for the conversation and Redis for poll times.
expected = [[conv.user_account.key, conv.key] for conv in convs]
poll_times = [(yield self.app._get_last_poll_time())]
scheduled_conversations = yield self.app._get_scheduled_conversations()
def get_conversations(conv_pointers):
self.assertEqual(sorted(conv_pointers), sorted(expected))
return list(convs)
self.app.get_conversations = get_conversations
self.app._get_last_poll_time = lambda: poll_times[-1]
self.app._set_last_poll_time = lambda t: poll_times.append(str(t))
self.app._get_scheduled_conversations = lambda: scheduled_conversations
self.message_convs = []
# Fake the message send by adding the convs to a list.
def send_scheduled_messages(sched_conv):
self.message_convs.append(sched_conv)
self.app.send_scheduled_messages = send_scheduled_messages
def _patch_with_raise_once(self, obj, attr, err):
def raiser(*args, **kw):
patch.restore()
raise err
patch = self.patch(obj, attr, raiser)
return err
def check_message_convs_and_advance(self, convs, seconds):
self.assertEqual(convs, self.message_convs)
return self.clock.advance(seconds)
@inlineCallbacks
def test_schedule_daily_conv(self):
conv = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv)
conv = yield self.app_helper.get_conversation(conv.key)
yield self._stub_out_async(conv)
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([conv], 70)
yield self.check_message_convs_and_advance([conv], 3600 * 24 - 140)
yield self.check_message_convs_and_advance([conv], 70)
yield self.check_message_convs_and_advance([conv, conv], 70)
self.assertEqual(self.message_convs, [conv, conv])
@inlineCallbacks
def test_schedule_daily_with_stopped_conv(self):
conv = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv)
conv = yield self.app_helper.get_conversation(conv.key)
yield self.app_helper.stop_conversation(conv)
conv = yield self.app_helper.get_conversation(conv.key)
yield self._stub_out_async()
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([], 70)
        # had it been scheduled, it would show up from here onwards
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([], 3600 * 24 - 140)
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([], 70)
self.assertEqual(self.message_convs, [])
@inlineCallbacks
def test_schedule_day_of_month_conv(self):
conv = yield self.app_helper.create_conversation(config={
'schedule': {
'recurring': 'day_of_month',
'time': '12:00:00', 'days':
'1, 5',
},
})
yield self.app_helper.start_conversation(conv)
conv = yield self.app_helper.get_conversation(conv.key)
yield self._stub_out_async(conv)
yield self.check_message_convs_and_advance([], 3600 * 11)
yield self.check_message_convs_and_advance([], 3600 * 13)
yield self.check_message_convs_and_advance([conv], 3600 * 24)
yield self.check_message_convs_and_advance([conv], 3600 * 48)
yield self.check_message_convs_and_advance([conv], 3600 * 13)
yield self.check_message_convs_and_advance([conv, conv], 3600 * 11)
yield self.check_message_convs_and_advance(
[conv, conv], 3600 * 24 * 20)
self.assertEqual(self.message_convs, [conv, conv])
@inlineCallbacks
def test_schedule_convs(self):
"""Test multiple conversation scheduling.
NOTE: Riak stuff takes a while and messes up fake clock timing, so we
stub it out. It gets tested in other test methods.
"""
conv1 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv1)
conv1 = yield self.app_helper.get_conversation(conv1.key)
conv2 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:02:30'}})
yield self.app_helper.start_conversation(conv2)
conv2 = yield self.app_helper.get_conversation(conv2.key)
yield self._stub_out_async(conv1, conv2)
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([], 70)
yield self.check_message_convs_and_advance([conv1], 70)
yield self.check_message_convs_and_advance(
[conv1, conv2], 3600 * 24 - 140)
yield self.check_message_convs_and_advance([conv1, conv2], 70)
yield self.check_message_convs_and_advance([conv1, conv2, conv1], 70)
self.assertEqual(self.message_convs, [conv1, conv2, conv1, conv2])
@inlineCallbacks
def test_poll_conversations_errors(self):
"""Test that polling for conversations continues after errors."""
conv1 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv1)
conv1 = yield self.app_helper.get_conversation(conv1.key)
yield self._stub_out_async(conv1)
yield self.check_message_convs_and_advance([], 70)
self.assertEqual(self.flushLoggedErrors(), [])
err = self._patch_with_raise_once(
self.app, 'get_conversations', ValueError("Failed"))
yield self.check_message_convs_and_advance([], 70)
[failure] = self.flushLoggedErrors()
self.assertEqual(failure.value, err)
# no conversations processed initially because of the error
yield self.check_message_convs_and_advance([], 3600 * 24 - 70)
yield self.check_message_convs_and_advance([], 70)
# now a conversation has been processed
self.assertEqual(self.message_convs, [conv1])
@inlineCallbacks
def test_process_conversation_schedule_errors(self):
"""
Test that errors for one conversation do not prevent other
conversations sending messages.
"""
conv1 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv1)
conv1 = yield self.app_helper.get_conversation(conv1.key)
conv2 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv2)
conv2 = yield self.app_helper.get_conversation(conv2.key)
yield self._stub_out_async(conv1, conv2)
err = self._patch_with_raise_once(
self.app, 'send_scheduled_messages', ValueError("Failed"))
self.assertEqual(self.message_convs, [])
yield self.check_message_convs_and_advance([], 140)
[failure] = self.flushLoggedErrors()
self.assertEqual(failure.value, err)
self.assertEqual(self.message_convs, [conv2])
@inlineCallbacks
def test_get_conversations(self):
"""Test get_conversation, because we stub it out elsewhere.
"""
conv1 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv1)
conv2 = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:02:30'}})
yield self.app_helper.start_conversation(conv2)
yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:02:30'}})
convs = yield self.app.get_conversations([
[conv1.user_account.key, conv1.key],
[conv2.user_account.key, conv2.key]])
self.assertEqual(sorted([c.key for c in convs]),
sorted([conv1.key, conv2.key]))
@inlineCallbacks
def test_get_conversations_missing_conv(self):
"""
        Test get_conversations when it's expecting a conversation that doesn't
exist.
"""
conv = yield self.app_helper.create_conversation(
config={'schedule': {'recurring': 'daily', 'time': '00:01:40'}})
yield self.app_helper.start_conversation(conv)
with LogCatcher(message='Conversation .* not found.') as lc:
convs = yield self.app.get_conversations(
[[conv.user_account.key, conv.key], ['badaccount', 'badkey']])
self.assertEqual(
lc.messages(),
['Conversation badkey for account badaccount not found.'])
self.assertEqual([c.key for c in convs], [conv.key])
@inlineCallbacks
def test_sends(self):
"""Test send_scheduled_messages, because we stub it out elsewhere.
"""
group = yield self.app_helper.create_group(u'group')
contact1 = yield self.app_helper.create_contact(
u'27831234567', name=u'First', surname=u'Contact', groups=[group])
contact2 = yield self.app_helper.create_contact(
u'27831234568', name=u'Second', surname=u'Contact', groups=[group])
conv = yield self.app_helper.create_conversation(config={
'schedule': {'recurring': 'daily', 'time': '00:01:40'},
'messages': ['foo', 'bar'],
}, groups=[group])
yield self.app_helper.start_conversation(conv)
conv = yield self.app_helper.get_conversation(conv.key)
# Send to two contacts.
yield self.app.send_scheduled_messages(conv)
[msg1, msg2] = sorted(
self.app_helper.get_dispatched_outbound(),
key=lambda m: m['to_addr'])
self.assertEqual(msg1['content'], 'foo')
self.assertEqual(msg1['to_addr'], contact1.msisdn)
self.assertEqual(msg1['helper_metadata']['go'], {
'user_account': conv.user_account.key,
'conversation_type': 'sequential_send',
'conversation_key': conv.key,
})
self.assertEqual(msg2['content'], 'foo')
self.assertEqual(msg2['to_addr'], contact2.msisdn)
self.assertEqual(msg2['helper_metadata']['go'], {
'user_account': conv.user_account.key,
'conversation_type': 'sequential_send',
'conversation_key': conv.key,
})
# Send to previous two contacts and a new third contact.
contact3 = yield self.app_helper.create_contact(
u'27831234569', name=u'Third', surname=u'Contact', groups=[group])
yield self.app.send_scheduled_messages(conv)
[msg1, msg2, msg3] = sorted(
self.app_helper.get_dispatched_outbound()[2:],
key=lambda m: m['to_addr'])
self.assertEqual(msg1['content'], 'bar')
self.assertEqual(msg1['to_addr'], contact1.msisdn)
self.assertEqual(msg2['content'], 'bar')
self.assertEqual(msg2['to_addr'], contact2.msisdn)
self.assertEqual(msg3['content'], 'foo')
self.assertEqual(msg3['to_addr'], contact3.msisdn)
# Previous two contacts are done, so we should only send to the third.
yield self.app.send_scheduled_messages(conv)
[msg] = sorted(
self.app_helper.get_dispatched_outbound()[5:],
key=lambda m: m['to_addr'])
self.assertEqual(msg['content'], 'bar')
self.assertEqual(msg['to_addr'], contact3.msisdn)
|
|
#!/usr/bin/env python
"""Windows specific utils."""
import ctypes
import exceptions
import logging
import os
import re
import time
import _winreg
import ntsecuritycon
import pywintypes
import win32api
import win32file
import win32security
from google.protobuf import message
from grr.lib import config_lib
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
DACL_PRESENT = 1
DACL_DEFAULT = 0
def CanonicalPathToLocalPath(path):
r"""Converts the canonical paths as used by GRR to OS specific paths.
Due to the inconsistencies between handling paths in windows we need to
convert a path to an OS specific version prior to using it. This function
should be called just before any OS specific functions.
  Canonical paths on windows:
    - Use / instead of \.
    - Begin with /X:/ where X is the drive letter.
Args:
path: A canonical path specification.
Returns:
A windows specific path.
"""
# Account for raw devices
path = path.replace("/\\", "\\")
path = path.replace("/", "\\")
m = re.match(r"\\([a-zA-Z]):(.*)$", path)
if m:
path = "%s:\\%s" % (m.group(1), m.group(2).lstrip("\\"))
return path
def LocalPathToCanonicalPath(path):
"""Converts path from the local system's convention to the canonical."""
path_components = path.split("/")
result = []
for component in path_components:
# Devices must maintain their \\ so they do not get broken up.
m = re.match(r"\\\\.\\", component)
# The component is not special and can be converted as normal
if not m:
component = component.replace("\\", "/")
result.append(component)
return utils.JoinPath(*result)
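# Round-trip sketch (illustrative path, not taken from the original code;
# assumes utils.JoinPath re-adds the leading slash):
#   CanonicalPathToLocalPath("/C:/Windows/System32")  ->  C:\Windows\System32
#   LocalPathToCanonicalPath(r"C:\Windows\System32")  ->  /C:/Windows/System32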
def WinChmod(filename, acl_list, user=None):
"""Provide chmod-like functionality for windows.
Doco links:
goo.gl/n7YR1
goo.gl/rDv81
goo.gl/hDobb
Args:
filename: target filename for acl
acl_list: list of ntsecuritycon acl strings to be applied with bitwise OR.
e.g. ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"]
user: username string. If not specified we use the user we are running as.
Raises:
AttributeError: if a bad permission is passed
RuntimeError: if filename doesn't exist
"""
if user is None:
user = win32api.GetUserName()
if not os.path.exists(filename):
raise RuntimeError("filename %s does not exist" % filename)
acl_bitmask = 0
for acl in acl_list:
acl_bitmask |= getattr(ntsecuritycon, acl)
dacl = win32security.ACL()
win_user, _, _ = win32security.LookupAccountName("", user)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user)
security_descriptor = win32security.GetFileSecurity(
filename, win32security.DACL_SECURITY_INFORMATION)
# Tell windows to set the acl and mark it as explicitly set
security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl,
DACL_DEFAULT)
win32security.SetFileSecurity(filename,
win32security.DACL_SECURITY_INFORMATION,
security_descriptor)
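# Example call (a sketch only; the target file is hypothetical, the ACL names
# are the ones listed in the docstring above):
#   WinChmod(r"C:\Windows\Temp\installer.log",
#            ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"],
#            user="SYSTEM")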
def WinFindProxies():
"""Tries to find proxies by interrogating all the user's settings.
This function is a modified urillib.getproxies_registry() from the
standard library. We just store the proxy value in the environment
for urllib to find it.
TODO(user): Iterate through all the possible values if one proxy
  fails, in case more than one proxy is specified in different users'
profiles.
Returns:
A list of proxies.
"""
proxies = []
for i in range(0, 100):
try:
sid = _winreg.EnumKey(_winreg.HKEY_USERS, i)
except exceptions.WindowsError:
break
try:
subkey = (sid + "\\Software\\Microsoft\\Windows"
"\\CurrentVersion\\Internet Settings")
internet_settings = _winreg.OpenKey(_winreg.HKEY_USERS, subkey)
proxy_enable = _winreg.QueryValueEx(internet_settings, "ProxyEnable")[0]
if proxy_enable:
        # Returned as Unicode, but it causes problems if not converted to ASCII
proxy_server = str(
_winreg.QueryValueEx(internet_settings, "ProxyServer")[0])
if "=" in proxy_server:
# Per-protocol settings
for p in proxy_server.split(";"):
protocol, address = p.split("=", 1)
# See if address has a type:// prefix
if not re.match("^([^/:]+)://", address):
address = "%s://%s" % (protocol, address)
proxies.append(address)
else:
# Use one setting for all protocols
if proxy_server[:5] == "http:":
proxies.append(proxy_server)
else:
proxies.append("http://%s" % proxy_server)
internet_settings.Close()
except (exceptions.WindowsError, ValueError, TypeError):
continue
logging.debug("Found proxy servers: %s", proxies)
return proxies
def WinGetRawDevice(path):
"""Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs.
"""
path = CanonicalPathToLocalPath(path)
# Try to expand the shortened paths
try:
path = win32file.GetLongPathName(path)
except pywintypes.error:
pass
try:
mount_point = win32file.GetVolumePathName(path)
except pywintypes.error as details:
logging.info("path not found. %s", details)
raise IOError("No mountpoint for path: %s", path)
if not path.startswith(mount_point):
stripped_mp = mount_point.rstrip("\\")
if not path.startswith(stripped_mp):
raise IOError("path %s is not mounted under %s" % (path, mount_point))
corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])
corrected_path = utils.NormalizePath(corrected_path)
volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip("\\")
volume = LocalPathToCanonicalPath(volume)
# The pathspec for the raw volume
result = rdf_paths.PathSpec(
path=volume,
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point=mount_point.rstrip("\\"))
return result, corrected_path
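# Usage sketch (illustrative input; the return shape is as described in the
# docstring above):
#   raw_pathspec, residual = WinGetRawDevice("/C:/Windows/System32")
#   # raw_pathspec.path holds the underlying volume device in canonical form;
#   # residual is the remainder below the mount point, roughly "/Windows/System32".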
class NannyController(object):
"""Controls communication with the nanny."""
_service_key = None
synced = True
def _GetKey(self):
"""Returns the service key."""
if self._service_key is None:
hive = getattr(_winreg, config_lib.CONFIG["Nanny.service_key_hive"])
path = config_lib.CONFIG["Nanny.service_key"]
# Don't use _winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
self._service_key = _winreg.CreateKeyEx(hive, path, 0,
_winreg.KEY_ALL_ACCESS)
return self._service_key
def Heartbeat(self):
"""Writes a heartbeat to the registry."""
try:
_winreg.SetValueEx(self._GetKey(), "Nanny.heartbeat", 0,
_winreg.REG_DWORD, int(time.time()))
except exceptions.WindowsError, e:
logging.debug("Failed to heartbeat nanny at %s: %s",
config_lib.CONFIG["Nanny.service_key"], e)
def WriteTransactionLog(self, grr_message):
"""Write the message into the transaction log.
Args:
grr_message: A GrrMessage instance.
"""
grr_message = grr_message.SerializeToString()
try:
_winreg.SetValueEx(self._GetKey(), "Transaction", 0, _winreg.REG_BINARY,
grr_message)
NannyController.synced = False
except exceptions.WindowsError:
pass
def SyncTransactionLog(self):
if not NannyController.synced:
_winreg.FlushKey(self._GetKey())
NannyController.synced = True
def CleanTransactionLog(self):
"""Wipes the transaction log."""
try:
_winreg.DeleteValue(self._GetKey(), "Transaction")
NannyController.synced = False
except exceptions.WindowsError:
pass
def GetTransactionLog(self):
"""Return a GrrMessage instance from the transaction log or None."""
try:
value, reg_type = _winreg.QueryValueEx(self._GetKey(), "Transaction")
except exceptions.WindowsError:
return
if reg_type != _winreg.REG_BINARY:
return
try:
return rdf_flows.GrrMessage.FromSerializedString(value)
except message.Error:
return
def GetNannyStatus(self):
try:
value, _ = _winreg.QueryValueEx(self._GetKey(), "Nanny.status")
except exceptions.WindowsError:
return None
return value
def GetNannyMessage(self):
try:
value, _ = _winreg.QueryValueEx(self._GetKey(), "Nanny.message")
except exceptions.WindowsError:
return None
return value
def ClearNannyMessage(self):
"""Wipes the nanny message."""
try:
_winreg.DeleteValue(self._GetKey(), "Nanny.message")
NannyController.synced = False
except exceptions.WindowsError:
pass
def StartNanny(self):
"""Not used for the Windows nanny."""
def StopNanny(self):
"""Not used for the Windows nanny."""
class Kernel32(object):
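  """Lazily loads Kernel32.dll once and caches the ctypes handle."""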
_kernel32 = None
def __init__(self):
if not Kernel32._kernel32:
Kernel32._kernel32 = ctypes.windll.LoadLibrary("Kernel32.dll")
@property
def kernel32(self):
return self._kernel32
def KeepAlive():
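  """Resets the system idle timer so the machine is kept awake."""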
es_system_required = 0x00000001
kernel32 = Kernel32().kernel32
kernel32.SetThreadExecutionState(ctypes.c_int(es_system_required))
def RtlGetVersion(os_version_info_struct):
"""Wraps the lowlevel RtlGetVersion routine.
Args:
os_version_info_struct: instance of either a RTL_OSVERSIONINFOW structure
or a RTL_OSVERSIONINFOEXW structure,
ctypes.Structure-wrapped, with the
dwOSVersionInfoSize field preset to
ctypes.sizeof(self).
Raises:
    WindowsError: if the underlying routine fails.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff561910(v=vs.85).aspx .
"""
rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version_info_struct))
if rc != 0:
raise exceptions.WindowsError("Getting Windows version failed.")
class RtlOSVersionInfoExw(ctypes.Structure):
"""Wraps the lowlevel RTL_OSVERSIONINFOEXW struct.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff563620(v=vs.85).aspx .
"""
_fields_ = [("dwOSVersionInfoSize", ctypes.c_ulong),
("dwMajorVersion", ctypes.c_ulong),
("dwMinorVersion", ctypes.c_ulong),
("dwBuildNumber", ctypes.c_ulong),
("dwPlatformId", ctypes.c_ulong),
("szCSDVersion", ctypes.c_wchar * 128),
("wServicePackMajor", ctypes.c_ushort),
("wServicePackMinor", ctypes.c_ushort),
("wSuiteMask", ctypes.c_ushort), ("wProductType", ctypes.c_byte),
("wReserved", ctypes.c_byte)]
def __init__(self, **kwargs):
kwargs["dwOSVersionInfoSize"] = ctypes.sizeof(self)
super(RtlOSVersionInfoExw, self).__init__(**kwargs)
def KernelVersion():
"""Gets the kernel version as string, eg. "5.1.2600".
Returns:
The kernel version, or "unknown" in the case of failure.
"""
rtl_osversioninfoexw = RtlOSVersionInfoExw()
try:
RtlGetVersion(rtl_osversioninfoexw)
except exceptions.WindowsError:
return "unknown"
return "%d.%d.%d" % (rtl_osversioninfoexw.dwMajorVersion,
rtl_osversioninfoexw.dwMinorVersion,
rtl_osversioninfoexw.dwBuildNumber)
|
|
"""
Tests for the BNMF Variational Bayes algorithm, with optimised matrix operation updates.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
import numpy, math, pytest, itertools, random
from BNMTF.code.models.bnmf_vb_optimised import bnmf_vb_optimised
""" Test constructor """
def test_init():
# Test getting an exception when R and M are different sizes, and when R is not a 2D array.
R1 = numpy.ones(3)
M = numpy.ones((2,3))
I,J,K = 5,3,1
lambdaU = numpy.ones((I,K))
lambdaV = numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R1,M,K,priors)
assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 1-dimensional."
R2 = numpy.ones((4,3,2))
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R2,M,K,priors)
assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 3-dimensional."
R3 = numpy.ones((3,2))
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R3,M,K,priors)
assert str(error.value) == "Input matrix R is not of the same size as the indicator matrix M: (3, 2) and (2, 3) respectively."
# Similarly for lambdaU, lambdaV
R4 = numpy.ones((2,3))
lambdaU = numpy.ones((2+1,1))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R4,M,K,priors)
assert str(error.value) == "Prior matrix lambdaU has the wrong shape: (3, 1) instead of (2, 1)."
lambdaU = numpy.ones((2,1))
lambdaV = numpy.ones((3+1,1))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R4,M,K,priors)
assert str(error.value) == "Prior matrix lambdaV has the wrong shape: (4, 1) instead of (3, 1)."
# Test getting an exception if a row or column is entirely unknown
lambdaU = numpy.ones((2,1))
lambdaV = numpy.ones((3,1))
M1 = [[1,1,1],[0,0,0]]
M2 = [[1,1,0],[1,0,0]]
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R4,M1,K,priors)
assert str(error.value) == "Fully unobserved row in R, row 1."
with pytest.raises(AssertionError) as error:
bnmf_vb_optimised(R4,M2,K,priors)
assert str(error.value) == "Fully unobserved column in R, column 2."
# Finally, a successful case
I,J,K = 3,2,2
R5 = 2*numpy.ones((I,J))
lambdaU = numpy.ones((I,K))
lambdaV = numpy.ones((J,K))
M = numpy.ones((I,J))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
BNMF = bnmf_vb_optimised(R5,M,K,priors)
assert numpy.array_equal(BNMF.R,R5)
assert numpy.array_equal(BNMF.M,M)
assert BNMF.I == I
assert BNMF.J == J
assert BNMF.K == K
assert BNMF.size_Omega == I*J
assert BNMF.alpha == alpha
assert BNMF.beta == beta
assert numpy.array_equal(BNMF.lambdaU,lambdaU)
assert numpy.array_equal(BNMF.lambdaV,lambdaV)
    # And when lambdaU and lambdaV are scalars rather than matrices
I,J,K = 3,2,2
R5 = 2*numpy.ones((I,J))
lambdaU = 3.
lambdaV = 4.
M = numpy.ones((I,J))
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
BNMF = bnmf_vb_optimised(R5,M,K,priors)
assert numpy.array_equal(BNMF.R,R5)
assert numpy.array_equal(BNMF.M,M)
assert BNMF.I == I
assert BNMF.J == J
assert BNMF.K == K
assert BNMF.size_Omega == I*J
assert BNMF.alpha == alpha
assert BNMF.beta == beta
assert numpy.array_equal(BNMF.lambdaU,lambdaU*numpy.ones((I,K)))
assert numpy.array_equal(BNMF.lambdaV,lambdaV*numpy.ones((J,K)))
""" Test initialing parameters """
def test_initialise():
I,J,K = 5,3,2
R = numpy.ones((I,J))
M = numpy.ones((I,J))
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
# Initialisation with expectation
init = 'exp'
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.initialise(init)
assert BNMF.alpha_s == alpha + 15./2.
#assert BNMF.alpha_s == alpha
assert BNMF.beta_s == beta + BNMF.exp_square_diff()/2.
#assert BNMF.beta_s == beta
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMF.tauU[i,k] == 1.
assert BNMF.muU[i,k] == 1./lambdaU[i,k]
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
assert BNMF.tauV[j,k] == 1.
assert BNMF.muV[j,k] == 1./lambdaV[j,k]
assert BNMF.exptau == (alpha + 15./2.) / (beta + BNMF.exp_square_diff()/2.)
#assert BNMF.exptau == alpha / beta
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert abs(BNMF.expU[i,k] - (0.5 + 0.352065 / (1-0.3085))) < 0.0001
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
assert abs(BNMF.expV[j,k] - (1./3. + 0.377383 / (1-0.3694))) < 0.0001
# Initialise tauU, tauV using predefined values
tauUV = {
'tauU' : 2*numpy.ones((I,K)),
'tauV' : 3*numpy.ones((J,K))
}
init = 'exp'
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.initialise(init,tauUV)
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMF.tauU[i,k] == 2.
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
assert BNMF.tauV[j,k] == 3.
""" Test computing the ELBO. """
def test_elbo():
I,J,K = 5,3,2
R = numpy.ones((I,J))
M = numpy.ones((I,J))
M[0,0], M[2,2], M[3,1] = 0, 0, 0 # size Omega = 12
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
expU = 5*numpy.ones((I,K))
expV = 6*numpy.ones((J,K))
varU = 11*numpy.ones((I,K))
varV = 12*numpy.ones((J,K))
exptau = 8.
explogtau = 9.
muU = 14*numpy.ones((I,K))
muV = 15*numpy.ones((J,K))
tauU = numpy.ones((I,K))/100.
tauV = numpy.ones((J,K))/101.
alpha_s = 20.
beta_s = 21.
# expU * expV = [[60]]
# (R - expU*expV)^2 = 12*59^2 = 41772
# Var[U*V] = 12*K*((11+5^2)*(12+6^2)-5^2*6^2) = 12*2*828 = 19872
    # -muU*sqrt(tauU) = -14*math.sqrt(1./100.) = -1.4
    # -muV*sqrt(tauV) = -15*math.sqrt(1./101.) = -1.4925557853149838
# cdf(-1.4) = 0.080756659233771066
# cdf(-1.4925557853149838) = 0.067776752211548219
ELBO = 12./2.*(explogtau - math.log(2*math.pi)) - 8./2.*(41772+19872) \
+ 5*2*(math.log(2.) - 2.*5.) + 3*2*(math.log(3.) - 3.*6.) \
+ 3.*numpy.log(1.) - numpy.log(math.gamma(3.)) + 2.*9. - 1.*8. \
- 20.*numpy.log(21.) + numpy.log(math.gamma(20.)) - 19.*9. + 21.*8. \
- 0.5*5*2*math.log(1./100.) + 0.5*5*2*math.log(2*math.pi) + 5*2*math.log(1.-0.080756659233771066) \
+ 0.5*5*2*1./100.*(11.+81.) \
- 0.5*3*2*math.log(1./101.) + 0.5*3*2*math.log(2*math.pi) + 3*2*math.log(1.-0.067776752211548219) \
+ 0.5*3*2*1./101.*(12.+81.)
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.expU = expU
BNMF.expV = expV
BNMF.varU = varU
BNMF.varV = varV
BNMF.exptau = exptau
BNMF.explogtau = explogtau
BNMF.muU = muU
BNMF.muV = muV
BNMF.tauU = tauU
BNMF.tauV = tauV
BNMF.alpha_s = alpha_s
BNMF.beta_s = beta_s
assert BNMF.elbo() == ELBO
""" Test updating parameters U, V, tau """
I,J,K = 5,3,2
R = numpy.ones((I,J))
M = numpy.ones((I,J))
M[0,0], M[2,2], M[3,1] = 0, 0, 0
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
def test_exp_square_diff():
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.expU = 1./lambdaU #[[1./2.]]
BNMF.expV = 1./lambdaV #[[1./3.]]
BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
# expU * expV.T = [[1./3.]]. (varU+expU^2)=2.25, (varV+expV^2)=3.+1./9.
exp_square_diff = 172.66666666666666 #12.*(4./9.) + 12.*(2*(2.25*(3.+1./9.)-0.25/9.))
assert BNMF.exp_square_diff() == exp_square_diff
def test_update_tau():
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.expU = 1./lambdaU #[[1./2.]]
BNMF.expV = 1./lambdaV #[[1./3.]]
BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
BNMF.update_tau()
assert BNMF.alpha_s == alpha + 12./2.
assert BNMF.beta_s == beta + 172.66666666666666/2.
def test_update_U():
for k in range(0,K):
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.muU = numpy.zeros((I,K))
BNMF.tauU = numpy.zeros((I,K))
BNMF.expU = 1./lambdaU #[[1./2.]]
BNMF.expV = 1./lambdaV #[[1./3.]]
BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
BNMF.exptau = 3.
BNMF.update_U(k)
for i in range(0,I):
assert BNMF.tauU[i,k] == 3. * (M[i] * ( BNMF.expV[:,k]*BNMF.expV[:,k] + BNMF.varV[:,k] )).sum()
assert BNMF.muU[i,k] == (1./(3. * (M[i] * ( BNMF.expV[:,k]*BNMF.expV[:,k] + BNMF.varV[:,k] )).sum())) * \
( -2. + BNMF.exptau * (M[i]*( (BNMF.R[i] - numpy.dot(BNMF.expU[i],BNMF.expV.T) + BNMF.expU[i,k]*BNMF.expV[:,k])*BNMF.expV[:,k] )).sum() )
def test_update_V():
for k in range(0,K):
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.muV = numpy.zeros((J,K))
BNMF.tauV = numpy.zeros((J,K))
BNMF.expU = 1./lambdaU #[[1./2.]]
BNMF.expV = 1./lambdaV #[[1./3.]]
BNMF.varU = numpy.ones((I,K))*2 #[[2.]]
BNMF.varV = numpy.ones((J,K))*3 #[[3.]]
BNMF.exptau = 3.
BNMF.update_V(k)
for j in range(0,J):
assert BNMF.tauV[j,k] == 3. * (M[:,j] * ( BNMF.expU[:,k]*BNMF.expU[:,k] + BNMF.varU[:,k] )).sum()
assert BNMF.muV[j,k] == (1./(3. * (M[:,j] * ( BNMF.expU[:,k]*BNMF.expU[:,k] + BNMF.varU[:,k] )).sum())) * \
( -3. + BNMF.exptau * (M[:,j]*( (BNMF.R[:,j] - numpy.dot(BNMF.expU,BNMF.expV[j]) + BNMF.expU[:,k]*BNMF.expV[j,k])*BNMF.expU[:,k] )).sum() )
""" Test computing expectation, variance U, V, tau """
def test_update_exp_U():
for k in range(0,K):
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.initialise()
BNMF.tauU = 4*numpy.ones((I,K)) # muU = [[0.5]], tauU = [[4.]]
BNMF.update_exp_U(k) #-mu*sqrt(tau) = -0.5*2 = -1. lambda(1) = 0.241971 / (1-0.1587) = 0.2876155949126352. gamma = 0.37033832534958433
for i in range(0,I):
assert abs(BNMF.expU[i,k] - (0.5 + 1./2. * 0.2876155949126352)) < 0.00001
assert abs(BNMF.varU[i,k] - 1./4.*(1.-0.37033832534958433)) < 0.00001
def test_update_exp_V():
for k in range(0,K):
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.initialise()
BNMF.tauV = 4*numpy.ones((J,K)) # muV = [[1./3.]], tauV = [[4.]]
        BNMF.update_exp_V(k) #-mu*sqrt(tau) = -2./3., lambda(..) = 0.319448 / (1-0.2525) = 0.4273551839464883, gamma = 0.4675359092102624
for j in range(0,J):
assert abs(BNMF.expV[j,k] - (1./3. + 1./2. * 0.4273551839464883)) < 0.00001
assert abs(BNMF.varV[j,k] - 1./4.*(1. - 0.4675359092102624)) < 0.00001
def test_update_exp_tau():
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.initialise()
assert abs(BNMF.exptau - (3+12./2.)/(1+35.4113198623/2.)) < 0.000000000001
#assert abs(BNMF.exptau - 3./1.) < 0.000000000001
assert abs(BNMF.explogtau - (2.1406414779556 - math.log(1+35.4113198623/2.))) < 0.000000000001
#assert abs(BNMF.explogtau - (0.922784335098467 - math.log(1))) < 0.000000000001
""" Test two iterations of run(), and that all values have changed. """
def test_run():
I,J,K = 10,5,2
R = numpy.ones((I,J))
M = numpy.ones((I,J))
M[0,0], M[2,2], M[3,1] = 0, 0, 0
R[0,1], R[0,2] = 2., 3.
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
iterations = 2
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.initialise()
BNMF.run(iterations)
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
assert BNMF.muU[i,k] != 1./lambdaU[i,k]
assert BNMF.tauU[i,k] != 1.
assert BNMF.expU[i,k] != numpy.inf and not math.isnan(BNMF.expU[i,k])
assert BNMF.tauU[i,k] != numpy.inf and not math.isnan(BNMF.tauU[i,k])
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
assert BNMF.muV[j,k] != 1./lambdaV[j,k]
assert BNMF.tauV[j,k] != 1.
assert BNMF.expV[j,k] != numpy.inf and not math.isnan(BNMF.expV[j,k])
assert BNMF.tauV[j,k] != numpy.inf and not math.isnan(BNMF.tauV[j,k])
assert BNMF.alpha_s != alpha
assert BNMF.beta_s != beta
assert BNMF.exptau != numpy.inf and not math.isnan(BNMF.exptau)
assert BNMF.explogtau != numpy.inf and not math.isnan(BNMF.explogtau)
""" Test computing the performance of the predictions using the expectations """
def test_predict():
(I,J,K) = (5,3,2)
R = numpy.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]],dtype=float)
M = numpy.ones((I,J))
K = 3
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
expU = numpy.array([[125.,126.],[126.,126.],[126.,126.],[126.,126.],[126.,126.]])
expV = numpy.array([[84.,84.],[84.,84.],[84.,84.]])
M_test = numpy.array([[0,0,1],[0,1,0],[0,0,0],[1,1,0],[0,0,0]]) #R->3,5,10,11, P_pred->21084,21168,21168,21168
MSE = (444408561. + 447872569. + 447660964. + 447618649) / 4.
R2 = 1. - (444408561. + 447872569. + 447660964. + 447618649) / (4.25**2+2.25**2+2.75**2+3.75**2) #mean=7.25
Rp = 357. / ( math.sqrt(44.75) * math.sqrt(5292.) ) #mean=7.25,var=44.75, mean_pred=21147,var_pred=5292, corr=(-4.25*-63 + -2.25*21 + 2.75*21 + 3.75*21)
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.expU = expU
BNMF.expV = expV
performances = BNMF.predict(M_test)
assert performances['MSE'] == MSE
assert performances['R^2'] == R2
assert performances['Rp'] == Rp
""" Test the evaluation measures MSE, R^2, Rp """
def test_compute_statistics():
R = numpy.array([[1,2],[3,4]],dtype=float)
M = numpy.array([[1,1],[0,1]])
I, J, K = 2, 2, 3
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
BNMF = bnmf_vb_optimised(R,M,K,priors)
R_pred = numpy.array([[500,550],[1220,1342]],dtype=float)
M_pred = numpy.array([[0,0],[1,1]])
MSE_pred = (1217**2 + 1338**2) / 2.0
R2_pred = 1. - (1217**2+1338**2)/(0.5**2+0.5**2) #mean=3.5
Rp_pred = 61. / ( math.sqrt(.5) * math.sqrt(7442.) ) #mean=3.5,var=0.5,mean_pred=1281,var_pred=7442,cov=61
assert MSE_pred == BNMF.compute_MSE(M_pred,R,R_pred)
assert R2_pred == BNMF.compute_R2(M_pred,R,R_pred)
assert Rp_pred == BNMF.compute_Rp(M_pred,R,R_pred)
""" Test the model quality measures. """
def test_log_likelihood():
R = numpy.array([[1,2],[3,4]],dtype=float)
M = numpy.array([[1,1],[0,1]])
I, J, K = 2, 2, 3
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
BNMF = bnmf_vb_optimised(R,M,K,priors)
BNMF.expU = numpy.ones((I,K))
BNMF.expV = 2*numpy.ones((J,K))
BNMF.explogtau = 5.
BNMF.exptau = 3.
# expU*expV.T = [[6.]]
log_likelihood = 3./2.*(5.-math.log(2*math.pi)) - 3./2. * (5**2 + 4**2 + 2**2)
AIC = -2*log_likelihood + 2*(2*3+2*3)
BIC = -2*log_likelihood + (2*3+2*3)*math.log(3)
MSE = (5**2+4**2+2**2)/3.
assert log_likelihood == BNMF.quality('loglikelihood')
assert AIC == BNMF.quality('AIC')
assert BIC == BNMF.quality('BIC')
assert MSE == BNMF.quality('MSE')
with pytest.raises(AssertionError) as error:
BNMF.quality('FAIL')
assert str(error.value) == "Unrecognised metric for model quality: FAIL."
|
|
#!/usr/bin/python
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# http://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed Blockly language blocks.
# javascript_compressed.js: The compressed JavaScript generator.
# python_compressed.js: The compressed Python generator.
# dart_compressed.js: The compressed Dart generator.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
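#
# Typical invocation (a sketch; it assumes this script is run from the Blockly
# root directory with the Closure library checked out as
# ../closure-library-read-only, as required by the calcdeps import in __main__):
#   python build.py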
import errno, glob, httplib, json, os, re, subprocess, sys, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date
del sys.path[-1]
return module
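# Example (the path is hypothetical; __main__ below uses this helper to load
# Closure's calcdeps.py in the same way):
#   calcdeps = import_path('../closure-library-read-only/closure/bin/calcdeps.py')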
HEADER = ('// Do not edit this file; automatically generated by build.py.\n'
'"use strict";\n')
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
target_filename = 'blockly_uncompressed.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
window.BLOCKLY_DIR = (function() {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed\.js$');
for (var x = 0, script; script = scripts[x]; x++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
return '';
})();
window.BLOCKLY_BOOT = function() {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'http://code.google.com/p/blockly/wiki/Closure\\n');
}
// Build map of all dependencies (used and unused).
var dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
m = re.search('[\\/]([^\\/]+)[\\/]realtime[\\/]realtime.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]realtime[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write('goog.require(\'%s\');\n' % provide)
f.write("""
delete window.BLOCKLY_DIR;
delete window.BLOCKLY_BOOT;
};
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script type="text/javascript">var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script type="text/javascript" src="' + window.BLOCKLY_DIR +
'/../closure-library-read-only/closure/goog/base.js"></script>');
document.write('<script type="text/javascript">window.BLOCKLY_BOOT()</script>');
""")
f.close()
print('SUCCESS: ' + target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths):
threading.Thread.__init__(self)
self.search_paths = search_paths
def run(self):
self.gen_core()
self.gen_blocks()
self.gen_generator('javascript')
self.gen_generator('python')
self.gen_generator('dart')
def gen_core(self):
target_filename = 'blockly_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('use_closure_library', 'true'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(self.search_paths,
[os.path.join('core', 'blockly.js')])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, '')
def gen_blocks(self):
target_filename = 'blocks_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
# Add Blockly.Blocks to be compatible with the compiler.
params.append(('js_code', "goog.provide('Blockly.Blocks');"))
filenames = glob.glob(os.path.join('blocks', '*.js'))
for filename in filenames:
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
# Remove Blockly.Blocks to be compatible with Blockly.
remove = "var Blockly={Blocks:{}};"
self.do_compile(params, target_filename, filenames, remove)
def gen_generator(self, language):
target_filename = language + '_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
# Add Blockly.Generator to be compatible with the compiler.
params.append(('js_code', "goog.provide('Blockly.Generator');"))
filenames = glob.glob(
os.path.join('generators', language, '*.js'))
filenames.insert(0, os.path.join('generators', language + '.js'))
for filename in filenames:
f = open(filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
filenames.insert(0, '[goog.provide]')
# Remove Blockly.Generator to be compatible with Blockly.
remove = "var Blockly={Generator:{}};"
self.do_compile(params, target_filename, filenames, remove)
def do_compile(self, params, target_filename, filenames, remove):
# Send the request to Google.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith('Input_'):
return '???'
n = int(name[6:])
return filenames[n]
if json_data.has_key('serverErrors'):
errors = json_data['serverErrors']
for error in errors:
        print('SERVER ERROR: %s' % target_filename)
        print(error['error'])
elif json_data.has_key('errors'):
errors = json_data['errors']
for error in errors:
print('FATAL ERROR')
print(error['error'])
print('%s at line %d:' % (
file_lookup(error['file']), error['lineno']))
print(error['line'])
print((' ' * error['charno']) + '^')
sys.exit(1)
else:
if json_data.has_key('warnings'):
warnings = json_data['warnings']
for warning in warnings:
print('WARNING')
print(warning['warning'])
print('%s at line %d:' % (
file_lookup(warning['file']), warning['lineno']))
print(warning['line'])
print((' ' * warning['charno']) + '^')
print()
if not json_data.has_key('compiledCode'):
print('FATAL ERROR: Compiler did not return compiledCode.')
sys.exit(1)
code = HEADER + '\n' + json_data['compiledCode']
code = code.replace(remove, '')
stats = json_data['statistics']
original_b = stats['originalSize']
compressed_b = stats['compressedSize']
if original_b > 0 and compressed_b > 0:
f = open(target_filename, 'w')
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print('SUCCESS: ' + target_filename)
print('Size changed from %d KB to %d KB (%d%%).' % (
original_kb, compressed_kb, ratio))
else:
        print('UNKNOWN ERROR')
class Gen_langfiles(threading.Thread):
"""Generate JavaScript file for each natural language supported.
Runs in a separate thread.
"""
def __init__(self):
threading.Thread.__init__(self)
def _rebuild(self, srcs, dests):
# Determine whether any of the files in srcs is newer than any in dests.
try:
return (max(os.path.getmtime(src) for src in srcs) >
min(os.path.getmtime(dest) for dest in dests))
except OSError, e:
# Was a file not found?
if e.errno == errno.ENOENT:
# If it was a source file, we can't proceed.
if e.filename in srcs:
print('Source file missing: ' + e.filename)
sys.exit(1)
else:
# If a destination file was missing, rebuild.
return True
else:
        print('Error checking file creation times: %s' % e)
def run(self):
# The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
if self._rebuild([os.path.join('msg', 'messages.js')],
[os.path.join('msg', 'json', f) for f in
['en.json', 'qqq.json', 'synonyms.json']]):
try:
subprocess.check_call([
os.path.join('i18n', 'js_to_json.py'),
'--input_file', 'msg/messages.js',
'--output_dir', 'msg/json/',
'--quiet'])
except (subprocess.CalledProcessError, OSError), e:
# Documentation for subprocess.check_call says that CalledProcessError
# will be raised on failure, but I found that OSError is also possible.
print('Error running i18n/js_to_json.py: ', e)
sys.exit(1)
# Checking whether it is necessary to rebuild the js files would be a lot of
# work since we would have to compare each <lang>.json file with each
# <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
try:
# Use create_messages.py to create .js files from .json files.
cmd = [
os.path.join('i18n', 'create_messages.py'),
'--source_lang_file', os.path.join('msg', 'json', 'en.json'),
'--source_synonym_file', os.path.join('msg', 'json', 'synonyms.json'),
'--key_file', os.path.join('msg', 'json', 'keys.json'),
'--output_dir', os.path.join('msg', 'js'),
'--quiet']
json_files = glob.glob(os.path.join('msg', 'json', '*.json'))
json_files = [file for file in json_files if not
(file.endswith(('keys.json', 'synonyms.json', 'qqq.json')))]
cmd.extend(json_files)
subprocess.check_call(cmd)
except (subprocess.CalledProcessError, OSError), e:
print('Error running i18n/create_messages.py: ', e)
sys.exit(1)
# Output list of .js files created.
for f in json_files:
# This assumes the path to the current directory does not contain 'json'.
f = f.replace('json', 'js')
if os.path.isfile(f):
print('SUCCESS: ' + f)
else:
print('FAILED to create ' + f)
if __name__ == '__main__':
try:
calcdeps = import_path(os.path.join(os.path.pardir,
'closure-library-read-only', 'closure', 'bin', 'calcdeps.py'))
except ImportError:
print("""Error: Closure not found. Read this:
http://code.google.com/p/blockly/wiki/Closure""")
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
['core', 'realtime', os.path.join(os.path.pardir, 'closure-library-read-only')])
# Run both tasks in parallel threads.
# Uncompressed is limited by processor speed.
# Compressed is limited by network and server speed.
Gen_uncompressed(search_paths).start()
Gen_compressed(search_paths).start()
# This is run locally in a separate thread.
Gen_langfiles().start()
|
|
from .. import BaseProvider, ElementsType
# Data source
#
# The country codes in this provider come from the following source:
# List of country calling codes
# https://en.wikipedia.org/wiki/List_of_country_calling_codes
#
# Data was collected from the alphabetical listing by country or region
localized = True
class Provider(BaseProvider):
country_calling_codes: ElementsType = (
"+93",
"+358 18",
"+355",
"+213",
"+1 684",
"+376",
"+244",
"+1 264",
"+1 268",
"+54",
"+374",
"+297",
"+247",
"+61",
"+672 1",
"+672",
"+43",
"+994",
"+1 242",
"+973",
"+880",
"+1 246",
"+1 268",
"+375",
"+32",
"+501",
"+229",
"+1 441",
"+975",
"+591",
"+599 7",
"+387",
"+267",
"+55",
"+246",
"+1 284",
"+673",
"+359",
"+226",
"+257",
"+855",
"+237",
"+1",
"+238",
"+599 3",
"+599 4",
"+599 7",
"+1 345",
"+236",
"+235",
"+64",
"+56",
"+86",
"+61 89164",
"+61 89162",
"+57",
"+269",
"+242",
"+243",
"+682",
"+506",
"+385",
"+53",
"+599 9",
"+357",
"+420",
"+45",
"+246",
"+253",
"+1 767",
"+1 809",
"+1 829",
"+1 849",
"+670",
"+56",
"+593",
"+20",
"+503",
"+881 2",
"+881 3",
"+882 13",
"+240",
"+291",
"+372",
"+268",
"+251",
"+500",
"+298",
"+679",
"+358",
"+33",
"+596",
"+594",
"+689",
"+241",
"+220",
"+995",
"+49",
"+233",
"+350",
"+881",
"+881 8",
"+881 9",
"+30",
"+299",
"+1 473",
"+590",
"+1 671",
"+502",
"+44 1481",
"+44 7781",
"+44 7839",
"+44 7911",
"+224",
"+245",
"+592",
"+509",
"+504",
"+852",
"+36",
"+354",
"+881 0",
"+881 1",
"+91",
"+62",
"+870",
"+800",
"+882",
"+883",
"+979",
"+808",
"+98",
"+964",
"+353",
"+881 6",
"+881 7",
"+44 1624",
"+44 7524",
"+44 7624",
"+44 7924",
"+972",
"+39",
"+225",
"+1 876",
"+47 79",
"+81",
"+44 1534",
"+962",
"+7 6",
"+7 7",
"+254",
"+686",
"+850",
"+82",
"+383",
"+965",
"+996",
"+856",
"+371",
"+961",
"+266",
"+231",
"+218",
"+423",
"+370",
"+352",
"+853",
"+261",
"+265",
"+60",
"+960",
"+223",
"+356",
"+692",
"+596",
"+222",
"+230",
"+262 269",
"+262 639",
"+52",
"+691",
"+1 808",
"+373",
"+377",
"+976",
"+382",
"+1 664",
"+212",
"+258",
"+95",
"+374 47",
"+374 97",
"+264",
"+674",
"+977",
"+31",
"+1 869",
"+687",
"+64",
"+505",
"+227",
"+234",
"+683",
"+672 3",
"+389",
"+90 392",
"+44 28",
"+1 670",
"+47",
"+968",
"+92",
"+680",
"+970",
"+507",
"+675",
"+595",
"+51",
"+63",
"+64",
"+48",
"+351",
"+1 787",
"+1 939",
"+974",
"+262",
"+40",
"+7",
"+250",
"+599 4",
"+590",
"+290",
"+1 869",
"+1 758",
"+590",
"+508",
"+1 784",
"+685",
"+378",
"+239",
"+966",
"+221",
"+381",
"+248",
"+232",
"+65",
"+599 3",
"+1 721",
"+421",
"+386",
"+677",
"+252",
"+27",
"+500",
"+995 34",
"+211",
"+34",
"+94",
"+249",
"+597",
"+47 79",
"+46",
"+41",
"+963",
"+886",
"+992",
"+255",
"+888",
"+66",
"+882 16",
"+228",
"+690",
"+676",
"+373 2",
"+373 5",
"+1 868",
"+290 8",
"+216",
"+90",
"+993",
"+1 649",
"+688",
"+256",
"+380",
"+971",
"+44",
"+1",
"+878",
"+598",
"+1 340",
"+998",
"+678",
"+39 06 698",
"+379",
"+58",
"+84",
"+1 808",
"+681",
"+967",
"+260",
"+255 24",
"+263",
)
formats: ElementsType = ("###-###-###",)
msisdn_formats: ElementsType = ("#############",)
def phone_number(self) -> str:
return self.numerify(self.random_element(self.formats))
def country_calling_code(self) -> str:
return self.random_element(self.country_calling_codes)
def msisdn(self) -> str:
"""https://en.wikipedia.org/wiki/MSISDN"""
return self.numerify(self.random_element(self.msisdn_formats))
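# Usage sketch (assumes this provider is registered on a Faker instance, e.g.
# fake = faker.Faker(); the literal values are only examples of possible output):
#   fake.phone_number()          # e.g. "123-456-789" pattern with random digits
#   fake.country_calling_code()  # e.g. "+358 18", drawn from country_calling_codes
#   fake.msisdn()                # a random 13-digit string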
|
|
# test_ptypes.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,W0108
# PyPI imports
import numpy as np
from pmisc import AE
# Intra-package imports
import peng.ptypes
###
# Global variables
###
emsg = lambda msg: (
"[START CONTRACT MSG: {0}]Argument `*[argument_name]*` "
"is not valid[STOP CONTRACT MSG]".format(msg)
)
###
# Helper functions
###
def check_contract(obj, name, value):
AE(obj, ValueError, emsg(name), obj=value)
###
# Test functions
###
def test_engineering_notation_number():
"""Test EngineeringNotationNumber pseudo-type."""
obj = peng.ptypes.engineering_notation_number
items = ["3.12b", "f", "a1b", " + 123.45f "]
for item in items:
check_contract(obj, "engineering_notation_number", item)
items = [" +123.45f ", " -0 "]
for item in items:
obj(item)
def test_engineering_notation_suffix():
"""Test EngineeringNotationSuffix pseudo-type."""
obj = peng.ptypes.engineering_notation_suffix
check_contract(obj, "engineering_notation_suffix", "b")
obj("u")
def test_increasing_real_numpy_vector_contract():
"""Test for IncreasingRealNumpyVector pseudo-type."""
obj = peng.ptypes.increasing_real_numpy_vector
items = [
"a",
[1, 2, 3],
np.array([]),
np.array([[1, 2, 3], [4, 5, 6]]),
np.array(["a", "b"]),
np.array([1, 0, -3]),
np.array([10.0, 8.0, 2.0]),
]
for item in items:
check_contract(obj, "increasing_real_numpy_vector", item)
items = [np.array([1, 2, 3]), np.array([10.0, 12.1, 12.5]), np.array([10.0])]
for item in items:
obj(item)
def test_number_numpy_vector_contract():
"""Test for NumberNumpyVector pseudo-type."""
exmsg = (
"[START CONTRACT MSG: number_numpy_vector]Argument "
"`*[argument_name]*` is not valid[STOP CONTRACT MSG]"
)
items = [
"a",
[1, 2, 3],
np.array([]),
np.array([[1, 2, 3], [4, 5, 6]]),
np.array(["a", "b"]),
]
for item in items:
AE(peng.ptypes.number_numpy_vector, ValueError, exmsg, item)
items = [
np.array([1, 2, 3]),
np.array([10.0, 8.0, 2.0]),
np.array([10.0]),
np.array([complex(1, 1), complex(2, 2)]),
]
for item in items:
peng.ptypes.number_numpy_vector(item)
def test_real_numpy_vector_contract():
"""Test for RealNumpyVector pseudo-type."""
obj = peng.ptypes.real_numpy_vector
items = [
"a",
[1, 2, 3],
np.array([]),
np.array([[1, 2, 3], [4, 5, 6]]),
np.array(["a", "b"]),
]
for item in items:
check_contract(obj, "real_numpy_vector", item)
items = [np.array([1, 2, 3]), np.array([10.0, 8.0, 2.0]), np.array([10.0])]
for item in items:
obj(item)
def test_touchstone_data_contract():
"""Test for TouchstoneData pseudo-type."""
obj = peng.ptypes.touchstone_data
exmsg = (
"[START CONTRACT MSG: touchstone_data]Argument "
"`*[argument_name]*` is not valid"
"[STOP CONTRACT MSG]"
)
freq = np.array([1, 2, 3])
data = np.resize(np.arange(1, 1 + (3 * (2 ** 2))), (3, 2, 2))
wdata1 = np.array([1, 2, -3, 4])
wdata2 = np.array([1, "a", 3])
wfreq1 = np.array([1, 2, "a"])
wfreq2 = np.array([1, 2, 3, 4])
items = [
45,
{},
{"hello": 5},
{"points": 1, "freq": 2},
{"points": 1, "freq": 2, "pars": 3, "hello": 4},
{"points": "a", "freq": freq, "pars": data},
{"points": 3, "freq": "a", "pars": data},
{"points": 3, "freq": freq, "pars": "a"},
{"points": 3, "freq": data, "pars": data},
{"points": 3, "freq": freq, "pars": wdata1},
{"points": 3, "freq": freq, "pars": wdata2},
{"points": 3, "freq": wfreq1, "pars": data},
{"points": 3, "freq": wfreq2, "pars": data},
]
for item in items:
AE(obj, ValueError, exmsg, item)
obj({"points": 3, "freq": freq, "pars": data})
def test_touchstone_noise_data_contract():
"""Test for TouchstoneNoiseData pseudo-type."""
obj = peng.ptypes.touchstone_noise_data
exmsg = (
"[START CONTRACT MSG: touchstone_noise_data]Argument "
"`*[argument_name]*` is not valid"
"[STOP CONTRACT MSG]"
)
freq = np.array([1, 2, 3])
nf = np.array([4, 5, 6])
rc = np.array([1 + 2j, 3 + 4j, 5 + 6j])
res = np.array([50.0, 25.0, 75.0])
wres = np.array([1, 2, -3])
items = [
45,
{"hello": 5},
{"points": 1, "freq": 2, "nf": 3, "rc": 4},
{"points": 1, "freq": 2, "nf": 3, "rc": 4, "res": 5, "hello": 2},
{"points": 1, "freq": 2, "nf": 3, "rc": 4, "res": 5, "hello": 2},
{"points": "a", "freq": freq, "nf": nf, "rc": rc, "res": res},
{"points": 3, "freq": "a", "nf": nf, "rc": rc, "res": res},
{"points": 3, "freq": freq, "nf": "a", "rc": rc, "res": res},
{"points": 3, "freq": freq, "nf": nf, "rc": "a", "res": res},
{"points": 3, "freq": freq, "nf": nf, "rc": rc, "res": "a"},
{"points": 3, "freq": res, "nf": nf, "rc": rc, "res": res},
{"points": 3, "freq": freq, "nf": nf, "rc": rc, "res": wres},
{"points": 4, "freq": freq, "nf": nf, "rc": rc, "res": res},
]
for item in items:
AE(obj, ValueError, exmsg, item)
obj({})
obj({"points": 3, "freq": freq, "nf": nf, "rc": rc, "res": res})
def test_touchstone_options_contract():
"""Test for TouchstoneOptions pseudo-type."""
obj = peng.ptypes.touchstone_options
exmsg = (
"[START CONTRACT MSG: touchstone_options]Argument "
"`*[argument_name]*` is not valid"
"[STOP CONTRACT MSG]"
)
items = [
45,
{},
{"hello": 5},
{"units": 1, "ptype": 2, "pformat": 3},
{"units": 1, "ptype": 2, "pformat": 3, "z0": 4, "hello": 5},
{"units": "a", "pformat": "MA", "ptype": "S", "z0": 50.0},
{"units": "GHz", "pformat": "a", "ptype": "S", "z0": 50.0},
{"units": "GHz", "pformat": "MA", "ptype": "a", "z0": 50.0},
{"units": "GHz", "pformat": "MA", "ptype": "S", "z0": "a"},
{"units": "GHz", "pformat": "MA", "ptype": "S", "z0": -50.0},
]
for item in items:
AE(obj, ValueError, exmsg, item)
obj({"units": "gHz", "pformat": "Ri", "ptype": "s", "z0": 50.0})
def test_wave_interp_option_contract():
"""Test for WaveInterpolationOption pseudo-type."""
exmsg = (
"[START CONTRACT MSG: wave_interp_option]Argument "
"`*[argument_name]*` is not valid"
"[STOP CONTRACT MSG]"
)
items = [None, True, "a", 5.0, []]
for item in items:
AE(peng.ptypes.wave_interp_option, ValueError, exmsg, item)
items = [
"STAIRCASE",
"CONTINUOUS",
"staircase",
"continuous",
"sTaiRcAsE",
"cOnTiNuOuS",
]
for item in items:
peng.ptypes.wave_interp_option(item)
def test_wave_scale_option_contract():
"""Test for WaveScaleOption pseudo-type."""
exmsg = (
"[START CONTRACT MSG: wave_scale_option]Argument "
"`*[argument_name]*` is not valid"
"[STOP CONTRACT MSG]"
)
items = [None, True, "a", 5.0, []]
for item in items:
AE(peng.ptypes.wave_scale_option, ValueError, exmsg, item)
for item in ["LINEAR", "LOG", "linear", "log", "LiNeAr", "lOg"]:
peng.ptypes.wave_scale_option(item)
def test_wave_vectors_contract():
"""Test for WaveVectors pseudo-type."""
exmsg = (
"[START CONTRACT MSG: wave_vectors]Argument "
"`*[argument_name]*` is not valid"
"[STOP CONTRACT MSG]"
)
items = [
"a",
True,
None,
{},
[],
(None, None),
[(None, None)],
[(None, 1)],
[(1, None)],
[(1, 2, 3)],
[(1, 100), (2, 200), (0, 300)],
[(1, 100), (2, 200), (0, "a")],
[(1, 100), ("b", 200), (0, 300)],
]
for item in items:
AE(peng.ptypes.wave_vectors, ValueError, exmsg, item)
peng.ptypes.wave_vectors([(0, 100), (1, 200), (2, 300)])
|
|
'''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.
* namedtuple factory function for creating tuple subclasses with named fields
* deque list-like container with fast appends and pops on either end
* ChainMap dict-like class for creating a single view of multiple mappings
* Counter dict subclass for counting hashable objects
* OrderedDict dict subclass that remembers the order entries were added
* defaultdict dict subclass that calls a factory function to supply missing values
* UserDict wrapper around dictionary objects for easier dict subclassing
* UserList wrapper around list objects for easier list subclassing
* UserString wrapper around string objects for easier string subclassing
'''
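# A brief illustration of the order-aware OrderedDict behaviour implemented
# below (a sketch; expected results shown as comments):
#   d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
#   d.move_to_end('a')
#   list(d)                # ['b', 'c', 'a']
#   d.popitem(last=False)  # ('b', 2)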
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
# For backwards compatibility, continue to make the collections ABCs
# available through the collections module.
from _collections_abc import *
import _collections_abc
__all__ += _collections_abc.__all__
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
try:
from _collections import deque
except ImportError:
pass
else:
MutableSequence.register(deque)
try:
from _collections import defaultdict
except ImportError:
pass
################################################################################
### OrderedDict
################################################################################
class _OrderedDictKeysView(KeysView):
def __reversed__(self):
yield from reversed(self._mapping)
class _OrderedDictItemsView(ItemsView):
def __reversed__(self):
for key in reversed(self._mapping):
yield (key, self._mapping[key])
class _OrderedDictValuesView(ValuesView):
def __reversed__(self):
for key in reversed(self._mapping):
yield self._mapping[key]
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
link.prev = None
link.next = None
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return _OrderedDictKeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return _OrderedDictItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return _OrderedDictValuesView(self)
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
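# A minimal sketch (for illustration only, assuming the pure-Python OrderedDict
# defined above) of the ordering behaviour described in the comments: items
# iterate in insertion order and move_to_end() relinks a key at either end of
# the circular list. The helper name is a demonstration-only assumption and is
# never called.
def _ordereddict_order_demo():
    od = OrderedDict()
    od['first'] = 1
    od['second'] = 2
    od['third'] = 3
    od.move_to_end('first')    # relinked just before the sentinel, i.e. last
    return list(od)            # -> ['second', 'third', 'first']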
try:
from _collections import OrderedDict
except ImportError:
# Leave the pure Python version in place.
pass
################################################################################
### namedtuple
################################################################################
_class_template = """\
from builtins import property as _property, tuple as _tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def namedtuple(typename, field_names, *, verbose=False, rename=False, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = str(typename)
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
'identifiers: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
class_definition = _class_template.format(
typename = typename,
field_names = tuple(field_names),
num_fields = len(field_names),
arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
repr_fmt = ', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs = '\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names))
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
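# A minimal sketch (for illustration only) of the field-name validation above:
# with rename=True, keywords, duplicates, underscore-prefixed and otherwise
# invalid names are replaced by positional '_<index>' names instead of raising
# ValueError. The helper name is a demonstration-only assumption; it is never
# called.
def _namedtuple_rename_demo():
    T = namedtuple('T', ['abc', 'def', 'abc'], rename=True)
    return T._fields           # -> ('abc', '_1', '_2')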
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
try: # Load C helper function if available
from _collections import _count_elements
except ImportError:
pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(*args, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
if not args:
raise TypeError("descriptor '__init__' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
super(Counter, self).__init__()
self.update(*args, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters: fromkeys() would assign the
# same fixed count (e.g. v=1) to every element, which is not a tally.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(*args, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior would mix some of the original, untouched counts in
# with all of the other counts, producing a mishmash that has no
# straightforward interpretation in most counting contexts. Instead, we
# implement straight addition. Both the inputs and outputs are allowed
# to contain zero and negative counts.
if not args:
raise TypeError("descriptor 'update' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(*args, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if not args:
raise TypeError("descriptor 'subtract' of 'Counter' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract counts, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of the corresponding counts in either input counter.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
result = Counter()
for elem, count in self.items():
if count > 0:
result[elem] = count
return result
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
result = Counter()
for elem, count in self.items():
if count < 0:
result[elem] = 0 - count
return result
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of the corresponding counts from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
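# A minimal sketch (for illustration only) of the multiset-style operators and
# the "add an empty counter" idiom described in the comments above. The helper
# name is a demonstration-only assumption; it is never called.
def _counter_multiset_demo():
    c, d = Counter('abbb'), Counter('bcc')
    union = c | d              # maximum of counts -> Counter({'b': 3, 'c': 2, 'a': 1})
    inter = c & d              # minimum of counts -> Counter({'b': 1})
    c['z'] = -1                # zero/negative counts are allowed to exist...
    c += Counter()             # ...and adding an empty counter strips them
    return union, inter, c     # c is now Counter({'b': 3, 'a': 1})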
########################################################################
### ChainMap
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
be accessed or updated using the *maps* attribute. There is no other
state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
@_recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''New ChainMap with a new map followed by all previous maps.
If no map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
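# A minimal sketch (for illustration only) of the lookup/write split documented
# in the class docstring: lookups fall through the chain of maps, while writes
# and deletions touch only maps[0]. The names are demonstration-only
# assumptions; the helper is never called.
def _chainmap_demo():
    defaults = {'color': 'red', 'user': 'guest'}
    overrides = {'user': 'admin'}
    cm = ChainMap(overrides, defaults)
    found = cm['color']        # 'red', found in maps[1] after maps[0] misses
    cm['color'] = 'blue'       # lands in maps[0]; defaults is left untouched
    child = cm.new_child()     # push a fresh empty dict in front, like a new scope
    return found, overrides, child.maps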
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
# Start by filling-out the abstract methods
def __init__(*args, **kwargs):
if not args:
raise TypeError("descriptor '__init__' of 'UserDict' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
if args:
dict = args[0]
elif 'dict' in kwargs:
dict = kwargs.pop('dict')
import warnings
warnings.warn("Passing 'dict' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
dict = None
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def __iter__(self):
return iter(self.data)
# Modify __contains__ to work correctly when __missing__ is present
def __contains__(self, key):
return key in self.data
# Now, add the methods in dicts but not in MutableMapping
def __repr__(self): return repr(self.data)
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
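# A minimal sketch (for illustration only) of the __missing__/__contains__
# interplay noted in the comments above: missing-key lookups fall back to
# __missing__, while membership tests consult self.data only and therefore
# stay False. The class and helper names are demonstration-only assumptions;
# neither is used elsewhere.
class _DefaultingUserDict(UserDict):
    def __missing__(self, key):
        return 'default'
def _userdict_missing_demo():
    d = _DefaultingUserDict()
    looked_up = d['absent']    # __getitem__ falls back to __missing__ -> 'default'
    present = 'absent' in d    # __contains__ checks self.data only -> False
    return looked_up, present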
################################################################################
### UserList
################################################################################
class UserList(MutableSequence):
"""A more or less complete user-defined wrapper around list objects."""
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
return other.data if isinstance(other, UserList) else other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def clear(self): self.data.clear()
def copy(self): return self.__class__(self)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
################################################################################
### UserString
################################################################################
class UserString(Sequence):
def __init__(self, seq):
if isinstance(seq, str):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __getnewargs__(self):
return (self.data[:],)
def __eq__(self, string):
if isinstance(string, UserString):
return self.data == string.data
return self.data == string
def __lt__(self, string):
if isinstance(string, UserString):
return self.data < string.data
return self.data < string
def __le__(self, string):
if isinstance(string, UserString):
return self.data <= string.data
return self.data <= string
def __gt__(self, string):
if isinstance(string, UserString):
return self.data > string.data
return self.data > string
def __ge__(self, string):
if isinstance(string, UserString):
return self.data >= string.data
return self.data >= string
def __contains__(self, char):
if isinstance(char, UserString):
char = char.data
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, str):
return self.__class__(self.data + other)
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, str):
return self.__class__(other + self.data)
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
def __rmod__(self, format):
# apply the left-hand format string to the wrapped string data
return self.__class__(str(format) % self.data)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def casefold(self):
return self.__class__(self.data.casefold())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
return self.__class__(self.data.encode(encoding))
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=_sys.maxsize):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.find(sub, start, end)
def format(self, *args, **kwds):
return self.data.format(*args, **kwds)
def format_map(self, mapping):
return self.data.format_map(mapping)
def index(self, sub, start=0, end=_sys.maxsize):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def isidentifier(self): return self.data.isidentifier()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isprintable(self): return self.data.isprintable()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
maketrans = str.maketrans
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
if isinstance(old, UserString):
old = old.data
if isinstance(new, UserString):
new = new.data
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=_sys.maxsize):
if isinstance(sub, UserString):
sub = sub.data
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=_sys.maxsize):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None):
return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=False): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=_sys.maxsize):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
|
|
# -*- coding: utf-8 -*-
from __future__ import division
import logging
import random
import time
import unittest
import psycopg2.extensions
from jukoro import arrow
from jukoro import pg
from .base import Base, BaseWithPool
__all__ = ['TestPgPool', 'TestPgConnection', 'TestHistory', 'TestAutoCommit',
'TestManualCommit', 'TestRollback', 'TestNamedCursor', 'TestFetch',
'TestCallProc']
logger = logging.getLogger(__name__)
class TestPgPool(BaseWithPool):
def test_uri(self):
self.assertEqual(self.pool.uri, self.uri())
def test_warm_up(self):
# FIXME do not test private members
self.assertEqual(len(self.pool), 0)
with self.pool.transaction() as cursor:
self.assertTrue(self.pool._pool.is_locked(cursor._pg_conn))
self.assertIsNone(cursor._pg_conn)
self.assertEqual(len(self.pool), self.pool_size)
def test_pool_closed(self):
sz = 3
pool = pg.PgDbPool(self.uri(), pool_size=sz)
# warm up
with pool.transaction():
pass
self.assertEqual(len(pool), sz)
pool.close()
self.assertEqual(len(pool), 0)
with self.assertRaises(pg.PoolClosed):
pool.transaction()
class TestPgConnection(Base):
def test_connection_params(self):
uri, schema = self.uri(), self.schema()
with pg.PgConnection(uri, autoclose=True).transaction() as cursor:
res = cursor.execute_and_get('SHOW search_path;')
self.assertIsInstance(res, dict)
self.assertTrue('search_path' in res)
self.assertTrue(schema in res['search_path'])
res = cursor.execute_and_get('SHOW TIME ZONE;')
self.assertEqual(res['TimeZone'], 'UTC')
def test_connection_close(self):
uri = self.uri()
conn = pg.PgConnection(uri, autoclose=True)
with conn.transaction():
pass
self.assertTrue(conn.is_closed)
conn = pg.PgConnection(uri, autoclose=False)
with conn.transaction():
pass
self.assertFalse(conn.is_closed)
conn.close()
self.assertTrue(conn.is_closed)
def test_connection_schema(self):
uri, schema = self.uri(), self.schema()
conn = pg.PgConnection(uri)
self.assertEqual(schema, conn.schema)
def test_connection_is_closed(self):
uri, schema = self.uri(), self.schema()
conn = pg.PgConnection(uri, autoclose=True)
with conn.transaction():
pass
with self.assertRaises(pg.ConnectionClosed):
conn.autocommit = True
with self.assertRaises(pg.ConnectionClosed):
conn.commit()
with self.assertRaises(pg.ConnectionClosed):
conn.rollback()
with self.assertRaises(pg.ConnectionClosed):
conn.close()
with self.assertRaises(pg.ConnectionClosed):
conn.transaction()
with self.assertRaises(pg.ConnectionClosed):
conn.cursor()
with self.assertRaises(pg.ConnectionClosed):
conn.reattach()
def test_connection_psycopg2_cursor(self):
uri, schema = self.uri(), self.schema()
conn = pg.PgConnection(uri)
cursor = conn.cursor(named=False)
self.assertIsInstance(cursor, psycopg2.extensions.cursor)
self.assertIs(cursor.name, None)
cursor.close()
cursor = conn.cursor(named=True)
self.assertIsInstance(cursor, psycopg2.extensions.cursor)
self.assertIsNot(cursor.name, None)
self.assertTrue(cursor.scrollable)
self.assertTrue(cursor.withhold)
cursor.execute('SELECT 1;') # needed to close named cursor
cursor.close()
conn.close()
def test_connection_transaction(self):
uri, schema = self.uri(), self.schema()
conn = pg.PgConnection(uri)
cursor = conn.transaction()
self.assertIsInstance(cursor, pg.PgTransaction)
cursor.close()
conn.close()
class TestHistory(BaseWithPool):
def test_history_count(self):
entity_id = self.entity_id
cnt = 3
with self.pool.transaction() as cursor:
doc = self._get(cursor, entity_id)[0]
for __ in xrange(cnt):
time.sleep(0.125)
doc['attr6'] = int(time.time())
self._update(cursor, entity_id, doc)
with self.pool.transaction() as cursor:
count = self._count(cursor, entity_id)
raw_count = self._count_raw(cursor, entity_id)
self.assertEqual(count, 1)
self.assertEqual(raw_count, cnt + 1)
def test_history_count2(self):
doc = {
'attr1': 'Ferrari',
'attr2': 'McLaren',
'attr3': 'Mercedes',
'attr4': 22,
'attr5': 122,
'attr6': False,
'attr7': arrow.utcnow(),
}
cnt = 3
with self.pool.transaction() as cursor:
entity_id = self._create(cursor, doc)[0]
count = self._count(cursor, entity_id)
raw_count = self._count_raw(cursor, entity_id)
self.assertEqual(count, 1)
self.assertEqual(raw_count, 1)
with self.pool.transaction() as cursor:
doc1 = self._get(cursor, entity_id)[0]
self.assertFalse(doc1['attr6'])
self.assertFalse('attr8' in doc)
for __ in xrange(cnt):
time.sleep(0.125)
doc['attr8'] = int(time.time())
self._update(cursor, entity_id, doc)
self._delete(cursor, entity_id)
with self.pool.transaction() as cursor:
count = self._count(cursor, entity_id)
raw_count = self._count_raw(cursor, entity_id)
self.assertEqual(count, 0)
self.assertEqual(raw_count, cnt + 1)
class TestAutoCommit(BaseWithPool):
def test_a(self):
entity_id = self.entity_id
with self.pool.transaction() as cur1, self.pool.transaction() as cur2:
doc1, queries1 = self._get(cur1, entity_id)
self.assertIsInstance(doc1, dict)
self.assertEqual(len(queries1), 1)
doc1['attr6'] = int(time.time())
queries1 = self._update(cur1, entity_id, doc1)
self.assertEqual(len(queries1), 2)
doc2 = self._get(cur2, entity_id)[0]
self.assertEqual(doc1.get('attr6'), doc2.get('attr6'))
self.assertDocEqual(doc1, doc2)
def test_b(self):
entity_id = self.entity_id
with self.pool.transaction(autocommit=False) as cur1:
with self.pool.transaction() as cur2:
doc1, queries1 = self._get(cur1, entity_id)
doc1['attr6'] = int(time.time())
queries1 = self._update(cur1, entity_id, doc1)
self.assertEqual(len(queries1), 2)
doc2 = self._get(cur2, entity_id)[0]
self.assertNotEqual(doc1.get('attr6'), doc2.get('attr6'))
self.assertNotEqual(doc1, doc2)
with self.pool.transaction() as cur3:
doc3 = self._get(cur3, entity_id)[0]
self.assertEqual(doc1.get('attr6'), doc3.get('attr6'))
self.assertDocEqual(doc1, doc3)
class TestManualCommit(BaseWithPool):
def test_a(self):
uri, schema = self.uri(), self.schema()
entity_id = self.entity_id
conn = pg.PgConnection(uri)
cur1 = conn.transaction(autocommit=False)
doc1 = self._get(cur1, entity_id)[0]
doc1['attr6'] = int(time.time())
self._update(cur1, entity_id, doc1)
with self.pool.transaction() as cur2:
doc2 = self._get(cur2, entity_id)[0]
self.assertNotEqual(doc1, doc2)
conn.commit()
with self.pool.transaction() as cur3:
doc3 = self._get(cur3, entity_id)[0]
self.assertDocEqual(doc1, doc3)
cur1.close()
conn.close()
class TestRollback(BaseWithPool):
def test_a(self):
entity_id = self.entity_id
with self.pool.transaction(autocommit=False) as cursor:
doc = self._get(cursor, entity_id)[0]
doc['attr5'] = -1
with self.assertRaises(pg.IntegrityError):
self._update(cursor, entity_id, doc)
def test_b(self):
uri, schema = self.uri(), self.schema()
entity_id = self.entity_id
conn = pg.PgConnection(uri)
cur1 = conn.transaction(autocommit=False)
doc1 = self._get(cur1, entity_id)[0]
doc1['attr6'] = int(time.time())
self._update(cur1, entity_id, doc1)
with self.pool.transaction() as cur2:
doc2 = self._get(cur2, entity_id)[0]
self.assertNotEqual(doc1, doc2)
conn.rollback()
with self.pool.transaction() as cur3:
doc3 = self._get(cur3, entity_id)[0]
self.assertNotEqual(doc1, doc3)
self.assertEqual(doc2, doc3)
cur1.close()
conn.close()
class TestNamedCursor(BaseWithPool):
def test_a(self):
q = 'SELECT "entity_id", "doc" from "test_pg__live";'
with self.pool.transaction(autocommit=False, named=True) as cursor:
cursor.execute(q)
queries = ''.join(cursor.queries)
self.assertTrue('DECLARE' in queries)
def test_b(self):
# test proper cursor close
with self.pool.transaction(autocommit=False, named=True):
pass
def test_c(self):
uri, schema = self.uri(), self.schema()
conn = pg.PgConnection(uri)
cur1 = conn.transaction(autocommit=False, named=True)
try:
cur1.execute('SELECT 1/0;')
except pg.DataError:
conn.rollback()
# query not in transaction queries as it failed
self.assertFalse('DECLARE' in ''.join(cur1.queries))
cur1.close()
conn.close()
def test_d(self):
q = 'SELECT "entity_id", "doc" from "test_pg__live";'
with self.pool.transaction(autocommit=False, named=True) as cursor:
cursor.execute(q)
self.assertTrue('DECLARE' in ''.join(cursor.queries))
# second execute on named cursor raises ProgrammingError
with self.assertRaises(pg.ProgrammingError):
cursor.execute('SELECT 1/0;')
def test_e(self):
# test for named cursor fetched data slicing
# check postgresql logs for actual queries
# TODO real test
random.seed()
q = 'SELECT "entity_id", "doc" from "test_pg__live";'
cnt = 0
with self.pool.transaction(autocommit=False,
named=True, block_size=100) as cursor:
res = cursor.execute(q)
self.assertEqual(res.rowcount, -1)
for row in res:
cnt += 1
half = int(cnt / 2)
p1, p2 = random.randint(10, half), random.randint(half, cnt)
p3, p4 = random.randint(10, half), random.randint(half, cnt)
if p3 > p1:
(p1, p2), (p3, p4) = (p3, p4), (p1, p2)
elif p3 == p1:
p3 -= 5
self.assertTrue(p1 > p3)
with self.pool.transaction(autocommit=False,
named=True, block_size=100) as cursor:
res = cursor.execute(q)
res[p1:p2]
res[p3:p4]
with self.assertRaises(ValueError):
res[cnt + 2:]
res[cnt + 1000:cnt + 1200]
res[p1:p2]
class TestFetch(BaseWithPool):
def test_fetch_one(self):
entity_id = self.entity_id
q = 'SELECT "entity_id", "doc" from "test_pg__live" ' \
'WHERE "entity_id" = %s;'
with self.pool.transaction() as cursor:
res = cursor.execute(q, (entity_id, ))
self.assertIsInstance(res, pg.PgResult)
self.assertTrue(len(res) == 1)
r1 = res.get()
r2 = res.get()
self.assertEqual(r1, r2)
self.assertEqual(r1['entity_id'], entity_id)
self.assertTrue(res.is_closed)
with self.assertRaises(pg.CursorClosed):
res.get()
with self.pool.transaction() as cursor:
res = cursor.execute(q, (-1, ))
self.assertTrue(len(res) == 0)
with self.assertRaises(pg.DoesNotExist):
res.get()
def test_fetch_all(self):
q = 'SELECT "entity_id", "doc" from "test_pg__live";'
with self.pool.transaction() as cursor:
res = cursor.execute(q)
self.assertIsInstance(res, pg.PgResult)
rows1 = res.all()
rows2 = res.all()
self.assertEqual(len(res), len(rows1))
self.assertEqual(len(rows2), 0)
with self.pool.transaction() as cursor:
res = cursor.execute(q)
with self.assertRaises(pg.CursorClosed):
res.all()
def test_fetch_many(self):
q = 'SELECT "entity_id", "doc" from "test_pg__live";'
bs = 75
with self.pool.transaction(block_size=bs) as cursor:
res = cursor.execute(q)
cnt, data = 0, []
block = res.block()
while block:
cnt += len(block)
data.extend(block)
self.assertTrue(len(block) <= bs)
block = res.block()
rows = res.all()
self.assertTrue(len(rows) == 0)
half = int(cnt / 2)
res.scroll(0)
rows = set(x['entity_id'] for x in res[:half])
data = set(x['entity_id'] for x in data)
self.assertTrue(len(rows) == half)
self.assertTrue(set(data).issuperset(set(rows)))
def test_scroll(self):
q = 'SELECT "entity_id", "doc" from "test_pg__live";'
cnt, bs = 0, 75
with self.pool.transaction(block_size=bs) as cursor:
res = cursor.execute(q)
for __ in res:
cnt += 1
res.scroll(0)
res.scroll(int(cnt / 2))
with self.assertRaises(pg.DoesNotExist):
res.scroll(-1)
with self.assertRaises(pg.DoesNotExist):
res.scroll(cnt)
@unittest.skip('TODO')
class TestCallProc(Base):
def test_a(self):
pass
def test_b(self):
pass
def test_c(self):
pass
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.datastore_v3_pb import *
import google.appengine.datastore.datastore_v3_pb
google_dot_apphosting_dot_datastore_dot_datastore__v3__pb = __import__('google.appengine.datastore.datastore_v3_pb', {}, {}, [''])
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
google_dot_storage_dot_onestore_dot_v3_dot_entity__pb = __import__('google.appengine.datastore.entity_pb', {}, {}, [''])
class Request(ProtocolBuffer.ProtocolMessage):
has_service_name_ = 0
service_name_ = ""
has_method_ = 0
method_ = ""
has_request_ = 0
request_ = ""
has_request_id_ = 0
request_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def service_name(self): return self.service_name_
def set_service_name(self, x):
self.has_service_name_ = 1
self.service_name_ = x
def clear_service_name(self):
if self.has_service_name_:
self.has_service_name_ = 0
self.service_name_ = ""
def has_service_name(self): return self.has_service_name_
def method(self): return self.method_
def set_method(self, x):
self.has_method_ = 1
self.method_ = x
def clear_method(self):
if self.has_method_:
self.has_method_ = 0
self.method_ = ""
def has_method(self): return self.has_method_
def request(self): return self.request_
def set_request(self, x):
self.has_request_ = 1
self.request_ = x
def clear_request(self):
if self.has_request_:
self.has_request_ = 0
self.request_ = ""
def has_request(self): return self.has_request_
def request_id(self): return self.request_id_
def set_request_id(self, x):
self.has_request_id_ = 1
self.request_id_ = x
def clear_request_id(self):
if self.has_request_id_:
self.has_request_id_ = 0
self.request_id_ = ""
def has_request_id(self): return self.has_request_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_service_name()): self.set_service_name(x.service_name())
if (x.has_method()): self.set_method(x.method())
if (x.has_request()): self.set_request(x.request())
if (x.has_request_id()): self.set_request_id(x.request_id())
def Equals(self, x):
if x is self: return 1
if self.has_service_name_ != x.has_service_name_: return 0
if self.has_service_name_ and self.service_name_ != x.service_name_: return 0
if self.has_method_ != x.has_method_: return 0
if self.has_method_ and self.method_ != x.method_: return 0
if self.has_request_ != x.has_request_: return 0
if self.has_request_ and self.request_ != x.request_: return 0
if self.has_request_id_ != x.has_request_id_: return 0
if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_service_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: service_name not set.')
if (not self.has_method_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: method not set.')
if (not self.has_request_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: request not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.service_name_))
n += self.lengthString(len(self.method_))
n += self.lengthString(len(self.request_))
if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_service_name_):
n += 1
n += self.lengthString(len(self.service_name_))
if (self.has_method_):
n += 1
n += self.lengthString(len(self.method_))
if (self.has_request_):
n += 1
n += self.lengthString(len(self.request_))
if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
return n
def Clear(self):
self.clear_service_name()
self.clear_method()
self.clear_request()
self.clear_request_id()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.service_name_)
out.putVarInt32(26)
out.putPrefixedString(self.method_)
out.putVarInt32(34)
out.putPrefixedString(self.request_)
if (self.has_request_id_):
out.putVarInt32(42)
out.putPrefixedString(self.request_id_)
def OutputPartial(self, out):
if (self.has_service_name_):
out.putVarInt32(18)
out.putPrefixedString(self.service_name_)
if (self.has_method_):
out.putVarInt32(26)
out.putPrefixedString(self.method_)
if (self.has_request_):
out.putVarInt32(34)
out.putPrefixedString(self.request_)
if (self.has_request_id_):
out.putVarInt32(42)
out.putPrefixedString(self.request_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 18:
self.set_service_name(d.getPrefixedString())
continue
if tt == 26:
self.set_method(d.getPrefixedString())
continue
if tt == 34:
self.set_request(d.getPrefixedString())
continue
if tt == 42:
self.set_request_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_service_name_: res+=prefix+("service_name: %s\n" % self.DebugFormatString(self.service_name_))
if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
if self.has_request_: res+=prefix+("request: %s\n" % self.DebugFormatString(self.request_))
if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kservice_name = 2
kmethod = 3
krequest = 4
krequest_id = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
2: "service_name",
3: "method",
4: "request",
5: "request_id",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Request'
class ApplicationError(ProtocolBuffer.ProtocolMessage):
has_code_ = 0
code_ = 0
has_detail_ = 0
detail_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def code(self): return self.code_
def set_code(self, x):
self.has_code_ = 1
self.code_ = x
def clear_code(self):
if self.has_code_:
self.has_code_ = 0
self.code_ = 0
def has_code(self): return self.has_code_
def detail(self): return self.detail_
def set_detail(self, x):
self.has_detail_ = 1
self.detail_ = x
def clear_detail(self):
if self.has_detail_:
self.has_detail_ = 0
self.detail_ = ""
def has_detail(self): return self.has_detail_
def MergeFrom(self, x):
assert x is not self
if (x.has_code()): self.set_code(x.code())
if (x.has_detail()): self.set_detail(x.detail())
def Equals(self, x):
if x is self: return 1
if self.has_code_ != x.has_code_: return 0
if self.has_code_ and self.code_ != x.code_: return 0
if self.has_detail_ != x.has_detail_: return 0
if self.has_detail_ and self.detail_ != x.detail_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_code_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: code not set.')
if (not self.has_detail_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: detail not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.code_)
n += self.lengthString(len(self.detail_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_code_):
n += 1
n += self.lengthVarInt64(self.code_)
if (self.has_detail_):
n += 1
n += self.lengthString(len(self.detail_))
return n
def Clear(self):
self.clear_code()
self.clear_detail()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.code_)
out.putVarInt32(18)
out.putPrefixedString(self.detail_)
def OutputPartial(self, out):
if (self.has_code_):
out.putVarInt32(8)
out.putVarInt32(self.code_)
if (self.has_detail_):
out.putVarInt32(18)
out.putPrefixedString(self.detail_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_code(d.getVarInt32())
continue
if tt == 18:
self.set_detail(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_code_: res+=prefix+("code: %s\n" % self.DebugFormatInt32(self.code_))
if self.has_detail_: res+=prefix+("detail: %s\n" % self.DebugFormatString(self.detail_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcode = 1
kdetail = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "code",
2: "detail",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.ApplicationError'
class RpcError(ProtocolBuffer.ProtocolMessage):
UNKNOWN = 0
CALL_NOT_FOUND = 1
PARSE_ERROR = 2
SECURITY_VIOLATION = 3
OVER_QUOTA = 4
REQUEST_TOO_LARGE = 5
CAPABILITY_DISABLED = 6
FEATURE_DISABLED = 7
BAD_REQUEST = 8
RESPONSE_TOO_LARGE = 9
CANCELLED = 10
REPLAY_ERROR = 11
DEADLINE_EXCEEDED = 12
_ErrorCode_NAMES = {
0: "UNKNOWN",
1: "CALL_NOT_FOUND",
2: "PARSE_ERROR",
3: "SECURITY_VIOLATION",
4: "OVER_QUOTA",
5: "REQUEST_TOO_LARGE",
6: "CAPABILITY_DISABLED",
7: "FEATURE_DISABLED",
8: "BAD_REQUEST",
9: "RESPONSE_TOO_LARGE",
10: "CANCELLED",
11: "REPLAY_ERROR",
12: "DEADLINE_EXCEEDED",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
has_code_ = 0
code_ = 0
has_detail_ = 0
detail_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def code(self): return self.code_
def set_code(self, x):
self.has_code_ = 1
self.code_ = x
def clear_code(self):
if self.has_code_:
self.has_code_ = 0
self.code_ = 0
def has_code(self): return self.has_code_
def detail(self): return self.detail_
def set_detail(self, x):
self.has_detail_ = 1
self.detail_ = x
def clear_detail(self):
if self.has_detail_:
self.has_detail_ = 0
self.detail_ = ""
def has_detail(self): return self.has_detail_
def MergeFrom(self, x):
assert x is not self
if (x.has_code()): self.set_code(x.code())
if (x.has_detail()): self.set_detail(x.detail())
def Equals(self, x):
if x is self: return 1
if self.has_code_ != x.has_code_: return 0
if self.has_code_ and self.code_ != x.code_: return 0
if self.has_detail_ != x.has_detail_: return 0
if self.has_detail_ and self.detail_ != x.detail_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_code_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: code not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.code_)
if (self.has_detail_): n += 1 + self.lengthString(len(self.detail_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_code_):
n += 1
n += self.lengthVarInt64(self.code_)
if (self.has_detail_): n += 1 + self.lengthString(len(self.detail_))
return n
def Clear(self):
self.clear_code()
self.clear_detail()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.code_)
if (self.has_detail_):
out.putVarInt32(18)
out.putPrefixedString(self.detail_)
def OutputPartial(self, out):
if (self.has_code_):
out.putVarInt32(8)
out.putVarInt32(self.code_)
if (self.has_detail_):
out.putVarInt32(18)
out.putPrefixedString(self.detail_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_code(d.getVarInt32())
continue
if tt == 18:
self.set_detail(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_code_: res+=prefix+("code: %s\n" % self.DebugFormatInt32(self.code_))
if self.has_detail_: res+=prefix+("detail: %s\n" % self.DebugFormatString(self.detail_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcode = 1
kdetail = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "code",
2: "detail",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.RpcError'
class Response(ProtocolBuffer.ProtocolMessage):
has_response_ = 0
response_ = ""
has_exception_ = 0
exception_ = ""
has_application_error_ = 0
application_error_ = None
has_java_exception_ = 0
java_exception_ = ""
has_rpc_error_ = 0
rpc_error_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def response(self): return self.response_
def set_response(self, x):
self.has_response_ = 1
self.response_ = x
def clear_response(self):
if self.has_response_:
self.has_response_ = 0
self.response_ = ""
def has_response(self): return self.has_response_
def exception(self): return self.exception_
def set_exception(self, x):
self.has_exception_ = 1
self.exception_ = x
def clear_exception(self):
if self.has_exception_:
self.has_exception_ = 0
self.exception_ = ""
def has_exception(self): return self.has_exception_
def application_error(self):
if self.application_error_ is None:
self.lazy_init_lock_.acquire()
try:
if self.application_error_ is None: self.application_error_ = ApplicationError()
finally:
self.lazy_init_lock_.release()
return self.application_error_
def mutable_application_error(self): self.has_application_error_ = 1; return self.application_error()
def clear_application_error(self):
if self.has_application_error_:
self.has_application_error_ = 0;
if self.application_error_ is not None: self.application_error_.Clear()
def has_application_error(self): return self.has_application_error_
def java_exception(self): return self.java_exception_
def set_java_exception(self, x):
self.has_java_exception_ = 1
self.java_exception_ = x
def clear_java_exception(self):
if self.has_java_exception_:
self.has_java_exception_ = 0
self.java_exception_ = ""
def has_java_exception(self): return self.has_java_exception_
def rpc_error(self):
if self.rpc_error_ is None:
self.lazy_init_lock_.acquire()
try:
if self.rpc_error_ is None: self.rpc_error_ = RpcError()
finally:
self.lazy_init_lock_.release()
return self.rpc_error_
def mutable_rpc_error(self): self.has_rpc_error_ = 1; return self.rpc_error()
def clear_rpc_error(self):
if self.has_rpc_error_:
self.has_rpc_error_ = 0;
if self.rpc_error_ is not None: self.rpc_error_.Clear()
def has_rpc_error(self): return self.has_rpc_error_
def MergeFrom(self, x):
assert x is not self
if (x.has_response()): self.set_response(x.response())
if (x.has_exception()): self.set_exception(x.exception())
if (x.has_application_error()): self.mutable_application_error().MergeFrom(x.application_error())
if (x.has_java_exception()): self.set_java_exception(x.java_exception())
if (x.has_rpc_error()): self.mutable_rpc_error().MergeFrom(x.rpc_error())
def Equals(self, x):
if x is self: return 1
if self.has_response_ != x.has_response_: return 0
if self.has_response_ and self.response_ != x.response_: return 0
if self.has_exception_ != x.has_exception_: return 0
if self.has_exception_ and self.exception_ != x.exception_: return 0
if self.has_application_error_ != x.has_application_error_: return 0
if self.has_application_error_ and self.application_error_ != x.application_error_: return 0
if self.has_java_exception_ != x.has_java_exception_: return 0
if self.has_java_exception_ and self.java_exception_ != x.java_exception_: return 0
if self.has_rpc_error_ != x.has_rpc_error_: return 0
if self.has_rpc_error_ and self.rpc_error_ != x.rpc_error_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_application_error_ and not self.application_error_.IsInitialized(debug_strs)): initialized = 0
if (self.has_rpc_error_ and not self.rpc_error_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSize())
if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
if (self.has_rpc_error_): n += 1 + self.lengthString(self.rpc_error_.ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSizePartial())
if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
if (self.has_rpc_error_): n += 1 + self.lengthString(self.rpc_error_.ByteSizePartial())
return n
def Clear(self):
self.clear_response()
self.clear_exception()
self.clear_application_error()
self.clear_java_exception()
self.clear_rpc_error()
def OutputUnchecked(self, out):
if (self.has_response_):
out.putVarInt32(10)
out.putPrefixedString(self.response_)
if (self.has_exception_):
out.putVarInt32(18)
out.putPrefixedString(self.exception_)
if (self.has_application_error_):
out.putVarInt32(26)
out.putVarInt32(self.application_error_.ByteSize())
self.application_error_.OutputUnchecked(out)
if (self.has_java_exception_):
out.putVarInt32(34)
out.putPrefixedString(self.java_exception_)
if (self.has_rpc_error_):
out.putVarInt32(42)
out.putVarInt32(self.rpc_error_.ByteSize())
self.rpc_error_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_response_):
out.putVarInt32(10)
out.putPrefixedString(self.response_)
if (self.has_exception_):
out.putVarInt32(18)
out.putPrefixedString(self.exception_)
if (self.has_application_error_):
out.putVarInt32(26)
out.putVarInt32(self.application_error_.ByteSizePartial())
self.application_error_.OutputPartial(out)
if (self.has_java_exception_):
out.putVarInt32(34)
out.putPrefixedString(self.java_exception_)
if (self.has_rpc_error_):
out.putVarInt32(42)
out.putVarInt32(self.rpc_error_.ByteSizePartial())
self.rpc_error_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_response(d.getPrefixedString())
continue
if tt == 18:
self.set_exception(d.getPrefixedString())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_application_error().TryMerge(tmp)
continue
if tt == 34:
self.set_java_exception(d.getPrefixedString())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_rpc_error().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_response_: res+=prefix+("response: %s\n" % self.DebugFormatString(self.response_))
if self.has_exception_: res+=prefix+("exception: %s\n" % self.DebugFormatString(self.exception_))
if self.has_application_error_:
res+=prefix+"application_error <\n"
res+=self.application_error_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_java_exception_: res+=prefix+("java_exception: %s\n" % self.DebugFormatString(self.java_exception_))
if self.has_rpc_error_:
res+=prefix+"rpc_error <\n"
res+=self.rpc_error_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kresponse = 1
kexception = 2
kapplication_error = 3
kjava_exception = 4
krpc_error = 5
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "response",
2: "exception",
3: "application_error",
4: "java_exception",
5: "rpc_error",
}, 5)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
}, 5, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Response'
class TransactionRequest_Precondition(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
has_hash_ = 0
hash_ = ""
def __init__(self, contents=None):
self.key_ = Reference()
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key_
def clear_key(self):self.has_key_ = 0; self.key_.Clear()
def has_key(self): return self.has_key_
def hash(self): return self.hash_
def set_hash(self, x):
self.has_hash_ = 1
self.hash_ = x
def clear_hash(self):
if self.has_hash_:
self.has_hash_ = 0
self.hash_ = ""
def has_hash(self): return self.has_hash_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_hash()): self.set_hash(x.hash())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_hash_ != x.has_hash_: return 0
if self.has_hash_ and self.hash_ != x.hash_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
elif not self.key_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.key_.ByteSize())
if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(self.key_.ByteSizePartial())
if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
return n
def Clear(self):
self.clear_key()
self.clear_hash()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
if (self.has_hash_):
out.putVarInt32(26)
out.putPrefixedString(self.hash_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(18)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
if (self.has_hash_):
out.putVarInt32(26)
out.putPrefixedString(self.hash_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 26:
self.set_hash(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_hash_: res+=prefix+("hash: %s\n" % self.DebugFormatString(self.hash_))
return res
class TransactionRequest(ProtocolBuffer.ProtocolMessage):
has_puts_ = 0
puts_ = None
has_deletes_ = 0
deletes_ = None
has_allow_multiple_eg_ = 0
allow_multiple_eg_ = 0
def __init__(self, contents=None):
self.precondition_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def precondition_size(self): return len(self.precondition_)
def precondition_list(self): return self.precondition_
def precondition(self, i):
return self.precondition_[i]
def mutable_precondition(self, i):
return self.precondition_[i]
def add_precondition(self):
x = TransactionRequest_Precondition()
self.precondition_.append(x)
return x
def clear_precondition(self):
self.precondition_ = []
def puts(self):
if self.puts_ is None:
self.lazy_init_lock_.acquire()
try:
if self.puts_ is None: self.puts_ = PutRequest()
finally:
self.lazy_init_lock_.release()
return self.puts_
def mutable_puts(self): self.has_puts_ = 1; return self.puts()
def clear_puts(self):
if self.has_puts_:
self.has_puts_ = 0;
if self.puts_ is not None: self.puts_.Clear()
def has_puts(self): return self.has_puts_
def deletes(self):
if self.deletes_ is None:
self.lazy_init_lock_.acquire()
try:
if self.deletes_ is None: self.deletes_ = DeleteRequest()
finally:
self.lazy_init_lock_.release()
return self.deletes_
def mutable_deletes(self): self.has_deletes_ = 1; return self.deletes()
def clear_deletes(self):
if self.has_deletes_:
self.has_deletes_ = 0;
if self.deletes_ is not None: self.deletes_.Clear()
def has_deletes(self): return self.has_deletes_
def allow_multiple_eg(self): return self.allow_multiple_eg_
def set_allow_multiple_eg(self, x):
self.has_allow_multiple_eg_ = 1
self.allow_multiple_eg_ = x
def clear_allow_multiple_eg(self):
if self.has_allow_multiple_eg_:
self.has_allow_multiple_eg_ = 0
self.allow_multiple_eg_ = 0
def has_allow_multiple_eg(self): return self.has_allow_multiple_eg_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.precondition_size()): self.add_precondition().CopyFrom(x.precondition(i))
if (x.has_puts()): self.mutable_puts().MergeFrom(x.puts())
if (x.has_deletes()): self.mutable_deletes().MergeFrom(x.deletes())
if (x.has_allow_multiple_eg()): self.set_allow_multiple_eg(x.allow_multiple_eg())
def Equals(self, x):
if x is self: return 1
if len(self.precondition_) != len(x.precondition_): return 0
for e1, e2 in zip(self.precondition_, x.precondition_):
if e1 != e2: return 0
if self.has_puts_ != x.has_puts_: return 0
if self.has_puts_ and self.puts_ != x.puts_: return 0
if self.has_deletes_ != x.has_deletes_: return 0
if self.has_deletes_ and self.deletes_ != x.deletes_: return 0
if self.has_allow_multiple_eg_ != x.has_allow_multiple_eg_: return 0
if self.has_allow_multiple_eg_ and self.allow_multiple_eg_ != x.allow_multiple_eg_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.precondition_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_puts_ and not self.puts_.IsInitialized(debug_strs)): initialized = 0
if (self.has_deletes_ and not self.deletes_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.precondition_)
for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSize()
if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSize())
if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSize())
if (self.has_allow_multiple_eg_): n += 2
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.precondition_)
for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSizePartial()
if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSizePartial())
if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSizePartial())
if (self.has_allow_multiple_eg_): n += 2
return n
def Clear(self):
self.clear_precondition()
self.clear_puts()
self.clear_deletes()
self.clear_allow_multiple_eg()
def OutputUnchecked(self, out):
for i in xrange(len(self.precondition_)):
out.putVarInt32(11)
self.precondition_[i].OutputUnchecked(out)
out.putVarInt32(12)
if (self.has_puts_):
out.putVarInt32(34)
out.putVarInt32(self.puts_.ByteSize())
self.puts_.OutputUnchecked(out)
if (self.has_deletes_):
out.putVarInt32(42)
out.putVarInt32(self.deletes_.ByteSize())
self.deletes_.OutputUnchecked(out)
if (self.has_allow_multiple_eg_):
out.putVarInt32(48)
out.putBoolean(self.allow_multiple_eg_)
def OutputPartial(self, out):
for i in xrange(len(self.precondition_)):
out.putVarInt32(11)
self.precondition_[i].OutputPartial(out)
out.putVarInt32(12)
if (self.has_puts_):
out.putVarInt32(34)
out.putVarInt32(self.puts_.ByteSizePartial())
self.puts_.OutputPartial(out)
if (self.has_deletes_):
out.putVarInt32(42)
out.putVarInt32(self.deletes_.ByteSizePartial())
self.deletes_.OutputPartial(out)
if (self.has_allow_multiple_eg_):
out.putVarInt32(48)
out.putBoolean(self.allow_multiple_eg_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_precondition().TryMerge(d)
continue
if tt == 34:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_puts().TryMerge(tmp)
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_deletes().TryMerge(tmp)
continue
if tt == 48:
self.set_allow_multiple_eg(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.precondition_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Precondition%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
if self.has_puts_:
res+=prefix+"puts <\n"
res+=self.puts_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_deletes_:
res+=prefix+"deletes <\n"
res+=self.deletes_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_allow_multiple_eg_: res+=prefix+("allow_multiple_eg: %s\n" % self.DebugFormatBool(self.allow_multiple_eg_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kPreconditionGroup = 1
kPreconditionkey = 2
kPreconditionhash = 3
kputs = 4
kdeletes = 5
kallow_multiple_eg = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Precondition",
2: "key",
3: "hash",
4: "puts",
5: "deletes",
6: "allow_multiple_eg",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.STRING,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.TransactionRequest'
class TransactionQueryResult(ProtocolBuffer.ProtocolMessage):
has_result_ = 0
has_entity_group_key_ = 0
has_entity_group_ = 0
entity_group_ = None
def __init__(self, contents=None):
self.result_ = QueryResult()
self.entity_group_key_ = Reference()
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def result(self): return self.result_
def mutable_result(self): self.has_result_ = 1; return self.result_
def clear_result(self):self.has_result_ = 0; self.result_.Clear()
def has_result(self): return self.has_result_
def entity_group_key(self): return self.entity_group_key_
def mutable_entity_group_key(self): self.has_entity_group_key_ = 1; return self.entity_group_key_
def clear_entity_group_key(self):self.has_entity_group_key_ = 0; self.entity_group_key_.Clear()
def has_entity_group_key(self): return self.has_entity_group_key_
def entity_group(self):
if self.entity_group_ is None:
self.lazy_init_lock_.acquire()
try:
if self.entity_group_ is None: self.entity_group_ = EntityProto()
finally:
self.lazy_init_lock_.release()
return self.entity_group_
def mutable_entity_group(self): self.has_entity_group_ = 1; return self.entity_group()
def clear_entity_group(self):
if self.has_entity_group_:
self.has_entity_group_ = 0;
if self.entity_group_ is not None: self.entity_group_.Clear()
def has_entity_group(self): return self.has_entity_group_
def MergeFrom(self, x):
assert x is not self
if (x.has_result()): self.mutable_result().MergeFrom(x.result())
if (x.has_entity_group_key()): self.mutable_entity_group_key().MergeFrom(x.entity_group_key())
if (x.has_entity_group()): self.mutable_entity_group().MergeFrom(x.entity_group())
def Equals(self, x):
if x is self: return 1
if self.has_result_ != x.has_result_: return 0
if self.has_result_ and self.result_ != x.result_: return 0
if self.has_entity_group_key_ != x.has_entity_group_key_: return 0
if self.has_entity_group_key_ and self.entity_group_key_ != x.entity_group_key_: return 0
if self.has_entity_group_ != x.has_entity_group_: return 0
if self.has_entity_group_ and self.entity_group_ != x.entity_group_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_result_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: result not set.')
elif not self.result_.IsInitialized(debug_strs): initialized = 0
if (not self.has_entity_group_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity_group_key not set.')
elif not self.entity_group_key_.IsInitialized(debug_strs): initialized = 0
if (self.has_entity_group_ and not self.entity_group_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.result_.ByteSize())
n += self.lengthString(self.entity_group_key_.ByteSize())
if (self.has_entity_group_): n += 1 + self.lengthString(self.entity_group_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_result_):
n += 1
n += self.lengthString(self.result_.ByteSizePartial())
if (self.has_entity_group_key_):
n += 1
n += self.lengthString(self.entity_group_key_.ByteSizePartial())
if (self.has_entity_group_): n += 1 + self.lengthString(self.entity_group_.ByteSizePartial())
return n
def Clear(self):
self.clear_result()
self.clear_entity_group_key()
self.clear_entity_group()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.result_.ByteSize())
self.result_.OutputUnchecked(out)
out.putVarInt32(18)
out.putVarInt32(self.entity_group_key_.ByteSize())
self.entity_group_key_.OutputUnchecked(out)
if (self.has_entity_group_):
out.putVarInt32(26)
out.putVarInt32(self.entity_group_.ByteSize())
self.entity_group_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_result_):
out.putVarInt32(10)
out.putVarInt32(self.result_.ByteSizePartial())
self.result_.OutputPartial(out)
if (self.has_entity_group_key_):
out.putVarInt32(18)
out.putVarInt32(self.entity_group_key_.ByteSizePartial())
self.entity_group_key_.OutputPartial(out)
if (self.has_entity_group_):
out.putVarInt32(26)
out.putVarInt32(self.entity_group_.ByteSizePartial())
self.entity_group_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_result().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity_group_key().TryMerge(tmp)
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity_group().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_result_:
res+=prefix+"result <\n"
res+=self.result_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_entity_group_key_:
res+=prefix+"entity_group_key <\n"
res+=self.entity_group_key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_entity_group_:
res+=prefix+"entity_group <\n"
res+=self.entity_group_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kresult = 1
kentity_group_key = 2
kentity_group = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "result",
2: "entity_group_key",
3: "entity_group",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.TransactionQueryResult'
if _extension_runtime:
pass
__all__ = ['Request','ApplicationError','RpcError','Response','TransactionRequest','TransactionRequest_Precondition','TransactionQueryResult']
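# Illustrative sketch, not part of the generated classes above: a minimal
# round trip through RpcError and Response using only the accessors defined
# in this module, plus Encode()/MergeFromString() assumed to come from the
# ProtocolBuffer.ProtocolMessage base class in the App Engine runtime.
def _example_error_response():
  err = RpcError()
  err.set_code(RpcError.PARSE_ERROR)
  err.set_detail('could not parse the request body')
  assert err.IsInitialized()  # code is the only required field
  resp = Response()
  resp.mutable_rpc_error().MergeFrom(err)
  # The constructor feeds the serialized bytes back through MergeFromString.
  return Response(resp.Encode())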
|
|
#!/usr/bin/env python
# tile2360.py -- convert NetHack 3.4.3 tile sets most of the way to 3.6.0
#
# Copyright (c) 2015, Ray Chason
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import os.path
import struct
import sys
# A Bitmap image, with some extra methods for tile mapping
class Bitmap(object):
def __init__(self, inpname):
# TODO: This assumes the BITMAPINFOHEADER structure. Add support for
# other bitmap formats.
# Read the header
fp = open(inpname, "rb")
header = fp.read(54)
(magic,
self.bmp_size,
reserved,
self.image_offset,
self.header_size,
self.width,
self.height,
self.num_planes,
self.bits_per_pixel,
self.compression,
self.image_size,
self.horiz_res,
self.vert_res,
self.num_colors,
self.num_important_colors) = struct.unpack("<2s6L2H6L", header)
# Check various header fields for unsupported stuff
if magic != "BM":
raise RuntimeError, "%s is not in .BMP format" % inpname
self.color_space_endpoints = [ None ] * 9
if self.header_size == 40:
self.red_mask = None
self.green_mask = None
self.blue_mask = None
self.alpha_mask = None
self.color_space = None
self.red_gamma = None
self.green_gamma = None
self.blue_gamma = None
elif self.header_size == 108:
header2 = fp.read(self.header_size - 40)
(self.red_mask,
self.green_mask,
self.blue_mask,
self.alpha_mask,
self.color_space,
self.color_space_endpoints[0],
self.color_space_endpoints[1],
self.color_space_endpoints[2],
self.color_space_endpoints[3],
self.color_space_endpoints[4],
self.color_space_endpoints[5],
self.color_space_endpoints[6],
self.color_space_endpoints[7],
self.color_space_endpoints[8],
self.red_gamma,
self.green_gamma,
self.blue_gamma) = struct.unpack("<4L4s12L", header2);
# TODO: decode per the bit masks
else:
raise RuntimeError, "%s has an unsupported header type (%d)" % \
(inpname, self.header_size)
if self.num_planes != 1:
raise RuntimeError, "%s has %d planes, not supported" % \
(inpname, self.num_planes)
if self.compression != 0:
raise RuntimeError, "%s is compressed (%d), and not supported" % \
(inpname, self.compression)
if self.bits_per_pixel not in (1, 2, 4, 8, 24, 32):
raise RuntimeError, "%s has %d bits per pixel, not supported" % \
(inpname, self.bits_per_pixel)
# Read the palette
if self.bits_per_pixel <= 8:
if self.num_colors == 0:
self.num_colors = 1 << self.bits_per_pixel
self.palette = [ None ] * self.num_colors
for i in xrange(0, self.num_colors):
b, g, r, z = struct.unpack("<4B", fp.read(4))
self.palette[i] = (b, g, r)
else:
self.palette = None
# Read the pixels
fp.seek(self.image_offset)
self.image = [ None ] * self.height
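        # Note: BMP pixel rows are stored bottom-up (last row first) and each
        # row is padded to a multiple of 4 bytes, which is what the stride
        # computation below does; e.g. a 33-pixel-wide 8bpp row occupies
        # ((8 * 33 + 31) / 32) * 4 = 36 bytes.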
row_size = ((self.bits_per_pixel * self.width + 31) / 32) * 4
if self.bits_per_pixel <= 8:
# Palettized image; convert to 24 bit
pixels_per_byte = 8 / self.bits_per_pixel
mask = (1 << self.bits_per_pixel) - 1
for y in xrange(0, self.height):
row_bytes = fp.read(row_size)
row_bytes = map(
lambda x : struct.unpack('<1B', x)[0],
row_bytes)
row = [ None ] * self.width
self.image[self.height - 1 - y] = row
shift = 8
for x in xrange(0, self.width):
if shift <= 0:
shift = 8
shift -= self.bits_per_pixel
x_hi = x / pixels_per_byte
i = (row_bytes[x_hi] >> shift) & mask
row[x] = self.palette[i]
else:
# 24 or 32 bits per pixel
bytes_per_pixel = self.bits_per_pixel / 8
for y in xrange(0, self.height):
row_bytes = fp.read(row_size)
row_bytes = map(
lambda x : struct.unpack('<1B', x)[0],
row_bytes)
row = [ None ] * self.width
self.image[self.height - 1 - y] = row
for x in xrange(0, self.width):
x2 = x * bytes_per_pixel
row[x] = tuple(row_bytes[x2 : x2 + 3])
self.bits_per_pixel = 24
# These are yet unknown
self.tile_width = None
self.tile_height = None
self.tiles_per_row = None
self.tile_rows = None
self.tiles = None
# Split the image into tiles
def split(self, tile_width, tile_height):
self.tile_width = tile_width
self.tile_height = tile_height
self.tiles_per_row = self.width / tile_width
self.tile_rows = self.height / tile_height
num_tiles = self.tiles_per_row * self.tile_rows
self.tiles = [ None ] * num_tiles
for t in xrange(0, num_tiles):
tile = [ None ] * tile_height
self.tiles[t] = tile
t_col = t % self.tiles_per_row
t_row = t / self.tiles_per_row
t_x = t_col * tile_width
t_y = t_row * tile_height
for y in xrange(0, tile_height):
tile[y] = self.image[t_y + y][t_x : t_x + tile_width]
# Rearrange the tiles to match the NetHack 3.6.0 order
def remap(self, no_statues):
# If tile_map[X] = Y, the tile in position X for 3.6.0 comes from
# position Y for 3.4.3. Negative numbers indicate tiles that cannot
# be directly mapped.
tile_map = [
# Monsters
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16,
# dingo (19) placed before dog (17)
19, 17, 18,
20, 21,
# winter wolf cub (23) placed before warg (22)
23, 22,
24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100,
# pony (104) placed before white unicorn (101)
104, 101, 102, 103,
105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
170, 171, 172, 173, 174,
# ettin (176) placed before storm giant (175)
176, 175,
177, 178, 179,
180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
# green slime (211) placed before black pudding (210)
211, 210,
212, 213, 214, 215, 216, 217,
# python (219) placed before pit viper (218)
219, 218,
220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
240, 241, 242, 243, 244, 245, 246, 247,
# ghoul (249) placed before giant zombie (248)
249, 248,
250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
260, 261, 262, 263, 264, 265, 266, 267, 268, 269,
270, 271, 272,
# nurse (273) placed after sergeant (281)
274, 275, 276, 277, 278, 279, 280, 281, 273,
282, 283, 284, 285, 286, 287, 288, 289,
290, 291, 292,
# succubus (294) placed before horned devil (293)
294, 293,
295, 296, 297, 298, 299,
300, 301, 302, 303, 304,
# sandestin (319) placed before balrog (305)
319, 305, 306, 307, 308, 309, 310, 311, 312, 313,
314, 315, 316, 317, 318,
320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
330, 331, 332, 333, 334, 335, 336, 337, 338, 339,
340, 341, 342, 343, 344, 345, 346, 347, 348, 349,
350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
360, 361, 362, 363, 364, 365, 366, 367, 368, 369,
370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
380, 381, 382, 383, 384, 385, 386, 387, 388, 389,
390, 391, 392, 393,
# Objects:
394, 395, 396, 397, 398, 399,
400, 401, 402, 403, 404, 405, 406, 407, 408, 409,
410, 411, 412, 413, 414, 415, 416, 417, 418, 419,
420, 421, 422, 423, 424, 425, 426, 427, 428, 429,
430, 431, 432, 433, 434, 435, 436, 437, 438, 439,
440, 441, 442, 443, 444, 445, 446, 447, 448, 449,
450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
460, 461, 462, 463, 464, 465, 466, 467, 468, 469,
470, 471, 472, 473, 474, 475, 476, 477, 478, 479,
480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
500, 501, 502, 503, 504, 505, 506, 507, 508, 509,
510, 511, 512, 513, 514, 515, 516, 517, 518, 519,
520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
530, 531, 532, 533, 534, 535, 536, 537, 538, 539,
540, 541, 542, 543, 544, 545, 546, 547, 548, 549,
550, 551, 552, 553, 554, 555, 556, 557, 558, 559,
560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
580, 581, 582, 583, 584, 585, 586, 587, 588, 589,
590, 591, 592, 593, 594, 595, 596, 597, 598, 599,
600, 601, 602, 603, 604, 605, 606, 607, 608, 609,
610, 611, 612, 613, 614, 615, 616, 617, 618, 619,
620, 621, 622, 623, 624, 625, 626, 627, 628, 629,
630, 631, 632, 633, 634, 635, 636, 637, 638, 639,
640, 641,
-1, # glob of gray ooze
-1, # glob of brown pudding
-1, # glob of green slime
-1, # glob of black pudding
642, 643, 644, 645, 646, 647, 648, 649,
650, 651, 652, 653, 654, 655, 656, 657, 658, 659,
660, 661, 662, 663, 664, 665, 666, 667, 668, 669,
670, 671, 672, 673, 674, 675, 676, 677, 678, 679,
680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
# Random scroll appearances begin here
690, 691, 692, 693, 694, 695, 696, 697, 698, 699,
700, 701, 702, 703, 704, 705, 706, 707, 708, 709,
710, 711, 712, 713, 714,
# New random scroll appearances. Repeat the first 16 above
690, 691, 692, 693, 694, 695, 696, 697, 698, 699,
700, 701, 702, 703, 704, 705,
# Random scroll appearances end here
715, 716, 717, 718, 719,
720, 721, 722, 723, 724, 725, 726, 727, 728, 729,
730, 731, 732, 733, 734, 735, 736, 737, 738, 739,
740, 741, 742, 743, 744, 745, 746, 747, 748, 749,
750, 751, 752, 753, 754, 755, 756, 757,
-1, # Novel
758, 759,
760, 761, 762, 763, 764, 765, 766, 767, 768, 769,
770, 771, 772, 773, 774, 775, 776, 777, 778, 779,
780, 781, 782, 783, 784, 785, 786, 787, 788, 789,
790, 791, 792, 793, 794, 795, 796, 797, 798, 799,
800, 801, 802, 803, 804, 805, 806, 807, 808, 809,
810, 811, 812, 813, 814, 815, 816, 817, 818, 819,
820, 821, 822, 823, 824, 825, 826, 827, 828,
# Dungeon features, missiles, explosions, etc.
829,
830, 831, 832, 833, 834, 835, 836, 837, 838, 839,
840, 841, 842, 843, 844, 845, 846, 847, 848,
-2, # darkened part of a room
849,
850, 851, 852, 853, 854, 855, 856, 857, 858, 859,
860, 861, 862, 863, 864, 865, 866, 867, 868, 869,
870, 871, 872, 873, 874, 875, 876, 877, 878, 879,
880, 881, 882, 883, 884, 885, 886, 887, 888, 889,
890, 891,
-1, # vibrating square
892, 893, 894, 895, 896, 897, 898, 899,
900, 901, 902, 903,
-1, # poison cloud
-1, # valid position
904, 905, 906, 907, 908, 909,
910, 911, 912, 913, 914, 915, 916, 917, 918, 919,
920, 921, 922, 923, 924, 925, 926, 927, 928, 929,
930, 931, 932, 933, 934, 935, 936, 937, 938, 939,
940, 941, 942, 943, 944, 945, 946, 947, 948, 949,
950, 951, 952, 953, 954, 955, 956, 957, 958, 959,
960, 961, 962, 963, 964, 965, 966, 967, 968, 969,
970, 971, 972, 973, 974, 975, 976, 977, 978, 979,
980, 981, 982, 983, 984, 985, 986, 987, 988, 989,
990, 991, 992, 993, 994, 995, 996, 997, 998, 999,
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009,
1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019,
1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029,
1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049,
1050, 1051, 1052, 1053, 1054, 1055, 1056
# then repeat the monster glyphs to make statues
]
# Map monsters, objects and dungeon features
map_size = len(tile_map)
new_tiles = [ None ] * (map_size + 394)
for i in xrange(0, map_size):
m = tile_map[i]
if m >= 0:
new_tiles[i] = self.tiles[m]
elif m == -2:
                new_tiles[i] = self.darkenedTile(new_tiles[i - 1])  # darkened copy of the lit room tile just mapped
else:
new_tiles[i] = self.placeHolderTile()
# Generate statue tiles
if no_statues:
for i in xrange(0, 394):
new_tiles[i + map_size] = self.tiles[824] # statue
else:
for i in xrange(0, 394):
new_tiles[i + map_size] = self.makeStatue(
new_tiles[i], self.tiles[848])
# Update the number of tile rows
self.tile_rows = (len(new_tiles) + self.tiles_per_row - 1) \
/ self.tiles_per_row
# Add some blank tiles to fill out the last row
num_tiles = self.tile_rows * self.tiles_per_row
if len(new_tiles) < num_tiles:
blank_tile = self.blankTile()
while len(new_tiles) < num_tiles:
new_tiles.append(blank_tile)
self.tiles = new_tiles
# Rejoin the tiles into a new image
def join(self):
# New image dimensions; normally width will be unchanged
self.width = self.tiles_per_row * self.tile_width
self.height = self.tile_rows * self.tile_height
# Create blank image
self.image = [ None ] * self.height
for i in xrange(0, self.height):
self.image[i] = []
# Add each tile to the end of its row
for i in xrange(0, len(self.tiles)):
t_row = i / self.tiles_per_row
t_y = t_row * self.tile_height
tile = self.tiles[i]
for j in xrange(0, self.tile_height):
self.image[t_y + j].extend(tile[j])
# Write the image to the output file
def write(self, outname):
fp = open(outname, "wb")
# Write a palettized image if possible without degradation
self.buildPalette()
palette_map = {}
if self.bits_per_pixel <= 8:
for i in xrange(0, len(self.palette)):
palette_map[self.palette[i]] = i
# Write the header, with placeholders for some fields
self.writeHeader(fp)
# Write the palette if any
if self.bits_per_pixel <= 8:
for i in xrange(0, self.num_colors):
fp.write(struct.pack("<4B",
self.palette[i][0],
self.palette[i][1],
self.palette[i][2],
0))
self.image_offset = fp.tell()
# Write the pixels
row_size = ((self.bits_per_pixel * self.width + 31) / 32) * 4
if self.bits_per_pixel <= 8:
for y in xrange(0, self.height):
row = self.image[self.height - 1 - y]
bits = 0
byte = 0
count = 0
for x in xrange(0, self.width):
index = palette_map[row[x]]
byte = (byte << self.bits_per_pixel) | index
bits += self.bits_per_pixel
if bits >= 8:
fp.write(struct.pack("<1B", byte))
byte = 0
bits = 0
count += 1
if bits != 0:
byte <<= 8 - bits
fp.write(struct.pack("<1B", byte))
count += 1
while count < row_size:
fp.write(struct.pack("<1B", 0))
count += 1
else:
for y in xrange(0, self.height):
row = self.image[self.height - 1 - y]
for x in xrange(0, self.width):
for byte in row[x]:
fp.write(struct.pack("<1B", byte))
count = len(row) * len(row[0])
while count < row_size:
fp.write(struct.pack("<1B", 0))
count += 1
# Write the header with the correct offsets
self.bmp_size = fp.tell()
fp.seek(0)
self.writeHeader(fp)
# Given the existing image, build a palette if possible
# If there are more than 256 unique colors, build no palette; we will
# write a 24 bit bitmap
def buildPalette(self):
# Collect all colors present in the image
color_count = {}
for row in self.image:
for pixel in row:
if pixel not in color_count:
color_count[pixel] = 0
color_count[pixel] += 1
# Get the list of unique colors; this will be the palette
palette = color_count.keys()
self.num_colors = len(palette)
if self.num_colors > 256:
# We will write a 24 bit bitmap
self.bits_per_pixel = 24
self.palette = None
return
# Arrange in descending order of occurrence
palette.sort(lambda a, b : color_count[b] - color_count[a])
# Set a valid bit-per-pixel count, with the fewest bits that will
# encompass the palette
self.palette = palette
if self.num_colors < 2:
self.bits_per_pixel = 1
elif self.num_colors < 4:
self.bits_per_pixel = 2
elif self.num_colors < 16:
self.bits_per_pixel = 4
else:
self.bits_per_pixel = 8
# A black tile, to fill the last row
def blankTile(self):
return [ [ (0, 0, 0) ] * self.tile_width ] * self.tile_height
# A placeholder tile, for the tiles that cannot otherwise be derived
# This will appear as a red block with a black X through it
def placeHolderTile(self):
red = ( 0x00, 0x00, 0xFF )
black = ( 0x00, 0x00, 0x00 )
tile = [ None ] * self.tile_height
for y in xrange(0, self.tile_height):
tile[y] = [ red ] * self.tile_width
m = min(self.tile_width, self.tile_height)
for x in xrange(0, m):
tile[x][x] = black
tile[x][m - 1 - x] = black
return tile
# A tile at half brightness to the input
def darkenedTile(self, inptile):
outtile = [ None ] * len(inptile)
for y in xrange(0, len(outtile)):
inprow = inptile[y]
outrow = [ None ] * len(inprow)
outtile[y] = outrow
for x in xrange(0, len(inprow)):
inp = inprow[x]
out = ( inp[0] >> 1, inp[1] >> 1, inp[2] >> 1 )
outrow[x] = out
return outtile
# A statue tile.
# To assist in transforming tile sets that do not use a black background,
# this accepts the floor tile. A pixel that is different from the floor
# tile is considered to be foreground, and converted to grayscale.
def makeStatue(self, inptile, floor):
outtile = [ None ] * len(inptile)
for y in xrange(0, len(outtile)):
inprow = inptile[y]
floor_row = floor[y]
outrow = [ None ] * len(inprow)
outtile[y] = outrow
for x in xrange(0, len(inprow)):
inp = inprow[x]
fl = floor_row[x]
if inp == fl:
# background
out = inp
else:
# foreground
gray = (inp[0] + inp[1] + inp[2]) / 3
out = ( gray, gray, gray )
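                    # e.g. a foreground pixel (30, 60, 90) averages to 60 and
                    # becomes the gray (60, 60, 60)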
outrow[x] = out
return outtile
# Write a BITMAPINFOHEADER-type header for a BMP file
def writeHeader(self, fp):
fp.write(struct.pack("<2s6L2H6L",
"BM",
self.bmp_size,
0,
self.image_offset,
self.header_size,
self.width,
self.height,
self.num_planes,
self.bits_per_pixel,
self.compression,
0, #self.image_size,
self.horiz_res,
self.vert_res,
self.num_colors,
self.num_important_colors))
# Convert one bitmap file
# inpname is the name of the file to be converted; args contains the arguments
# as parsed by the ArgumentParser object
def convertBitmap(inpname, args):
# Collect arguments from args
tile_width = args.tile_width
tile_height = args.tile_height
no_statues = args.no_statues
outname = args.output
# Provide default output file name
if outname is None:
d, n = os.path.split(inpname)
dot = n.rfind('.')
if dot != -1:
n = n[:dot]
n += '-360.bmp'
outname = os.path.join(d, n)
# Read the bitmap image
bmp = Bitmap(inpname)
# Provide default tile dimensions
if tile_width is None:
tile_width = bmp.width / 40
if tile_height is None:
tile_height = tile_width
# Split the bitmap into tiles
bmp.split(tile_width, tile_height)
# Remap into 3.6.0 arrangement
bmp.remap(no_statues)
# Rejoin into a single image
bmp.join()
# Write to disk
    bmp.write(outname)
    return True  # report success so the caller's exit-code check is meaningful
# Define command line arguments for this program
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Convert NetHack 3.4.3 tile sets for use with 3.6.0',
epilog='''
If --tile-width is not specified, it is the image width divided by 40.
If --tile-height is not specified, it is equal to the tile width.
If --no-statues is specified, statue glyphs are copied from the 3.4.3 statue
glyph; if not, statue glyphs are generated by converting the monster glyphs
to grayscale.
Images must be in BMP format.
If --output is not specified, the output file name is <input-name>-360.bmp.
Multiple images can be converted, but only if --output is not specified.
''')
parser.add_argument('images', metavar='image', type=str, nargs='+',
help='Name of a tile set image for NetHack 3.4.3')
parser.add_argument('--tile-width', '-x', dest='tile_width', type=int,
help='Width of a single tile in pixels')
parser.add_argument('--tile-height', '-y', dest='tile_height', type=int,
help='Height of a single tile in pixels')
parser.add_argument('--no-statues', '-s', dest='no_statues',
action='store_true',
help='Do not derive statues from monsters')
parser.add_argument('--output', '-o', dest='output', type=str,
help='Name of output image')
args = parser.parse_args()
if len(args.images) > 1 and args.output is not None:
sys.stderr.write("Cannot specify --output with more than one image name\n")
sys.exit(1)
# Process each image in turn
rc = 0
for image in args.images:
if not convertBitmap(image, args):
rc = 1
sys.exit(rc)
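# Example invocation (hypothetical file name):
#   python tile2360.py --tile-width 16 --tile-height 16 nethack343tiles.bmp
# With no --output, the converted set is written next to the input as
# nethack343tiles-360.bmp.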
|
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: add proxy handling.
__author__ = '[email protected] (Jeff Scudder)'
import os
from io import StringIO
from urllib.parse import urlparse
import urllib.request, urllib.parse, urllib.error
import http.client as httplib
ssl = None
try:
import ssl
except ImportError:
pass
class Error(Exception):
pass
class UnknownSize(Error):
pass
class ProxyError(Error):
pass
MIME_BOUNDARY = 'END_OF_PART'
def get_headers(http_response):
"""Retrieves all HTTP headers from an HTTP response from the server.
This method is provided for backwards compatibility for Python2.2 and 2.3.
The httplib.HTTPResponse object in 2.2 and 2.3 does not have a getheaders
method so this function will use getheaders if available, but if not it
will retrieve a few using getheader.
"""
if hasattr(http_response, 'getheaders'):
return http_response.getheaders()
else:
headers = []
for header in (
'location', 'content-type', 'content-length', 'age', 'allow',
'cache-control', 'content-location', 'content-encoding', 'date',
'etag', 'expires', 'last-modified', 'pragma', 'server',
'set-cookie', 'transfer-encoding', 'vary', 'via', 'warning',
'www-authenticate', 'gdata-version'):
value = http_response.getheader(header, None)
if value is not None:
headers.append((header, value))
return headers
class HttpRequest(object):
"""Contains all of the parameters for an HTTP 1.1 request.
The HTTP headers are represented by a dictionary, and it is the
responsibility of the user to ensure that duplicate field names are combined
into one header value according to the rules in section 4.2 of RFC 2616.
"""
method = None
uri = None
def __init__(self, uri=None, method=None, headers=None):
"""Construct an HTTP request.
Args:
uri: The full path or partial path as a Uri object or a string.
method: The HTTP method for the request, examples include 'GET', 'POST',
etc.
headers: dict of strings The HTTP headers to include in the request.
"""
self.headers = headers or {}
self._body_parts = []
if method is not None:
self.method = method
if isinstance(uri, str):
uri = Uri.parse_uri(uri)
self.uri = uri or Uri()
def add_body_part(self, data, mime_type, size=None):
"""Adds data to the HTTP request body.
If more than one part is added, this is assumed to be a mime-multipart
request. This method is designed to create MIME 1.0 requests as specified
in RFC 1341.
Args:
data: str or a file-like object containing a part of the request body.
mime_type: str The MIME type describing the data
size: int Required if the data is a file like object. If the data is a
string, the size is calculated so this parameter is ignored.
"""
if isinstance(data, str):
size = len(data)
if size is None:
# TODO: support chunked transfer if some of the body is of unknown size.
raise UnknownSize('Each part of the body must have a known size.')
if 'Content-Length' in self.headers:
content_length = int(self.headers['Content-Length'])
else:
content_length = 0
# If this is the first part added to the body, then this is not a multipart
# request.
if len(self._body_parts) == 0:
self.headers['Content-Type'] = mime_type
content_length = size
self._body_parts.append(data)
elif len(self._body_parts) == 1:
# This is the first member in a mime-multipart request, so change the
# _body_parts list to indicate a multipart payload.
self._body_parts.insert(0, 'Media multipart posting')
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
content_length += len(boundary_string) + size
self._body_parts.insert(1, boundary_string)
content_length += len('Media multipart posting')
# Put the content type of the first part of the body into the multipart
# payload.
original_type_string = 'Content-Type: %s\r\n\r\n' % (
self.headers['Content-Type'],)
self._body_parts.insert(2, original_type_string)
content_length += len(original_type_string)
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.append(boundary_string)
content_length += len(boundary_string)
# Change the headers to indicate this is now a mime multipart request.
self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % (
MIME_BOUNDARY,)
self.headers['MIME-version'] = '1.0'
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.append(type_string)
content_length += len(type_string)
self._body_parts.append(data)
ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,)
self._body_parts.append(ending_boundary_string)
content_length += len(ending_boundary_string)
else:
# This is a mime multipart request.
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.insert(-1, boundary_string)
content_length += len(boundary_string) + size
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.insert(-1, type_string)
content_length += len(type_string)
self._body_parts.insert(-1, data)
self.headers['Content-Length'] = str(content_length)
# I could add an "append_to_body_part" method as well.
AddBodyPart = add_body_part
def add_form_inputs(self, form_data,
mime_type='application/x-www-form-urlencoded'):
"""Form-encodes and adds data to the request body.
Args:
      form_data: dict or sequence of two member tuples which contains the
form keys and values.
mime_type: str The MIME type of the form data being sent. Defaults
to 'application/x-www-form-urlencoded'.
"""
body = urllib.parse.urlencode(form_data)
self.add_body_part(body, mime_type)
AddFormInputs = add_form_inputs
def _copy(self):
"""Creates a deep copy of this request."""
copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
self.uri.path, self.uri.query.copy())
new_request = HttpRequest(uri=copied_uri, method=self.method,
headers=self.headers.copy())
new_request._body_parts = self._body_parts[:]
return new_request
def _dump(self):
"""Converts to a printable string for debugging purposes.
In order to preserve the request, it does not read from file-like objects
in the body.
"""
output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % (
self.method, str(self.uri))
for header, value in self.headers.items():
output += ' %s: %s\n' % (header, value)
output += ' body sections:\n'
i = 0
for part in self._body_parts:
if isinstance(part, str):
output += ' %s: %s\n' % (i, part)
else:
output += ' %s: <file like object>\n' % i
i += 1
return output
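# Illustrative sketch, not part of the original module: adding a second body
# part switches an HttpRequest to a MIME multipart payload and rewrites the
# Content-Type and Content-Length headers, as described in add_body_part
# above.  The URL and payloads here are made-up example values.
def _example_multipart_request():
  request = HttpRequest(uri='http://example.com/upload', method='POST')
  request.add_body_part('{"title": "photo"}', 'application/json')
  request.add_body_part('...image bytes...', 'image/jpeg')
  return request._dump()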
def _apply_defaults(http_request):
if http_request.uri.scheme is None:
if http_request.uri.port == 443:
http_request.uri.scheme = 'https'
else:
http_request.uri.scheme = 'http'
class Uri(object):
"""A URI as used in HTTP 1.1"""
scheme = None
host = None
port = None
path = None
def __init__(self, scheme=None, host=None, port=None, path=None, query=None):
"""Constructor for a URI.
Args:
scheme: str This is usually 'http' or 'https'.
host: str The host name or IP address of the desired server.
      port: int The server's port number.
path: str The path of the resource following the host. This begins with
a /, example: '/calendar/feeds/default/allcalendars/full'
query: dict of strings The URL query parameters. The keys and values are
both escaped so this dict should contain the unescaped values.
For example {'my key': 'val', 'second': '!!!'} will become
'?my+key=val&second=%21%21%21' which is appended to the path.
"""
self.query = query or {}
if scheme is not None:
self.scheme = scheme
if host is not None:
self.host = host
if port is not None:
self.port = port
if path:
self.path = path
def _get_query_string(self):
param_pairs = []
for key, value in self.query.items():
quoted_key = urllib.parse.quote_plus(str(key))
if value is None:
param_pairs.append(quoted_key)
else:
quoted_value = urllib.parse.quote_plus(str(value))
param_pairs.append('%s=%s' % (quoted_key, quoted_value))
return '&'.join(param_pairs)
def _get_relative_path(self):
"""Returns the path with the query parameters escaped and appended."""
param_string = self._get_query_string()
if self.path is None:
path = '/'
else:
path = self.path
if param_string:
return '?'.join([path, param_string])
else:
return path
def _to_string(self):
if self.scheme is None and self.port == 443:
scheme = 'https'
elif self.scheme is None:
scheme = 'http'
else:
scheme = self.scheme
if self.path is None:
path = '/'
else:
path = self.path
if self.port is None:
return '%s://%s%s' % (scheme, self.host, self._get_relative_path())
else:
return '%s://%s:%s%s' % (scheme, self.host, str(self.port),
self._get_relative_path())
def __str__(self):
return self._to_string()
def modify_request(self, http_request=None):
"""Sets HTTP request components based on the URI."""
if http_request is None:
http_request = HttpRequest()
if http_request.uri is None:
http_request.uri = Uri()
# Determine the correct scheme.
if self.scheme:
http_request.uri.scheme = self.scheme
if self.port:
http_request.uri.port = self.port
if self.host:
http_request.uri.host = self.host
# Set the relative uri path
if self.path:
http_request.uri.path = self.path
if self.query:
http_request.uri.query = self.query.copy()
return http_request
ModifyRequest = modify_request
def parse_uri(uri_string):
"""Creates a Uri object which corresponds to the URI string.
This method can accept partial URIs, but it will leave missing
members of the Uri unset.
"""
parts = urlparse(uri_string)
uri = Uri()
if parts[0]:
uri.scheme = parts[0]
if parts[1]:
host_parts = parts[1].split(':')
if host_parts[0]:
uri.host = host_parts[0]
if len(host_parts) > 1:
uri.port = int(host_parts[1])
if parts[2]:
uri.path = parts[2]
if parts[4]:
param_pairs = parts[4].split('&')
for pair in param_pairs:
pair_parts = pair.split('=')
if len(pair_parts) > 1:
uri.query[urllib.parse.unquote_plus(pair_parts[0])] = (
urllib.parse.unquote_plus(pair_parts[1]))
elif len(pair_parts) == 1:
uri.query[urllib.parse.unquote_plus(pair_parts[0])] = None
return uri
parse_uri = staticmethod(parse_uri)
ParseUri = parse_uri
parse_uri = Uri.parse_uri
ParseUri = Uri.parse_uri
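# Illustrative sketch, not part of the original module: Uri.parse_uri splits a
# URL string into scheme, host, port, path and query, and str() reassembles it
# with the query parameters re-escaped.  The address is an example value.
def _example_parse_uri():
  uri = Uri.parse_uri('http://example.com/feeds/default?alt=json&q=test')
  # uri.scheme == 'http', uri.host == 'example.com',
  # uri.path == '/feeds/default', uri.query == {'alt': 'json', 'q': 'test'}
  return str(uri)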
class HttpResponse(object):
status = None
reason = None
_body = None
def __init__(self, status=None, reason=None, headers=None, body=None):
self._headers = headers or {}
if status is not None:
self.status = status
if reason is not None:
self.reason = reason
if body is not None:
if hasattr(body, 'read'):
self._body = body
else:
self._body = StringIO.StringIO(body)
def getheader(self, name, default=None):
if name in self._headers:
return self._headers[name]
else:
return default
def getheaders(self):
return self._headers
def read(self, amt=None):
if self._body is None:
return None
if not amt:
return self._body.read()
else:
return self._body.read(amt)
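# Illustrative sketch (editor's addition): a canned HttpResponse as built by
# the class above. String bodies are wrapped in a file-like object, so read()
# behaves the same whether the body came from a socket or from a test fixture.
def _example_canned_response():
    response = HttpResponse(status=200, reason='OK',
                            headers={'Content-Type': 'text/plain'},
                            body='hello world')
    assert response.getheader('Content-Type') == 'text/plain'
    assert response.read(5) == 'hello'
    assert response.read() == ' world'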
def _dump_response(http_response):
"""Converts to a string for printing debug messages.
Does not read the body since that may consume the content.
"""
output = 'HttpResponse\n status: %s\n reason: %s\n headers:' % (
http_response.status, http_response.reason)
headers = get_headers(http_response)
if isinstance(headers, dict):
for header, value in headers.items():
output += ' %s: %s\n' % (header, value)
else:
for pair in headers:
output += ' %s: %s\n' % (pair[0], pair[1])
return output
class HttpClient(object):
"""Performs HTTP requests using httplib."""
debug = None
def request(self, http_request):
return self._http_request(http_request.method, http_request.uri,
http_request.headers, http_request._body_parts)
Request = request
def _get_connection(self, uri, headers=None):
"""Opens a socket connection to the server to set up an HTTP request.
Args:
uri: The full URL for the request as a Uri object.
headers: A dict of string pairs containing the HTTP headers for the
request.
"""
connection = None
if uri.scheme == 'https':
if not uri.port:
connection = httplib.HTTPSConnection(uri.host)
else:
connection = httplib.HTTPSConnection(uri.host, int(uri.port))
else:
if not uri.port:
connection = httplib.HTTPConnection(uri.host)
else:
connection = httplib.HTTPConnection(uri.host, int(uri.port))
return connection
def _http_request(self, method, uri, headers=None, body_parts=None):
"""Makes an HTTP request using httplib.
Args:
method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
uri: str or atom.http_core.Uri
headers: dict of strings mapping to strings which will be sent as HTTP
headers in the request.
body_parts: list of strings, objects with a read method, or objects
which can be converted to strings using str. Each of these
will be sent in order as the body of the HTTP request.
"""
if isinstance(uri, str):
uri = Uri.parse_uri(uri)
connection = self._get_connection(uri, headers=headers)
if self.debug:
connection.debuglevel = 1
if connection.host != uri.host:
connection.putrequest(method, str(uri))
else:
connection.putrequest(method, uri._get_relative_path())
# Work around a bug in Python 2.4 and 2.5 where
# httplib.HTTPConnection.putrequest adds the
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', which results in the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % uri.host
replacement_header_line = 'Host: %s' % uri.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name, value in headers.items():
connection.putheader(header_name, value)
connection.endheaders()
# If there is data, send it in the request.
if body_parts and [x for x in body_parts if x != '']:
for part in body_parts:
_send_data_part(part, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _send_data_part(data, connection):
if isinstance(data, str):
# I might want to just allow str, not unicode.
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
class ProxiedHttpClient(HttpClient):
def _get_connection(self, uri, headers=None):
# Check to see if there are proxy settings required for this request.
proxy = None
if uri.scheme == 'https':
proxy = os.environ.get('https_proxy')
elif uri.scheme == 'http':
proxy = os.environ.get('http_proxy')
if not proxy:
return HttpClient._get_connection(self, uri, headers=headers)
# Now we have the URL of the appropriate proxy server.
# Get a username and password for the proxy if required.
proxy_auth = _get_proxy_auth()
if uri.scheme == 'https':
import socket
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = uri.port
if not port:
port = 443
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port)
# Set the user agent to send to the proxy
user_agent = ''
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_uri.host, int(proxy_uri.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
sslobj = None
if ssl is not None:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
sock_ssl = socket.ssl(p_sock, None, None)
sslobj = httplib.FakeSocket(p_sock, sock_ssl)
# Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(proxy_uri.host)
connection.sock = sslobj
return connection
elif uri.scheme == 'http':
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port))
return None
def _get_proxy_auth():
import base64
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.b64encode('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
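# Illustrative sketch (editor's addition): how the Proxy-Authorization value
# returned by _get_proxy_auth() above is composed. The credentials are made-up
# examples; note that base64.b64encode operates on bytes, so they are encoded
# and decoded explicitly here.
def _example_proxy_auth_header():
    import base64
    token = base64.b64encode('proxyuser:proxypass'.encode('utf-8')).decode('ascii')
    return 'Basic %s' % token  # 'Basic cHJveHl1c2VyOnByb3h5cGFzcw=='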
|
|
"""Home Assistant auth provider."""
import asyncio
import base64
from collections import OrderedDict
import logging
from typing import Any, Dict, List, Optional, Set, cast
import bcrypt
import voluptuous as vol
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
STORAGE_VERSION = 1
STORAGE_KEY = "auth_provider.homeassistant"
def _disallow_id(conf: Dict[str, Any]) -> Dict[str, Any]:
"""Disallow ID in config."""
if CONF_ID in conf:
raise vol.Invalid("ID is not allowed for the homeassistant auth provider.")
return conf
CONFIG_SCHEMA = vol.All(AUTH_PROVIDER_SCHEMA, _disallow_id)
class InvalidAuth(HomeAssistantError):
"""Raised when we encounter invalid authentication."""
class InvalidUser(HomeAssistantError):
"""Raised when invalid user is specified.
Will not be raised when validating authentication.
"""
class Data:
"""Hold the user data."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the user data store."""
self.hass = hass
self._store = hass.helpers.storage.Store(
STORAGE_VERSION, STORAGE_KEY, private=True
)
self._data: Optional[Dict[str, Any]] = None
# Legacy mode will allow usernames to start/end with whitespace
# and will compare usernames case-insensitive.
# Remove in 2020 or when we launch 1.0.
self.is_legacy = False
@callback
def normalize_username(self, username: str) -> str:
"""Normalize a username based on the mode."""
if self.is_legacy:
return username
return username.strip().casefold()
async def async_load(self) -> None:
"""Load stored data."""
data = await self._store.async_load()
if data is None:
data = {"users": []}
seen: Set[str] = set()
for user in data["users"]:
username = user["username"]
# check if we have duplicates
folded = username.casefold()
if folded in seen:
self.is_legacy = True
logging.getLogger(__name__).warning(
"Home Assistant auth provider is running in legacy mode "
"because we detected usernames that are case-insensitive"
"equivalent. Please change the username: '%s'.",
username,
)
break
seen.add(folded)
# check if we have unstripped usernames
if username != username.strip():
self.is_legacy = True
logging.getLogger(__name__).warning(
"Home Assistant auth provider is running in legacy mode "
"because we detected usernames that start or end in a "
"space. Please change the username: '%s'.",
username,
)
break
self._data = data
@property
def users(self) -> List[Dict[str, str]]:
"""Return users."""
return self._data["users"] # type: ignore
def validate_login(self, username: str, password: str) -> None:
"""Validate a username and password.
Raises InvalidAuth if auth invalid.
"""
username = self.normalize_username(username)
dummy = b"$2b$12$CiuFGszHx9eNHxPuQcwBWez4CwDTOcLTX5CbOpV6gef2nYuXkY7BO"
found = None
# Compare all users to avoid timing attacks.
for user in self.users:
if self.normalize_username(user["username"]) == username:
found = user
if found is None:
# check a hash to make timing the same as if user was found
bcrypt.checkpw(b"foo", dummy)
raise InvalidAuth
user_hash = base64.b64decode(found["password"])
# bcrypt.checkpw is timing-safe
if not bcrypt.checkpw(password.encode(), user_hash):
raise InvalidAuth
# pylint: disable=no-self-use
def hash_password(self, password: str, for_storage: bool = False) -> bytes:
"""Encode a password."""
hashed: bytes = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12))
if for_storage:
hashed = base64.b64encode(hashed)
return hashed
def add_auth(self, username: str, password: str) -> None:
"""Add a new authenticated user/pass."""
username = self.normalize_username(username)
if any(
self.normalize_username(user["username"]) == username for user in self.users
):
raise InvalidUser
self.users.append(
{
"username": username,
"password": self.hash_password(password, True).decode(),
}
)
@callback
def async_remove_auth(self, username: str) -> None:
"""Remove authentication."""
username = self.normalize_username(username)
index = None
for i, user in enumerate(self.users):
if self.normalize_username(user["username"]) == username:
index = i
break
if index is None:
raise InvalidUser
self.users.pop(index)
def change_password(self, username: str, new_password: str) -> None:
"""Update the password.
Raises InvalidUser if user cannot be found.
"""
username = self.normalize_username(username)
for user in self.users:
if self.normalize_username(user["username"]) == username:
user["password"] = self.hash_password(new_password, True).decode()
break
else:
raise InvalidUser
async def async_save(self) -> None:
"""Save data."""
await self._store.async_save(self._data)
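# Illustrative sketch (editor's addition, not part of Home Assistant): the same
# round trip that Data.hash_password() and Data.validate_login() perform -
# bcrypt hash, base64 encoding for storage, then a timing-safe check.
def _example_password_roundtrip(password: str) -> bool:
    hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt(rounds=12))
    stored = base64.b64encode(hashed).decode()  # what gets written to storage
    user_hash = base64.b64decode(stored)        # what validate_login decodes
    return bcrypt.checkpw(password.encode(), user_hash)  # True for the right password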
@AUTH_PROVIDERS.register("homeassistant")
class HassAuthProvider(AuthProvider):
"""Auth provider based on a local storage of users in HASS config dir."""
DEFAULT_TITLE = "Home Assistant Local"
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize an Home Assistant auth provider."""
super().__init__(*args, **kwargs)
self.data: Optional[Data] = None
self._init_lock = asyncio.Lock()
async def async_initialize(self) -> None:
"""Initialize the auth provider."""
async with self._init_lock:
if self.data is not None:
return
data = Data(self.hass)
await data.async_load()
self.data = data
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
"""Return a flow to login."""
return HassLoginFlow(self)
async def async_validate_login(self, username: str, password: str) -> None:
"""Validate a username and password."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
await self.hass.async_add_executor_job(
self.data.validate_login, username, password
)
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
norm_username = self.data.normalize_username
username = norm_username(flow_result["username"])
for credential in await self.async_credentials():
if norm_username(credential.data["username"]) == username:
return credential
# Create new credentials.
return self.async_create_credentials({"username": username})
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Get extra info for this credential."""
return UserMeta(name=credentials.data["username"], is_active=True)
async def async_will_remove_credentials(self, credentials: Credentials) -> None:
"""When credentials get removed, also remove the auth."""
if self.data is None:
await self.async_initialize()
assert self.data is not None
try:
self.data.async_remove_auth(credentials.data["username"])
await self.data.async_save()
except InvalidUser:
# Can happen if somehow we didn't clean up a credential
pass
class HassLoginFlow(LoginFlow):
"""Handler for the login flow."""
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the step of the form."""
errors = {}
if user_input is not None:
try:
await cast(HassAuthProvider, self._auth_provider).async_validate_login(
user_input["username"], user_input["password"]
)
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
user_input.pop("password")
return await self.async_finish(user_input)
schema: Dict[str, type] = OrderedDict()
schema["username"] = str
schema["password"] = str
return self.async_show_form(
step_id="init", data_schema=vol.Schema(schema), errors=errors
)
|
|
from scanner_engine.utils.redirection.utils import run_redirection_scan
from scanner_engine.utils.watcher.utils import run_watcher_scan
from register_site.models import RedirectionsIndex, EntriesIndex, WatchersIndex
from scanner_engine.models import RedirectionScanResult
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import api_view
from ember_api.serializers import RedirectionSerializer, EntriesIndexSerializer, RedirectionScanResultSerializer
from register_site.utils import parse_domain_name
from django.contrib import admin
admin.autodiscover()
from rest_framework import permissions, routers, serializers, viewsets
from oauth2_provider.ext.rest_framework import TokenHasReadWriteScope, TokenHasScope
from oauth2_provider.decorators import protected_resource
class ScannerEngineViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows viewing redirections.
"""
permission_classes = [permissions.IsAuthenticated]
resource_name = 'redirection'
serializer_class = RedirectionSerializer
queryset = RedirectionsIndex.objects.all()
def get_queryset(self):
user = self.request.user
return RedirectionsIndex.objects.filter(entry__owner_username=user.username)
class EntriesIndexViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows viewing entries.
"""
permission_classes = [permissions.IsAuthenticated]
resource_name = 'entry'
queryset = EntriesIndex.objects.all()
serializer_class = EntriesIndexSerializer
def get_queryset(self):
user = self.request.user
return EntriesIndex.objects.filter(owner_username=user.username)
class RedirectionScanViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows viewing redirection scans.
"""
permission_classes = [permissions.IsAuthenticated]
resource_name = 'redirection-scan'
queryset = RedirectionScanResult.objects.all()
serializer_class = RedirectionScanResultSerializer
def get_queryset(self):
user = self.request.user
return RedirectionScanResult.objects.filter(redirection__entry__owner_username=user.username)
@api_view(['POST'])
@protected_resource()
def UpdateScan(request):
"""
API endpoint that allows updating the entry with a given id.
Responds with JSON.
"""
entry_id = None
is_success = True
message = ''
if 'id' in request.POST:
entry_id = request.POST['id']
entry = EntriesIndex.objects.get(id=entry_id)
if entry.has_watcher():
if len(WatchersIndex.objects.filter(entry=entry)) == 1:
run_watcher_scan(WatchersIndex.objects.get(entry=entry))
else:
is_success = False
message = 'Watcher scan error!'
return Response({
'success': is_success,
'error': not is_success,
'message': message,
'id': entry_id
})
if entry.has_redirections():
if len(RedirectionsIndex.objects.filter(entry=entry)) != 0:
for redirection in RedirectionsIndex.objects.filter(entry=entry):
run_redirection_scan(redirection, number_of_proxies_to_use=1)
else:
entry.redirections_exists = 0
entry.save()
is_success = False
message = 'Redirection scan error!'
return Response({
'success': is_success,
'error': not is_success,
'message': message,
'id': entry_id
})
message = 'Entry with id ' + entry_id + ' updated!'
else:
is_success = False
message = 'Entry id not given!'
return Response({
'success': is_success,
'error': not is_success,
'message': message,
'id': entry_id
})
@api_view(['POST'])
@protected_resource() # TODO delete only your own entries
def delete_entry(request):
"""
API endpoint that allows deleting the entry with a given id.
Responds with JSON.
"""
entry_id = None
is_success = True
message = ''
if 'id' in request.POST:
entry_id = request.POST['id']
if len(EntriesIndex.objects.filter(id=entry_id)) > 0:
entry = EntriesIndex.objects.get(id=entry_id)
entry.delete()
message = 'Entry has been deleted.'
else:
is_success = False
message = 'Entry with given id not found.'
return Response({
'success': is_success,
'error': not is_success,
'message': message,
'id': entry_id
})
else:
is_success = False
message = 'Entry id not given!'
return Response({
'success': is_success,
'error': not is_success,
'message': message,
'id': entry_id
})
@api_view(['POST'])
@protected_resource()
def register_redirection(request):
"""
API endpoint that allows adding a new redirection to the database.
Responds with JSON.
"""
alias = None
base_url = None
target_url = None
redirection_code = None
is_success = True
message = ''
if 'base_url' in request.POST and 'target_url' in request.POST:
new_entry = EntriesIndex()
new_entry.url = request.POST['base_url']
new_entry.owner_username = request.user.username
if 'alias' not in request.POST:
new_entry.alias = parse_domain_name(new_entry.url)
else:
new_entry.alias = request.POST['alias']
new_entry.save()
new_redirection = RedirectionsIndex()
new_redirection.entry = new_entry
if 'code' in request.POST:
new_redirection.status_code = request.POST['code']
else:
new_redirection.status_code = 301
new_redirection.base_url = new_entry.url
new_redirection.target_url = request.POST['target_url']
new_redirection.save()
new_entry.redirections_exists = 1
new_entry.save()
"""
Scan the newly added entry so there is at least one scan in the DB.
This is necessary to display templates correctly.
"""
run_redirection_scan(new_redirection, number_of_proxies_to_use=1)
message = 'Redirection registered.'
else:
is_success = False
message = 'Missing some arguments.'
return Response({
'success': is_success,
'error': not is_success,
'message': message
})
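# Illustrative sketch (editor's addition): the kind of POST the
# register_redirection endpoint above expects. The URL path and token are
# placeholders (the real routes live in urls.py, which is not shown here);
# the view requires 'base_url' and 'target_url' and optionally accepts
# 'alias' and 'code'.
def _example_register_redirection_call(api_root, access_token):
    import requests
    response = requests.post(
        api_root + '/register_redirection/',  # hypothetical path
        headers={'Authorization': 'Bearer %s' % access_token},
        data={
            'base_url': 'http://example.com/old-page',
            'target_url': 'http://example.com/new-page',
            'code': 301,  # defaults to 301 when omitted
        },
    )
    return response.json()  # {'success': ..., 'error': ..., 'message': ..., ...}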
@api_view(['GET'])
@protected_resource()
def get_user_info(request):
user = request.user
return Response({
'id': user.id,
'username': user.username,
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
'last_login': user.last_login,
'date_joined': user.date_joined,
'is_superuser': user.is_superuser
})
|
|
'''
With this recipe, you can control pan/tilt, zoom, focus and presets of a Sony VISCA color video camera (VISCA over IP via UDP).
'''
# <!-- parameters
param_disabled = Parameter({'desc': 'Disables this node?', 'schema': {'type': 'boolean'}})
param_ipAddress = Parameter({'schema': {'type': 'string'}})
_port = 52381
param_port = Parameter({'schema': {'type': 'integer', 'hint': '(default is %s)' % _port}})
_viscaAddress = 1
param_viscaAddress = Parameter({'schema': {'type': 'integer', 'hint': '(default is %s)' % _viscaAddress}})
def main():
if not param_ipAddress:
console.warn('IP address not configured')
return
if param_port: # 0 is not allowed here
global _port
_port = param_port
if param_viscaAddress != None: # 0 is allowed here
global _viscaAddress
_viscaAddress = param_viscaAddress
target = "%s:%s" % (param_ipAddress, _port)
console.info('Will connect to [%s]' % target)
udp.setDest(target)
resetSequenceNo()
def udp_received(src, data):
log(2, 'udp_recv %s (from %s)' % (':'.join([b.encode('hex') for b in data]), src))
def udp_sent(data):
log(1, 'udp_sent %s' % ':'.join([b.encode('hex') for b in data]))
udp = UDP(sent=udp_sent,
ready=lambda: console.info('udp_ready'),
received=udp_received)
def get_command_string(cmd_type, visca_addr, seq_number, data=None):
def address_to_hex(addr_number):
return chr(0x80 + addr_number)
def seq_to_hex(seq_number):
hex_str = ''
hex_str += chr(seq_number >> 24 & 0xff)
hex_str += chr(seq_number >> 16 & 0xff)
hex_str += chr(seq_number >> 8 & 0xff)
hex_str += chr(seq_number & 0xff)
return hex_str
def number_to_hex(number):
return chr(int(number))
def payload_len_to_hex(payload):
payload_len = len(payload)
hex_str = ''
hex_str += chr(payload_len >> 8 & 0xff)
hex_str += chr(payload_len & 0xff)
return hex_str
msg_header = None
msg_payload = None
pan_speed = local_event_PanSpeed.getArg()
tilt_speed = local_event_TiltSpeed.getArg()
if cmd_type == 'up':
msg_payload = address_to_hex(visca_addr) + '\x01\x06\x01' + chr(pan_speed) + chr(tilt_speed) + '\x03\x01' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'down':
msg_payload = address_to_hex(visca_addr) + '\x01\x06\x01' + chr(pan_speed) + chr(tilt_speed) + '\x03\x02' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'left':
msg_payload = address_to_hex(visca_addr) + '\x01\x06\x01' + chr(pan_speed) + chr(tilt_speed) + '\x01\x03' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'right':
msg_payload = address_to_hex(visca_addr) + '\x01\x06\x01' + chr(pan_speed) + chr(tilt_speed) + '\x02\x03' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'home':
msg_payload = address_to_hex(visca_addr) + '\x01\x06\x04' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'stop':
msg_payload = address_to_hex(visca_addr) + '\x01\x06\x01\x05\x05\x03\x03' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'reset_seq':
msg_payload = '\x01'
msg_header = '\x02\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'preset_reset':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x3f\x00' + number_to_hex(data) + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'preset_set':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x3f\x01' + number_to_hex(data) + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'preset_recall':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x3f\x02' + number_to_hex(data) + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'zoom_stop':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x07\x00' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'zoom_tele': # Standard
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x07\x02' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'zoom_wide': # Standard
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x07\x03' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'focus_auto':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x38\x02' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'focus_manual':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x38\x03' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'focus_stop':
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x08\x00' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'focus_far': # Standard
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x08\x02' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
elif cmd_type == 'focus_near': # Standard
msg_payload = address_to_hex(visca_addr) + '\x01\x04\x08\x03' + '\xff'
msg_header = '\x01\x00' + payload_len_to_hex(msg_payload) + seq_to_hex(seq_number)
else:
raise Exception('Unsupported command type')
return msg_header + msg_payload
# -->
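# <!-- example (editor's addition)
# Illustrative sketch, standalone and not used by the recipe: mirrors the
# framing that get_command_string() applies for the Pan-Tilt "home" command on
# VISCA address 1 with sequence number 1, so the byte layout can be inspected
# outside the Nodel runtime.
def _example_home_command_bytes():
    visca_addr = 1
    seq_number = 1
    payload = chr(0x80 + visca_addr) + '\x01\x06\x04' + '\xff'  # 81 01 06 04 FF
    header = ('\x01\x00'                                        # VISCA command payload type
              + chr(len(payload) >> 8 & 0xff) + chr(len(payload) & 0xff)  # payload length
              + chr(seq_number >> 24 & 0xff) + chr(seq_number >> 16 & 0xff)
              + chr(seq_number >> 8 & 0xff) + chr(seq_number & 0xff))     # sequence number
    return header + payload  # 01 00 00 05 00 00 00 01 81 01 06 04 FF
# -->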
# <!-- actions
def resetSequenceNo():
console.log('[resetSequenceNo] called')
ctrlCmd_reset_seq = get_command_string('reset_seq', _viscaAddress, next_seq() + 20000)
udp.send(ctrlCmd_reset_seq)
# -- drive related --
INIT_PAN_SPEED = 5 # initial values
INIT_TILT_SPEED = 5
local_event_PanSpeed = LocalEvent({ 'group': 'PTZ Drive', 'title': 'Pan Speed', 'schema': { 'type': 'integer', 'format': 'range', 'min': 1, 'max': 24 }, 'order': next_seq() })
local_event_TiltSpeed = LocalEvent({ 'group': 'PTZ Drive', 'title': 'Tilt Speed', 'schema': { 'type': 'integer', 'format': 'range', 'min': 1, 'max': 24 }, 'order': next_seq() })
@before_main
def initPanAndTiltSpeeds():
panSpeedArg = local_event_PanSpeed.getArg()
if panSpeedArg < 1 or panSpeedArg > 24:
local_event_PanSpeed.emit(INIT_PAN_SPEED)
tiltSpeedArg = local_event_TiltSpeed.getArg()
if tiltSpeedArg < 1 or tiltSpeedArg > 24:
local_event_TiltSpeed.emit(INIT_TILT_SPEED)
@local_action({'group': 'PTZ Drive', 'title': 'Pan Speed', 'schema': { 'type': 'integer', 'hint': '(default: 5, Min: 1, Max: 24)', 'format': 'range', 'min': 1, 'max': 24 }, 'order': next_seq() })
def PanSpeed(arg):
if arg < 1 or arg > 24:
return console.warn('[set_pan_speed] bad arg - %s' % arg)
iArg = int(arg)
console.log('[set_pan_speed] %s' % iArg)
local_event_PanSpeed.emit(iArg)
@local_action({'group': 'PTZ Drive', 'title': 'Tilt Speed', 'schema': { 'type': 'integer', 'hint': '(default: 5, Min: 1, Max: 24)', 'format': 'range', 'min': 1, 'max': 24}, 'order': next_seq() })
def TiltSpeed(arg):
if arg < 1 or arg > 24:
return console.warn('[set_tilt_speed] bad arg - %s' % arg)
iArg = int(arg)
console.log('[set_tilt_speed] %s' % iArg)
local_event_TiltSpeed.emit(iArg)
@local_action({'group': 'PTZ Drive', 'title': 'Home', 'order': next_seq()})
def ptz_home(ignore):
console.log('[ptz_home] called')
inquery_ptdHome = get_command_string('home', _viscaAddress, next_seq() + 20000)
udp.send(inquery_ptdHome)
@local_action({'group': 'PTZ Drive', 'title': 'Up', 'order': next_seq()})
def ptz_up(data):
console.log('[ptz_up] called')
inquery_ptdUp = get_command_string('up', _viscaAddress, next_seq() + 20000)
udp.send(inquery_ptdUp)
@local_action({'group': 'PTZ Drive', 'title': 'Down', 'order': next_seq()})
def ptz_down(data):
console.log('[ptz_down] called')
inquery_ptdDown = get_command_string('down', _viscaAddress, next_seq() + 20000)
udp.send(inquery_ptdDown)
@local_action({'group': 'PTZ Drive', 'title': 'Left', 'order': next_seq()})
def ptz_left(data):
console.log('[ptz_left] called')
inquery_ptdLeft = get_command_string('left', _viscaAddress, next_seq() + 20000)
udp.send(inquery_ptdLeft)
@local_action({'group': 'PTZ Drive', 'title': 'Right', 'order': next_seq()})
def ptz_right(data):
console.log('[ptz_right] called')
inquery_ptdRight = get_command_string('right', _viscaAddress, next_seq() + 20000)
udp.send(inquery_ptdRight)
@local_action({'group': 'PTZ Drive', 'title': 'Stop', 'order': next_seq()})
def ptz_stop(data):
console.log('[ptz_stop] called')
inquery_ptdStop = get_command_string('stop', _viscaAddress, next_seq() + 20000)
udp.send(inquery_ptdStop)
# -- preset related --
@local_action({'group': 'PTZ Preset', 'title': 'Preset Reset', 'order': next_seq(), 'schema': {'type': 'integer'}})
def ptz_preset_reset(data):
console.log('[ptz_preset_reset] called')
inquery_presetReset = get_command_string('preset_reset', _viscaAddress, next_seq() + 20000, data)
udp.send(inquery_presetReset)
@local_action({'group': 'PTZ Preset', 'title': 'Preset Set', 'order': next_seq(), 'schema': {'type': 'integer'}})
def ptz_preset_set(data):
console.log('[ptz_preset_set] called')
inquery_presetSet = get_command_string('preset_set', _viscaAddress, next_seq() + 20000, data)
udp.send(inquery_presetSet)
@local_action({'group': 'PTZ Preset', 'title': 'Preset Recall', 'order': next_seq(), 'schema': {'type': 'integer'}})
def ptz_preset_recall(arg):
console.log('[ptz_preset_recall] called')
inquery_presetRecall = get_command_string('preset_recall', _viscaAddress, next_seq() + 20000, arg)
udp.send(inquery_presetRecall)
# -- Zoom related --
@local_action({'group': 'PTZ Zoom', 'title': 'Zoom Stop', 'order': next_seq()})
def ptz_zoom_stop(arg):
console.log('[ptz_zoom_stop] called')
inquery_zoomStop = get_command_string('zoom_stop', _viscaAddress, next_seq() + 20000)
udp.send(inquery_zoomStop)
@local_action({'group': 'PTZ Zoom', 'title': 'Zoom Tele', 'order': next_seq()})
def ptz_zoom_tele(arg):
console.log('[ptz_zoom_tele] called')
inquery_zoomTele = get_command_string('zoom_tele', _viscaAddress, next_seq() + 20000)
udp.send(inquery_zoomTele)
@local_action({'group': 'PTZ Zoom', 'title': 'Zoom Wide', 'order': next_seq()})
def ptz_zoom_wide(arg):
console.log('[ptz_zoom_wide] called')
inquery_zoomWide = get_command_string('zoom_wide', _viscaAddress, next_seq() + 20000)
udp.send(inquery_zoomWide)
# -- Focus related --
le_Focus_Mode = create_local_event(
'Focus Mode',
metadata={
'title': 'Focus Mode',
'group': 'PTZ Focus',
'order': next_seq(),
'schema': {
'type': 'string'
}
}
)
@local_action({'group': 'PTZ Focus', 'title': 'Focus Mode - Auto', 'order': next_seq()})
def ptz_focus_mode_auto(arg):
console.log('[ptz_focus_mode_auto] called')
inquery_focusModeAuto = get_command_string('focus_auto', _viscaAddress, next_seq() + 20000)
udp.send(inquery_focusModeAuto)
le_Focus_Mode.emit('AUTO')
@local_action({'group': 'PTZ Focus', 'title': 'Focus Mode - Manual', 'order': next_seq()})
def ptz_focus_mode_manual(arg):
console.log('[ptz_focus_mode_manual] called')
inquery_focusModeManual = get_command_string('focus_manual', _viscaAddress, next_seq() + 20000)
udp.send(inquery_focusModeManual)
le_Focus_Mode.emit('MANUAL')
@local_action({'group': 'PTZ Focus', 'title': 'Focus - Stop', 'order': next_seq()})
def ptz_focus_stop(arg):
console.log('[ptz_focus_stop] called')
inquery_focusStop = get_command_string('focus_stop', _viscaAddress, next_seq() + 20000)
udp.send(inquery_focusStop)
@local_action({'group': 'PTZ Focus', 'title': 'Focus - Far', 'order': next_seq()})
def ptz_focus_far(arg):
console.log('[ptz_focus_far] called')
inquery_focusFar = get_command_string('focus_far', _viscaAddress, next_seq() + 20000)
udp.send(inquery_focusFar)
@local_action({'group': 'PTZ Focus', 'title': 'Focus - Near', 'order': next_seq()})
def ptz_focus_near(arg):
console.log('[ptz_focus_near] called')
inquery_focusNear = get_command_string('focus_near', _viscaAddress, next_seq() + 20000)
udp.send(inquery_focusNear)
@local_action({'group': 'Status', 'order': next_seq()})
def httpPoll():
# look for this token in the result to be sure
TOKEN = 'birddog_p200.png'
url = 'http://%s/login' % param_ipAddress
try:
log(2, 'httpPoll %s' % url)
resp = get_url(url, connectTimeout=5)
if TOKEN not in resp:
console.warn('unexpected response! did not find token [%s] in response from %s' % (TOKEN, url))
return
global _lastReceive
_lastReceive = system_clock()
except:
log(1, 'problem polling %s' % url)
timer_poller = Timer(lambda: httpPoll.call(), 30, 5) # every 30s, first after 5
# -->
# <!-- status
local_event_Status = LocalEvent({'title': 'Status', 'group': 'Status', 'order': 9990, "schema": { 'title': 'Status', 'type': 'object', 'properties': {
'level': {'title': 'Level', 'order': 1, 'type': 'integer'},
'message': {'title': 'Message', 'order': 2, 'type': 'string'}
} } })
_lastReceive = 0 # last valid comms, system_clock() based
# roughly, the last contact
local_event_LastContactDetect = LocalEvent({'group': 'Status', 'title': 'Last contact detect', 'schema': {'type': 'string'}})
def statusCheck():
diff = (system_clock() - _lastReceive)/1000.0 # (in secs)
now = date_now()
if diff > status_check_interval+15:
previousContactValue = local_event_LastContactDetect.getArg()
if previousContactValue == None: message = 'Never seen'
else:
previousContact = date_parse(previousContactValue)
message = 'Missing %s' % formatPeriod(previousContact)
local_event_Status.emit({'level': 2, 'message': message})
return
local_event_Status.emit({'level': 0, 'message': 'OK'})
local_event_LastContactDetect.emit(str(now))
status_check_interval = 75
timer_statusCheck = Timer(statusCheck, status_check_interval)
def formatPeriod(dateObj):
if dateObj == None: return 'for unknown period'
now = date_now()
diff = (now.getMillis() - dateObj.getMillis()) / 1000 / 60 # in mins
if diff == 0: return 'for <1 min'
elif diff < 60: return 'for <%s mins' % diff
elif diff < 60*24: return 'since %s' % dateObj.toString('h:mm:ss a')
else: return 'since %s' % dateObj.toString('E d-MMM h:mm:ss a')
# status -->
# <!-- logging
local_event_LogLevel = LocalEvent({
'group': 'Debug',
'order': 10000 + next_seq(),
'desc': 'Use this to ramp up the logging (with indentation)',
'schema': {'type': 'integer'}
})
def warn(level, msg):
if local_event_LogLevel.getArg() >= level:
console.warn((' ' * level) + msg)
def log(level, msg):
if local_event_LogLevel.getArg() >= level:
console.log((' ' * level) + msg)
# -->
|
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
def ensure_experimental(test_fn):
"""Decorator for running once with and once without the --use-experimental-runner flag.
"""
def wrapper(self, *args, **kwargs):
JunitTestsConcurrencyIntegrationTest.USE_EXPERIMENTAL_RUNNER = True
test_fn(self, *args, **kwargs)
JunitTestsConcurrencyIntegrationTest.USE_EXPERIMENTAL_RUNNER = False
test_fn(self, *args, **kwargs)
return wrapper
class JunitTestsConcurrencyIntegrationTest(PantsRunIntegrationTest):
"""Run tests with different concurrency settings.
Note that each of these tests is intended to be annotated with @ensure_experimental and run twice,
once with the experimental runner enabled and once with it off.
"""
USE_EXPERIMENTAL_RUNNER = False
def run_pants_with_workdir(self, command, workdir, **kwargs):
if self.USE_EXPERIMENTAL_RUNNER:
command.append("--test-junit-use-experimental-runner")
return super(JunitTestsConcurrencyIntegrationTest, self).run_pants_with_workdir(
command, workdir, **kwargs)
@ensure_experimental
def test_parallel_target(self):
"""Checks the 'concurrency=parallel_classes' setting in the junit_tests() target"""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'testprojects/tests/java/org/pantsbuild/testproject/parallel'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (2 tests)", pants_run.stdout_data)
@ensure_experimental
def test_parallel_cmdline(self):
"""Checks the --test-junit-default-concurrency=PARALLEL_CLASSES option."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=PARALLEL_CLASSES',
'--test-junit-parallel-threads=2',
'testprojects/tests/java/org/pantsbuild/testproject/parallel:cmdline'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (2 tests)", pants_run.stdout_data)
# TODO(zundel): remove this test when --default-parallel is removed
@ensure_experimental
def test_parallel_cmdline_deprecated_arg(self):
"""Checks the --test-junit-default-parallel option still works."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-parallel',
'--test-junit-parallel-threads=2',
'testprojects/tests/java/org/pantsbuild/testproject/parallel:cmdline'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (2 tests)", pants_run.stdout_data)
@ensure_experimental
def test_concurrency_serial_default(self):
"""Checks the --test-junit-default-concurrency=SERIAL option."""
with self.temporary_workdir() as workdir:
# NB(zundel): the timeout for each test in ParallelMethodsDefaultParallel tests is
# currently set to 3 seconds, making this test take about 2 seconds to run due
# to (1 timeout failure)
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=SERIAL',
'--test-junit-parallel-threads=2',
'testprojects/tests/java/org/pantsbuild/testproject/parallel:cmdline'
], workdir)
self.assert_failure(pants_run)
# It's not deterministic which test will fail, but one of them should time out
self.assertIn("Tests run: 2, Failures: 1", pants_run.stdout_data)
@ensure_experimental
def test_parallel_annotated_test_parallel(self):
"""Checks the @TestParallel annotation."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=SERIAL',
'testprojects/tests/java/org/pantsbuild/testproject/parallel:annotated-parallel'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (2 tests)", pants_run.stdout_data)
@ensure_experimental
def test_parallel_annotated_test_serial(self):
"""Checks the @TestSerial annotation."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=PARALLEL_CLASSES',
'--test-junit-parallel-threads=2',
'testprojects/tests/java/org/pantsbuild/testproject/parallel:annotated-serial'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (2 tests)", pants_run.stdout_data)
@ensure_experimental
def test_parallel_both(self):
"""Checks the concurency='parallel_both' setting."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=SERIAL',
'--test-junit-parallel-threads=4',
'testprojects/tests/java/org/pantsbuild/testproject/parallelclassesandmethods'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (4 tests)", pants_run.stdout_data)
@ensure_experimental
def test_parallel_both_cmdline(self):
"""Checks the --test-junit-default_concurrency=PARALLEL_CLASSES_AND_METHODS setting."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=PARALLEL_CLASSES_AND_METHODS',
'--test-junit-parallel-threads=4',
'testprojects/tests/java/org/pantsbuild/testproject/parallelclassesandmethods:cmdline'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (4 tests)", pants_run.stdout_data)
@ensure_experimental
def test_parallel_both_serial_default(self):
"""Checks the --test-junit-default-concurrency=SERIAL setting."""
with self.temporary_workdir() as workdir:
# NB(zundel): the timeout for each test in ParallelMethodsDefaultParallel tests is
# currently set to 1 second, making this test take about 3 seconds to run due
# to (3 timeout failures)
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=SERIAL',
'--test-junit-parallel-threads=4',
'testprojects/tests/java/org/pantsbuild/testproject/parallelclassesandmethods:cmdline'
], workdir)
self.assert_failure(pants_run)
# It's not deterministic which test will fail, but 3/4 of them should time out
self.assertIn("Tests run: 4, Failures: 3", pants_run.stdout_data)
class ExperimentalOnlyJunitTestsConcurrencyIntegrationTest(PantsRunIntegrationTest):
"""The following tests only work with the experimental runner."""
def test_concurrency_annotated_test_serial_parallel_both(self):
"""Checks the @TestSerial annotation with --test-junit-default-concurrency=PARALLEL_CLASSES_AND_METHODS."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=PARALLEL_CLASSES_AND_METHODS',
'--test-junit-parallel-threads=2',
'--test-junit-use-experimental-runner',
'testprojects/tests/java/org/pantsbuild/testproject/parallel:annotated-serial'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (2 tests)", pants_run.stdout_data)
def test_parallel_methods(self):
"""Checks the concurency='parallel_methods' setting."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=SERIAL',
'--test-junit-parallel-threads=4',
'--test-junit-use-experimental-runner',
'testprojects/tests/java/org/pantsbuild/testproject/parallelmethods'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (4 tests)", pants_run.stdout_data)
def test_parallel_methods_cmdline(self):
"""Checks the --test-junit-default_concurrency=PARALLEL_METHODS setting."""
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir([
'test',
'--test-junit-default-concurrency=PARALLEL_METHODS',
'--test-junit-parallel-threads=4',
'--test-junit-use-experimental-runner',
'testprojects/tests/java/org/pantsbuild/testproject/parallelmethods:cmdline'
], workdir)
self.assert_success(pants_run)
self.assertIn("OK (4 tests)", pants_run.stdout_data)
|
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Optimized NVP client for Quark
"""
import aiclib
from oslo_log import log as logging
from quark.db import models
from quark.drivers.nvp_driver import NVPDriver
import sqlalchemy as sa
from sqlalchemy import orm
LOG = logging.getLogger(__name__)
class OptimizedNVPDriver(NVPDriver):
def __init__(self):
super(OptimizedNVPDriver, self).__init__()
@classmethod
def get_name(klass):
return "NVP"
def delete_network(self, context, network_id):
lswitches = self._lswitches_for_network(context, network_id)
for switch in lswitches:
try:
self._lswitch_delete(context, switch.nvp_id)
self._remove_default_tz_bindings(
context, network_id)
except aiclib.core.AICException as ae:
LOG.info("LSwitch/Network %s found in database."
" Adding to orphaned database table."
% network_id)
if ae.code != 404:
LOG.info("LSwitch/Network %s was found in NVP."
" Adding to orpaned table for later cleanup."
" Code: %s, Message: %s"
% (network_id, ae.code, ae.message))
orphaned_lswitch = OrphanedLSwitch(
nvp_id=switch.nvp_id,
network_id=switch.network_id,
display_name=switch.display_name
)
context.session.add(orphaned_lswitch)
LOG.info("Deleting LSwitch/Network %s from original"
" table." % network_id)
context.session.delete(switch)
except Exception as e:
message = e.args[0] if e.args else ''
LOG.info("Failed to delete LSwitch/Network %s from "
" NVP (optimized). Message: %s"
% (network_id, message))
def create_port(self, context, network_id, port_id,
status=True, security_groups=None,
device_id="", **kwargs):
security_groups = security_groups or []
nvp_port = super(OptimizedNVPDriver, self).create_port(
context, network_id, port_id, status=status,
security_groups=security_groups, device_id=device_id)
switch_nvp_id = nvp_port["lswitch"]
# slightly inefficient for the sake of brevity. Lets the
# parent class do its thing then finds the switch that
# the port was created on for creating the association. Switch should
# be in the query cache so the subsequent lookup should be minimal,
# but this could be an easy optimization later if we're looking.
switch = self._lswitch_select_by_nvp_id(context, switch_nvp_id)
new_port = LSwitchPort(port_id=nvp_port["uuid"],
switch_id=switch.id)
context.session.add(new_port)
switch.port_count = switch.port_count + 1
return nvp_port
def update_port(self, context, port_id, status=True,
security_groups=None, **kwargs):
security_groups = security_groups or []
mac_address = kwargs.get('mac_address')
device_id = kwargs.get('device_id')
nvp_port = super(OptimizedNVPDriver, self).update_port(
context, port_id, mac_address=mac_address, device_id=device_id,
status=status, security_groups=security_groups)
port = self._lport_select_by_id(context, port_id)
port.update(nvp_port)
def delete_port(self, context, port_id, **kwargs):
port = self._lport_select_by_id(context, port_id)
if not port:
LOG.warning("Lost local reference to NVP lport %s" % port_id)
return # we return here because there isn't anything else to do
switch = port.switch
try:
self._lport_delete(context, port_id, switch)
except aiclib.core.AICException as ae:
LOG.info("LSwitchPort/Port %s found in database."
" Adding to orphaned database table."
% port_id)
if ae.code != 404:
LOG.info("LSwitchPort/Port %s was found in NVP."
" Adding to orpaned table for later cleanup."
" Code: %s, Message: %s"
% (port_id, ae.code, ae.args[0]))
orphaned_lswitch_port = OrphanedLSwitchPort(
port_id=port_id,
)
context.session.add(orphaned_lswitch_port)
except Exception as e:
LOG.info("Failed to delete LSwitchPort/Port %s from "
" NVP (optimized). Message: %s"
% (port_id, e.args[0]))
LOG.info("Deleting LSwitchPort/Port %s from original"
" table." % port_id)
context.session.delete(port)
switch.port_count = switch.port_count - 1
if switch.port_count == 0:
switches = self._lswitches_for_network(context, switch.network_id)
if len(switches) > 1: # do not delete last lswitch on network
self._lswitch_delete(context, switch.nvp_id)
def _lport_delete(self, context, port_id, switch=None):
if switch is None:
port = self._lport_select_by_id(context, port_id)
switch = port.switch
super(OptimizedNVPDriver, self).delete_port(
context, port_id, lswitch_uuid=switch.nvp_id)
def create_security_group(self, context, group_name, **group):
nvp_group = super(OptimizedNVPDriver, self).create_security_group(
context, group_name, **group)
group_id = group.get('group_id')
profile = SecurityProfile(id=group_id, nvp_id=nvp_group['uuid'])
context.session.add(profile)
def delete_security_group(self, context, group_id, **kwargs):
super(OptimizedNVPDriver, self).delete_security_group(
context, group_id)
group = self._query_security_group(context, group_id)
context.session.delete(group)
def _lport_select_by_id(self, context, port_id):
query = context.session.query(LSwitchPort)
query = query.filter(LSwitchPort.port_id == port_id)
return query.first()
def _lswitch_delete(self, context, lswitch_uuid):
switch = self._lswitch_select_by_nvp_id(context, lswitch_uuid)
super(OptimizedNVPDriver, self)._lswitch_delete(
context, lswitch_uuid)
context.session.delete(switch)
def _lswitch_select_by_nvp_id(self, context, nvp_id):
switch = context.session.query(LSwitch).filter(
LSwitch.nvp_id == nvp_id).first()
return switch
def _lswitch_select_first(self, context, network_id):
query = context.session.query(LSwitch)
query = query.filter(LSwitch.network_id == network_id)
return query.first()
def _lswitch_select_free(self, context, network_id):
query = context.session.query(LSwitch)
query = query.filter(LSwitch.port_count <
self.limits['max_ports_per_switch'])
query = query.filter(LSwitch.network_id == network_id)
switch = query.order_by(LSwitch.port_count).first()
return switch
def _lswitch_status_query(self, context, network_id):
"""Child implementation of lswitch_status_query.
Deliberately empty as we rely on _get_network_details to be more
efficient than we can be here.
"""
pass
def _lswitch_select_open(self, context, network_id=None, **kwargs):
if self.limits['max_ports_per_switch'] == 0:
switch = self._lswitch_select_first(context, network_id)
else:
switch = self._lswitch_select_free(context, network_id)
if switch:
return switch.nvp_id
LOG.debug("Could not find optimized switch")
def _get_network_details(self, context, network_id, switches):
name, phys_net, phys_type, segment_id = None, None, None, None
switch = self._lswitch_select_first(context, network_id)
if switch:
name = switch.display_name
phys_net = switch.transport_zone
phys_type = switch.transport_connector
segment_id = switch.segment_id
return dict(network_name=name, phys_net=phys_net,
phys_type=phys_type, segment_id=segment_id)
def _lswitch_create(self, context, network_name=None, tags=None,
network_id=None, **kwargs):
nvp_id = super(OptimizedNVPDriver, self)._lswitch_create(
context, network_name, tags, network_id, **kwargs)
return self._lswitch_create_optimized(context, network_name, nvp_id,
network_id, **kwargs).nvp_id
def _lswitch_create_optimized(self, context, network_name, nvp_id,
network_id, phys_net=None, phys_type=None,
segment_id=None):
new_switch = LSwitch(nvp_id=nvp_id, network_id=network_id,
port_count=0, transport_zone=phys_net,
transport_connector=phys_type,
display_name=network_name[:40],
segment_id=segment_id)
context.session.add(new_switch)
return new_switch
def get_lswitch_ids_for_network(self, context, network_id):
"""Public interface for fetching lswitch ids for a given network.
NOTE(morgabra) This is here because calling private methods
from outside the class feels wrong, and we need to be able to
fetch lswitch ids for use in other drivers.
"""
lswitches = self._lswitches_for_network(context, network_id)
return [s['nvp_id'] for s in lswitches]
def _lswitches_for_network(self, context, network_id):
switches = context.session.query(LSwitch).filter(
LSwitch.network_id == network_id).all()
return switches
def _lswitch_from_port(self, context, port_id):
port = self._lport_select_by_id(context, port_id)
return port.switch.nvp_id
def _query_security_group(self, context, group_id):
return context.session.query(SecurityProfile).filter(
SecurityProfile.id == group_id).first()
def _make_security_rule_dict(self, rule):
res = {"port_range_min": rule.get("port_range_min"),
"port_range_max": rule.get("port_range_max"),
"protocol": rule.get("protocol"),
"ip_prefix": rule.get("remote_ip_prefix"),
"group_id": rule.get("remote_group_id"),
"ethertype": rule.get("ethertype")}
for key, value in list(res.items()):
if value is None:
res.pop(key)
return res
def _get_security_group(self, context, group_id):
group = context.session.query(models.SecurityGroup).filter(
models.SecurityGroup.id == group_id).first()
rulelist = {'ingress': [], 'egress': []}
for rule in group.rules:
rulelist[rule.direction].append(
self._make_security_rule_dict(rule))
return {'uuid': self._query_security_group(context, group_id).nvp_id,
'logical_port_ingress_rules': rulelist['ingress'],
'logical_port_egress_rules': rulelist['egress']}
def _check_rule_count_per_port(self, context, group_id):
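# For every port that uses this security group, collect the ids of all
# security groups attached to that port, then return the largest combined
# rule count across those ports; the per-group counting itself is delegated
# to _check_rule_count_for_groups.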
ports = context.session.query(models.SecurityGroup).filter(
models.SecurityGroup.id == group_id).first().get('ports', [])
groups = (set(group.id for group in port.get('security_groups', []))
for port in ports)
return max(self._check_rule_count_for_groups(
context, (self._get_security_group(context, id) for id in g))
for g in groups)
class LSwitchPort(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_lswitchport"
port_id = sa.Column(sa.String(36), nullable=False, index=True)
switch_id = sa.Column(sa.String(36),
sa.ForeignKey("quark_nvp_driver_lswitch.id"),
nullable=False)
class LSwitch(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_lswitch"
nvp_id = sa.Column(sa.String(36), nullable=False, index=True)
network_id = sa.Column(sa.String(36), nullable=False, index=True)
display_name = sa.Column(sa.String(255))
port_count = sa.Column(sa.Integer())
ports = orm.relationship(LSwitchPort, backref='switch')
transport_zone = sa.Column(sa.String(36))
transport_connector = sa.Column(sa.String(20))
segment_id = sa.Column(sa.Integer())
class QOS(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_qos"
display_name = sa.Column(sa.String(255), nullable=False)
max_bandwidth_rate = sa.Column(sa.Integer(), nullable=False)
min_bandwidth_rate = sa.Column(sa.Integer(), nullable=False)
class SecurityProfile(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_security_profile"
nvp_id = sa.Column(sa.String(36), nullable=False, index=True)
class OrphanedLSwitch(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_orphaned_lswitches"
nvp_id = sa.Column(sa.String(36), nullable=False, index=True)
network_id = sa.Column(sa.String(36), nullable=False, index=True)
display_name = sa.Column(sa.String(255), index=True)
class OrphanedLSwitchPort(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_orphaned_lswitch_ports"
port_id = sa.Column(sa.String(36), nullable=False, index=True)
|
|
import os
import shutil
import sys
import tempfile
from resdk import Resolwe
from ..base import URL, USER_PASSWORD, USER_USERNAME, BaseResdkFunctionalTest
TEST_FILES_DIR = os.path.abspath(
os.path.normpath(os.path.join(__file__, "../../../files"))
)
DOCS_SCRIPTS_DIR = os.path.abspath(
os.path.normpath(os.path.join(__file__, "../../../../docs/files"))
)
sys.path.insert(0, DOCS_SCRIPTS_DIR)
class BaseResdkDocsFunctionalTest(BaseResdkFunctionalTest):
sample_slug = "resdk-example"
reads_slug = "resdk-example-reads"
genome_slug = "resdk-example-genome"
genome_index_slug = "resdk-example-genome-index"
annotation_slug = "resdk-example-annotation"
rrna_slug = "resdk-example-rrna"
rrna_index_slug = "resdk-example-rrna-index"
globin_slug = "resdk-example-globin"
globin_index_slug = "resdk-example-globin-index"
collection_slug = "resdk-example-collection"
def setUp(self):
super().setUp()
self.tmpdir = tempfile.mkdtemp()
self.original_cwd = os.getcwd()
os.chdir(self.tmpdir)
self.collection = self.res.collection.create(slug=self.collection_slug)
self.collection.permissions.set_public("view")
def tearDown(self):
os.chdir(self.original_cwd)
shutil.rmtree(self.tmpdir)
if hasattr(self, "collection"):
self.collection.delete(force=True)
def run_tutorial_script(self, script_name, replace_lines=None):
"""Run a script from tutorial folder.
If given, ``replace_lines`` should be in a list of 2-tuples::
replace lines = [
(0, 'replace the content of first line with this')
(2, 'replace the content of third line with this')
]
First element of tuple is line index and the second is line
content.
"""
script_path = os.path.join(DOCS_SCRIPTS_DIR, script_name)
with open(script_path) as handle:
content = handle.readlines()
if replace_lines:
for line_index, line_content in replace_lines:
content[line_index] = line_content
exec("".join(content))
def upload_reads(self, res):
reads = res.run(
slug="upload-fastq-single",
input={"src": os.path.join(TEST_FILES_DIR, "reads.fastq.gz")},
collection=self.collection.id,
)
self.set_slug(reads, self.reads_slug)
self.set_slug(reads.sample, self.sample_slug)
return reads
def upload_genome(self, res, fasta, slug):
genome = res.run(
slug="upload-fasta-nucl",
input={
"src": os.path.join(TEST_FILES_DIR, fasta),
"species": "Dictyostelium discoideum",
"build": "dd-05-2009",
},
collection=self.collection.id,
)
self.set_slug(genome, slug)
return genome
def upload_annotation(self, res):
annotation = res.run(
slug="upload-gtf",
input={
"src": os.path.join(TEST_FILES_DIR, "annotation.gtf.gz"),
"source": "DICTYBASE",
"species": "Dictyostelium discoideum",
"build": "dd-05-2009",
},
collection=self.collection.id,
)
self.set_slug(annotation, self.annotation_slug)
return annotation
def create_genome_index(self, res, fasta, slug):
genome_index = res.run(
slug="alignment-star-index",
input={
"ref_seq": fasta,
},
collection=self.collection.id,
)
self.set_slug(genome_index, slug)
return genome_index
def allow_run_process(self, res, slug):
process = res.process.get(slug=slug)
process.permissions.set_public("view")
def allow_use_descriptor_schema(self, res, slug):
descriptor_schema = res.descriptor_schema.get(slug=slug)
descriptor_schema.permissions.set_public("view")
class TestIndex(BaseResdkDocsFunctionalTest):
def setUp(self):
super().setUp()
self.reads = self.upload_reads(self.res)
def test_index(self):
"""Test example code used in ``README.rst`` and ``index.rst``."""
self.run_tutorial_script(
"index.py",
replace_lines=[(4, "res = resdk.Resolwe(url='{}')\n".format(URL))],
)
class TestStart(BaseResdkDocsFunctionalTest):
def setUp(self):
super().setUp()
# Create data for tests:
self.reads = self.upload_reads(self.res)
self.genome = self.upload_genome(self.res, "genome.fasta.gz", self.genome_slug)
self.genome_index = self.create_genome_index(
self.res, self.genome, self.genome_index_slug
)
# Set permissions for running processes:
self.allow_run_process(self.res, "alignment-star")
def test_start(self):
"""Test getting started."""
self.run_tutorial_script(
"start.py",
replace_lines=[
(4, "res = resdk.Resolwe(url='{}')\n".format(URL)),
(5, "res.login('{}', '{}')\n".format(USER_USERNAME, USER_PASSWORD)),
],
)
class TestTutorialGet(BaseResdkDocsFunctionalTest):
def setUp(self):
super().setUp()
self.reads = self.upload_reads(self.res)
def test_tutorial_get(self):
"""Test tutorial-get."""
self.run_tutorial_script(
"tutorial-get.py",
replace_lines=[
(4, "res = resdk.Resolwe(url='{}')\n".format(URL)),
(5, "res.login('{}', '{}')\n".format(USER_USERNAME, USER_PASSWORD)),
],
)
class TestTutorialCreate(BaseResdkDocsFunctionalTest):
def setUp(self):
super().setUp()
self.reads = self.upload_reads(self.res)
self.annotation = self.upload_annotation(self.res)
self.genome = self.upload_genome(self.res, "genome.fasta.gz", self.genome_slug)
self.genome_index = self.create_genome_index(
self.res, self.genome, self.genome_index_slug
)
self.rrna = self.upload_genome(self.res, "rrna.fasta", self.rrna_slug)
self.rrna_index = self.create_genome_index(
self.res, self.rrna, self.rrna_index_slug
)
self.globin = self.upload_genome(self.res, "globin.fasta", self.globin_slug)
self.globin_index = self.create_genome_index(
self.res, self.globin, self.globin_index_slug
)
# Set permissions for running processes:
self.allow_run_process(self.res, "upload-fastq-single")
self.allow_run_process(self.res, "alignment-star")
self.allow_run_process(self.res, "workflow-bbduk-star-featurecounts-qc")
# Set permissions for using descriptor_schemas:
self.allow_use_descriptor_schema(self.res, "reads")
self.allow_use_descriptor_schema(self.res, "sample")
def test_tutorial_create(self):
"""Test tutorial-create."""
self.run_tutorial_script(
"tutorial-create.py",
replace_lines=[
(3, "res = resdk.Resolwe(url='{}')\n".format(URL)),
(4, "res.login('{}', '{}')\n".format(USER_USERNAME, USER_PASSWORD)),
(
21,
" 'src': '{}'\n".format(
os.path.join(TEST_FILES_DIR, "reads.fastq.gz")
),
),
                # The data object is not finished yet, so a line like
                # (107, "foo = res.data.get('{}').stdout()\n".format(self.reads_slug)),
                # is replaced with an empty line. There is no way to perform a
                # download while data objects are still processing and/or have
                # not produced any stdout.txt, so just write an empty line:
(107, "\n"),
],
)
class TestTutorialResources(BaseResdkFunctionalTest):
def test_tutorial_resources(self):
"""Verify existence of resources required for tutorial."""
res = Resolwe(url="https://app.genialis.com")
sample_slugs = [
BaseResdkDocsFunctionalTest.sample_slug,
]
for sample_slug in sample_slugs:
res.sample.get(sample_slug)
data_slugs = [
BaseResdkDocsFunctionalTest.reads_slug,
BaseResdkDocsFunctionalTest.genome_slug,
BaseResdkDocsFunctionalTest.annotation_slug,
BaseResdkDocsFunctionalTest.genome_index_slug,
BaseResdkDocsFunctionalTest.rrna_slug,
BaseResdkDocsFunctionalTest.rrna_index_slug,
BaseResdkDocsFunctionalTest.globin_slug,
BaseResdkDocsFunctionalTest.globin_index_slug,
]
for data_slug in data_slugs:
res.data.get(slug=data_slug, fields="id")
|
|
from django import forms
from django.contrib.auth import authenticate
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext as _
from .. import scope
from ..constants import RESPONSE_TYPE_CHOICES, SCOPES
from ..forms import OAuthForm, OAuthValidationError
from ..scope import SCOPE_NAMES
from ..utils import now
from .models import Client, Grant, RefreshToken
class ClientForm(forms.ModelForm):
"""
Form to create new consumers.
"""
class Meta:
model = Client
fields = ('name', 'url', 'redirect_uri', 'client_type')
def save(self, user=None, **kwargs):
self.instance.user = user
return super(ClientForm, self).save(**kwargs)
class ClientAuthForm(forms.Form):
"""
Client authentication form. Required to make sure that we're dealing with a
real client. Form is used in :attr:`provider.oauth2.backends` to validate
the client.
"""
client_id = forms.CharField()
client_secret = forms.CharField()
def clean(self):
data = self.cleaned_data
try:
client = Client.objects.get(client_id=data.get('client_id'),
client_secret=data.get('client_secret'))
except Client.DoesNotExist:
raise forms.ValidationError(_("Client could not be validated with "
"key pair."))
data['client'] = client
return data
class ScopeChoiceField(forms.ChoiceField):
"""
    Custom form field that separates values on space as defined in
:rfc:`3.3`.
"""
widget = forms.SelectMultiple
def to_python(self, value):
if not value:
return []
# New in Django 1.6: value may come in as a string.
# Instead of raising an `OAuthValidationError`, try to parse and
# ultimately return an empty list if nothing remains -- this will
# eventually raise an `OAuthValidationError` in `validate` where
# it should be anyways.
if not isinstance(value, (list, tuple)):
value = value.split(' ')
# Split values into list
return u' '.join([smart_unicode(val) for val in value]).split(u' ')
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise OAuthValidationError({'error': 'invalid_request'})
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise OAuthValidationError({
'error': 'invalid_request',
'error_description': _("'%s' is not a valid scope.") % \
val})
class ScopeMixin(object):
"""
Form mixin to clean scope fields.
"""
def clean_scope(self):
"""
The scope is assembled by combining all the set flags into a single
        integer value which we can later check for set bits.
If *no* scope is set, we return the default scope which is the first
defined scope in :attr:`provider.constants.SCOPES`.
"""
default = SCOPES[0][0]
flags = self.cleaned_data.get('scope', [])
return scope.to_int(default=default, *flags)
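# --- Added illustration; not part of the original module ---
# A minimal, self-contained sketch of the flag-combination idea behind the
# ``scope.to_int`` call above: each scope name maps to an integer bit flag,
# the flags are OR-ed together, and later checks are bitwise tests. The helper
# name and the literal flag values in the usage note are assumptions made
# purely for illustration.
def _example_combine_scope_flags(flag_map, *names, **kwargs):
    """Return the OR of the named flags, or ``default`` if none are given."""
    default = kwargs.get('default', 0)
    combined = 0
    for name in names:
        combined |= flag_map.get(name, 0)
    return combined or default
# Usage sketch: _example_combine_scope_flags({'read': 2, 'write': 4},
#                                            'read', 'write') == 6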
class AuthorizationRequestForm(ScopeMixin, OAuthForm):
"""
This form is used to validate the request data that the authorization
endpoint receives from clients.
Included data is specified in :rfc:`4.1.1`.
"""
# Setting all required fields to false to explicitly check by hand
# and use custom error messages that can be reused in the OAuth2
# protocol
response_type = forms.CharField(required=False)
"""
``"code"`` or ``"token"`` depending on the grant type.
"""
redirect_uri = forms.URLField(required=False)
"""
Where the client would like to redirect the user
back to. This has to match whatever value was saved while creating
the client.
"""
state = forms.CharField(required=False)
"""
Opaque - just pass back to the client for validation.
"""
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
"""
The scope that the authorization should include.
"""
def clean_response_type(self):
"""
:rfc:`3.1.1` Lists of values are space delimited.
"""
response_type = self.cleaned_data.get('response_type')
if not response_type:
raise OAuthValidationError({'error': 'invalid_request',
'error_description': "No 'response_type' supplied."})
types = response_type.split(" ")
for type in types:
if type not in RESPONSE_TYPE_CHOICES:
raise OAuthValidationError({
'error': 'unsupported_response_type',
'error_description': u"'%s' is not a supported response "
"type." % type})
return response_type
def clean_redirect_uri(self):
"""
:rfc:`3.1.2` The redirect value has to match what was saved on the
authorization server.
"""
redirect_uri = self.cleaned_data.get('redirect_uri')
if redirect_uri:
if not redirect_uri == self.client.redirect_uri:
raise OAuthValidationError({
'error': 'invalid_request',
'error_description': _("The requested redirect didn't "
"match the client settings.")})
return redirect_uri
class AuthorizationForm(ScopeMixin, OAuthForm):
"""
A form used to ask the resource owner for authorization of a given client.
"""
authorize = forms.BooleanField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def save(self, **kwargs):
authorize = self.cleaned_data.get('authorize')
if not authorize:
return None
grant = Grant()
grant.scope = self.cleaned_data.get('scope')
return grant
class RefreshTokenGrantForm(ScopeMixin, OAuthForm):
"""
Checks and returns a refresh token.
"""
refresh_token = forms.CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def clean_refresh_token(self):
token = self.cleaned_data.get('refresh_token')
if not token:
raise OAuthValidationError({'error': 'invalid_request'})
try:
token = RefreshToken.objects.get(token=token,
expired=False, client=self.client)
except RefreshToken.DoesNotExist:
raise OAuthValidationError({'error': 'invalid_grant'})
return token
def clean(self):
"""
Make sure that the scope is less or equal to the previous scope!
"""
data = self.cleaned_data
want_scope = data.get('scope') or 0
refresh_token = data.get('refresh_token')
        access_token = (getattr(refresh_token, 'access_token', None)
                        if refresh_token else None)
has_scope = access_token.scope if access_token else 0
# Only check if we've actually got a scope in the data
# (read: All fields have been cleaned)
        if want_scope != 0 and not scope.check(want_scope, has_scope):
raise OAuthValidationError({'error': 'invalid_scope'})
return data
class AuthorizationCodeGrantForm(ScopeMixin, OAuthForm):
"""
Check and return an authorization grant.
"""
code = forms.CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def clean_code(self):
code = self.cleaned_data.get('code')
if not code:
raise OAuthValidationError({'error': 'invalid_request'})
try:
self.cleaned_data['grant'] = Grant.objects.get(
code=code, client=self.client, expires__gt=now())
except Grant.DoesNotExist:
raise OAuthValidationError({'error': 'invalid_grant'})
return code
def clean(self):
"""
Make sure that the scope is less or equal to the scope allowed on the
grant!
"""
data = self.cleaned_data
want_scope = data.get('scope') or 0
grant = data.get('grant')
has_scope = grant.scope if grant else 0
# Only check if we've actually got a scope in the data
# (read: All fields have been cleaned)
        if want_scope != 0 and not scope.check(want_scope, has_scope):
raise OAuthValidationError({'error': 'invalid_scope'})
return data
class PasswordGrantForm(ScopeMixin, OAuthForm):
"""
Validate the password of a user on a password grant request.
"""
username = forms.CharField(required=False)
password = forms.CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
def clean_username(self):
username = self.cleaned_data.get('username')
if not username:
raise OAuthValidationError({'error': 'invalid_request'})
return username
def clean_password(self):
password = self.cleaned_data.get('password')
if not password:
raise OAuthValidationError({'error': 'invalid_request'})
return password
def clean(self):
data = self.cleaned_data
user = authenticate(email=data.get('username'),
password=data.get('password'))
if user is None:
user = authenticate(username=data.get('username'),
password=data.get('password'))
if user is None:
raise OAuthValidationError({'error': 'invalid_grant'})
data['user'] = user
return data
class PublicPasswordGrantForm(PasswordGrantForm):
client_id = forms.CharField(required=True)
grant_type = forms.CharField(required=True)
def clean_grant_type(self):
grant_type = self.cleaned_data.get('grant_type')
if grant_type != 'password':
raise OAuthValidationError({'error': 'invalid_grant'})
return grant_type
def clean(self):
data = super(PublicPasswordGrantForm, self).clean()
try:
client = Client.objects.get(client_id=data.get('client_id'))
except Client.DoesNotExist:
raise OAuthValidationError({'error': 'invalid_client'})
if client.client_type != 1: # public
raise OAuthValidationError({'error': 'invalid_client'})
data['client'] = client
return data
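# --- Added illustration; not part of the original module ---
# A hedged sketch of how one of the grant forms above might be validated in a
# token endpoint, assuming (as the ``self.client`` references suggest) that
# ``OAuthForm`` accepts a ``client`` keyword argument. ``request`` and the
# error handling are hypothetical placeholders, not the package's actual view
# code.
def _example_validate_password_grant(request, client):
    form = PasswordGrantForm(request.POST, client=client)
    if not form.is_valid():
        return None  # a real view would build the OAuth error response here
    return form.cleaned_data['user']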
|
|
#!/usr/bin/env vpython
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for classes in gtest_utils.py."""
import unittest
import gtest_utils
FAILURES = [
'NavigationControllerTest.Reload',
'NavigationControllerTest/SpdyNetworkTransTest.Constructor/0',
'BadTest.TimesOut', 'MoreBadTest.TimesOutAndFails',
'SomeOtherTest.SwitchTypes', 'SomeOtherTest.FAILS_ThisTestTimesOut'
]
FAILS_FAILURES = ['SomeOtherTest.FAILS_Bar']
FLAKY_FAILURES = ['SomeOtherTest.FLAKY_Baz']
TIMEOUT_MESSAGE = 'Killed (timed out).'
RELOAD_ERRORS = (r'C:\b\slave\chrome-release-snappy\build\chrome\browser'
r'\navigation_controller_unittest.cc:381: Failure' + """
Value of: -1
Expected: contents->controller()->GetPendingEntryIndex()
Which is: 0
""")
SPDY_ERRORS = (r'C:\b\slave\chrome-release-snappy\build\chrome\browser'
r'\navigation_controller_unittest.cc:439: Failure' + """
Value of: -1
Expected: contents->controller()->GetPendingEntryIndex()
Which is: 0
""")
SWITCH_ERRORS = (r'C:\b\slave\chrome-release-snappy\build\chrome\browser'
r'\navigation_controller_unittest.cc:615: Failure' + """
Value of: -1
Expected: contents->controller()->GetPendingEntryIndex()
Which is: 0
""" + r'C:\b\slave\chrome-release-snappy\build\chrome\browser'
r'\navigation_controller_unittest.cc:617: Failure' + """
Value of: contents->controller()->GetPendingEntry()
Actual: true
Expected: false
""")
# pylint: disable=line-too-long
TIMEOUT_ERRORS = (
'[61613:263:0531/042613:2887943745568888:ERROR:/b/slave'
'/chromium-rel-mac-builder/build/src/chrome/browser/extensions'
'/extension_error_reporter.cc(56)] Extension error: Could not load extension '
'from \'extensions/api_test/geolocation/no_permission\'. Manifest file is '
'missing or unreadable.')
MOREBAD_ERRORS = """
Value of: entry->page_type()
Actual: 2
Expected: NavigationEntry::NORMAL_PAGE
"""
TEST_DATA = (
"""
[==========] Running 7 tests from 3 test cases.
[----------] Global test environment set-up.
[----------] 1 test from HunspellTest
[ RUN ] HunspellTest.All
[ OK ] HunspellTest.All (62 ms)
[----------] 1 test from HunspellTest (62 ms total)
[----------] 4 tests from NavigationControllerTest
[ RUN ] NavigationControllerTest.Defaults
[ OK ] NavigationControllerTest.Defaults (48 ms)
[ RUN ] NavigationControllerTest.Reload
%(reload_errors)s
[ FAILED ] NavigationControllerTest.Reload (2 ms)
[ RUN ] NavigationControllerTest.Reload_GeneratesNewPage
[ OK ] NavigationControllerTest.Reload_GeneratesNewPage (22 ms)
[ RUN ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0
%(spdy_errors)s
[ FAILED ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0 (2 ms)
[----------] 4 tests from NavigationControllerTest (74 ms total)
YOU HAVE 2 FLAKY TESTS
[----------] 1 test from BadTest
[ RUN ] BadTest.TimesOut
%(timeout_errors)s
""" % {
'reload_errors': RELOAD_ERRORS,
'spdy_errors': SPDY_ERRORS,
'timeout_errors': TIMEOUT_ERRORS
} + '[0531/042642:ERROR:/b/slave/chromium-rel-mac-builder/build/src/chrome'
'/test/test_launcher/out_of_proc_test_runner.cc(79)] Test timeout (30000 ms) '
'exceeded for BadTest.TimesOut' + """
Handling SIGTERM.
Successfully wrote to shutdown pipe, resetting signal handler.
""" +
'[61613:19971:0531/042642:2887973024284693:INFO:/b/slave/chromium-rel-mac-'
'builder/build/src/chrome/browser/browser_main.cc(285)] Handling shutdown for '
'signal 15.' + """
[----------] 1 test from MoreBadTest
[ RUN ] MoreBadTest.TimesOutAndFails
%(morebad_errors)s
""" % {
'morebad_errors': MOREBAD_ERRORS
} +
'[0531/042642:ERROR:/b/slave/chromium-rel-mac-builder/build/src/chrome/test'
'/test_launcher/out_of_proc_test_runner.cc(79)] Test timeout (30000 ms) '
'exceeded for MoreBadTest.TimesOutAndFails' + """
Handling SIGTERM.
Successfully wrote to shutdown pipe, resetting signal handler.
[ FAILED ] MoreBadTest.TimesOutAndFails (31000 ms)
[----------] 5 tests from SomeOtherTest
[ RUN ] SomeOtherTest.SwitchTypes
%(switch_errors)s
[ FAILED ] SomeOtherTest.SwitchTypes (40 ms)
[ RUN ] SomeOtherTest.Foo
[ OK ] SomeOtherTest.Foo (20 ms)
[ RUN ] SomeOtherTest.FAILS_Bar
Some error message for a failing test.
[ FAILED ] SomeOtherTest.FAILS_Bar (40 ms)
[ RUN ] SomeOtherTest.FAILS_ThisTestTimesOut
""" % {
'switch_errors': SWITCH_ERRORS
} + '[0521/041343:ERROR:test_launcher.cc(384)] Test timeout (5000 ms) '
'exceeded for SomeOtherTest.FAILS_ThisTestTimesOut' + """
[ RUN ] SomeOtherTest.FLAKY_Baz
Some error message for a flaky test.
[ FAILED ] SomeOtherTest.FLAKY_Baz (40 ms)
[----------] 2 tests from SomeOtherTest (60 ms total)
[----------] Global test environment tear-down
[==========] 8 tests from 3 test cases ran. (3750 ms total)
[ PASSED ] 4 tests.
[ FAILED ] 4 tests, listed below:
[ FAILED ] NavigationControllerTest.Reload
[ FAILED ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0
[ FAILED ] SomeOtherTest.SwitchTypes
[ FAILED ] SomeOtherTest.FAILS_ThisTestTimesOut
1 FAILED TEST
YOU HAVE 10 DISABLED TESTS
YOU HAVE 2 FLAKY TESTS
program finished with exit code 1
""")
TEST_DATA_CRASH = """
[==========] Running 7 tests from 3 test cases.
[----------] Global test environment set-up.
[----------] 1 test from HunspellTest
[ RUN ] HunspellTest.Crashes
Oops, this test crashed!
"""
TEST_DATA_MIXED_STDOUT = """
[==========] Running 3 tests from 3 test cases.
[----------] Global test environment set-up.
[----------] 1 tests from WebSocketHandshakeHandlerSpdy3Test
[ RUN ] WebSocketHandshakeHandlerSpdy3Test.RequestResponse
[ OK ] WebSocketHandshakeHandlerSpdy3Test.RequestResponse (1 ms)
[----------] 1 tests from WebSocketHandshakeHandlerSpdy3Test (1 ms total)
[----------] 1 test from URLRequestTestFTP
[ RUN ] URLRequestTestFTP.UnsafePort
FTP server started on port 32841...
sending server_data: {"host": "127.0.0.1", "port": 32841} (36 bytes)
starting FTP server[ OK ] URLRequestTestFTP.UnsafePort (300 ms)
[----------] 1 test from URLRequestTestFTP (300 ms total)
[ RUN ] TestFix.TestCase
[1:2/3:WARNING:extension_apitest.cc(169)] Workaround for 177163,
prematurely stopping test
[ OK ] X (1000ms total)
[----------] 1 test from Crash
[ RUN ] Crash.Test
Oops, this test crashed!
"""
TEST_DATA_SKIPPED = """
[==========] Running 1 test from 1 test suite.
[----------] Global test environment set-up.
[----------] 1 test from ProcessReaderLinux
[ RUN ] ProcessReaderLinux.AbortMessage
../../third_party/crashpad/crashpad/snapshot/linux/process_reader_linux_test.cc:842: Skipped
Stack trace:
#00 pc 0x00000000002350b7 /data/local/tmp/crashpad_tests__dist/crashpad_tests
#01 pc 0x0000000000218183 /data/local/tmp/crashpad_tests__dist/crashpad_tests
[ SKIPPED ] ProcessReaderLinux.AbortMessage (1 ms)
[----------] 1 test from ProcessReaderLinux (2 ms total)
[----------] Global test environment tear-down
[==========] 1 test from 1 test suite ran. (2 ms total)
[ PASSED ] 0 tests.
[ SKIPPED ] 1 test, listed below:
[ SKIPPED ] ProcessReaderLinux.AbortMessage
"""
VALGRIND_HASH = 'B254345E4D3B6A00'
VALGRIND_REPORT = """Leak_DefinitelyLost
1 (1 direct, 0 indirect) bytes in 1 blocks are lost in loss record 1 of 1
operator new(unsigned long) (m_replacemalloc/vg_replace_malloc.c:1140)
content::NavigationControllerTest_Reload::TestBody() (a/b/c/d.cc:1150)
Suppression (error hash=#%(hash)s#):
{
<insert_a_suppression_name_here>
Memcheck:Leak
fun:_Znw*
fun:_ZN31NavigationControllerTest_Reload8TestBodyEv
}""" % {
'hash': VALGRIND_HASH
}
TEST_DATA_VALGRIND = """
[==========] Running 5 tests from 2 test cases.
[----------] Global test environment set-up.
[----------] 1 test from HunspellTest
[ RUN ] HunspellTest.All
[ OK ] HunspellTest.All (62 ms)
[----------] 1 test from HunspellTest (62 ms total)
[----------] 4 tests from NavigationControllerTest
[ RUN ] NavigationControllerTest.Defaults
[ OK ] NavigationControllerTest.Defaults (48 ms)
[ RUN ] NavigationControllerTest.Reload
[ OK ] NavigationControllerTest.Reload (2 ms)
[ RUN ] NavigationControllerTest.Reload_GeneratesNewPage
[ OK ] NavigationControllerTest.Reload_GeneratesNewPage (22 ms)
[ RUN ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0
[ OK ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0 (2 ms)
[----------] 4 tests from NavigationControllerTest (74 ms total)
[----------] Global test environment tear-down
[==========] 5 tests from 1 test cases ran. (136 ms total)
[ PASSED ] 5 tests.
### BEGIN MEMORY TOOL REPORT (error hash=#%(hash)s#)
%(report)s
### END MEMORY TOOL REPORT (error hash=#%(hash)s#)
program finished with exit code 255
""" % {
'report': VALGRIND_REPORT,
'hash': VALGRIND_HASH
}
FAILING_TESTS_OUTPUT = """
Failing tests:
ChromeRenderViewTest.FAILS_AllowDOMStorage
PrerenderBrowserTest.PrerenderHTML5VideoJs
"""
FAILING_TESTS_EXPECTED = [
'ChromeRenderViewTest.FAILS_AllowDOMStorage',
'PrerenderBrowserTest.PrerenderHTML5VideoJs'
]
TEST_DATA_SHARD_0 = (
"""Note: This is test shard 1 of 30.
[==========] Running 6 tests from 3 test cases.
[----------] Global test environment set-up.
[----------] 1 test from HunspellTest
[ RUN ] HunspellTest.All
[ OK ] HunspellTest.All (62 ms)
[----------] 1 test from HunspellTest (62 ms total)
[----------] 1 test from BadTest
[ RUN ] BadTest.TimesOut
%(timeout_errors)s
""" % {
'timeout_errors': TIMEOUT_ERRORS
} +
'[0531/042642:ERROR:/b/slave/chromium-rel-mac-builder/build/src/chrome/test'
'/test_launcher/out_of_proc_test_runner.cc(79)] Test timeout (30000 ms) '
'exceeded for BadTest.TimesOut' + """
Handling SIGTERM.
Successfully wrote to shutdown pipe, resetting signal handler.
""" +
'[61613:19971:0531/042642:2887973024284693:INFO:/b/slave/chromium-rel-mac-'
'builder/build/src/chrome/browser/browser_main.cc(285)] Handling shutdown for '
'signal 15.' + """
[----------] 4 tests from SomeOtherTest
[ RUN ] SomeOtherTest.SwitchTypes
%(switch_errors)s
[ FAILED ] SomeOtherTest.SwitchTypes (40 ms)
[ RUN ] SomeOtherTest.Foo
[ OK ] SomeOtherTest.Foo (20 ms)
[ RUN ] SomeOtherTest.FAILS_Bar
Some error message for a failing test.
[ FAILED ] SomeOtherTest.FAILS_Bar (40 ms)
[ RUN ] SomeOtherTest.FAILS_ThisTestTimesOut
""" % {
'switch_errors': SWITCH_ERRORS
} +
'[0521/041343:ERROR:test_launcher.cc(384)] Test timeout (5000 ms) exceeded '
'for SomeOtherTest.FAILS_ThisTestTimesOut' + """
[ RUN ] SomeOtherTest.FLAKY_Baz
Some error message for a flaky test.
[ FAILED ] SomeOtherTest.FLAKY_Baz (40 ms)
[----------] 2 tests from SomeOtherTest (60 ms total)
[----------] Global test environment tear-down
[==========] 7 tests from 3 test cases ran. (3750 ms total)
[ PASSED ] 5 tests.
[ FAILED ] 2 test, listed below:
[ FAILED ] SomeOtherTest.SwitchTypes
[ FAILED ] SomeOtherTest.FAILS_ThisTestTimesOut
1 FAILED TEST
YOU HAVE 10 DISABLED TESTS
YOU HAVE 2 FLAKY TESTS
""")
TEST_DATA_SHARD_1 = (
"""Note: This is test shard 13 of 30.
[==========] Running 5 tests from 2 test cases.
[----------] Global test environment set-up.
[----------] 4 tests from NavigationControllerTest
[ RUN ] NavigationControllerTest.Defaults
[ OK ] NavigationControllerTest.Defaults (48 ms)
[ RUN ] NavigationControllerTest.Reload
%(reload_errors)s
[ FAILED ] NavigationControllerTest.Reload (2 ms)
[ RUN ] NavigationControllerTest.Reload_GeneratesNewPage
[ OK ] NavigationControllerTest.Reload_GeneratesNewPage (22 ms)
[ RUN ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0
%(spdy_errors)s
""" % {
'reload_errors': RELOAD_ERRORS,
'spdy_errors': SPDY_ERRORS
} + '[ FAILED ] NavigationControllerTest/SpdyNetworkTransTest.Constructor'
'/0 (2 ms)' + """
[----------] 4 tests from NavigationControllerTest (74 ms total)
YOU HAVE 2 FLAKY TESTS
[----------] 1 test from MoreBadTest
[ RUN ] MoreBadTest.TimesOutAndFails
%(morebad_errors)s
""" % {
'morebad_errors': MOREBAD_ERRORS
} +
'[0531/042642:ERROR:/b/slave/chromium-rel-mac-builder/build/src/chrome/test'
'/test_launcher/out_of_proc_test_runner.cc(79)] Test timeout (30000 ms) '
'exceeded for MoreBadTest.TimesOutAndFails' + """
Handling SIGTERM.
Successfully wrote to shutdown pipe, resetting signal handler.
[ FAILED ] MoreBadTest.TimesOutAndFails (31000 ms)
[----------] Global test environment tear-down
[==========] 5 tests from 2 test cases ran. (3750 ms total)
[ PASSED ] 3 tests.
[ FAILED ] 2 tests, listed below:
[ FAILED ] NavigationControllerTest.Reload
[ FAILED ] NavigationControllerTest/SpdyNetworkTransTest.Constructor/0
1 FAILED TEST
YOU HAVE 10 DISABLED TESTS
YOU HAVE 2 FLAKY TESTS
""")
TEST_DATA_SHARD_EXIT = 'program finished with exit code '
TEST_DATA_CRASH_SHARD = """Note: This is test shard 5 of 5.
[==========] Running 7 tests from 3 test cases.
[----------] Global test environment set-up.
[----------] 1 test from HunspellTest
[ RUN ] HunspellTest.Crashes
Oops, this test crashed!"""
TEST_DATA_NESTED_RUNS = (
"""
[ 1/3] 1.0s Foo.Bar (45.5s)
Note: Google Test filter = Foo.Bar
[==========] Running 1 test from 1 test case.
[----------] Global test environment set-up.
[----------] 1 test from Foo, where TypeParam =
[ RUN ] Foo.Bar
""" +
'[0725/050653:ERROR:test_launcher.cc(380)] Test timeout (45000 ms) exceeded '
'for Foo.Bar' + """
Starting tests...
IMPORTANT DEBUGGING NOTE: each test is run inside its own process.
For debugging a test inside a debugger, use the
--gtest_filter=<your_test_name> flag along with either
--single_process (to run all tests in one launcher/browser process) or
--single-process (to do the above, and also run Chrome in single-
process mode).
1 test run
1 test failed (0 ignored)
Failing tests:
Foo.Bar
[ 2/2] 2.00s Foo.Pass (1.0s)""")
# Data generated with run_test_case.py
TEST_DATA_RUN_TEST_CASE_FAIL = """
[ 6/422] 7.45s SUIDSandboxUITest.testSUIDSandboxEnabled (1.49s) - retry #2
[ RUN ] SUIDSandboxUITest.testSUIDSandboxEnabled
[ FAILED ] SUIDSandboxUITest.testSUIDSandboxEnabled (771 ms)
[ 8/422] 7.76s PrintPreviewWebUITest.SourceIsPDFShowFitToPageOption (1.67s)
"""
TEST_DATA_RUN_TEST_CASE_TIMEOUT = """
[ 6/422] 7.45s SUIDSandboxUITest.testSUIDSandboxEnabled (1.49s) - retry #2
[ RUN ] SUIDSandboxUITest.testSUIDSandboxEnabled
(junk)
[ 8/422] 7.76s PrintPreviewWebUITest.SourceIsPDFShowFitToPageOption (1.67s)
"""
# Data generated by swarming.py
TEST_DATA_SWARM_TEST_FAIL = """
================================================================
Begin output from shard index 0 (machine tag: swarm12.c, id: swarm12)
================================================================
[==========] Running 2 tests from linux_swarm_trigg-8-base_unittests test run.
Starting tests (using 2 parallel jobs)...
IMPORTANT DEBUGGING NOTE: batches of tests are run inside their
own process. For debugging a test inside a debugger, use the
--gtest_filter=<your_test_name> flag along with
--single-process-tests.
[1/1242] HistogramDeathTest.BadRangesTest (62 ms)
[2/1242] OutOfMemoryDeathTest.New (22 ms)
[1242/1242] ThreadIdNameManagerTest.ThreadNameInterning (0 ms)
Retrying 1 test (retry #1)
[ RUN ] PickleTest.EncodeDecode
../../base/pickle_unittest.cc:69: Failure
Value of: false
Actual: false
Expected: true
[ FAILED ] PickleTest.EncodeDecode (0 ms)
[1243/1243] PickleTest.EncodeDecode (0 ms)
Retrying 1 test (retry #2)
[ RUN ] PickleTest.EncodeDecode
../../base/pickle_unittest.cc:69: Failure
Value of: false
Actual: false
Expected: true
[ FAILED ] PickleTest.EncodeDecode (1 ms)
[1244/1244] PickleTest.EncodeDecode (1 ms)
Retrying 1 test (retry #3)
[ RUN ] PickleTest.EncodeDecode
../../base/pickle_unittest.cc:69: Failure
Value of: false
Actual: false
Expected: true
[ FAILED ] PickleTest.EncodeDecode (0 ms)
[1245/1245] PickleTest.EncodeDecode (0 ms)
1245 tests run
1 test failed:
PickleTest.EncodeDecode
Summary of all itest iterations:
1 test failed:
PickleTest.EncodeDecode
End of the summary.
Tests took 31 seconds.
================================================================
End output from shard index 0 (machine tag: swarm12.c, id: swarm12). Return 1
================================================================
"""
# pylint: enable=line-too-long
class TestGTestLogParserTests(unittest.TestCase):
def testGTestLogParserNoSharing(self):
# Tests for log parsing without sharding.
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA.splitlines():
parser.ProcessLine(line)
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertFalse(parser.RunningTests())
self.assertEqual(sorted(FAILURES), sorted(parser.FailedTests()))
self.assertEqual(
sorted(FAILURES + FAILS_FAILURES),
sorted(parser.FailedTests(include_fails=True)))
self.assertEqual(
sorted(FAILURES + FLAKY_FAILURES),
sorted(parser.FailedTests(include_flaky=True)))
self.assertEqual(
sorted(FAILURES + FAILS_FAILURES + FLAKY_FAILURES),
sorted(parser.FailedTests(include_fails=True, include_flaky=True)))
self.assertEqual(10, parser.DisabledTests())
self.assertEqual(2, parser.FlakyTests())
test_name = 'NavigationControllerTest.Reload'
self.assertEqual('\n'.join(['%s: ' % test_name, RELOAD_ERRORS]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['FAILURE'], parser.TriesForTest(test_name))
test_name = 'NavigationControllerTest/SpdyNetworkTransTest.Constructor/0'
self.assertEqual('\n'.join(['%s: ' % test_name, SPDY_ERRORS]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['FAILURE'], parser.TriesForTest(test_name))
test_name = 'SomeOtherTest.SwitchTypes'
self.assertEqual('\n'.join(['%s: ' % test_name, SWITCH_ERRORS]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['FAILURE'], parser.TriesForTest(test_name))
test_name = 'BadTest.TimesOut'
self.assertEqual(
'\n'.join(['%s: ' % test_name, TIMEOUT_ERRORS, TIMEOUT_MESSAGE]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['TIMEOUT'], parser.TriesForTest(test_name))
test_name = 'MoreBadTest.TimesOutAndFails'
self.assertEqual(
'\n'.join(['%s: ' % test_name, MOREBAD_ERRORS, TIMEOUT_MESSAGE]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['TIMEOUT'], parser.TriesForTest(test_name))
self.assertEqual(['SUCCESS'], parser.TriesForTest('SomeOtherTest.Foo'))
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_CRASH.splitlines():
parser.ProcessLine(line)
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertTrue(parser.RunningTests())
self.assertEqual(['HunspellTest.Crashes'], parser.FailedTests())
self.assertEqual(0, parser.DisabledTests())
self.assertEqual(0, parser.FlakyTests())
test_name = 'HunspellTest.Crashes'
self.assertEqual('\n'.join(['%s: ' % test_name, 'Did not complete.']),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['UNKNOWN'], parser.TriesForTest(test_name))
def testGTestLogParserSharing(self):
# Same tests for log parsing with sharding_supervisor.
parser = gtest_utils.GTestLogParser()
test_data_shard = TEST_DATA_SHARD_0 + TEST_DATA_SHARD_1
for line in test_data_shard.splitlines():
parser.ProcessLine(line)
parser.ProcessLine(TEST_DATA_SHARD_EXIT + '2')
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertFalse(parser.RunningTests())
self.assertEqual(sorted(FAILURES), sorted(parser.FailedTests()))
self.assertEqual(
sorted(FAILURES + FAILS_FAILURES),
sorted(parser.FailedTests(include_fails=True)))
self.assertEqual(
sorted(FAILURES + FLAKY_FAILURES),
sorted(parser.FailedTests(include_flaky=True)))
self.assertEqual(
sorted(FAILURES + FAILS_FAILURES + FLAKY_FAILURES),
sorted(parser.FailedTests(include_fails=True, include_flaky=True)))
self.assertEqual(10, parser.DisabledTests())
self.assertEqual(2, parser.FlakyTests())
test_name = 'NavigationControllerTest.Reload'
self.assertEqual('\n'.join(['%s: ' % test_name, RELOAD_ERRORS]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['FAILURE'], parser.TriesForTest(test_name))
test_name = ('NavigationControllerTest/SpdyNetworkTransTest.Constructor/0')
self.assertEqual('\n'.join(['%s: ' % test_name, SPDY_ERRORS]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['FAILURE'], parser.TriesForTest(test_name))
test_name = 'SomeOtherTest.SwitchTypes'
self.assertEqual('\n'.join(['%s: ' % test_name, SWITCH_ERRORS]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['FAILURE'], parser.TriesForTest(test_name))
test_name = 'BadTest.TimesOut'
self.assertEqual(
'\n'.join(['%s: ' % test_name, TIMEOUT_ERRORS, TIMEOUT_MESSAGE]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['TIMEOUT'], parser.TriesForTest(test_name))
test_name = 'MoreBadTest.TimesOutAndFails'
self.assertEqual(
'\n'.join(['%s: ' % test_name, MOREBAD_ERRORS, TIMEOUT_MESSAGE]),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['TIMEOUT'], parser.TriesForTest(test_name))
self.assertEqual(['SUCCESS'], parser.TriesForTest('SomeOtherTest.Foo'))
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_CRASH.splitlines():
parser.ProcessLine(line)
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertTrue(parser.RunningTests())
self.assertEqual(['HunspellTest.Crashes'], parser.FailedTests())
self.assertEqual(0, parser.DisabledTests())
self.assertEqual(0, parser.FlakyTests())
test_name = 'HunspellTest.Crashes'
self.assertEqual('\n'.join(['%s: ' % test_name, 'Did not complete.']),
'\n'.join(parser.FailureDescription(test_name)))
self.assertEqual(['UNKNOWN'], parser.TriesForTest(test_name))
def testGTestLogParserMixedStdout(self):
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_MIXED_STDOUT.splitlines():
parser.ProcessLine(line)
self.assertEqual([], parser.ParsingErrors())
self.assertEqual(['Crash.Test'], parser.RunningTests())
self.assertEqual(['TestFix.TestCase', 'Crash.Test'], parser.FailedTests())
self.assertEqual(0, parser.DisabledTests())
self.assertEqual(0, parser.FlakyTests())
self.assertEqual(['UNKNOWN'], parser.TriesForTest('Crash.Test'))
self.assertEqual(['TIMEOUT'], parser.TriesForTest('TestFix.TestCase'))
self.assertEqual(['SUCCESS'],
parser.TriesForTest(
'WebSocketHandshakeHandlerSpdy3Test.RequestResponse'))
def testGtestLogParserSkipped(self):
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_SKIPPED.splitlines():
parser.ProcessLine(line)
self.assertEqual([], parser.ParsingErrors())
self.assertEqual([], parser.RunningTests())
self.assertEqual([], parser.FailedTests())
self.assertEqual(['ProcessReaderLinux.AbortMessage'], parser.SkippedTests())
self.assertEqual(0, parser.DisabledTests())
self.assertEqual(0, parser.FlakyTests())
self.assertEqual(['SKIPPED'],
parser.TriesForTest('ProcessReaderLinux.AbortMessage'))
def testRunTestCaseFail(self):
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_RUN_TEST_CASE_FAIL.splitlines():
parser.ProcessLine(line)
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertEqual([], parser.RunningTests())
self.assertEqual(['SUIDSandboxUITest.testSUIDSandboxEnabled'],
parser.FailedTests())
self.assertEqual(
['SUIDSandboxUITest.testSUIDSandboxEnabled: '],
parser.FailureDescription('SUIDSandboxUITest.testSUIDSandboxEnabled'))
self.assertEqual(
['FAILURE'],
parser.TriesForTest('SUIDSandboxUITest.testSUIDSandboxEnabled'))
def testRunTestCaseTimeout(self):
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_RUN_TEST_CASE_TIMEOUT.splitlines():
parser.ProcessLine(line)
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertEqual([], parser.RunningTests())
self.assertEqual(['SUIDSandboxUITest.testSUIDSandboxEnabled'],
parser.FailedTests())
self.assertEqual(
['SUIDSandboxUITest.testSUIDSandboxEnabled: ', '(junk)'],
parser.FailureDescription('SUIDSandboxUITest.testSUIDSandboxEnabled'))
self.assertEqual(
['TIMEOUT'],
parser.TriesForTest('SUIDSandboxUITest.testSUIDSandboxEnabled'))
def testRunTestCaseParseSwarm(self):
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_SWARM_TEST_FAIL.splitlines():
parser.ProcessLine(line)
self.assertEqual(0, len(parser.ParsingErrors()))
self.assertEqual([], parser.RunningTests())
self.assertEqual(['PickleTest.EncodeDecode'], parser.FailedTests())
self.assertEqual([
'PickleTest.EncodeDecode: ',
'../../base/pickle_unittest.cc:69: Failure',
'Value of: false',
' Actual: false',
'Expected: true',
], parser.FailureDescription('PickleTest.EncodeDecode'))
self.assertEqual(['FAILURE'],
parser.TriesForTest('PickleTest.EncodeDecode'))
def testNestedGtests(self):
parser = gtest_utils.GTestLogParser()
for line in TEST_DATA_NESTED_RUNS.splitlines():
parser.ProcessLine(line)
self.assertEqual(['Foo.Bar'], parser.FailedTests(True, True))
if __name__ == '__main__':
unittest.main()
|
|
#
# FILE: TreeBrowser.py
#
# DESCRIPTION:
# This file provides a generic hierarchical tree browser widget.
#
# AUTHOR: Steve Kinneberg <[email protected]>,
# MontaVista Software, Inc. <[email protected]>
#
# Copyright 2001 MontaVista Software Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
#
import types
import Tkinter
import Pmw
class _Branching:
def __init__(self):
# List of branch names
self._nodeNames = []
# Map from branch name to branch info
# branch Either _LeafNode or _BranchNode widget of the branch
# nodetype Either 'TreeNode' or 'LeafNode'
self._nodeAttrs = {}
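        # Example shape (illustrative): after addbranch()/addleaf() calls on a
        # root named '0', this might hold
        #   self._nodeNames = ['0.1', '0.2']
        #   self._nodeAttrs = {'0.1': {'branch': <_BranchNode>, 'nodetype': 'TreeNode'},
        #                      '0.2': {'branch': <_LeafNode>, 'nodetype': 'LeafNode'}}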
def addbranch(self, branchName = None, **kw):
kw['indent'] = self['indent']
return apply(self._insertnode,
('tree', branchName, len(self._nodeNames),
self._treeRoot),
kw)
def addleaf(self, leafName = None, **kw):
return apply(self._insertnode,
('leaf', leafName, len(self._nodeNames),
self._treeRoot),
kw)
def insertbranch(self, branchName = None, before = 0, **kw):
kw['indent'] = self['indent']
return apply(self._insertnode,
('tree', branchName, before, self._treeRoot),
kw)
def insertleaf(self, leafName = None, before = 0, **kw):
return apply(self._insertnode,
('leaf', leafName, before, self._treeRoot),
kw)
def _insertnode(self, type, nodeName, before, treeRoot, **kw):
if 'selectbackground' not in kw.keys():
kw['selectbackground'] = self['selectbackground']
if 'selectforeground' not in kw.keys():
kw['selectforeground'] = self['selectforeground']
if 'background' not in kw.keys():
kw['background'] = self['background']
if 'foreground' not in kw.keys():
kw['foreground'] = self['foreground']
if nodeName == None:
nodeName = self._nodeName + ".%d" % (len(self._nodeNames) + 1)
if self._nodeAttrs.has_key(nodeName):
msg = 'Node "%s" already exists.' % nodeName
raise ValueError, msg
# Do this early to catch bad <before> spec before creating any items.
beforeIndex = self.index(before, 1)
attributes = {}
last = (beforeIndex == len(self._nodeNames))
if last and len(self._nodeNames) > 0:
# set the previous node to not last
self._nodeAttrs[self._nodeNames[-1]]['branch']._setlast(0)
if(type == 'tree'):
node = apply(self.createcomponent, ('branch%d'%len(self._nodeNames),
(), None,
_BranchNode,
self._branchFrame,
nodeName,
treeRoot,
self,
last,
), kw)
attributes['nodetype'] = 'TreeNode'
else:
node = apply(self.createcomponent, ('leaf%d'%len(self._nodeNames),
(), None,
_LeafNode,
self._branchFrame,
nodeName,
treeRoot,
self,
last,
), kw)
attributes['nodetype'] = 'LeafNode'
if len(self._nodeNames) == beforeIndex:
node.pack(anchor='w')
else:
bname = self._nodeNames[beforeIndex]
battrs = self._nodeAttrs[bname]
node.pack(anchor='w', before=battrs['branch'])
attributes['branch'] = node
self._nodeAttrs[nodeName] = attributes
self._nodeNames.insert(beforeIndex, nodeName)
self._sizechange()
return node
def delete(self, *nodes):
        curSel = self._treeRoot.curselection()
        if curSel is not None:
            curSel = curSel[0]
for node in nodes:
index = self.index(node)
name = self._nodeNames.pop(index)
dnode = self._nodeAttrs[name]['branch']
del self._nodeAttrs[name]
if dnode == curSel:
                self._treeRoot._unhighlightnode()
dnode.destroy()
self._sizechange()
def destroy(self):
        for node in self._nodeNames[:]:
self.delete(node)
Pmw.MegaWidget.destroy(self)
def index(self, index, forInsert = 0):
if isinstance(index, _LeafNode):
index = index._nodeName
listLength = len(self._nodeNames)
if type(index) == types.IntType:
if forInsert and index <= listLength:
return index
elif not forInsert and index < listLength:
return index
else:
raise ValueError, 'index "%s" is out of range' % index
elif type(index) == types.StringType:
if index in self._nodeNames:
return self._nodeNames.index(index)
raise ValueError, 'bad branch or leaf name: %s' % index
elif index is Pmw.END:
if forInsert:
return listLength
elif listLength > 0:
return listLength - 1
else:
raise ValueError, 'TreeNode has no branches'
#elif index is Pmw.SELECT:
# if listLength == 0:
# raise ValueError, 'TreeNode has no branches'
# return self._pageNames.index(self.getcurselection())
else:
validValues = 'a name, a number, Pmw.END, Pmw.SELECT, or a reference to a TreeBrowser Leaf or Branch'
raise ValueError, \
'bad index "%s": must be %s' % (index, validValues)
def getnodenames(self):
return self._nodeNames
def getnode(self, node):
nodeName = self._nodeNames[self.index(node)]
return self._nodeAttrs[nodeName]['branch']
class _LeafNode(Pmw.MegaWidget):
def __init__(self, parent, nodeName, treeRoot, parentnode, last = 1, **kw):
colors = Pmw.Color.getdefaultpalette(parent)
self._nodeName = nodeName
self._treeRoot = treeRoot
self._parentNode = parentnode
self._last = last
# Define the megawidget options.
INITOPT = Pmw.INITOPT
optiondefs = (
('selectbackground', colors['selectBackground'], INITOPT),
('selectforeground', colors['selectForeground'], INITOPT),
('background', colors['background'], INITOPT),
('foreground', colors['foreground'], INITOPT),
('selectcommand', None, None),
('deselectcommand', None, None),
('labelpos', 'e', INITOPT),
('labelmargin', 0, INITOPT),
('label', None, None),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
Pmw.MegaWidget.__init__(self, parent)
# Create the components
interior = self._hull
labelpos = self['labelpos']
if self['label'] == None:
self._labelWidget = self.createcomponent('labelwidget',
(), None,
Pmw.LabeledWidget,
(interior,),
#background = self['background'],
#foreground = self['foreground'],
)
else:
self._labelWidget = self.createcomponent('labelwidget',
(), None,
Pmw.LabeledWidget,
(interior,),
label_background = self['background'],
label_foreground = self['foreground'],
labelpos = labelpos,
labelmargin = self['labelmargin'],
label_text = self['label'],
)
self._labelWidget.component('label').bind('<ButtonRelease-1>',
self._selectevent)
self._labelWidget.grid(column = 1, row = 0, sticky = 'w')
self._labelWidget.update()
self._labelheight = self._labelWidget.winfo_height()
self._lineCanvas = self.createcomponent('linecanvas',
(), None,
Tkinter.Canvas,
(interior,),
width = self._labelheight,
height = self._labelheight,
)
self._lineCanvas.grid( column = 0, row = 0, sticky = 'news')
self._lineCanvas.update()
cw = int(self._lineCanvas['width'])
ch = int(self._lineCanvas['height'])
self._lineCanvas.create_line(cw/2, ch/2, cw, ch/2, tag='hline')
if last:
self._lineCanvas.create_line(cw/2, 0, cw/2, ch/2, tag='vline')
else:
self._lineCanvas.create_line(cw/2, 0, cw/2, ch, tag='vline')
# Check keywords and initialise options.
self.initialiseoptions()
def interior(self):
return self._labelWidget.interior()
def select(self):
self._highlight()
def getname(self):
return self._nodeName
def getlabel(self):
return self['label']
def _selectevent(self, event):
self._highlight()
def _highlight(self):
self._treeRoot._highlightnode(self)
#self._subHull.configure(background = self._selectbg, relief = 'raised')
if self['label'] != None:
self._labelWidget.configure(label_background = self['selectbackground'])
self._labelWidget.configure(label_foreground = self['selectforeground'])
#self._viewButton.configure(background = self._selectbg)
cmd = self['selectcommand']
if callable(cmd):
cmd(self)
def _unhighlight(self):
#self._subHull.configure(background = self._bg, relief = 'flat')
if self['label'] != None:
self._labelWidget.configure(label_background = self['background'])
self._labelWidget.configure(label_foreground = self['foreground'])
#self._viewButton.configure(background = self._bg)
cmd = self['deselectcommand']
if callable(cmd):
cmd(self)
def _setlast(self, last):
self._last = last
cw = int(self._lineCanvas['width'])
ch = int(self._lineCanvas['height'])
if last:
self._lineCanvas.create_line(cw/2, 0, cw/2, ch/2, tag='vline')
else:
self._lineCanvas.create_line(cw/2, 0, cw/2, ch, tag='vline')
class _BranchNode(_LeafNode, _Branching): #Pmw.MegaWidget):
def __init__(self, parent, nodeName, treeRoot, parentnode, last = 1, **kw):
# Define the megawidget options.
INITOPT = Pmw.INITOPT
optiondefs = (
('view', 'collapsed', None),
('expandcommand', None, None),
('collapsecommand', None, None),
('indent', 0, INITOPT)
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
apply(_LeafNode.__init__,
(self, parent, nodeName, treeRoot, parentnode, last),
kw)
_Branching.__init__(self)
# Create the components
interior = self._hull
# Create the expand/collapse button
self._viewButton = self.createcomponent('viewbutton', (), None,
Tkinter.Canvas,
(interior,),
background = self['background'],
width = self._labelheight - 4,
height = self._labelheight - 4,
borderwidth = 2,
relief = 'raised')
self._viewButton.grid(column = 0, row = 0, sticky='se')
self._viewButton.bind('<ButtonPress-1>', self._showbuttonpress)
self._viewButton.bind('<ButtonRelease-1>', self._toggleview)
# The label widget is already created by the base class, however
# we do need to make some slight modifications.
if self['label'] != None:
self._labelWidget.component('label').bind('<Double-1>',
self._toggleview)
self._labelWidget.grid(column=1, row=0, columnspan = 3, sticky='sw')
# A line canvas is already created for us, we just need to make
# some slight modifications
self._lineCanvas.delete('hline')
self._lineCanvas.grid_forget()
# Set the minsize of column 1 to control additional branch frame indentation
self.grid_columnconfigure(1, minsize = self['indent'])
# Create the branch frame that will contain all the branch/leaf nodes
self._branchFrame = self.createcomponent('frame', (), None,
Tkinter.Frame, (interior,),
#borderwidth=2,
#relief='ridge',
)
self.grid_columnconfigure(2,minsize=0, weight=1)
#self.grid_rowconfigure(0,minsize=0)
if(self['view'] == 'expanded'):
Pmw.drawarrow(self._viewButton,
self['foreground'],
'down', 'arrow')
self._branchFrame.grid(column = 2, row = 1, sticky='nw')
if not self._last:
self._branchFrame.update()
bh = self._branchFrame.winfo_height()
self._lineCanvas.configure(height = bh)
self._lineCanvas.grid(column = 0, row = 1, sticky='news')
cw = int(self._lineCanvas['width'])
ch = int(self._lineCanvas['height'])
#self._lineCanvas.create_line(cw/2, 1, cw/2, ch, tag = 'vline')
self._lineCanvas.coords('vline', cw/2, 1, cw/2, ch)
else:
Pmw.drawarrow(self._viewButton,
self['foreground'],
'right', 'arrow')
self._viewButton.configure(relief = 'raised')
# Check keywords and initialise options.
self.initialiseoptions()
def _showbuttonpress(self, event):
self._viewButton.configure(relief = 'sunken')
def _toggleview(self, event):
self._viewButton.configure(relief = 'sunken')
self.select()
if(self['view'] == 'expanded'):
self.collapsetree()
else:
self.expandtree()
self._viewButton.configure(relief = 'raised')
def expandtree(self):
if(self['view'] == 'collapsed'):
cmd = self['expandcommand']
if cmd is not None:
cmd(self)
self['view'] = 'expanded'
Pmw.drawarrow(self._viewButton,
self['foreground'],
'down', 'arrow')
self._branchFrame.grid(column = 2, row = 1, sticky='nw')
if not self._last:
self._branchFrame.update()
bh = self._branchFrame.winfo_height()
self._lineCanvas.configure(height = bh)
self._lineCanvas.grid(column = 0, row = 1, sticky='news')
cw = int(self._lineCanvas['width'])
ch = int(self._lineCanvas['height'])
#self._lineCanvas.create_line( cw/2, 1, cw/2, ch, tag = 'vline')
self._lineCanvas.coords('vline', cw/2, 1, cw/2, ch)
self._parentNode._sizechange()
def collapsetree(self):
if(self['view'] == 'expanded'):
cmd = self['collapsecommand']
if cmd is not None:
cmd(self)
self['view'] = 'collapsed'
Pmw.drawarrow(self._viewButton,
self['foreground'],
'right', 'arrow')
self._branchFrame.grid_forget()
if not self._last:
#self._lineCanvas.delete('vline')
self._lineCanvas.grid_forget()
self._parentNode._sizechange()
def _setlast(self, last):
self._last = last
if self['view'] == 'expanded':
self._branchFrame.update()
bh = self._branchFrame.winfo_height()
self._lineCanvas.configure(height = bh)
cw = int(self._lineCanvas['width'])
ch = int(self._lineCanvas['height'])
self._lineCanvas.delete('vline')
if not last:
self._lineCanvas.create_line(cw/2, 1, cw/2, ch, tag='vline')
def _sizechange(self):
if not self._last and self['view'] == 'expanded':
self._branchFrame.update()
bh = self._branchFrame.winfo_height()
self._lineCanvas.configure(height = bh)
if self._lineCanvas.coords('vline')[3] < bh:
cw = int(self._lineCanvas['width'])
ch = int(self._lineCanvas['height'])
#self._lineCanvas.delete('vline')
#self._lineCanvas.create_line(cw/2, 1, cw/2, ch, tag='vline')
self._lineCanvas.coords('vline', cw/2, 1, cw/2, ch)
self._parentNode._sizechange()
class TreeBrowser(Pmw.MegaWidget, _Branching):
def __init__(self, parent = None, nodeName = '0', **kw):
colors = Pmw.Color.getdefaultpalette(parent)
# Define the megawidget options.
INITOPT = Pmw.INITOPT
optiondefs = (
('indent', 0, INITOPT),
('selectbackground', colors['selectBackground'], INITOPT),
('selectforeground', colors['selectForeground'], INITOPT),
('background', colors['background'], INITOPT),
('foreground', colors['foreground'], INITOPT),
#('selectrelief', 'raised', INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
Pmw.MegaWidget.__init__(self, parent)
_Branching.__init__(self)
# Create the components
interior = self._hull
browserFrame = self.createcomponent('frame', (), None,
Pmw.ScrolledFrame,
(interior,),
)
browserFrame.pack(expand = 1, fill='both')
self._branchFrame = browserFrame.interior()
self._highlightedNode = None
self._treeRoot = self
self._nodeName = nodeName
# Check keywords and initialise options.
self.initialiseoptions()
def _highlightnode(self, newNode):
if self._highlightedNode != newNode:
if self._highlightedNode != None:
self._highlightedNode._unhighlight()
self._highlightedNode = newNode
def _unhighlightnode(self):
if self._highlightedNode != None:
self._highlightedNode._unhighlight()
self._highlightedNode = None
def curselection(self):
retVal = None
if self._highlightedNode != None:
retVal = (self._highlightedNode,
self._highlightedNode._nodeName,
self._highlightedNode['label'])
return retVal
def getname(self):
return self._nodeName
# The top-level TreeBrowser widget only shows nodes in an expanded view
# but still provides collapsetree() and expandtree() methods so that users
# don't have to special case the top-level node
def collapsetree(self):
return
def expandtree(self):
return
def _sizechange(self):
return
if __name__ == '__main__':
rootWin = Tkinter.Tk()
Pmw.initialise()
rootWin.title('TreeBrowser Demo')
# Create the hierarchical tree browser widget
treeBrowser = TreeBrowser(rootWin,
#selectbackground = "darkgreen",
#selectforeground = 'lightgreen',
#background = 'green',
#indent = 10,
)
def printselected(node):
selection = treeBrowser.curselection()
if selection != None:
print "Selected node name:", selection[1], " label:", selection[2]
def printdeselected(node):
selection = treeBrowser.curselection()
if selection != None:
print "Deselected node name:", selection[1], " label:", selection[2]
def printexpanded(node):
print "Expanded node name:", node.getname(), " label:", node.getlabel()
def printcollapsed(node):
print "Collapsed node name:", node.getname(), " label:", node.getlabel()
for i in range(3):
# Add a tree node to the top level
treeNodeLevel1 = treeBrowser.addbranch(label = 'TreeNode %d'%i,
selectcommand = printselected,
deselectcommand = printdeselected,
expandcommand = printexpanded,
collapsecommand = printcollapsed,
)
for j in range(3):
# Add a tree node to the second level
treeNodeLevel2 = treeNodeLevel1.addbranch(label = 'TreeNode %d.%d'%(i,j),
#selectforeground = 'yellow',
selectcommand = printselected,
deselectcommand = printdeselected,
expandcommand = printexpanded,
collapsecommand = printcollapsed,
)
if i == 0 and j == 1:
dynamicTreeRootNode = treeNodeLevel1
dynamicTreePosNode = treeNodeLevel2
for item in range((i+1)*(j+1)):
# Add a leaf node to the third level
leaf = treeNodeLevel2.addleaf(label = "Item %c"%(item+65),
#selectbackground = 'blue',
selectcommand = printselected,
deselectcommand = printdeselected)
for item in range(i+1):
# Add a leaf node to the top level
leaf = treeNodeLevel1.addleaf(label = "Item %c"%(item+65),
selectcommand = printselected,
deselectcommand = printdeselected)
treeNodeLevel1 = treeBrowser.addbranch(label = 'Check Button Label',
selectcommand = printselected,
deselectcommand = printdeselected,
expandcommand = printexpanded,
collapsecommand = printcollapsed,
)
checkButton = Tkinter.Checkbutton(treeNodeLevel1.interior(),
text = 'Da Check Button',
relief = 'ridge',
command = treeNodeLevel1.select)
checkButton.pack()
treeNodeLevel1.addleaf(label = 'Labeled Leaf',
selectcommand = printselected,
deselectcommand = printdeselected)
leaf = treeNodeLevel1.addleaf(label = 'Labeled Leaf w/ Checkbutton',
selectcommand = printselected,
deselectcommand = printdeselected)
checkButton = Tkinter.Checkbutton(leaf.interior(),
text = 'Da Check Button',
relief = 'ridge',
command = leaf.select)
checkButton.pack()
treeNodeLevel1 = treeBrowser.addbranch(selectcommand = printselected,
deselectcommand = printdeselected,
expandcommand = printexpanded,
collapsecommand = printcollapsed,
)
checkButton = Tkinter.Checkbutton(treeNodeLevel1.interior(),
text = 'Check Button with no label',
relief = 'ridge',
command = treeNodeLevel1.select)
checkButton.pack()
treeNodeLevel1 = treeBrowser.addbranch(label = 'Label',
selectcommand = printselected,
deselectcommand = printdeselected,
expandcommand = printexpanded,
collapsecommand = printcollapsed,
)
# setup dynamic tree node insertion and removal
class dynTree:
def __init__(self):
self.dyn = Tkinter.IntVar()
self.dtree = None
self.dLeaf = treeBrowser.addleaf(selectcommand = self.dynSelected,
deselectcommand = self.dynDeselected)
self.dCheckButton = Tkinter.Checkbutton(self.dLeaf.interior(),
text = 'Enable Dynamic Tree',
variable = self.dyn,
command = self.ChkBtnHandler)
self.dCheckButton.pack()
def dynSelected(self, node):
self.dCheckButton.configure(background = self.dLeaf.configure('selectbackground')[4])
printselected(node)
def dynDeselected(self, node):
self.dCheckButton.configure(background = self.dLeaf.configure('background')[4])
printdeselected(node)
def ChkBtnHandler(self):
self.dLeaf.select()
if self.dyn.get() == 1:
self.dtree = dynamicTreeRootNode.insertbranch(label = 'Dynamic Tree Node',
selectcommand = printselected,
deselectcommand = printdeselected,
expandcommand = printexpanded,
collapsecommand = printcollapsed,
before = dynamicTreePosNode)
self.dtree.addleaf(label = 'Dynamic Leaf 1',
selectcommand = printselected,
deselectcommand = printdeselected)
self.dtree.addleaf(label = 'Dynamic Leaf 2',
selectcommand = printselected,
deselectcommand = printdeselected)
else:
if self.dtree != None:
dynamicTreeRootNode.delete(self.dtree)
self.dtree = None
foo = dynTree()
treeBrowser.pack(expand = 1, fill='both')
exitButton = Tkinter.Button(rootWin, text="Quit", command=rootWin.quit)
exitButton.pack()
rootWin.mainloop()
|
|
from __future__ import absolute_import, unicode_literals
import io
import os
import sys
from collections import defaultdict
from functools import partial
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.py26compat import import_module
from setuptools.extern.six import string_types
def read_configuration(
filepath, find_others=False, ignore_option_errors=False):
"""Read given configuration file and returns options from it as a dict.
:param str|unicode filepath: Path to configuration file
to get options from.
:param bool find_others: Whether to search for other configuration files
which could be in various places.
:param bool ignore_option_errors: Whether to silently ignore
options whose values could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False, exceptions are propagated as expected.
:rtype: dict
"""
from setuptools.dist import Distribution, _Distribution
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise DistutilsFileError(
'Configuration file %s does not exist.' % filepath)
current_directory = os.getcwd()
os.chdir(os.path.dirname(filepath))
try:
dist = Distribution()
filenames = dist.find_config_files() if find_others else []
if filepath not in filenames:
filenames.append(filepath)
_Distribution.parse_config_files(dist, filenames=filenames)
handlers = parse_configuration(
dist, dist.command_options,
ignore_option_errors=ignore_option_errors)
finally:
os.chdir(current_directory)
return configuration_to_dict(handlers)
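# Usage sketch (illustrative only; the module path and 'setup.cfg' file name
# are assumptions, not taken from this file):
#   from setuptools.config import read_configuration
#   conf = read_configuration('setup.cfg')
#   conf['metadata'].get('version')              # value of [metadata] version, if set
#   conf['options'].get('install_requires', [])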
def configuration_to_dict(handlers):
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict = defaultdict(dict)
for handler in handlers:
obj_alias = handler.section_prefix
target_obj = handler.target_obj
for option in handler.set_options:
getter = getattr(target_obj, 'get_%s' % option, None)
if getter is None:
value = getattr(target_obj, option)
else:
value = getter()
config_dict[obj_alias][option] = value
return config_dict
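# For example (illustrative): with the two handlers produced by
# parse_configuration() below, the result is shaped like
#   {'metadata': {'version': '1.0', ...}, 'options': {'packages': [...], ...}}
# keyed by each handler's section_prefix and containing only the options
# that were actually set.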
def parse_configuration(
distribution, command_options, ignore_option_errors=False):
"""Performs additional parsing of configuration options
for a distribution.
Returns a list of used option handlers.
:param Distribution distribution:
:param dict command_options:
:param bool ignore_option_errors: Whether to silently ignore
options whose values could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False, exceptions are propagated as expected.
:rtype: list
"""
meta = ConfigMetadataHandler(
distribution.metadata, command_options, ignore_option_errors)
meta.parse()
options = ConfigOptionsHandler(
distribution, command_options, ignore_option_errors)
options.parse()
return [meta, options]
class ConfigHandler(object):
"""Handles metadata supplied in configuration files."""
section_prefix = None
"""Prefix for config sections handled by this handler.
Must be provided by subclasses.
"""
aliases = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already inhabited. Skipping.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
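# Example flow (illustrative): for a ConfigMetadataHandler,
#   self['summary'] = 'My package'
# is aliased to 'description', passed through the matching parser from
# self.parsers (here _parse_file), then applied via
# target_obj.set_description(...) if such a setter exists, otherwise via
# setattr(); options whose current value is already truthy are skipped.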
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
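# Examples (illustrative):
#   ConfigHandler._parse_list('numpy, pandas')    -> ['numpy', 'pandas']
#   ConfigHandler._parse_list('numpy\npandas\n')  -> ['numpy', 'pandas']
# Newlines take precedence over the separator when both are present.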
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
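# Example (illustrative):
#   ConfigHandler._parse_dict('docs = docs\ntests = tests')
#   -> {'docs': 'docs', 'tests': 'tests'}
# A line without '=' raises DistutilsOptionError.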
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
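# Examples (illustrative): '1', 'true', 'yes' (any case) -> True;
# anything else, e.g. '0', 'no', 'false' -> False.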
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
file: LICENSE
file: src/file.txt
:param str value:
:rtype: str
"""
if not isinstance(value, string_types):
return value
include_directive = 'file:'
if not value.startswith(include_directive):
return value
current_directory = os.getcwd()
filepath = value.replace(include_directive, '').strip()
filepath = os.path.abspath(filepath)
if not filepath.startswith(current_directory):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
if os.path.isfile(filepath):
with io.open(filepath, encoding='utf-8') as f:
value = f.read()
return value
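# Example (illustrative; the file name is an assumption):
#   _parse_file('file: README.rst')  -> contents of ./README.rst (read as UTF-8)
#   _parse_file('just a string')     -> 'just a string' (no directive, unchanged)
# A path resolving outside the current directory raises DistutilsOptionError.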
@classmethod
def _parse_attr(cls, value):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
sys.path.insert(0, os.getcwd())
try:
module = import_module(module_name)
value = getattr(module, attr_name)
finally:
sys.path = sys.path[1:]
return value
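# Example (illustrative; 'mypkg' is a made-up package name):
#   _parse_attr('attr: mypkg.__version__')
# imports 'mypkg' (with the current directory prepended to sys.path) and
# returns its __version__ attribute; values without the 'attr:' prefix are
# returned unchanged.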
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass # Keep silent: a new option may appear at any time.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are translated into double underscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
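# Dispatch example (illustrative): a bare [metadata] or [options] section
# maps to parse_section(); a dotted section such as [options.packages.find]
# yields method_postfix '_packages.find' and dispatches to
# parse_section_packages__find() once dots are replaced with '__'.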
class ConfigMetadataHandler(ConfigHandler):
section_prefix = 'metadata'
aliases = {
'home_page': 'url',
'summary': 'description',
'classifier': 'classifiers',
'platform': 'platforms',
}
strict_mode = False
"""We need to keep it loose, to be partially compatible with
`pbr` and `d2to1` packages, which also use the `metadata` section.
"""
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_file = self._parse_file
return {
'platforms': parse_list,
'keywords': parse_list,
'provides': parse_list,
'requires': parse_list,
'obsoletes': parse_list,
'classifiers': self._get_parser_compound(parse_file, parse_list),
'license': parse_file,
'description': parse_file,
'long_description': parse_file,
'version': self._parse_version,
}
def parse_section_classifiers(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
classifiers = []
for begin, (_, rest) in section_options.items():
classifiers.append('%s :%s' % (begin.title(), rest))
self['classifiers'] = classifiers
def _parse_version(self, value):
"""Parses `version` option value.
:param value:
:rtype: str
"""
version = self._parse_attr(value)
if callable(version):
version = version()
if not isinstance(version, string_types):
if hasattr(version, '__iter__'):
version = '.'.join(map(str, version))
else:
version = '%s' % version
return version
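# Examples (illustrative; 'mypkg' is a made-up name): 'attr: mypkg.VERSION'
# resolving to (1, 2, 3) yields '1.2.3'; a callable is called first; plain
# strings such as '1.0' pass through the attr: handling and are returned as-is.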
class ConfigOptionsHandler(ConfigHandler):
section_prefix = 'options'
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_list_semicolon = partial(self._parse_list, separator=';')
parse_bool = self._parse_bool
parse_dict = self._parse_dict
return {
'zip_safe': parse_bool,
'use_2to3': parse_bool,
'include_package_data': parse_bool,
'package_dir': parse_dict,
'use_2to3_fixers': parse_list,
'use_2to3_exclude_fixers': parse_list,
'convert_2to3_doctests': parse_list,
'scripts': parse_list,
'eager_resources': parse_list,
'dependency_links': parse_list,
'namespace_packages': parse_list,
'install_requires': parse_list_semicolon,
'setup_requires': parse_list_semicolon,
'tests_require': parse_list_semicolon,
'packages': self._parse_packages,
'entry_points': self._parse_file,
}
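# Note (illustrative): install_requires/setup_requires/tests_require split on
# ';' rather than ',', so 'requests; pandas' -> ['requests', 'pandas'], which
# keeps version specifiers containing commas (e.g. 'pkg>=1,<2') intact.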
def _parse_packages(self, value):
"""Parses `packages` option value.
:param value:
:rtype: list
"""
find_directive = 'find:'
if not value.startswith(find_directive):
return self._parse_list(value)
# Read function arguments from a dedicated section.
find_kwargs = self.parse_section_packages__find(
self.sections.get('packages.find', {}))
from setuptools import find_packages
return find_packages(**find_kwargs)
def parse_section_packages__find(self, section_options):
"""Parses `packages.find` configuration file section.
To be used in conjunction with _parse_packages().
:param dict section_options:
"""
section_data = self._parse_section_to_dict(
section_options, self._parse_list)
valid_keys = ['where', 'include', 'exclude']
find_kwargs = dict(
[(k, v) for k, v in section_data.items() if k in valid_keys and v])
where = find_kwargs.get('where')
if where is not None:
find_kwargs['where'] = where[0] # cast list to single val
return find_kwargs
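# Example (illustrative): a config section
#   [options.packages.find]
#   where = src
#   exclude = tests
# yields find_kwargs {'where': 'src', 'exclude': ['tests']} and hence
# find_packages(where='src', exclude=['tests']).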
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _parse_package_data(self, section_options):
parsed = self._parse_section_to_dict(section_options, self._parse_list)
root = parsed.get('*')
if root:
parsed[''] = root
del parsed['*']
return parsed
def parse_section_package_data(self, section_options):
"""Parses `package_data` configuration file section.
:param dict section_options:
"""
self['package_data'] = self._parse_package_data(section_options)
def parse_section_exclude_package_data(self, section_options):
"""Parses `exclude_package_data` configuration file section.
:param dict section_options:
"""
self['exclude_package_data'] = self._parse_package_data(
section_options)
def parse_section_extras_require(self, section_options):
"""Parses `extras_require` configuration file section.
:param dict section_options:
"""
parse_list = partial(self._parse_list, separator=';')
self['extras_require'] = self._parse_section_to_dict(
section_options, parse_list)
|
|
from hearthbreaker.cards.minions.neutral import (
BloodfenRaptor,
IronbeakOwl,
NoviceEngineer,
StonetuskBoar,
WarGolem,
MogushanWarden,
FaerieDragon,
KoboldGeomancer,
ElvenArcher,
ArgentSquire,
SilvermoonGuardian,
TwilightDrake,
MagmaRager,
DireWolfAlpha,
WorgenInfiltrator,
Archmage,
DalaranMage,
Malygos,
AzureDrake,
OgreMagi,
Spellbreaker,
BloodmageThalnos,
LootHoarder,
LeperGnome,
IronforgeRifleman,
GnomishInventor,
GoldshireFootman,
FrostwolfGrunt,
IronfurGrizzly,
LordOfTheArena,
MurlocRaider,
ManaAddict,
OasisSnapjaw,
RecklessRocketeer,
RiverCrocolisk,
SenjinShieldmasta,
ScarletCrusader,
Shieldbearer,
SilverbackPatriarch,
JunglePanther,
RavenholdtAssassin,
StormpikeCommando,
StormwindKnight,
StranglethornTiger,
Sunwalker,
ThrallmarFarseer,
WindfuryHarpy,
YoungDragonhawk,
Wolfrider,
BootyBayBodyguard,
BoulderfistOgre,
ChillwindYeti,
CoreHound,
VoodooDoctor,
EarthenRingFarseer,
ArcaneGolem,
PriestessOfElune,
DarkscaleHealer,
ArgentCommander,
BluegillWarrior,
Wisp,
Nightblade,
ShatteredSunCleric,
TheBlackKnight,
AbusiveSergeant,
DarkIronDwarf,
Abomination,
AmaniBerserker,
SilverHandKnight,
FenCreeper,
VentureCoMercenary,
StormwindChampion,
Deathwing,
Alexstrasza,
EmperorCobra,
CrazedAlchemist,
AcidicSwampOoze,
AncientBrewmaster,
YouthfulBrewmaster,
BaronGeddon,
AngryChicken,
RagingWorgen,
TaurenWarrior,
SpitefulSmith,
BloodKnight,
FrostwolfWarlord,
RaidLeader,
DragonlingMechanic,
MurlocTidehunter,
RazorfenHunter,
KnifeJuggler,
CairneBloodhoof,
HarvestGolem,
TheBeast,
SylvanasWindrunner,
StampedingKodo,
FrostElemental,
Demolisher,
Doomsayer,
Gruul,
Hogger,
ImpMaster,
InjuredBlademaster,
MasterSwordsmith,
NatPagle,
Nozdormu,
RagnarosTheFirelord,
ColdlightOracle,
ColdlightSeer,
GrimscaleOracle,
MurlocWarleader,
AncientWatcher,
BigGameHunter,
BloodsailCorsair,
BloodsailRaider,
CaptainGreenskin,
HungryCrab,
MadBomber,
ManaWraith,
MindControlTech,
MurlocTidecaller,
Onyxia,
SouthseaCaptain,
SouthseaDeckhand,
YoungPriestess,
AcolyteOfPain,
CultMaster,
Secretkeeper,
VioletTeacher,
GadgetzanAuctioneer,
IllidanStormrage,
Lightwarden,
FlesheatingGhoul,
QuestingAdventurer,
GurubashiBerserker,
AncientMage,
DefenderOfArgus,
SunfuryProtector,
HarrisonJones,
KingMukla,
LeeroyJenkins,
SeaGiant,
MoltenGiant,
MountainGiant,
DreadCorsair,
CaptainsParrot,
TinkmasterOverspark,
AlarmoBot,
EliteTaurenChieftain,
MillhouseManastorm,
PintSizedSummoner,
OldMurkEye,
Ysera,
GelbinMekkatorque,
LorewalkerCho,
WildPyromancer,
FacelessManipulator,
NerubianEgg,
Maexxna,
HauntedCreeper,
NerubarWeblord,
UnstableGhoul,
Loatheb,
StoneskinGargoyle,
SludgeBelcher,
BaronRivendare,
DancingSwords,
Deathlord,
SpectralKnight,
Undertaker,
WailingSoul,
ZombieChow,
Feugen,
Stalagg,
MadScientist,
EchoingOoze,
ShadeOfNaxxramas,
KelThuzad,
PilotedShredder,
PilotedSkyGolem,
SneedsOldShredder,
AntiqueHealbot,
AnnoyoTron,
ArcaneNullifierX21,
Blingtron3000,
BombLobber,
BurlyRockjawTrogg,
Mechwarper,
Frog,
ClockworkGiant,
ClockworkGnome,
BoomBot,
DoctorBoom,
TargetDummy,
ExplosiveSheep,
Puddlestomper,
MicroMachine,
MechanicalYeti,
SpiderTank,
GilblinStalker,
ShipsCannon,
OgreBrute,
MogorTheOgre,
Toshley,
ForceTankMAX,
FelReaver,
MadderBomber,
Gazlowe,
MiniMage,
SaltyDog,
GnomereganInfantry,
FlyingMachine,
LostTallstrider,
HemetNesingwary,
Illuminator,
MekgineerThermaplugg,
StonesplinterTrogg,
TroggzorTheEarthinator,
Hobgoblin,
Cogmaster,
GoblinSapper,
TinkertownTechnician,
Junkbot,
Jeeves,
Recombobulator,
LilExorcist,
EnhanceoMechano,
FoeReaper4000,
KezanMystic,
MimironsHead,
GnomishExperimenter,
HungryDragon,
GrimPatron,
BlackwingTechnician,
EmperorThaurissan,
MajordomoExecutus,
VolcanicDrake,
BlackwingCorruptor,
DrakonidCrusher,
DragonEgg,
Chromaggus,
DragonkinSorcerer,
RendBlackhand,
Nefarian,
)
from hearthbreaker.cards.minions.druid import (
KeeperOfTheGrove,
DruidOfTheClaw,
AncientOfLore,
AncientOfWar,
IronbarkProtector,
Cenarius,
AnodizedRoboCub,
MechBearCat,
DruidOfTheFang,
Malorne,
GroveTender,
DruidOfTheFlame,
VolcanicLumberer,
)
from hearthbreaker.cards.minions.hunter import (
TimberWolf,
SavannahHighmane,
Houndmaster,
KingKrush,
StarvingBuzzard,
TundraRhino,
ScavengingHyena,
Webspinner,
Hound,
Huffer,
Misha,
Leokk,
Snake,
MetaltoothLeaper,
KingOfBeasts,
Gahzrilla,
SteamwheedleSniper,
CoreRager,
)
from hearthbreaker.cards.minions.mage import (
ManaWyrm,
SorcerersApprentice,
KirinTorMage,
EtherealArcanist,
WaterElemental,
ArchmageAntonidas,
Snowchugger,
GoblinBlastmage,
SootSpewer,
WeeSpellstopper,
FlameLeviathan,
Flamewaker
)
from hearthbreaker.cards.minions.paladin import (
AldorPeacekeeper,
ArgentProtector,
GuardianOfKings,
TirionFordring,
CobaltGuardian,
SilverHandRecruit,
ShieldedMinibot,
Quartermaster,
ScarletPurifier,
BolvarFordragon,
DragonConsort,
)
from hearthbreaker.cards.minions.priest import (
AuchenaiSoulpriest,
CabalShadowPriest,
Lightspawn,
Lightwell,
NorthshireCleric,
ProphetVelen,
TempleEnforcer,
DarkCultist,
Shrinkmeister,
UpgradedRepairBot,
Shadowbomber,
Shadowboxer,
Voljin,
TwilightWhelp,
)
from hearthbreaker.cards.minions.rogue import (
AnubarAmbusher,
DefiasRingleader,
EdwinVanCleef,
Kidnapper,
MasterOfDisguise,
PatientAssassin,
SI7Agent,
OneeyedCheat,
IronSensei,
OgreNinja,
TradePrinceGallywix,
GoblinAutoBarber,
DarkIronSkulker,
)
from hearthbreaker.cards.minions.shaman import (
AlAkirTheWindlord,
DustDevil,
EarthElemental,
FireElemental,
FlametongueTotem,
ManaTideTotem,
UnboundElemental,
Windspeaker,
HealingTotem,
SearingTotem,
StoneclawTotem,
WrathOfAirTotem,
SpiritWolf,
VitalityTotem,
SiltfinSpiritwalker,
WhirlingZapomatic,
DunemaulShaman,
Neptulon,
FireguardDestroyer,
)
from hearthbreaker.cards.minions.warlock import (
FlameImp,
PitLord,
Voidwalker,
DreadInfernal,
Felguard,
Doomguard,
Succubus,
SummoningPortal,
BloodImp,
LordJaraxxus,
VoidTerror,
Voidcaller,
AnimaGolem,
WorthlessImp,
FelCannon,
MalGanis,
FloatingWatcher,
MistressOfPain,
ImpGangBoss,
)
from hearthbreaker.cards.minions.warrior import (
ArathiWeaponsmith,
Armorsmith,
CruelTaskmaster,
FrothingBerserker,
GrommashHellscream,
KorkronElite,
WarsongCommander,
Warbot,
Shieldmaiden,
SiegeEngine,
IronJuggernaut,
ScrewjankClunker,
AxeFlinger,
)
|
|
" ""VK API description"""
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2016 Valve Corporation
# Copyright (c) 2015-2016 LunarG, Inc.
# Copyright (c) 2015-2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Chia-I Wu <[email protected]>
# Author: Jon Ashburn <[email protected]>
# Author: Courtney Goeltzenleuchter <[email protected]>
# Author: Tobin Ehlis <[email protected]>
# Author: Tony Barbour <[email protected]>
# Author: Gwan-gyeong Mun <[email protected]>
class Param(object):
"""A function parameter."""
def __init__(self, ty, name):
self.ty = ty
self.name = name
def c(self):
"""Return the parameter in C."""
idx = self.ty.find("[")
# arrays have a different syntax
if idx >= 0:
return "%s %s%s" % (self.ty[:idx], self.name, self.ty[idx:])
else:
return "%s %s" % (self.ty, self.name)
def indirection_level(self):
"""Return the level of indirection."""
return self.ty.count("*") + self.ty.count("[")
def dereferenced_type(self, level=0):
"""Return the type after dereferencing."""
if not level:
level = self.indirection_level()
deref = self.ty if level else ""
while level > 0:
idx = deref.rfind("[")
if idx < 0:
idx = deref.rfind("*")
if idx < 0:
deref = ""
break
deref = deref[:idx]
level -= 1
return deref.rstrip()
def __repr__(self):
return "Param(\"%s\", \"%s\")" % (self.ty, self.name)
class Proto(object):
"""A function prototype."""
def __init__(self, ret, name, params=[]):
# allow a single Param to be passed instead of a list
if not isinstance(params, list):
params = [params]
self.ret = ret
self.name = name
self.params = params
def c_params(self, need_type=True, need_name=True):
"""Return the parameter list in C."""
if self.params and (need_type or need_name):
if need_type and need_name:
return ", ".join([param.c() for param in self.params])
elif need_type:
return ", ".join([param.ty for param in self.params])
else:
return ", ".join([param.name for param in self.params])
else:
return "void" if need_type else ""
def c_decl(self, name, attr="", typed=False, need_param_names=True):
"""Return a named declaration in C."""
if typed:
return "%s (%s*%s)(%s)" % (
self.ret,
attr + "_PTR " if attr else "",
name,
self.c_params(need_name=need_param_names))
else:
return "%s%s %s%s(%s)" % (
attr + "_ATTR " if attr else "",
self.ret,
attr + "_CALL " if attr else "",
name,
self.c_params(need_name=need_param_names))
def c_pretty_decl(self, name, attr=""):
"""Return a named declaration in C, with vulkan.h formatting."""
plist = []
for param in self.params:
idx = param.ty.find("[")
if idx < 0:
idx = len(param.ty)
pad = 44 - idx
if pad <= 0:
pad = 1
plist.append(" %s%s%s%s" % (param.ty[:idx],
" " * pad, param.name, param.ty[idx:]))
return "%s%s %s%s(\n%s)" % (
attr + "_ATTR " if attr else "",
self.ret,
attr + "_CALL " if attr else "",
name,
",\n".join(plist))
def c_typedef(self, suffix="", attr=""):
"""Return the typedef for the prototype in C."""
return self.c_decl(self.name + suffix, attr=attr, typed=True)
def c_func(self, prefix="", attr=""):
"""Return the prototype in C."""
return self.c_decl(prefix + self.name, attr=attr, typed=False)
def c_call(self):
"""Return a call to the prototype in C."""
return "%s(%s)" % (self.name, self.c_params(need_type=False))
def object_in_params(self):
"""Return the params that are simple VK objects and are inputs."""
return [param for param in self.params if param.ty in objects]
def object_out_params(self):
"""Return the params that are simple VK objects and are outputs."""
return [param for param in self.params
if param.dereferenced_type() in objects]
def __repr__(self):
param_strs = []
for param in self.params:
param_strs.append(str(param))
param_str = " [%s]" % (",\n ".join(param_strs))
return "Proto(\"%s\", \"%s\",\n%s)" % \
(self.ret, self.name, param_str)
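# Illustrative rendering (added commentary): with
#   p = Proto("VkResult", "QueueWaitIdle", [Param("VkQueue", "queue")])
# the helpers produce roughly:
#   p.c_func(prefix="vk") -> "VkResult vkQueueWaitIdle(VkQueue queue)"
#   p.c_typedef()         -> "VkResult (*QueueWaitIdle)(VkQueue queue)"
#   p.c_call()            -> "QueueWaitIdle(queue)"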
class Extension(object):
def __init__(self, name, headers, objects, protos, ifdef = None):
self.name = name
self.headers = headers
self.objects = objects
self.protos = protos
self.ifdef = ifdef
def __repr__(self):
lines = []
lines.append("Extension(")
lines.append(" name=\"%s\"," % self.name)
lines.append(" headers=[\"%s\"]," %
"\", \"".join(self.headers))
lines.append(" objects=[")
for obj in self.objects:
lines.append(" \"%s\"," % obj)
lines.append(" ],")
lines.append(" protos=[")
for proto in self.protos:
param_lines = str(proto).splitlines()
param_lines[-1] += ",\n" if proto != self.protos[-1] else ","
for p in param_lines:
lines.append(" " + p)
lines.append(" ],")
lines.append(")")
return "\n".join(lines)
# VK core API
core = Extension(
name="VK_CORE",
headers=["vulkan/vulkan.h"],
objects=[
"VkInstance",
"VkPhysicalDevice",
"VkDevice",
"VkQueue",
"VkSemaphore",
"VkCommandBuffer",
"VkFence",
"VkDeviceMemory",
"VkBuffer",
"VkImage",
"VkEvent",
"VkQueryPool",
"VkBufferView",
"VkImageView",
"VkShaderModule",
"VkPipelineCache",
"VkPipelineLayout",
"VkRenderPass",
"VkPipeline",
"VkDescriptorSetLayout",
"VkSampler",
"VkDescriptorPool",
"VkDescriptorSet",
"VkFramebuffer",
"VkCommandPool",
],
protos=[
Proto("VkResult", "CreateInstance",
[Param("const VkInstanceCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkInstance*", "pInstance")]),
Proto("void", "DestroyInstance",
[Param("VkInstance", "instance"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "EnumeratePhysicalDevices",
[Param("VkInstance", "instance"),
Param("uint32_t*", "pPhysicalDeviceCount"),
Param("VkPhysicalDevice*", "pPhysicalDevices")]),
Proto("void", "GetPhysicalDeviceFeatures",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkPhysicalDeviceFeatures*", "pFeatures")]),
Proto("void", "GetPhysicalDeviceFormatProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkFormat", "format"),
Param("VkFormatProperties*", "pFormatProperties")]),
Proto("VkResult", "GetPhysicalDeviceImageFormatProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkFormat", "format"),
Param("VkImageType", "type"),
Param("VkImageTiling", "tiling"),
Param("VkImageUsageFlags", "usage"),
Param("VkImageCreateFlags", "flags"),
Param("VkImageFormatProperties*", "pImageFormatProperties")]),
Proto("void", "GetPhysicalDeviceProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkPhysicalDeviceProperties*", "pProperties")]),
Proto("void", "GetPhysicalDeviceQueueFamilyProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t*", "pQueueFamilyPropertyCount"),
Param("VkQueueFamilyProperties*", "pQueueFamilyProperties")]),
Proto("void", "GetPhysicalDeviceMemoryProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkPhysicalDeviceMemoryProperties*", "pMemoryProperties")]),
Proto("PFN_vkVoidFunction", "GetInstanceProcAddr",
[Param("VkInstance", "instance"),
Param("const char*", "pName")]),
Proto("PFN_vkVoidFunction", "GetDeviceProcAddr",
[Param("VkDevice", "device"),
Param("const char*", "pName")]),
Proto("VkResult", "CreateDevice",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("const VkDeviceCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkDevice*", "pDevice")]),
Proto("void", "DestroyDevice",
[Param("VkDevice", "device"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "EnumerateInstanceExtensionProperties",
[Param("const char*", "pLayerName"),
Param("uint32_t*", "pPropertyCount"),
Param("VkExtensionProperties*", "pProperties")]),
Proto("VkResult", "EnumerateDeviceExtensionProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("const char*", "pLayerName"),
Param("uint32_t*", "pPropertyCount"),
Param("VkExtensionProperties*", "pProperties")]),
Proto("VkResult", "EnumerateInstanceLayerProperties",
[Param("uint32_t*", "pPropertyCount"),
Param("VkLayerProperties*", "pProperties")]),
Proto("VkResult", "EnumerateDeviceLayerProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t*", "pPropertyCount"),
Param("VkLayerProperties*", "pProperties")]),
Proto("void", "GetDeviceQueue",
[Param("VkDevice", "device"),
Param("uint32_t", "queueFamilyIndex"),
Param("uint32_t", "queueIndex"),
Param("VkQueue*", "pQueue")]),
Proto("VkResult", "QueueSubmit",
[Param("VkQueue", "queue"),
Param("uint32_t", "submitCount"),
Param("const VkSubmitInfo*", "pSubmits"),
Param("VkFence", "fence")]),
Proto("VkResult", "QueueWaitIdle",
[Param("VkQueue", "queue")]),
Proto("VkResult", "DeviceWaitIdle",
[Param("VkDevice", "device")]),
Proto("VkResult", "AllocateMemory",
[Param("VkDevice", "device"),
Param("const VkMemoryAllocateInfo*", "pAllocateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkDeviceMemory*", "pMemory")]),
Proto("void", "FreeMemory",
[Param("VkDevice", "device"),
Param("VkDeviceMemory", "memory"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "MapMemory",
[Param("VkDevice", "device"),
Param("VkDeviceMemory", "memory"),
Param("VkDeviceSize", "offset"),
Param("VkDeviceSize", "size"),
Param("VkMemoryMapFlags", "flags"),
Param("void**", "ppData")]),
Proto("void", "UnmapMemory",
[Param("VkDevice", "device"),
Param("VkDeviceMemory", "memory")]),
Proto("VkResult", "FlushMappedMemoryRanges",
[Param("VkDevice", "device"),
Param("uint32_t", "memoryRangeCount"),
Param("const VkMappedMemoryRange*", "pMemoryRanges")]),
Proto("VkResult", "InvalidateMappedMemoryRanges",
[Param("VkDevice", "device"),
Param("uint32_t", "memoryRangeCount"),
Param("const VkMappedMemoryRange*", "pMemoryRanges")]),
Proto("void", "GetDeviceMemoryCommitment",
[Param("VkDevice", "device"),
Param("VkDeviceMemory", "memory"),
Param("VkDeviceSize*", "pCommittedMemoryInBytes")]),
Proto("VkResult", "BindBufferMemory",
[Param("VkDevice", "device"),
Param("VkBuffer", "buffer"),
Param("VkDeviceMemory", "memory"),
Param("VkDeviceSize", "memoryOffset")]),
Proto("VkResult", "BindImageMemory",
[Param("VkDevice", "device"),
Param("VkImage", "image"),
Param("VkDeviceMemory", "memory"),
Param("VkDeviceSize", "memoryOffset")]),
Proto("void", "GetBufferMemoryRequirements",
[Param("VkDevice", "device"),
Param("VkBuffer", "buffer"),
Param("VkMemoryRequirements*", "pMemoryRequirements")]),
Proto("void", "GetImageMemoryRequirements",
[Param("VkDevice", "device"),
Param("VkImage", "image"),
Param("VkMemoryRequirements*", "pMemoryRequirements")]),
Proto("void", "GetImageSparseMemoryRequirements",
[Param("VkDevice", "device"),
Param("VkImage", "image"),
Param("uint32_t*", "pSparseMemoryRequirementCount"),
Param("VkSparseImageMemoryRequirements*", "pSparseMemoryRequirements")]),
Proto("void", "GetPhysicalDeviceSparseImageFormatProperties",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkFormat", "format"),
Param("VkImageType", "type"),
Param("VkSampleCountFlagBits", "samples"),
Param("VkImageUsageFlags", "usage"),
Param("VkImageTiling", "tiling"),
Param("uint32_t*", "pPropertyCount"),
Param("VkSparseImageFormatProperties*", "pProperties")]),
Proto("VkResult", "QueueBindSparse",
[Param("VkQueue", "queue"),
Param("uint32_t", "bindInfoCount"),
Param("const VkBindSparseInfo*", "pBindInfo"),
Param("VkFence", "fence")]),
Proto("VkResult", "CreateFence",
[Param("VkDevice", "device"),
Param("const VkFenceCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkFence*", "pFence")]),
Proto("void", "DestroyFence",
[Param("VkDevice", "device"),
Param("VkFence", "fence"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "ResetFences",
[Param("VkDevice", "device"),
Param("uint32_t", "fenceCount"),
Param("const VkFence*", "pFences")]),
Proto("VkResult", "GetFenceStatus",
[Param("VkDevice", "device"),
Param("VkFence", "fence")]),
Proto("VkResult", "WaitForFences",
[Param("VkDevice", "device"),
Param("uint32_t", "fenceCount"),
Param("const VkFence*", "pFences"),
Param("VkBool32", "waitAll"),
Param("uint64_t", "timeout")]),
Proto("VkResult", "CreateSemaphore",
[Param("VkDevice", "device"),
Param("const VkSemaphoreCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSemaphore*", "pSemaphore")]),
Proto("void", "DestroySemaphore",
[Param("VkDevice", "device"),
Param("VkSemaphore", "semaphore"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateEvent",
[Param("VkDevice", "device"),
Param("const VkEventCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkEvent*", "pEvent")]),
Proto("void", "DestroyEvent",
[Param("VkDevice", "device"),
Param("VkEvent", "event"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "GetEventStatus",
[Param("VkDevice", "device"),
Param("VkEvent", "event")]),
Proto("VkResult", "SetEvent",
[Param("VkDevice", "device"),
Param("VkEvent", "event")]),
Proto("VkResult", "ResetEvent",
[Param("VkDevice", "device"),
Param("VkEvent", "event")]),
Proto("VkResult", "CreateQueryPool",
[Param("VkDevice", "device"),
Param("const VkQueryPoolCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkQueryPool*", "pQueryPool")]),
Proto("void", "DestroyQueryPool",
[Param("VkDevice", "device"),
Param("VkQueryPool", "queryPool"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "GetQueryPoolResults",
[Param("VkDevice", "device"),
Param("VkQueryPool", "queryPool"),
Param("uint32_t", "firstQuery"),
Param("uint32_t", "queryCount"),
Param("size_t", "dataSize"),
Param("void*", "pData"),
Param("VkDeviceSize", "stride"),
Param("VkQueryResultFlags", "flags")]),
Proto("VkResult", "CreateBuffer",
[Param("VkDevice", "device"),
Param("const VkBufferCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkBuffer*", "pBuffer")]),
Proto("void", "DestroyBuffer",
[Param("VkDevice", "device"),
Param("VkBuffer", "buffer"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateBufferView",
[Param("VkDevice", "device"),
Param("const VkBufferViewCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkBufferView*", "pView")]),
Proto("void", "DestroyBufferView",
[Param("VkDevice", "device"),
Param("VkBufferView", "bufferView"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateImage",
[Param("VkDevice", "device"),
Param("const VkImageCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkImage*", "pImage")]),
Proto("void", "DestroyImage",
[Param("VkDevice", "device"),
Param("VkImage", "image"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("void", "GetImageSubresourceLayout",
[Param("VkDevice", "device"),
Param("VkImage", "image"),
Param("const VkImageSubresource*", "pSubresource"),
Param("VkSubresourceLayout*", "pLayout")]),
Proto("VkResult", "CreateImageView",
[Param("VkDevice", "device"),
Param("const VkImageViewCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkImageView*", "pView")]),
Proto("void", "DestroyImageView",
[Param("VkDevice", "device"),
Param("VkImageView", "imageView"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateShaderModule",
[Param("VkDevice", "device"),
Param("const VkShaderModuleCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkShaderModule*", "pShaderModule")]),
Proto("void", "DestroyShaderModule",
[Param("VkDevice", "device"),
Param("VkShaderModule", "shaderModule"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreatePipelineCache",
[Param("VkDevice", "device"),
Param("const VkPipelineCacheCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkPipelineCache*", "pPipelineCache")]),
Proto("void", "DestroyPipelineCache",
[Param("VkDevice", "device"),
Param("VkPipelineCache", "pipelineCache"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "GetPipelineCacheData",
[Param("VkDevice", "device"),
Param("VkPipelineCache", "pipelineCache"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
Proto("VkResult", "MergePipelineCaches",
[Param("VkDevice", "device"),
Param("VkPipelineCache", "dstCache"),
Param("uint32_t", "srcCacheCount"),
Param("const VkPipelineCache*", "pSrcCaches")]),
Proto("VkResult", "CreateGraphicsPipelines",
[Param("VkDevice", "device"),
Param("VkPipelineCache", "pipelineCache"),
Param("uint32_t", "createInfoCount"),
Param("const VkGraphicsPipelineCreateInfo*", "pCreateInfos"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkPipeline*", "pPipelines")]),
Proto("VkResult", "CreateComputePipelines",
[Param("VkDevice", "device"),
Param("VkPipelineCache", "pipelineCache"),
Param("uint32_t", "createInfoCount"),
Param("const VkComputePipelineCreateInfo*", "pCreateInfos"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkPipeline*", "pPipelines")]),
Proto("void", "DestroyPipeline",
[Param("VkDevice", "device"),
Param("VkPipeline", "pipeline"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreatePipelineLayout",
[Param("VkDevice", "device"),
Param("const VkPipelineLayoutCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkPipelineLayout*", "pPipelineLayout")]),
Proto("void", "DestroyPipelineLayout",
[Param("VkDevice", "device"),
Param("VkPipelineLayout", "pipelineLayout"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateSampler",
[Param("VkDevice", "device"),
Param("const VkSamplerCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSampler*", "pSampler")]),
Proto("void", "DestroySampler",
[Param("VkDevice", "device"),
Param("VkSampler", "sampler"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateDescriptorSetLayout",
[Param("VkDevice", "device"),
Param("const VkDescriptorSetLayoutCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkDescriptorSetLayout*", "pSetLayout")]),
Proto("void", "DestroyDescriptorSetLayout",
[Param("VkDevice", "device"),
Param("VkDescriptorSetLayout", "descriptorSetLayout"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateDescriptorPool",
[Param("VkDevice", "device"),
Param("const VkDescriptorPoolCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkDescriptorPool*", "pDescriptorPool")]),
Proto("void", "DestroyDescriptorPool",
[Param("VkDevice", "device"),
Param("VkDescriptorPool", "descriptorPool"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "ResetDescriptorPool",
[Param("VkDevice", "device"),
Param("VkDescriptorPool", "descriptorPool"),
Param("VkDescriptorPoolResetFlags", "flags")]),
Proto("VkResult", "AllocateDescriptorSets",
[Param("VkDevice", "device"),
Param("const VkDescriptorSetAllocateInfo*", "pAllocateInfo"),
Param("VkDescriptorSet*", "pDescriptorSets")]),
Proto("VkResult", "FreeDescriptorSets",
[Param("VkDevice", "device"),
Param("VkDescriptorPool", "descriptorPool"),
Param("uint32_t", "descriptorSetCount"),
Param("const VkDescriptorSet*", "pDescriptorSets")]),
Proto("void", "UpdateDescriptorSets",
[Param("VkDevice", "device"),
Param("uint32_t", "descriptorWriteCount"),
Param("const VkWriteDescriptorSet*", "pDescriptorWrites"),
Param("uint32_t", "descriptorCopyCount"),
Param("const VkCopyDescriptorSet*", "pDescriptorCopies")]),
Proto("VkResult", "CreateFramebuffer",
[Param("VkDevice", "device"),
Param("const VkFramebufferCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkFramebuffer*", "pFramebuffer")]),
Proto("void", "DestroyFramebuffer",
[Param("VkDevice", "device"),
Param("VkFramebuffer", "framebuffer"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "CreateRenderPass",
[Param("VkDevice", "device"),
Param("const VkRenderPassCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkRenderPass*", "pRenderPass")]),
Proto("void", "DestroyRenderPass",
[Param("VkDevice", "device"),
Param("VkRenderPass", "renderPass"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("void", "GetRenderAreaGranularity",
[Param("VkDevice", "device"),
Param("VkRenderPass", "renderPass"),
Param("VkExtent2D*", "pGranularity")]),
Proto("VkResult", "CreateCommandPool",
[Param("VkDevice", "device"),
Param("const VkCommandPoolCreateInfo*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkCommandPool*", "pCommandPool")]),
Proto("void", "DestroyCommandPool",
[Param("VkDevice", "device"),
Param("VkCommandPool", "commandPool"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "ResetCommandPool",
[Param("VkDevice", "device"),
Param("VkCommandPool", "commandPool"),
Param("VkCommandPoolResetFlags", "flags")]),
Proto("VkResult", "AllocateCommandBuffers",
[Param("VkDevice", "device"),
Param("const VkCommandBufferAllocateInfo*", "pAllocateInfo"),
Param("VkCommandBuffer*", "pCommandBuffers")]),
Proto("void", "FreeCommandBuffers",
[Param("VkDevice", "device"),
Param("VkCommandPool", "commandPool"),
Param("uint32_t", "commandBufferCount"),
Param("const VkCommandBuffer*", "pCommandBuffers")]),
Proto("VkResult", "BeginCommandBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("const VkCommandBufferBeginInfo*", "pBeginInfo")]),
Proto("VkResult", "EndCommandBuffer",
[Param("VkCommandBuffer", "commandBuffer")]),
Proto("VkResult", "ResetCommandBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkCommandBufferResetFlags", "flags")]),
Proto("void", "CmdBindPipeline",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkPipelineBindPoint", "pipelineBindPoint"),
Param("VkPipeline", "pipeline")]),
Proto("void", "CmdSetViewport",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "firstViewport"),
Param("uint32_t", "viewportCount"),
Param("const VkViewport*", "pViewports")]),
Proto("void", "CmdSetScissor",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "firstScissor"),
Param("uint32_t", "scissorCount"),
Param("const VkRect2D*", "pScissors")]),
Proto("void", "CmdSetLineWidth",
[Param("VkCommandBuffer", "commandBuffer"),
Param("float", "lineWidth")]),
Proto("void", "CmdSetDepthBias",
[Param("VkCommandBuffer", "commandBuffer"),
Param("float", "depthBiasConstantFactor"),
Param("float", "depthBiasClamp"),
Param("float", "depthBiasSlopeFactor")]),
Proto("void", "CmdSetBlendConstants",
[Param("VkCommandBuffer", "commandBuffer"),
Param("const float[4]", "blendConstants")]),
Proto("void", "CmdSetDepthBounds",
[Param("VkCommandBuffer", "commandBuffer"),
Param("float", "minDepthBounds"),
Param("float", "maxDepthBounds")]),
Proto("void", "CmdSetStencilCompareMask",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkStencilFaceFlags", "faceMask"),
Param("uint32_t", "compareMask")]),
Proto("void", "CmdSetStencilWriteMask",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkStencilFaceFlags", "faceMask"),
Param("uint32_t", "writeMask")]),
Proto("void", "CmdSetStencilReference",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkStencilFaceFlags", "faceMask"),
Param("uint32_t", "reference")]),
Proto("void", "CmdBindDescriptorSets",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkPipelineBindPoint", "pipelineBindPoint"),
Param("VkPipelineLayout", "layout"),
Param("uint32_t", "firstSet"),
Param("uint32_t", "descriptorSetCount"),
Param("const VkDescriptorSet*", "pDescriptorSets"),
Param("uint32_t", "dynamicOffsetCount"),
Param("const uint32_t*", "pDynamicOffsets")]),
Proto("void", "CmdBindIndexBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "buffer"),
Param("VkDeviceSize", "offset"),
Param("VkIndexType", "indexType")]),
Proto("void", "CmdBindVertexBuffers",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "firstBinding"),
Param("uint32_t", "bindingCount"),
Param("const VkBuffer*", "pBuffers"),
Param("const VkDeviceSize*", "pOffsets")]),
Proto("void", "CmdDraw",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "vertexCount"),
Param("uint32_t", "instanceCount"),
Param("uint32_t", "firstVertex"),
Param("uint32_t", "firstInstance")]),
Proto("void", "CmdDrawIndexed",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "indexCount"),
Param("uint32_t", "instanceCount"),
Param("uint32_t", "firstIndex"),
Param("int32_t", "vertexOffset"),
Param("uint32_t", "firstInstance")]),
Proto("void", "CmdDrawIndirect",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "buffer"),
Param("VkDeviceSize", "offset"),
Param("uint32_t", "drawCount"),
Param("uint32_t", "stride")]),
Proto("void", "CmdDrawIndexedIndirect",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "buffer"),
Param("VkDeviceSize", "offset"),
Param("uint32_t", "drawCount"),
Param("uint32_t", "stride")]),
Proto("void", "CmdDispatch",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "x"),
Param("uint32_t", "y"),
Param("uint32_t", "z")]),
Proto("void", "CmdDispatchIndirect",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "buffer"),
Param("VkDeviceSize", "offset")]),
Proto("void", "CmdCopyBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "srcBuffer"),
Param("VkBuffer", "dstBuffer"),
Param("uint32_t", "regionCount"),
Param("const VkBufferCopy*", "pRegions")]),
Proto("void", "CmdCopyImage",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkImage", "srcImage"),
Param("VkImageLayout", "srcImageLayout"),
Param("VkImage", "dstImage"),
Param("VkImageLayout", "dstImageLayout"),
Param("uint32_t", "regionCount"),
Param("const VkImageCopy*", "pRegions")]),
Proto("void", "CmdBlitImage",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkImage", "srcImage"),
Param("VkImageLayout", "srcImageLayout"),
Param("VkImage", "dstImage"),
Param("VkImageLayout", "dstImageLayout"),
Param("uint32_t", "regionCount"),
Param("const VkImageBlit*", "pRegions"),
Param("VkFilter", "filter")]),
Proto("void", "CmdCopyBufferToImage",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "srcBuffer"),
Param("VkImage", "dstImage"),
Param("VkImageLayout", "dstImageLayout"),
Param("uint32_t", "regionCount"),
Param("const VkBufferImageCopy*", "pRegions")]),
Proto("void", "CmdCopyImageToBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkImage", "srcImage"),
Param("VkImageLayout", "srcImageLayout"),
Param("VkBuffer", "dstBuffer"),
Param("uint32_t", "regionCount"),
Param("const VkBufferImageCopy*", "pRegions")]),
Proto("void", "CmdUpdateBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "dstBuffer"),
Param("VkDeviceSize", "dstOffset"),
Param("VkDeviceSize", "dataSize"),
Param("const void*", "pData")]),
Proto("void", "CmdFillBuffer",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkBuffer", "dstBuffer"),
Param("VkDeviceSize", "dstOffset"),
Param("VkDeviceSize", "size"),
Param("uint32_t", "data")]),
Proto("void", "CmdClearColorImage",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkImage", "image"),
Param("VkImageLayout", "imageLayout"),
Param("const VkClearColorValue*", "pColor"),
Param("uint32_t", "rangeCount"),
Param("const VkImageSubresourceRange*", "pRanges")]),
Proto("void", "CmdClearDepthStencilImage",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkImage", "image"),
Param("VkImageLayout", "imageLayout"),
Param("const VkClearDepthStencilValue*", "pDepthStencil"),
Param("uint32_t", "rangeCount"),
Param("const VkImageSubresourceRange*", "pRanges")]),
Proto("void", "CmdClearAttachments",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "attachmentCount"),
Param("const VkClearAttachment*", "pAttachments"),
Param("uint32_t", "rectCount"),
Param("const VkClearRect*", "pRects")]),
Proto("void", "CmdResolveImage",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkImage", "srcImage"),
Param("VkImageLayout", "srcImageLayout"),
Param("VkImage", "dstImage"),
Param("VkImageLayout", "dstImageLayout"),
Param("uint32_t", "regionCount"),
Param("const VkImageResolve*", "pRegions")]),
Proto("void", "CmdSetEvent",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkEvent", "event"),
Param("VkPipelineStageFlags", "stageMask")]),
Proto("void", "CmdResetEvent",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkEvent", "event"),
Param("VkPipelineStageFlags", "stageMask")]),
Proto("void", "CmdWaitEvents",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "eventCount"),
Param("const VkEvent*", "pEvents"),
Param("VkPipelineStageFlags", "srcStageMask"),
Param("VkPipelineStageFlags", "dstStageMask"),
Param("uint32_t", "memoryBarrierCount"),
Param("const VkMemoryBarrier*", "pMemoryBarriers"),
Param("uint32_t", "bufferMemoryBarrierCount"),
Param("const VkBufferMemoryBarrier*", "pBufferMemoryBarriers"),
Param("uint32_t", "imageMemoryBarrierCount"),
Param("const VkImageMemoryBarrier*", "pImageMemoryBarriers")]),
Proto("void", "CmdPipelineBarrier",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkPipelineStageFlags", "srcStageMask"),
Param("VkPipelineStageFlags", "dstStageMask"),
Param("VkDependencyFlags", "dependencyFlags"),
Param("uint32_t", "memoryBarrierCount"),
Param("const VkMemoryBarrier*", "pMemoryBarriers"),
Param("uint32_t", "bufferMemoryBarrierCount"),
Param("const VkBufferMemoryBarrier*", "pBufferMemoryBarriers"),
Param("uint32_t", "imageMemoryBarrierCount"),
Param("const VkImageMemoryBarrier*", "pImageMemoryBarriers")]),
Proto("void", "CmdBeginQuery",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkQueryPool", "queryPool"),
Param("uint32_t", "query"),
Param("VkQueryControlFlags", "flags")]),
Proto("void", "CmdEndQuery",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkQueryPool", "queryPool"),
Param("uint32_t", "query")]),
Proto("void", "CmdResetQueryPool",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkQueryPool", "queryPool"),
Param("uint32_t", "firstQuery"),
Param("uint32_t", "queryCount")]),
Proto("void", "CmdWriteTimestamp",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkPipelineStageFlagBits", "pipelineStage"),
Param("VkQueryPool", "queryPool"),
Param("uint32_t", "query")]),
Proto("void", "CmdCopyQueryPoolResults",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkQueryPool", "queryPool"),
Param("uint32_t", "firstQuery"),
Param("uint32_t", "queryCount"),
Param("VkBuffer", "dstBuffer"),
Param("VkDeviceSize", "dstOffset"),
Param("VkDeviceSize", "stride"),
Param("VkQueryResultFlags", "flags")]),
Proto("void", "CmdPushConstants",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkPipelineLayout", "layout"),
Param("VkShaderStageFlags", "stageFlags"),
Param("uint32_t", "offset"),
Param("uint32_t", "size"),
Param("const void*", "pValues")]),
Proto("void", "CmdBeginRenderPass",
[Param("VkCommandBuffer", "commandBuffer"),
Param("const VkRenderPassBeginInfo*", "pRenderPassBegin"),
Param("VkSubpassContents", "contents")]),
Proto("void", "CmdNextSubpass",
[Param("VkCommandBuffer", "commandBuffer"),
Param("VkSubpassContents", "contents")]),
Proto("void", "CmdEndRenderPass",
[Param("VkCommandBuffer", "commandBuffer")]),
Proto("void", "CmdExecuteCommands",
[Param("VkCommandBuffer", "commandBuffer"),
Param("uint32_t", "commandBufferCount"),
Param("const VkCommandBuffer*", "pCommandBuffers")]),
],
)
ext_khr_surface = Extension(
name="VK_KHR_surface",
headers=["vulkan/vulkan.h"],
objects=["vkSurfaceKHR"],
protos=[
Proto("void", "DestroySurfaceKHR",
[Param("VkInstance", "instance"),
Param("VkSurfaceKHR", "surface"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "GetPhysicalDeviceSurfaceSupportKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "queueFamilyIndex"),
Param("VkSurfaceKHR", "surface"),
Param("VkBool32*", "pSupported")]),
Proto("VkResult", "GetPhysicalDeviceSurfaceCapabilitiesKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkSurfaceKHR", "surface"),
Param("VkSurfaceCapabilitiesKHR*", "pSurfaceCapabilities")]),
Proto("VkResult", "GetPhysicalDeviceSurfaceFormatsKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkSurfaceKHR", "surface"),
Param("uint32_t*", "pSurfaceFormatCount"),
Param("VkSurfaceFormatKHR*", "pSurfaceFormats")]),
Proto("VkResult", "GetPhysicalDeviceSurfacePresentModesKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkSurfaceKHR", "surface"),
Param("uint32_t*", "pPresentModeCount"),
Param("VkPresentModeKHR*", "pPresentModes")]),
],
)
ext_khr_display = Extension(
name="VK_KHR_display",
headers=["vulkan/vulkan.h"],
objects=['VkSurfaceKHR', 'VkDisplayModeKHR'],
protos=[
Proto("VkResult", "GetPhysicalDeviceDisplayPropertiesKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t*", "pPropertyCount"),
Param("VkDisplayPropertiesKHR*", "pProperties")]),
Proto("VkResult", "GetPhysicalDeviceDisplayPlanePropertiesKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t*", "pPropertyCount"),
Param("VkDisplayPlanePropertiesKHR*", "pProperties")]),
Proto("VkResult", "GetDisplayPlaneSupportedDisplaysKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "planeIndex"),
Param("uint32_t*", "pDisplayCount"),
Param("VkDisplayKHR*", "pDisplays")]),
Proto("VkResult", "GetDisplayModePropertiesKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkDisplayKHR", "display"),
Param("uint32_t*", "pPropertyCount"),
Param("VkDisplayModePropertiesKHR*", "pProperties")]),
Proto("VkResult", "CreateDisplayModeKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkDisplayKHR", "display"),
Param("const VkDisplayModeCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkDisplayModeKHR*", "pMode")]),
Proto("VkResult", "GetDisplayPlaneCapabilitiesKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("VkDisplayModeKHR", "mode"),
Param("uint32_t", "planeIndex"),
Param("VkDisplayPlaneCapabilitiesKHR*", "pCapabilities")]),
Proto("VkResult", "CreateDisplayPlaneSurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkDisplaySurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
],
)
ext_khr_device_swapchain = Extension(
name="VK_KHR_swapchain",
headers=["vulkan/vulkan.h"],
objects=["VkSwapchainKHR"],
protos=[
Proto("VkResult", "CreateSwapchainKHR",
[Param("VkDevice", "device"),
Param("const VkSwapchainCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSwapchainKHR*", "pSwapchain")]),
Proto("void", "DestroySwapchainKHR",
[Param("VkDevice", "device"),
Param("VkSwapchainKHR", "swapchain"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("VkResult", "GetSwapchainImagesKHR",
[Param("VkDevice", "device"),
Param("VkSwapchainKHR", "swapchain"),
Param("uint32_t*", "pSwapchainImageCount"),
Param("VkImage*", "pSwapchainImages")]),
Proto("VkResult", "AcquireNextImageKHR",
[Param("VkDevice", "device"),
Param("VkSwapchainKHR", "swapchain"),
Param("uint64_t", "timeout"),
Param("VkSemaphore", "semaphore"),
Param("VkFence", "fence"),
Param("uint32_t*", "pImageIndex")]),
Proto("VkResult", "QueuePresentKHR",
[Param("VkQueue", "queue"),
Param("const VkPresentInfoKHR*", "pPresentInfo")]),
],
)
ext_khr_xcb_surface = Extension(
name="VK_KHR_xcb_surface",
headers=["vulkan/vulkan.h"],
objects=[],
protos=[
Proto("VkResult", "CreateXcbSurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkXcbSurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
Proto("VkBool32", "GetPhysicalDeviceXcbPresentationSupportKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "queueFamilyIndex"),
Param("xcb_connection_t*", "connection"),
Param("xcb_visualid_t", "visual_id")]),
],
)
ext_khr_xlib_surface = Extension(
name="VK_KHR_xlib_surface",
headers=["vulkan/vulkan.h"],
objects=[],
ifdef="VK_USE_PLATFORM_XLIB_KHR",
protos=[
Proto("VkResult", "CreateXlibSurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkXlibSurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
Proto("VkBool32", "GetPhysicalDeviceXlibPresentationSupportKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "queueFamilyIndex"),
Param("Display*", "dpy"),
Param("VisualID", "visualID")]),
],
)
ext_khr_wayland_surface = Extension(
name="VK_KHR_wayland_surface",
headers=["vulkan/vulkan.h"],
objects=[],
protos=[
Proto("VkResult", "CreateWaylandSurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkWaylandSurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
Proto("VkBool32", "GetPhysicalDeviceWaylandPresentationSupportKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "queueFamilyIndex"),
Param("struct wl_display*", "display")]),
],
)
ext_khr_mir_surface = Extension(
name="VK_KHR_mir_surface",
headers=["vulkan/vulkan.h"],
objects=[],
protos=[
Proto("VkResult", "CreateMirSurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkMirSurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
Proto("VkBool32", "GetPhysicalDeviceMirPresentationSupportKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "queueFamilyIndex"),
Param("MirConnection*", "connection")]),
],
)
ext_khr_android_surface = Extension(
name="VK_KHR_android_surface",
headers=["vulkan/vulkan.h"],
objects=[],
protos=[
Proto("VkResult", "CreateAndroidSurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkAndroidSurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
],
)
ext_khr_win32_surface = Extension(
name="VK_KHR_win32_surface",
headers=["vulkan/vulkan.h"],
objects=[],
protos=[
Proto("VkResult", "CreateWin32SurfaceKHR",
[Param("VkInstance", "instance"),
Param("const VkWin32SurfaceCreateInfoKHR*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkSurfaceKHR*", "pSurface")]),
Proto("VkBool32", "GetPhysicalDeviceWin32PresentationSupportKHR",
[Param("VkPhysicalDevice", "physicalDevice"),
Param("uint32_t", "queueFamilyIndex")]),
],
)
lunarg_debug_report = Extension(
name="VK_EXT_debug_report",
headers=["vulkan/vulkan.h"],
objects=[
"VkDebugReportCallbackEXT",
],
protos=[
Proto("VkResult", "CreateDebugReportCallbackEXT",
[Param("VkInstance", "instance"),
Param("const VkDebugReportCallbackCreateInfoEXT*", "pCreateInfo"),
Param("const VkAllocationCallbacks*", "pAllocator"),
Param("VkDebugReportCallbackEXT*", "pCallback")]),
Proto("void", "DestroyDebugReportCallbackEXT",
[Param("VkInstance", "instance"),
Param("VkDebugReportCallbackEXT", "callback"),
Param("const VkAllocationCallbacks*", "pAllocator")]),
Proto("void", "DebugReportMessageEXT",
[Param("VkInstance", "instance"),
Param("VkDebugReportFlagsEXT", "flags"),
Param("VkDebugReportObjectTypeEXT", "objType"),
Param("uint64_t", "object"),
Param("size_t", "location"),
Param("int32_t", "msgCode"),
Param("const char *", "pLayerPrefix"),
Param("const char *", "pMsg")]),
],
)
import sys
if sys.argv[1] == 'AllPlatforms':
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_win32_surface, ext_khr_xcb_surface, ext_khr_xlib_surface, ext_khr_wayland_surface, ext_khr_mir_surface, ext_khr_display, ext_khr_android_surface]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_win32_surface, ext_khr_xcb_surface, ext_khr_xlib_surface, ext_khr_wayland_surface, ext_khr_mir_surface, ext_khr_display, ext_khr_android_surface, lunarg_debug_report]
else :
if len(sys.argv) > 3:
if sys.platform.startswith('win32') and sys.argv[1] != 'Android':
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_win32_surface, ext_khr_display]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_win32_surface, ext_khr_display, lunarg_debug_report]
elif sys.platform.startswith('linux') and sys.argv[1] != 'Android':
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_xcb_surface, ext_khr_xlib_surface, ext_khr_wayland_surface, ext_khr_mir_surface, ext_khr_display]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_xcb_surface, ext_khr_xlib_surface, ext_khr_wayland_surface, ext_khr_mir_surface, ext_khr_display, lunarg_debug_report]
else: # android
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_android_surface]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_android_surface, lunarg_debug_report]
else :
if sys.argv[1] == 'Win32':
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_win32_surface, ext_khr_display]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_win32_surface, ext_khr_display, lunarg_debug_report]
elif sys.argv[1] == 'Android':
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_android_surface]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_android_surface, lunarg_debug_report]
elif sys.argv[1] == 'Xcb' or sys.argv[1] == 'Xlib' or sys.argv[1] == 'Wayland' or sys.argv[1] == 'Mir' or sys.argv[1] == 'Display':
extensions = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_xcb_surface, ext_khr_xlib_surface, ext_khr_wayland_surface, ext_khr_mir_surface, ext_khr_display]
extensions_all = [core, ext_khr_surface, ext_khr_device_swapchain, ext_khr_xcb_surface, ext_khr_xlib_surface, ext_khr_wayland_surface, ext_khr_mir_surface, ext_khr_display, lunarg_debug_report]
else:
print('Error: Undefined DisplayServer')
extensions = []
extensions_all = []
object_dispatch_list = [
"VkInstance",
"VkPhysicalDevice",
"VkDevice",
"VkQueue",
"VkCommandBuffer",
]
object_non_dispatch_list = [
"VkCommandPool",
"VkFence",
"VkDeviceMemory",
"VkBuffer",
"VkImage",
"VkSemaphore",
"VkEvent",
"VkQueryPool",
"VkBufferView",
"VkImageView",
"VkShaderModule",
"VkPipelineCache",
"VkPipelineLayout",
"VkPipeline",
"VkDescriptorSetLayout",
"VkSampler",
"VkDescriptorPool",
"VkDescriptorSet",
"VkRenderPass",
"VkFramebuffer",
"VkSwapchainKHR",
"VkSurfaceKHR",
"VkDebugReportCallbackEXT",
"VkDisplayKHR",
"VkDisplayModeKHR",
]
object_type_list = object_dispatch_list + object_non_dispatch_list
headers = []
objects = []
protos = []
for ext in extensions:
headers.extend(ext.headers)
objects.extend(ext.objects)
protos.extend(ext.protos)
proto_names = [proto.name for proto in protos]
headers_all = []
objects_all = []
protos_all = []
for ext in extensions_all:
headers_all.extend(ext.headers)
objects_all.extend(ext.objects)
protos_all.extend(ext.protos)
proto_all_names = [proto.name for proto in protos_all]
def parse_vk_h(filename):
    # read object and prototype typedefs
object_lines = []
proto_lines = []
with open(filename, "r") as fp:
for line in fp:
line = line.strip()
if line.startswith("VK_DEFINE"):
begin = line.find("(") + 1
end = line.find(",")
# extract the object type
object_lines.append(line[begin:end])
if line.startswith("typedef") and line.endswith(");"):
if "*PFN_vkVoidFunction" in line:
continue
# drop leading "typedef " and trailing ");"
proto_lines.append(line[8:-2])
# parse proto_lines to protos
protos = []
for line in proto_lines:
first, rest = line.split(" (VKAPI_PTR *PFN_vk")
second, third = rest.split(")(")
# get the return type, no space before "*"
proto_ret = "*".join([t.rstrip() for t in first.split("*")])
# get the name
proto_name = second.strip()
# get the list of params
param_strs = third.split(", ")
params = []
for s in param_strs:
ty, name = s.rsplit(" ", 1)
# no space before "*"
ty = "*".join([t.rstrip() for t in ty.split("*")])
# attach [] to ty
idx = name.rfind("[")
if idx >= 0:
ty += name[idx:]
name = name[:idx]
params.append(Param(ty, name))
protos.append(Proto(proto_ret, proto_name, params))
# make them an extension and print
ext = Extension("VK_CORE",
headers=["vulkan/vulkan.h"],
objects=object_lines,
protos=protos)
print("core =", str(ext))
print("")
print("typedef struct VkLayerDispatchTable_")
print("{")
for proto in ext.protos:
print(" PFN_vk%s %s;" % (proto.name, proto.name))
print("} VkLayerDispatchTable;")
if __name__ == "__main__":
parse_vk_h("include/vulkan/vulkan.h")
|
|
# -*- test-case-name: diamondash.tests.test_server -*-
"""Diamondash's web server functionality"""
import yaml
import json
from os import path
from glob import glob
from pkg_resources import resource_filename, resource_string
from twisted.web import http
from twisted.web.static import File
from twisted.web.template import Element, renderer, XMLString, tags
from twisted.internet.defer import maybeDeferred
from twisted.python import log
from klein import Klein
from diamondash import utils, PageElement
from diamondash.config import Config, ConfigError
from dashboard import DashboardConfig, Dashboard, DashboardPage
from diamondash.widgets.dynamic import DynamicWidget
class DiamondashConfig(Config):
FILENAME = 'diamondash.yml'
DEFAULTS = {
'poll_interval': '60s',
'backend': {
'type': 'diamondash.backends.graphite.GraphiteBackend',
'url': 'http://127.0.0.1:8080',
}
}
@classmethod
def parse(cls, config):
dashboard_configs = sorted(
config.get('dashboards', []),
key=lambda d: d['name'])
config['dashboards'] = [
DashboardConfig(cls._apply_dashboard_defaults(config, d))
for d in dashboard_configs]
return config
@classmethod
def from_dir(cls, dirname):
config = yaml.safe_load(open(path.join(dirname, cls.FILENAME)))
config['dashboards'] = [
yaml.safe_load(open(filename))
for filename in glob(path.join(dirname, 'dashboards', '*.yml'))]
return cls(config)
@classmethod
def _apply_dashboard_defaults(cls, config, dashboard_config):
return utils.add_dicts({
'backend': config['backend'],
'poll_interval': config['poll_interval'],
}, dashboard_config)
def apply_dashboard_defaults(self, dashboard_config):
return self._apply_dashboard_defaults(self, dashboard_config)
class ApiRequestError(Exception):
"""
Raised when there was a problem handling a client request to the api.
"""
class DiamondashServer(object):
"""Contains the server's configuration options and dashboards"""
app = Klein()
CONFIG_CLS = DiamondashConfig
RESOURCE_DIRNAME = path.join(resource_filename(__name__, ''), 'public')
def __init__(self, config):
self.config = config
self.dashboards_by_name = {}
self.dashboards_by_share_id = {}
self.index = Index()
self.resources = self.create_resources()
for dashboard_config in config['dashboards']:
self.add_dashboard(dashboard_config)
@classmethod
def create_resources(cls):
return File(path.join(cls.RESOURCE_DIRNAME))
def get_dashboard(self, name):
return self.dashboards_by_name.get(name)
def get_dashboard_by_share_id(self, share_id):
return self.dashboards_by_share_id.get(share_id)
def has_dashboard(self, name):
return name in self.dashboards_by_name
def add_dashboard(self, config, overwrite=False):
"""Adds a dashboard to diamondash"""
if not overwrite and self.has_dashboard(config['name']):
return log.msg("Dashboard '%s' already exists" % config['name'])
dashboard = Dashboard(config)
self.dashboards_by_name[config['name']] = dashboard
if 'share_id' in config:
self.dashboards_by_share_id[config['share_id']] = dashboard
self.index.add_dashboard(dashboard)
def remove_dashboard(self, name):
dashboard = self.get_dashboard(name)
if dashboard is None:
return None
self.index.remove_dashboard(name)
if 'share_id' in dashboard.config:
del self.dashboards_by_share_id[dashboard.config['share_id']]
del self.dashboards_by_name[name]
# Rendering
# =========
def render_error_response(self, request, message, code):
request.setResponseCode(code)
return ErrorPage(code, message)
@app.route('/')
def show_index(self, request):
return self.index
@app.route('/public/<string:res_type>/<string:name>')
def serve_resource(self, request, res_type, name):
"""Routing for all public resources"""
res_dir = self.resources.getChild(res_type, request)
return res_dir.getChild(name, request)
@app.route('/favicon.ico')
def favicon(self, request):
return File(resource_filename(__name__, 'public/favicon.png'))
@app.route('/<string:name>')
def render_dashboard(self, request, name):
"""Render a non-shared dashboard page"""
dashboard = self.get_dashboard(name.encode('utf-8'))
if dashboard is None:
return self.render_error_response(
request,
code=http.NOT_FOUND,
message="Dashboard '%s' does not exist" % name)
return DashboardPage(dashboard)
@app.route('/shared/<string:share_id>')
def render_shared_dashboard(self, request, share_id):
"""Render a shared dashboard page"""
dashboard = self.get_dashboard_by_share_id(share_id.encode('utf-8'))
if dashboard is None:
return self.render_error_response(
request,
code=http.NOT_FOUND,
message=(
"Dashboard with share id '%s' does not exist "
"or is not shared" % share_id))
return DashboardPage(dashboard, shared=True)
# API
    # ===
@classmethod
def api_response(cls, request, data, code=http.OK, headers={}):
request.responseHeaders.setRawHeaders(
'Content-Type', ['application/json'])
for field, value in headers.iteritems():
request.responseHeaders.setRawHeaders(field, value)
request.setResponseCode(code)
return json.dumps(data)
@classmethod
def api_success_response(cls, request, data=None, code=http.OK):
return cls.api_response(request, code=code, data={
'success': True,
'data': data,
})
@classmethod
def api_error_response(cls, request, message, code):
return cls.api_response(request, code=code, data={
'success': False,
'message': message,
})
@classmethod
def api_get(cls, request, getter, *args, **kwargs):
d = maybeDeferred(getter, *args, **kwargs)
def trap_unhandled_error(f):
f.trap(Exception)
            log.msg("Unhandled error occurred during API request: %s" % f.value)
return cls.api_error_response(
request,
code=http.INTERNAL_SERVER_ERROR,
message="Some unhandled error occurred")
d.addCallback(lambda data: cls.api_response(request, data))
d.addErrback(trap_unhandled_error)
return d
# Dashboard API
# -------------
def api_add_dashboard(self, request, replace):
try:
config = json.loads(request.content.read())
except:
return self.api_error_response(
request,
code=http.BAD_REQUEST,
message="Error parsing dashboard config as json object")
if not isinstance(config, dict):
return self.api_error_response(
request,
code=http.BAD_REQUEST,
message="Dashboard configs need to be json objects")
if 'name' not in config:
return self.api_error_response(
request,
code=http.BAD_REQUEST,
message="Dashboards need a name to be created")
if not replace and self.has_dashboard(utils.slugify(config['name'])):
return self.api_error_response(
request,
code=http.BAD_REQUEST,
                message="Dashboard with name '%s' already exists" % config['name'])
config = self.config.apply_dashboard_defaults(config)
try:
config = DashboardConfig(config)
except ConfigError, e:
return self.api_error_response(
request,
code=http.BAD_REQUEST,
message="Error parsing dashboard config: %r" % e)
self.add_dashboard(config, replace)
return self.api_success_response(
request,
data=self.get_dashboard(config['name']).get_details(),
code=http.CREATED if not replace else http.OK)
@app.route('/api/dashboards/<string:name>', methods=['GET'])
def api_get_dashboard_details(self, request, name):
dashboard = self.get_dashboard(name.encode('utf-8'))
if dashboard is None:
return self.render_error_response(
request,
code=http.NOT_FOUND,
message="Dashboard '%s' does not exist" % name)
return self.api_get(request, dashboard.get_details)
@app.route('/api/dashboards', methods=['POST'])
def api_create_dashboard(self, request):
return self.api_add_dashboard(request, replace=False)
@app.route('/api/dashboards', methods=['PUT'])
def api_replace_dashboard(self, request):
return self.api_add_dashboard(request, replace=True)
@app.route('/api/dashboards/<string:name>', methods=['DELETE'])
def api_remove_dashboard(self, request, name):
name = utils.slugify(name.encode('utf-8'))
if not self.has_dashboard(name):
return self.render_error_response(
request,
code=http.NOT_FOUND,
message="Dashboard '%s' does not exist" % name)
self.remove_dashboard(name)
return self.api_success_response(request)
# Widget API
# -------------
@app.route('/api/widgets/<string:dashboard_name>/<string:widget_name>',
methods=['GET'])
def api_get_widget_details(self, request, dashboard_name, widget_name):
dashboard_name = dashboard_name.encode('utf-8')
widget_name = widget_name.encode('utf-8')
dashboard = self.get_dashboard(dashboard_name)
if dashboard is None:
return self.api_error_response(
request,
code=http.NOT_FOUND,
message="Dashboard '%s' does not exist" % dashboard_name)
widget = dashboard.get_widget(widget_name)
if widget is None:
return self.api_error_response(
request,
code=http.NOT_FOUND,
message="Widget '%s' does not exist" % widget_name)
return self.api_get(request, widget.get_details)
@app.route(
'/api/widgets/<string:dashboard_name>/<string:widget_name>/snapshot',
methods=['GET'])
def api_get_widget_snapshot(self, request, dashboard_name, widget_name):
dashboard_name = dashboard_name.encode('utf-8')
widget_name = widget_name.encode('utf-8')
dashboard = self.get_dashboard(dashboard_name)
if dashboard is None:
return self.api_error_response(
request,
code=http.NOT_FOUND,
message="Dashboard '%s' does not exist" % dashboard_name)
widget = dashboard.get_widget(widget_name)
if widget is None:
return self.api_error_response(
request,
code=http.NOT_FOUND,
message="Widget '%s' does not exist" % widget_name)
if not isinstance(widget, DynamicWidget):
return self.api_error_response(
request,
code=http.BAD_REQUEST,
message="Widget '%s' is not dynamic" % widget_name)
return self.api_get(request, widget.get_snapshot)
class Index(PageElement):
"""Index element with links to dashboards"""
loader = XMLString(resource_string(__name__, 'views/index.xml'))
def __init__(self, dashboards=[]):
self.dashboard_items = {}
for dashboard in dashboards:
self.add_dashboard(dashboard)
def has_dashboard(self, name):
return name in self.dashboard_items
def add_dashboard(self, dashboard):
item = DashboardIndexListItem.from_dashboard(dashboard)
# we intentionally overwrite existing dashboard items with the same
# dashboard name
self.dashboard_items[dashboard.config['name']] = item
def remove_dashboard(self, name):
if name not in self.dashboard_items:
return None
del self.dashboard_items[name]
@renderer
def dashboard_list_item_renderer(self, request, tag):
for name, item in sorted(self.dashboard_items.iteritems()):
yield item
class DashboardIndexListItem(Element):
loader = XMLString(resource_string(
__name__, 'views/index_dashboard_list_item.xml'))
def __init__(self, title, url, shared_url_tag):
self.title = title
self.url = url
self.shared_url_tag = shared_url_tag
@classmethod
def from_dashboard(cls, dashboard):
url = '/%s' % (dashboard.config['name'])
if 'share_id' in dashboard.config:
shared_url = '/shared/%s' % dashboard.config['share_id']
shared_url_tag = tags.a(shared_url, href=shared_url)
else:
shared_url_tag = ''
return cls(dashboard.config['title'], url, shared_url_tag)
@renderer
def dashboard_list_item_renderer(self, request, tag):
tag.fillSlots(title_slot=self.title,
url_slot=self.url,
shared_url_slot=self.shared_url_tag)
yield tag
class ErrorPage(PageElement):
loader = XMLString(resource_string(
__name__, 'views/error_page.xml'))
def __init__(self, code, message):
self.title = "(%s) %s" % (
code, http.RESPONSES.get(code, "Error with Unknown Code"))
self.message = message
@renderer
def header_renderer(self, request, tag):
tag.fillSlots(title_slot=self.title, message_slot=self.message)
yield tag
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ldap
from keystone import exception
from keystone.common import logging
from keystone.common.ldap import fakeldap
LOG = logging.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
def py2ldap(val):
if isinstance(val, str):
return val
elif isinstance(val, bool):
return 'TRUE' if val else 'FALSE'
else:
return str(val)
def ldap2py(val):
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return val
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
class BaseLdap(object):
DEFAULT_SUFFIX = "dc=example,dc=com"
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
options_name = None
model = None
attribute_mapping = {}
attribute_ignore = []
model = None
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
if self.options_name is not None:
self.suffix = conf.ldap.suffix
            if self.suffix is None:
self.suffix = self.DEFAULT_SUFFIX
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.suffix, self.DEFAULT_OU))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
self.use_dumb_member = getattr(conf.ldap, 'use_dumb_member') or True
def get_connection(self, user=None, password=None):
if self.LDAP_URL.startswith('fake://'):
conn = fakeldap.FakeLdap(self.LDAP_URL)
else:
conn = LdapWrapper(self.LDAP_URL)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
conn.simple_bind_s(user, password)
return conn
def _id_to_dn(self, id):
return '%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(str(id)),
self.tree_dn)
@staticmethod
def _dn_to_id(dn):
return ldap.dn.str2dn(dn)[0][0][1]
def _ldap_res_to_model(self, res):
obj = self.model(id=self._dn_to_id(res[0]))
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
v = res[1][self.attribute_mapping.get(k, k)]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def affirm_unique(self, values):
if values['name'] is not None:
entity = self.get_by_name(values['name'])
if entity is not None:
raise exception.Conflict(type=self.options_name,
details='Duplicate name, %s.' %
values['name'])
if values['id'] is not None:
entity = self.get(values['id'])
if entity is not None:
raise exception.Conflict(type=self.options_name,
details='Duplicate ID, %s.' %
values['id'])
def create(self, values):
conn = self.get_connection()
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in values.iteritems():
if k == 'id' or k in self.attribute_ignore:
continue
if v is not None:
attr_type = self.attribute_mapping.get(k, k)
attrs.append((attr_type, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.DUMB_MEMBER_DN]))
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, id, filter=None):
conn = self.get_connection()
query = '(objectClass=%s)' % self.object_class
if filter is not None:
query = '(&%s%s)' % (filter, query)
try:
res = conn.search_s(self._id_to_dn(id), ldap.SCOPE_BASE, query)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, filter=None):
conn = self.get_connection()
query = '(objectClass=%s)' % (self.object_class,)
if filter is not None:
query = '(&%s%s)' % (filter, query)
try:
return conn.search_s(self.tree_dn, ldap.SCOPE_ONELEVEL, query)
except ldap.NO_SUCH_OBJECT:
return []
def get(self, id, filter=None):
res = self._ldap_get(id, filter)
if res is None:
return None
else:
return self._ldap_res_to_model(res)
def get_all(self, filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(filter)]
def get_page(self, marker, limit):
return self._get_page(marker, limit, self.get_all())
def get_page_markers(self, marker, limit):
return self._get_page_markers(marker, limit, self.get_all())
@staticmethod
def _get_page(marker, limit, lst, key=lambda x: x.id):
lst.sort(key=key)
if not marker:
return lst[:limit]
else:
return [x for x in lst if key(x) > marker][:limit]
@staticmethod
def _get_page_markers(marker, limit, lst, key=lambda x: x.id):
if len(lst) < limit:
return (None, None)
lst.sort(key=key)
if marker is None:
if len(lst) <= limit + 1:
nxt = None
else:
nxt = key(lst[limit])
return (None, nxt)
i = 0
for i, item in enumerate(lst):
k = key(item)
if k >= marker:
break
if i <= limit:
prv = None
else:
prv = key(lst[i - limit])
if i + limit >= len(lst) - 1:
nxt = None
else:
nxt = key(lst[i + limit])
return (prv, nxt)
def update(self, id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(id)
modlist = []
for k, v in values.iteritems():
if k == 'id' or k in self.attribute_ignore:
continue
if v is None:
if old_obj[k] is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
elif old_obj[k] != v:
if old_obj[k] is None:
op = ldap.MOD_ADD
else:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
conn = self.get_connection()
conn.modify_s(self._id_to_dn(id), modlist)
def delete(self, id):
conn = self.get_connection()
conn.delete_s(self._id_to_dn(id))
class LdapWrapper(object):
def __init__(self, url):
LOG.debug("LDAP init: url=%s", url)
self.conn = ldap.initialize(url)
def simple_bind_s(self, user, password):
LOG.debug("LDAP bind: dn=%s", user)
return self.conn.simple_bind_s(user, password)
def add_s(self, dn, attrs):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in attrs]
if LOG.isEnabledFor(logging.DEBUG):
sane_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s, attrs=%s', dn, sane_attrs)
return self.conn.add_s(dn, ldap_attrs)
def search_s(self, dn, scope, query):
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug('LDAP search: dn=%s, scope=%s, query=%s',
dn,
scope,
query)
res = self.conn.search_s(dn, scope, query)
o = []
for dn, attrs in res:
o.append((dn, dict((kind, [ldap2py(x) for x in values])
for kind, values in attrs.iteritems())))
return o
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
if LOG.isEnabledFor(logging.DEBUG):
sane_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug("LDAP modify: dn=%s, modlist=%s", dn, sane_modlist)
return self.conn.modify_s(dn, ldap_modlist)
def delete_s(self, dn):
LOG.debug("LDAP delete: dn=%s", dn)
return self.conn.delete_s(dn)
|
|
'''Utilities.py - Tools for writing reproducible scripts
=========================================================
The :mod:`Utilities` module contains utility functions for argument
parsing, logging and record keeping within scripts.
This module is imported by most UMI-tools tools. It provides convenient
and consistent methods for
* `Record keeping`_
* `Argument parsing`_
* `Input/Output redirection`_
* `Logging`_
* `Running external commands`_
* `Benchmarking`_
The basic usage of this module within a script is::
"""script_name.py - my script
Mode Documentation
"""
import sys
import optparse
import Utilities as U
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv: argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"] )
parser.add_option("-t", "--test", dest="test", type="string",
help="supply help")
# add common options (-h/--help, ...) and parse
# command line
(options, args) = U.Start(parser)
# do something
# ...
U.info("an information message")
    U.warn("a warning message")
## write footer and output benchmark information.
U.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
Record keeping
--------------
The central functions in this module are the :py:func:`Start` and
:py:func:`Stop` methods, which are called before and after any work is
done within a script, respectively.
The :py:func:`Start` function is called with a U.OptionParser object.
:py:func:`Start` will add additional command line arguments, such as
``--help`` for command line help or ``--verbose`` to control the
:term:`loglevel`. It can also add optional arguments for scripts
needing database access, writing to multiple output files, etc.
:py:func:`Start` will write record keeping information to a
logfile. Typically, logging information is output on stdout, prefixed
by a `#`, but it can be re-directed to a separate file. Below is a
typical output::
# output generated by /ifs/devel/andreas/cgat/beds2beds.py --force-output --exclusive-overlap --method=unmerged-combinations --output-filename-pattern=030m.intersection.tsv.dir/030m.intersection.tsv-%s.bed.gz --log=030m.intersection.tsv.log Irf5-030m-R1.bed.gz Rela-030m-R1.bed.gz
# job started at Thu Mar 29 13:06:33 2012 on cgat150.anat.ox.ac.uk -- e1c16e80-03a1-4023-9417-f3e44e33bdcd
# pid: 16649, system: Linux 2.6.32-220.7.1.el6.x86_64 #1 SMP Fri Feb 10 15:22:22 EST 2012 x86_64
# exclusive : True
# filename_update : None
# ignore_strand : False
# loglevel : 1
# method : unmerged-combinations
# output_filename_pattern : 030m.intersection.tsv.dir/030m.intersection.tsv-%s.bed.gz
# output_force : True
# pattern_id : (.*).bed.gz
# stderr : <open file \'<stderr>\', mode \'w\' at 0x2ba70e0c2270>
# stdin : <open file \'<stdin>\', mode \'r\' at 0x2ba70e0c2150>
# stdlog : <open file \'030m.intersection.tsv.log\', mode \'a\' at 0x1f1a810>
# stdout : <open file \'<stdout>\', mode \'w\' at 0x2ba70e0c21e0>
# timeit_file : None
# timeit_header : None
# timeit_name : all
# tracks : None
The header contains information about:
* the script name (``beds2beds.py``)
* the command line options (``--force-output --exclusive-overlap
--method=unmerged-combinations
--output-filename-pattern=030m.intersection.tsv.dir/030m.intersection.tsv-%s.bed.gz
--log=030m.intersection.tsv.log Irf5-030m-R1.bed.gz
Rela-030m-R1.bed.gz``)
* the time when the job was started (``Thu Mar 29 13:06:33 2012``)
* the location it was executed (``cgat150.anat.ox.ac.uk``)
* a unique job id (``e1c16e80-03a1-4023-9417-f3e44e33bdcd``)
* the pid of the job (``16649``)
* the system specification (``Linux 2.6.32-220.7.1.el6.x86_64 #1
SMP Fri Feb 10 15:22:22 EST 2012 x86_64``)
It is followed by a list of all options that have been set in the script.
Once completed, a script will call the :py:func:`Stop` function to
signify the end of the experiment.
:py:func:`Stop` will output to the log file that the script has
concluded successfully. Below is typical output::
# job finished in 11 seconds at Thu Mar 29 13:06:44 2012 -- 11.36 0.45 0.00 0.01 -- e1c16e80-03a1-4023-9417-f3e44e33bdcd
The footer contains information about:
* the job has finished (``job finished``)
* the time it took to execute (``11 seconds``)
* when it completed (``Thu Mar 29 13:06:44 2012``)
* some benchmarking information (``11.36 0.45 0.00 0.01``)
which is ``user time``, ``system time``,
``child user time``, ``child system time``.
* the unique job id (``e1c16e80-03a1-4023-9417-f3e44e33bdcd``)
The unique job id can be used to easily retrieve matching information
from a concatenation of log files.
Argument parsing
----------------
The module provides :class:`OptionParser` to facilitate option
parsing. :class:`OptionParser` is derived from the
:py:class:`optparse.OptionParser` class, but has improvements to
provide better-formatted output on the command line. It also allows
passing a comma-separated list to options that accept multiple
arguments. Thus, ``--method=sort --method=crop`` and
``--method=sort,crop`` are equivalent.
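A minimal sketch of the comma-splitting behaviour (the ``--operation``
option is invented for illustration, and the parser is used directly
rather than through :func:`Start` to keep the example short)::

    import Utilities as U

    parser = U.OptionParser()
    parser.add_option("--operation", dest="operations", type="string",
                      action="append", default=[])

    (options, args) = parser.parse_args(["--operation=sort,crop"])
    # options.operations is now ["sort", "crop"], exactly as if the
    # user had passed --operation=sort --operation=crop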
Input/Output redirection
------------------------
:func:`Start` adds the options ``--stdin``, ``--stderr`` and
``--stdout`` which allow using files as input/output streams.
To make this work, scripts should not read from sys.stdin or write to
sys.stdout directly, but instead use ``options.stdin`` and
``options.stdout``. For example to simply read all lines from stdin
and write to stdout, use::
(options, args) = U.Start(parser)
input_data = options.stdin.readlines()
options.stdout.write("".join(input_data))
The script can then be used in many different contexts::
cat in.data | python script.py > out.data
python script.py --stdin=in.data > out.data
python script.py --stdin=in.data --stdout=out.data
The method handles gzip compressed files transparently. The following
are equivalent::
zcat in.data.gz | python script.py | gzip > out.data.gz
python script.py --stdin=in.data.gz --stdout=out.data.gz
For scripts producing multiple output files, use the argument
``add_output_options=True`` to :func:`Start`. This provides the option
``--output-filename-pattern`` on the command line. The user can then
supply a pattern for output files. Any ``%s`` appearing in the pattern
will be substituted by a ``section``. Inside the script, when opening
an output file, use the method :func:`openOutputFile` to provide a
file object::
output_histogram = U.openOutputFile(section="histogram")
output_stats = U.openOutputFile(section="stats")
If the user calls the script with::
python script.py --output-filename-pattern=sample1_%s.tsv.gz
the script will create the files ``sample1_histogram.tsv.gz`` and
``sample1_stats.tsv.gz``.
This method will also add the option ``--force-output`` to permit
overwriting existing files.
Logging
-------
:py:mod:`Utilities` provides the well known logging methods from
the :py:mod:`logging` module such as :py:func:`info`,
:py:func:`warn`, etc. These are provided so that no additional import
of the :py:mod:`logging` module is required, but either interface
can be used.
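For example, once :func:`Start` has configured logging, the two calls
below should produce equivalent log output::

    import logging
    import Utilities as U

    parser = U.OptionParser()
    (options, args) = U.Start(parser)

    U.info("processed 1000 reads")        # convenience wrapper
    logging.info("processed 1000 reads")  # standard logging module

    U.Stop()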
Running external commands
-------------------------
The :func:`run` method is a shortcut for :py:func:`subprocess.call` and
similar methods, with some additional sanity checking.
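A minimal sketch, assuming :func:`run` accepts a single shell command
string (the filenames are placeholders)::

    import Utilities as U

    U.run("sort -k1,1 input.tsv > sorted.tsv")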
Benchmarking
------------
The :func:`Start` method records basic benchmarking information when a
script starts and :func:`Stop` outputs it as part of its final log
message::
# job finished in 11 seconds at Thu Mar 29 13:06:44 2012 -- 11.36 0.45 0.00 0.01 -- e1c16e80-03a1-4023-9417-f3e44e33bdcd
See `Record keeping`_ for an explanation of the fields.
To facilitate collecting benchmark information from running multiple
scripts, these data can be tagged and saved in a separate file. See the
command line options ``--timeit``, ``--timeit-name``, ``--timeit-header``
in :func:`Start`.
The module also contains decorator functions for benchmarking
(:func:`benchmark`) and for caching function (:func:`cachedfunction`) or
class method (:func:`cachedmethod`) calls.
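A hypothetical sketch, assuming the decorators are applied directly to
the function or method whose calls should be timed or cached (the names
below are invented)::

    import time
    import Utilities as U

    @U.benchmark
    def slow_sum(n):
        time.sleep(0.5)
        return sum(range(n))

    class Annotator(object):

        @U.cachedmethod
        def lookup(self, key):
            # pretend this is expensive; the decorator caches the result
            return key * 2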
Complete reference
------------------
'''
########################################################################################
# The code for Utilities.py has been taken with permission from CGAT.Experiment.py
# https://github.com/CGATOxford/cgat/blob/master/CGAT/Experiment.py
# and CGATPipelines.CGATPipelines.Pipeline.Files.py
# https://github.com/CGATOxford/CGATPipelines/blob/master/CGATPipelines/Pipeline/Files.py
#########################################################################################
import re
import sys
import time
import inspect
import copy
import os
import logging
import collections
import gzip
import optparse
import textwrap
import random
import uuid
import tempfile
import regex
from umi_tools import __version__
from builtins import bytes, chr
class DefaultOptions:
stdlog = sys.stdout
stdout = sys.stdout
stderr = sys.stderr
stdin = sys.stdin
loglevel = 2
timeit_file = None
compresslevel = 6
global_starting_time = time.time()
global_options = DefaultOptions()
global_args = None
global_id = uuid.uuid4()
global_benchmark = collections.defaultdict(int)
##########################################################################
# The code for BetterFormatter has been taken from
# http://code.google.com/p/yjl/source/browse/Python/snippet/BetterFormatter.py
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Copyright (c) 2011 Yu-Jie Lin. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
class BetterFormatter(optparse.IndentedHelpFormatter):
"""A formatter for :class:`OptionParser` outputting indented
help text.
"""
def __init__(self, *args, **kwargs):
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
self.wrapper = textwrap.TextWrapper(width=self.width)
def _formatter(self, text):
return '\n'.join(['\n'.join(p) for p in
map(self.wrapper.wrap,
self.parser.expand_prog_name(text).split('\n'))])
def format_description(self, description):
if description:
return self._formatter(description) + '\n'
else:
return ''
def format_epilog(self, epilog):
if epilog:
return '\n' + self._formatter(epilog) + '\n'
else:
return ''
def format_usage(self, usage):
msg = '''
For full UMI-tools documentation, see https://umi-tools.readthedocs.io/en/latest/\n'''
return optparse._(usage) + msg
def format_option(self, option):
# Ripped and modified from Python 2.6's optparse's HelpFormatter
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
# Added expand program name
help_text = self.parser.expand_prog_name(help_text)
# Modified the generation of help_line
help_lines = []
wrapper = textwrap.TextWrapper(width=self.help_width)
for p in map(wrapper.wrap, help_text.split('\n')):
if p:
help_lines.extend(p)
else:
help_lines.append('')
# End of modification
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
# End of BetterFormatter()
#################################################################
#################################################################
#################################################################
class AppendCommaOption(optparse.Option):
'''Option with additional parsing capabilities.
* "," in arguments to options that have the action 'append'
are treated as a list of options. This is what galaxy does,
but generally convenient.
* Option values of "None" and "" are treated as default values.
'''
# def check_value( self, opt, value ):
# do not check type for ',' separated lists
# if "," in value:
# return value
# else:
# return optparse.Option.check_value( self, opt, value )
#
# def take_action(self, action, dest, opt, value, values, parser):
# if action == "append" and "," in value:
# lvalue = value.split(",")
# values.ensure_value(dest, []).extend(lvalue)
# else:
# optparse.Option.take_action(
# self, action, dest, opt, value, values, parser)
#
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
if self.action == "append":
if "," in value:
return [self.check_value(opt, v) for v in
value.split(",") if v != ""]
else:
if value != "":
return self.check_value(opt, value)
else:
return value
else:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
# why is it necessary to pass action and dest to this function when
# they could be accessed as self.action and self.dest?
def take_action(self, action, dest, opt, value, values, parser):
if action == "append" and type(value) == list:
values.ensure_value(dest, []).extend(value)
else:
optparse.Option.take_action(
self, action, dest, opt, value, values, parser)
class OptionParser(optparse.OptionParser):
'''UMI-tools derivative of OptionParser.
'''
def __init__(self, *args, **kwargs):
        # if "--no-usage" is given on the command line,
# remove usage from kwargs
if "--no-usage" in sys.argv:
kwargs["usage"] = None
optparse.OptionParser.__init__(self, *args,
option_class=AppendCommaOption,
formatter=BetterFormatter(),
add_help_option=False,
**kwargs)
# set new option parser
# parser.formatter = BetterFormatter()
# parser.formatter.set_parser(parser)
if "--no-usage" in sys.argv:
self.add_option("--no-usage", dest="help_no_usage",
action="store_true",
help="output help without usage information")
class OptionGroup(optparse.OptionGroup):
pass
def callbackShortHelp(option, opt, value, parser):
'''output short help (only command line options).'''
# clear usage and description
parser.set_description(None)
# parser.set_usage(None)
# output help
parser.print_help()
# exit
parser.exit()
def openFile(filename, mode="r", create_dir=False):
'''open file in *filename* with mode *mode*.
    If *create_dir* is set, the directory containing filename
will be created if it does not exist.
gzip - compressed files are recognized by the
suffix ``.gz`` and opened transparently.
    Note that there are differences in the file-like
    objects returned, for example in the
ability to seek.
returns a file or file-like object.
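    A small usage sketch (the filename is a placeholder)::

        outf = openFile("results/counts.txt.gz", "w", create_dir=True)
        outf.write("hello world")
        outf.close()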
'''
_, ext = os.path.splitext(filename)
if create_dir:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if ext.lower() in (".gz", ".z"):
if sys.version_info.major >= 3:
if mode == "r":
return gzip.open(filename, 'rt', encoding="ascii")
elif mode == "w":
return gzip.open(filename, 'wt',
compresslevel=global_options.compresslevel,
encoding="ascii")
else:
raise NotImplementedError(
"mode '{}' not implemented".format(mode))
else:
return gzip.open(filename, mode,
compresslevel=global_options.compresslevel)
else:
return open(filename, mode)
def getHeader():
"""return a header string with command line options and timestamp
"""
system, host, release, version, machine = os.uname()
return "# UMI-tools version: %s\n# output generated by %s\n# job started at %s on %s -- %s\n# pid: %i, system: %s %s %s %s" %\
(__version__,
" ".join(sys.argv),
time.asctime(time.localtime(time.time())),
host,
global_id,
os.getpid(),
system, release, version, machine)
def getParams(options=None):
"""return a string containing script parameters.
Parameters are all variables that start with ``param_``.
"""
result = []
if options:
members = options.__dict__
for k, v in sorted(members.items()):
result.append("# %-40s: %s" % (k, str(v)))
else:
vars = inspect.currentframe().f_back.f_locals
for var in filter(lambda x: re.match("param_", x), vars.keys()):
result.append("# %-40s: %s" %
(var, str(vars[var])))
if result:
return "\n".join(result)
else:
return "# no parameters."
def getFooter():
    """return a footer string with benchmarking information and a
    timestamp.
"""
return "# job finished in %i seconds at %s -- %s -- %s" %\
(time.time() - global_starting_time,
time.asctime(time.localtime(time.time())),
" ".join(map(lambda x: "%5.2f" % x, os.times()[:4])),
global_id)
class MultiLineFormatter(logging.Formatter):
    '''logfile formatter: add indentation for multi-line entries.'''
def format(self, record):
s = logging.Formatter.format(self, record)
if s.startswith("#"):
prefix = "#"
else:
prefix = ""
if record.message:
header, footer = s.split(record.message)
s = prefix + s.replace('\n', '\n%s' % prefix + ' ' * len(header))
return s
def Start(parser=None,
argv=sys.argv,
quiet=False,
add_pipe_options=True,
add_extract_options=False,
add_group_dedup_options=True,
add_sam_options=True,
add_umi_grouping_options=True,
return_parser=False):
"""set up an experiment.
The :py:func:`Start` method will set up a file logger and add some
default and some optional options to the command line parser. It
will then parse the command line and set up input/output
redirection and start a timer for benchmarking purposes.
The default options added by this method are:
``-v/--verbose``
the :term:`loglevel`
``timeit``
turn on benchmarking information and save to file
``timeit-name``
name to use for timing information,
``timeit-header``
output header for timing information.
``seed``
the random seed. If given, the python random
number generator will be initialized with this
seed.
    Optional option groups are added depending on the ``add_*_options``
    arguments described below.
Arguments
---------
    parser : :py:class:`U.OptionParser`
instance with command line options.
argv : list
command line options to parse. Defaults to
:py:data:`sys.argv`
quiet : bool
set :term:`loglevel` to 0 - no logging
return_parser : bool
return the parser object, no parsing. Useful for inspecting
the command line options of a script without running it.
add_pipe_options : bool
add common options for redirecting input/output
add_extract_options : bool
add options for extracting barcodes
add_sam_options : bool
add options for SAM/BAM input
add_umi_grouping_options : bool
add options for barcode grouping
add_group_dedup_options : bool
add options for UMI grouping and deduping
Returns
-------
tuple
(:py:class:`U.OptionParser` object, list of positional
arguments)
"""
if not parser:
parser = OptionParser(
version="%prog version: $Id$")
global global_options, global_args, global_starting_time
# save default values given by user
user_defaults = copy.copy(parser.defaults)
global_starting_time = time.time()
if add_extract_options:
group = OptionGroup(parser, "fastq barcode extraction options")
group.add_option("--extract-method",
dest="extract_method", type="choice",
choices=["string", "regex"],
help=("How to extract the umi +/- cell barcodes, "
"Choose from 'string' or 'regex'"))
group.add_option("-p", "--bc-pattern", dest="pattern", type="string",
help="Barcode pattern")
group.add_option("--bc-pattern2", dest="pattern2", type="string",
help="Barcode pattern for paired reads")
group.add_option("--3prime", dest="prime3", action="store_true",
help="barcode is on 3' end of read.")
group.add_option("--read2-in", dest="read2_in", type="string",
help="file name for read pairs")
group.add_option("--filtered-out",
dest="filtered_out", type="string", default=None,
help=("Write out reads not matching regex pattern"
" to this file"))
group.add_option("--filtered-out2",
dest="filtered_out2", type="string", default=None,
help=("Write out paired reads not matching regex"
" pattern to this file"))
group.add_option("--ignore-read-pair-suffixes",
dest="ignore_suffix", action="store_true",
help="Ignore '\\1' and '\\2' read name suffixes")
parser.add_option_group(group)
if add_sam_options:
group = OptionGroup(parser, "Barcode extraction options")
group.add_option("--extract-umi-method", dest="get_umi_method", type="choice",
choices=("read_id", "tag", "umis"), default="read_id",
                         help="how is the read UMI +/- cell barcode encoded? "
"[default=%default]")
group.add_option("--umi-separator", dest="umi_sep",
type="string", help="separator between read id and UMI",
default="_")
group.add_option("--umi-tag", dest="umi_tag",
type="string", help="tag containing umi",
default='RX')
group.add_option("--umi-tag-split", dest="umi_tag_split",
type="string",
help="split UMI in tag and take the first element",
default=None)
group.add_option("--umi-tag-delimiter", dest="umi_tag_delim",
type="string",
help="concatenate UMI in tag separated by delimiter",
default=None)
group.add_option("--cell-tag", dest="cell_tag",
type="string", help="tag containing cell barcode",
default=None)
group.add_option("--cell-tag-split", dest="cell_tag_split",
type="string",
                         help=("split cell barcode in tag and take the first"
                               " element, e.g. for 10X GEM tags"),
default='-')
group.add_option("--cell-tag-delimiter", dest="cell_tag_delim",
type="string",
help="concatenate cell barcode in tag separated by delimiter",
default=None)
group.add_option("--filter-umi",
dest="filter_umi",
action="store_true",
#help="Filter the UMIs"
help=optparse.SUPPRESS_HELP)
group.add_option("--umi-whitelist", dest="umi_whitelist",
type="string", default=None,
#help="A whitelist of accepted UMIs"
#"[default=%default]"
help=optparse.SUPPRESS_HELP)
group.add_option("--umi-whitelist-paired", dest="umi_whitelist_paired",
type="string", default=None,
#help="A whitelist of accepted UMIs for "
#"read2[default=%default]"
help=optparse.SUPPRESS_HELP)
parser.add_option_group(group)
if add_umi_grouping_options:
group = OptionGroup(parser, "UMI grouping options")
group.add_option("--method", dest="method", type="choice",
choices=("adjacency", "directional",
"percentile", "unique", "cluster"),
default="directional",
help="method to use for umi grouping [default=%default]")
group.add_option("--edit-distance-threshold", dest="threshold",
type="int",
default=1,
                         help="Edit distance threshold at which to join two UMIs "
"when grouping UMIs. [default=%default]")
group.add_option("--spliced-is-unique", dest="spliced",
action="store_true",
help="Treat a spliced read as different to an unspliced"
" one [default=%default]",
default=False)
group.add_option("--soft-clip-threshold", dest="soft_clip_threshold",
type="float",
help="number of bases clipped from 5' end before "
"read is counted as spliced [default=%default]",
default=4)
group.add_option("--read-length", dest="read_length",
action="store_true", default=False,
help="use read length in addition to position and UMI "
"to identify possible duplicates [default=%default]")
parser.add_option_group(group)
if add_sam_options:
group = OptionGroup(parser, "single-cell RNA-Seq options")
group.add_option("--per-gene", dest="per_gene", action="store_true",
default=False,
help="Group/Dedup/Count per gene. Must combine with "
"either --gene-tag or --per-contig")
group.add_option("--gene-tag", dest="gene_tag",
type="string",
help="Gene is defined by this bam tag [default=%default]",
default=None)
group.add_option("--assigned-status-tag", dest="assigned_tag",
type="string",
                         help="Bam tag describing whether read is assigned to a gene. "
                         "By default, this is set to the same tag as --gene-tag",
default=None)
group.add_option("--skip-tags-regex", dest="skip_regex",
type="string",
help="Used with --gene-tag. "
"Ignore reads where the gene-tag matches this regex",
default="^(__|Unassigned)")
group.add_option("--per-contig", dest="per_contig", action="store_true",
default=False,
help="group/dedup/count UMIs per contig (field 3 in BAM; RNAME),"
" e.g for transcriptome where contig = gene")
group.add_option("--gene-transcript-map", dest="gene_transcript_map",
type="string",
help="File mapping transcripts to genes (tab separated)",
default=None)
group.add_option("--per-cell", dest="per_cell", action="store_true",
default=False,
help="group/dedup/count per cell")
parser.add_option_group(group)
if add_group_dedup_options:
group = OptionGroup(parser, "group/dedup options")
group.add_option("--buffer-whole-contig", dest="whole_contig",
action="store_true", default=False,
help="Read whole contig before outputting bundles: "
"guarantees that no reads are missed, but increases "
"memory usage")
group.add_option("--whole-contig", dest="whole_contig",
action="store_true", default=False,
help=optparse.SUPPRESS_HELP)
group.add_option("--multimapping-detection-method",
dest="detection_method", type="choice",
choices=("NH", "X0", "XT"),
default=None,
help="Some aligners identify multimapping using bam "
"tags. Setting this option to NH, X0 or XT will "
"use these tags when selecting the best read "
"amongst reads with the same position and umi "
"[default=%default]")
parser.add_option_group(group)
# options added separately here to maintain better output order
if add_sam_options:
group = OptionGroup(parser, "SAM/BAM options")
group.add_option("--mapping-quality", dest="mapping_quality",
type="int",
help="Minimum mapping quality for a read to be retained"
" [default=%default]",
default=0)
group.add_option("--output-unmapped", dest="output_unmapped", action="store_true",
default=False, help=optparse.SUPPRESS_HELP)
group.add_option("--unmapped-reads", dest="unmapped_reads",
type="choice",
choices=("discard", "use", "output"),
default="discard",
                         help=("How to handle unmapped reads. Options are "
                               "'discard', 'use' or 'output' [default=%default]"))
group.add_option("--chimeric-pairs", dest="chimeric_pairs",
type="choice",
choices=("discard", "use", "output"),
default="use",
                         help=("How to handle chimeric read pairs. Options are "
                               "'discard', 'use' or 'output' [default=%default]"))
group.add_option("--unpaired-reads", dest="unpaired_reads",
type="choice",
choices=("discard", "use", "output"),
default="use",
                         help=("How to handle unpaired reads. Options are "
                               "'discard', 'use' or 'output' [default=%default]"))
group.add_option("--ignore-umi", dest="ignore_umi",
action="store_true", help="Ignore UMI and dedup"
" only on position", default=False)
group.add_option("--ignore-tlen", dest="ignore_tlen", action="store_true",
default=False,
help="Option to dedup paired end reads based solely on read1, "
"whether or not the template length is the same")
group.add_option("--chrom", dest="chrom", type="string",
help="Restrict to one chromosome",
default=None)
group.add_option("--subset", dest="subset", type="float",
help="Use only a fraction of reads, specified by subset",
default=None)
group.add_option("-i", "--in-sam", dest="in_sam", action="store_true",
help="Input file is in sam format [default=%default]",
default=False)
group.add_option("--paired", dest="paired", action="store_true",
default=False,
help="paired input BAM. [default=%default]")
group.add_option("-o", "--out-sam", dest="out_sam", action="store_true",
help="Output alignments in sam format [default=%default]",
default=False)
group.add_option("--no-sort-output", dest="no_sort_output",
action="store_true", default=False,
help="Don't Sort the output")
parser.add_option_group(group)
if add_pipe_options:
group = OptionGroup(parser, "input/output options")
group.add_option("-I", "--stdin", dest="stdin", type="string",
help="file to read stdin from [default = stdin].",
metavar="FILE")
group.add_option("-L", "--log", dest="stdlog", type="string",
help="file with logging information "
"[default = stdout].",
metavar="FILE")
group.add_option("-E", "--error", dest="stderr", type="string",
help="file with error information "
"[default = stderr].",
metavar="FILE")
group.add_option("-S", "--stdout", dest="stdout", type="string",
help="file where output is to go "
"[default = stdout].",
metavar="FILE")
group.add_option("--temp-dir", dest="tmpdir", type="string",
help="Directory for temporary files. If not set,"
" the bash environmental variable TMPDIR is used"
"[default = None].",
metavar="FILE")
group.add_option("--log2stderr", dest="log2stderr",
action="store_true", help="send logging information"
" to stderr [default = False].")
group.add_option("--compresslevel", dest="compresslevel", type="int",
                         help="Level of Gzip compression to use. Default (6) matches "
"GNU gzip rather than python gzip default (which is 9)")
parser.set_defaults(stderr=sys.stderr)
parser.set_defaults(stdout=sys.stdout)
parser.set_defaults(stdlog=sys.stdout)
parser.set_defaults(stdin=sys.stdin)
parser.set_defaults(tmpdir=None)
parser.set_defaults(log2stderr=False)
parser.set_defaults(compresslevel=6)
parser.add_option_group(group)
group = OptionGroup(parser, "profiling options")
group.add_option("--timeit", dest='timeit_file', type="string",
                     help="store timing information in file [%default].")
group.add_option("--timeit-name", dest='timeit_name', type="string",
help="name in timing file for this class of jobs "
"[%default].")
group.add_option("--timeit-header", dest='timeit_header',
action="store_true",
help="add header for timing information [%default].")
parser.add_option_group(group)
group = OptionGroup(parser, "common options")
group.add_option("-v", "--verbose", dest="loglevel", type="int",
help="loglevel [%default]. The higher, the more output.")
group.add_option("-h", "--help", dest="short_help", action="callback",
callback=callbackShortHelp,
help="output short help (command line options only).")
group.add_option('--help-extended', action='help',
help='Output full documentation')
group.add_option("--random-seed", dest='random_seed', type="int",
help="random seed to initialize number generator "
"with [%default].")
parser.add_option_group(group)
# restore user defaults
parser.defaults.update(user_defaults)
if quiet:
parser.set_defaults(loglevel=0)
else:
parser.set_defaults(loglevel=1)
parser.set_defaults(
timeit_file=None,
timeit_name='all',
timeit_header=None,
random_seed=None,
)
if return_parser:
return parser
global_options, global_args = parser.parse_args(argv[1:])
if global_options.random_seed is not None:
random.seed(global_options.random_seed)
if add_pipe_options:
if global_options.stdout != sys.stdout:
global_options.stdout = openFile(global_options.stdout, "w")
if global_options.stderr != sys.stderr:
if global_options.stderr == "stderr":
global_options.stderr = global_options.stderr
else:
global_options.stderr = openFile(global_options.stderr, "w")
if global_options.stdlog != sys.stdout:
global_options.stdlog = openFile(global_options.stdlog, "a")
elif global_options.log2stderr:
global_options.stdlog = global_options.stderr
if global_options.stdin != sys.stdin:
global_options.stdin = openFile(global_options.stdin, "r")
else:
global_options.stderr = sys.stderr
global_options.stdout = sys.stdout
global_options.stdin = sys.stdin
if global_options.log2stderr:
global_options.stdlog = sys.stderr
else:
global_options.stdlog = sys.stdout
if global_options.loglevel >= 1:
global_options.stdlog.write(getHeader() + "\n")
global_options.stdlog.write(getParams(global_options) + "\n")
global_options.stdlog.flush()
# configure logging
# map from 0-10 to logging scale
# 0: quiet
    # 1: little verbosity
# >1: increased verbosity
if global_options.loglevel == 0:
lvl = logging.ERROR
elif global_options.loglevel == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
if global_options.stdout == global_options.stdlog:
format = '# %(asctime)s %(levelname)s %(message)s'
else:
format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(
level=lvl,
format=format,
stream=global_options.stdlog)
# set up multi-line logging
# Note that .handlers is not part of the API, might change
    # Solution is to configure handlers explicitly.
for handler in logging.getLogger().handlers:
handler.setFormatter(MultiLineFormatter(format))
return global_options, global_args
def validateExtractOptions(options):
''' Check the validity of the option combinations for barcode extraction'''
if not options.pattern and not options.pattern2:
if not options.read2_in:
raise ValueError("Must supply --bc-pattern for single-end")
else:
raise ValueError("Must supply --bc-pattern and/or --bc-pattern2 "
"if paired-end ")
if options.pattern2:
if not options.read2_in:
raise ValueError("must specify a paired fastq ``--read2-in``")
if not options.pattern2:
options.pattern2 = options.pattern
if options.filtered_out2 and not options.read2_in:
raise ValueError("Cannot use --filtered-out2 without read2 input (--read2-in)")
if ((options.read2_in and options.filtered_out) and not options.filtered_out2) or (
options.filtered_out2 and not options.filtered_out):
raise ValueError("Must supply both --filtered-out and --filtered-out2"
"to write out filtered reads for paired end")
extract_cell = False
extract_umi = False
# If the pattern is a regex we can compile the regex(es) prior to
# ExtractFilterAndUpdate instantiation
if options.extract_method == "regex":
if options.pattern:
try:
options.pattern = regex.compile(options.pattern)
except regex.error:
raise ValueError("--bc-pattern '%s' is not a "
"valid regex" % options.pattern)
if options.pattern2:
try:
options.pattern2 = regex.compile(options.pattern2)
            except regex.error:
raise ValueError("--bc-pattern2 '%s' is not a "
"valid regex" % options.pattern2)
# check whether the regex contains a umi group(s) and cell groups(s)
if options.extract_method == "regex":
if options.pattern:
for group in options.pattern.groupindex:
if group.startswith("cell_"):
extract_cell = True
elif group.startswith("umi_"):
extract_umi = True
if options.pattern2:
for group in options.pattern2.groupindex:
if group.startswith("cell_"):
extract_cell = True
elif group.startswith("umi_"):
extract_umi = True
# check whether the pattern string contains umi/cell bases
elif options.extract_method == "string":
if options.pattern:
if "C" in options.pattern:
extract_cell = True
if "N" in options.pattern:
extract_umi = True
if options.pattern2:
if "C" in options.pattern2:
extract_cell = True
if "N" in options.pattern2:
extract_umi = True
if not extract_umi:
if options.extract_method == "string":
raise ValueError("barcode pattern(s) do not include any umi bases "
"(marked with 'Ns') %s, %s" % (
options.pattern, options.pattern2))
elif options.extract_method == "regex":
raise ValueError("barcode regex(es) do not include any umi groups "
"(starting with 'umi_') %s, %s" % (
options.pattern, options.pattern2))
return(extract_cell, extract_umi)
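# Illustrative sketch of the pattern semantics enforced above (the _example_
# name is hypothetical and not used elsewhere in this module): with the
# "string" method, 'C' bases mark cell-barcode positions and 'N' bases mark
# UMI positions; with the "regex" method, named groups starting with "cell_"
# and "umi_" play the same role.
def _example_pattern_semantics():
    import regex  # assumes the same third-party 'regex' package used above
    # String pattern: 8 cell-barcode bases followed by 8 UMI bases.
    string_pattern = "CCCCCCCCNNNNNNNN"
    assert "C" in string_pattern and "N" in string_pattern
    # Equivalent regex pattern expressed with named groups.
    regex_pattern = regex.compile("(?P<cell_1>.{8})(?P<umi_1>.{8})")
    group_names = list(regex_pattern.groupindex)
    assert any(g.startswith("cell_") for g in group_names)
    assert any(g.startswith("umi_") for g in group_names)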
def validateSamOptions(options, group=False):
''' Check the validity of the option combinations for sam/bam input '''
if options.per_gene:
if options.gene_tag and options.per_contig:
raise ValueError("need to use either --per-contig "
"OR --gene-tag, please do not provide both")
if not options.per_contig and not options.gene_tag:
raise ValueError("for per-gene applications, must supply "
"--per-contig or --gene-tag")
if options.per_contig and not options.per_gene:
raise ValueError("need to use --per-gene with --per-contig")
if options.gene_tag and not options.per_gene:
raise ValueError("need to use --per-gene with --gene_tag")
if options.gene_transcript_map and not options.per_contig:
raise ValueError("need to use --per-contig and --per-gene"
"with --gene-transcript-map")
if options.get_umi_method == "tag":
if options.umi_tag is None:
raise ValueError("Need to supply the --umi-tag option")
if options.per_cell and options.cell_tag is None:
raise ValueError("Need to supply the --cell-tag option")
if options.assigned_tag is None:
options.assigned_tag = options.gene_tag
if options.skip_regex:
try:
re.compile(options.skip_regex)
except re.error:
raise ValueError("skip-regex '%s' is not a "
"valid regex" % options.skip_regex)
if not group:
if options.unmapped_reads == "output":
raise ValueError("Cannot use --unmapped-reads=output. If you want "
"to retain unmapped without deduplicating them, "
"use the group command")
if options.chimeric_pairs == "output":
raise ValueError("Cannot use --chimeric-pairs=output. If you want "
"to retain chimeric read pairs without "
"deduplicating them, use the group command")
if options.unpaired_reads == "output":
raise ValueError("Cannot use --unpaired-reads=output. If you want "
"to retain unmapped without deduplicating them, "
"use the group command")
if options.paired:
if options.chimeric_pairs == "use":
warn("Chimeric read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1."
"This may also increase the run time and memory usage. "
"Consider --chimeric-pairs==discard to discard these reads "
"or --chimeric-pairs==output (group command only) to "
"output them without grouping")
if options.unpaired_reads == "use":
warn("Unpaired read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1."
"This may also increase the run time and memory usage. "
"Consider --unpared-reads==discard to discard these reads "
"or --unpared-reads==output (group command only) to "
"output them without grouping")
if options.unmapped_reads == "use":
warn("Unmapped read pairs are being used. "
"Some read pair UMIs may be grouped/deduplicated using "
"just the mapping coordinates from read1. "
"This may also increase the run time and memory usage. "
"Consider --unmapped_reads==discard to discard these reads "
"or --unmapped_reads==output (group command only) to "
"output them without grouping")
command = " ".join(sys.argv)
info("command: %s" % command)
if "--umi-tag" in command or "--cell-tag" in command:
if options.get_umi_method != "tag":
raise ValueError("--umi-tag and/or --cell-tag options provided. "
"Need to set --extract-umi-method=tag")
if options.unmapped_reads == "use":
if not options.paired:
raise ValueError("--unmapped-reads=use is only compatible with "
"paired end reads (--paired)")
if "--chimeric-pairs" in command:
info("command: %s" % command)
if not options.paired:
raise ValueError("--chimeric-pairs is only compatible "
"with paired end reads (--paired)")
if "--unpaired-reads" in command:
if not options.paired:
raise ValueError("--unpaired-reads is only compatible "
"with paired end reads (--paired)")
if "--ignore-tlen" in command:
if not options.paired:
raise ValueError("--ignore-tlen is only compatible "
"with paired end reads (--paired)")
# legacy support for --output-unmapped behaviour
if options.output_unmapped:
warn("--output-unmapped will be removed in the near future. "
"Use --unmapped-reads=output instead")
# We will update the value of options.unmapped_reads so we want to
# check the user has not also supplied this option
if "--unmapped_reads" in command:
raise ValueError("Do not use --output-unmapped in combination with"
"--unmapped-reads. Just use --unmapped-reads")
options.unmapped_reads = "output"
def Stop():
"""stop the experiment.
This method performs final book-keeping, closes the output streams
and writes the final log messages indicating script completion.
"""
if global_options.loglevel >= 1 and global_benchmark:
t = time.time() - global_starting_time
global_options.stdlog.write(
"######### Time spent in benchmarked functions #########\n")
global_options.stdlog.write("# function\tseconds\tpercent\n")
for key, value in global_benchmark.items():
global_options.stdlog.write(
"# %s\t%6i\t%5.2f%%\n" % (key, value,
(100.0 * float(value) / t)))
global_options.stdlog.write(
"#######################################################\n")
if global_options.loglevel >= 1:
global_options.stdlog.write(getFooter() + "\n")
# close files
if global_options.stdout != sys.stdout:
global_options.stdout.close()
    # do not close log, otherwise an error occurs at interpreter exit (atexit)
# if global_options.stdlog != sys.stdout:
# global_options.stdlog.close()
if global_options.stderr != sys.stderr:
global_options.stderr.close()
if global_options.timeit_file:
outfile = open(global_options.timeit_file, "a")
if global_options.timeit_header:
outfile.write("\t".join(
("name", "wall", "user", "sys", "cuser", "csys",
"host", "system", "release", "machine",
"start", "end", "path", "cmd")) + "\n")
csystem, host, release, version, machine = map(str, os.uname())
uusr, usys, c_usr, c_sys = map(lambda x: "%5.2f" % x, os.times()[:4])
t_end = time.time()
c_wall = "%5.2f" % (t_end - global_starting_time)
if sys.argv[0] == "run.py":
cmd = global_args[0]
if len(global_args) > 1:
cmd += " '" + "' '".join(global_args[1:]) + "'"
else:
cmd = sys.argv[0]
result = "\t".join((global_options.timeit_name,
c_wall, uusr, usys, c_usr, c_sys,
host, csystem, release, machine,
time.asctime(time.localtime(global_starting_time)),
time.asctime(time.localtime(t_end)),
os.path.abspath(os.getcwd()),
cmd)) + "\n"
outfile.write(result)
outfile.close()
def log(loglevel, message):
"""log message at loglevel."""
logging.log(loglevel, message)
def info(message):
'''log information message, see the :mod:`logging` module'''
logging.info(message)
def warning(message):
'''log warning message, see the :mod:`logging` module'''
logging.warning(message)
def warn(message):
'''log warning message, see the :mod:`logging` module'''
logging.warning(message)
def debug(message):
'''log debugging message, see the :mod:`logging` module'''
logging.debug(message)
def error(message):
'''log error message, see the :mod:`logging` module'''
logging.error(message)
raise ValueError("UMI-tools failed with an error. Check the log file")
def critical(message):
'''log critical message, see the :mod:`logging` module'''
logging.critical(message)
def getTempFile(dir=None, shared=False, suffix=""):
'''get a temporary file.
The file is created and the caller needs to close and delete
the temporary file once it is not used any more.
Arguments
---------
dir : string
Directory of the temporary file and if not given is set to the
default temporary location in the global configuration dictionary.
shared : bool
        If set, the temporary file will be in a shared temporary
location (given by the global configuration directory).
suffix : string
Filename suffix
Returns
-------
file : File
A file object of the temporary file.
'''
return tempfile.NamedTemporaryFile(dir=dir, delete=False, prefix="ctmp",
suffix=suffix)
def getTempFilename(dir=None, shared=False, suffix=""):
'''return a temporary filename.
The file is created and the caller needs to delete the temporary
file once it is not used any more.
Arguments
---------
dir : string
Directory of the temporary file and if not given is set to the
default temporary location in the global configuration dictionary.
shared : bool
        If set, the temporary file will be in a shared temporary
location.
suffix : string
Filename suffix
Returns
-------
filename : string
Absolute pathname of temporary file.
'''
tmpfile = getTempFile(dir=dir, shared=shared, suffix=suffix)
tmpfile.close()
return tmpfile.name
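# Minimal usage sketch for the helpers above (hypothetical caller code, not
# invoked anywhere in this module): the temporary file is created on disk, so
# the caller is responsible for removing it when done.
def _example_temp_and_logging_usage():
    import os
    filename = getTempFilename(suffix=".tsv")
    try:
        with open(filename, "w") as outf:
            outf.write("gene\tcount\n")
        info("wrote temporary table to %s" % filename)
    finally:
        os.remove(filename)  # the caller must delete the file explicitly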
|
|
# This file is part of VoltDB.
# Copyright (C) 2008-2013 VoltDB Inc.
#
# This file contains original code and/or modifications of original code.
# Any modifications made by VoltDB Inc. are licensed under the following
# terms and conditions:
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'scooper'
import sys
import os
import optparse
import shlex
import copy
from voltcli import utility
# Volt CLI command processor
# Individual option variables are added by the option parser. They are available
# externally as module attributes.
#===============================================================================
class BaseOption(object):
#===============================================================================
"""
General CLI option specification (uses optparse keywords for now).
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
self.short_opt = short_opt
self.long_opt = long_opt
self.kwargs = kwargs
self.kwargs['dest'] = dest
# A help message of None makes it a hidden option.
if help_msg is not None:
self.kwargs['help'] = help_msg
else:
self.kwargs['help'] = optparse.SUPPRESS_HELP
if 'default' in self.kwargs:
if utility.is_string(kwargs['default']):
self.kwargs['help'] += ' (default="%s")' % self.kwargs['default']
else:
self.kwargs['help'] += ' (default=%s)' % self.kwargs['default']
def get_option_names(self):
return [a for a in (self.short_opt, self.long_opt) if a is not None]
def get_dest(self):
if 'dest' not in self.kwargs:
utility.abort('%s must specify a "dest" property.' % self.__class__.__name__)
return self.kwargs['dest']
def get_default(self):
return self.kwargs.get('default', None)
def postprocess_value(self, value):
# Hook for massaging the option instance value. Default to NOP.
return value
def __str__(self):
return '%s(%s/%s %s)' % (self.__class__.__name__,
self.short_opt, self.long_opt, self.kwargs)
def __cmp__(self, other):
# Sort options by lowercase letter or word, depending on which is available.
if self.short_opt:
if other.short_opt:
return cmp(self.short_opt.lower(), other.short_opt.lower())
return 1
if other.short_opt:
return -1
if self.long_opt:
if other.long_opt:
return cmp(self.long_opt.lower(), other.long_opt.lower())
return 1
if other.long_opt:
return -1
return 0
def has_value(self):
return (not 'action' in self.kwargs or self.kwargs['action'] == 'store')
#===============================================================================
class BooleanOption(BaseOption):
#===============================================================================
"""
Boolean CLI option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg,
action = 'store_true', **kwargs)
#===============================================================================
class StringOption(BaseOption):
#===============================================================================
"""
CLI string value option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
#===============================================================================
class IntegerOption(BaseOption):
#===============================================================================
"""
Integer CLI option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
BaseOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
#===============================================================================
class StringListOption(StringOption):
#===============================================================================
"""
CLI comma-separated string list option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
return [v.strip() for v in value.split(',')]
#===============================================================================
class IntegerListOption(StringOption):
#===============================================================================
"""
CLI comma-separated integer list option.
"""
def __init__(self, short_opt, long_opt, dest, help_msg, **kwargs):
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
bad = []
converted = []
for v in value.split(','):
try:
converted.append(int(v.strip()))
except ValueError:
bad.append(v.strip())
if bad:
utility.abort('Bad "%s" integer list value(s):' % self.get_dest().upper(), bad)
return converted
#===============================================================================
class EnumOption(StringOption):
#===============================================================================
"""
Enumeration option for selecting from a list of possible symbols.
"""
def __init__(self, short_opt, long_opt, dest, help_pfx, *values, **kwargs):
if not values or len(values) <= 1:
utility.abort('EnumOption "%s" must specify multiple valid values.' % dest)
self.values = values
help_msg = '%s [%s]' % (help_pfx, '|'.join(self.values))
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
if value not in self.values:
utility.abort('EnumOption "%s" value "%s" is not one of the following:'
% (self.get_dest(), value), self.values)
return value
#===============================================================================
class HostOption(StringOption):
#===============================================================================
"""
Comma-separated HOST[:PORT] list option.
"""
def __init__(self, short_opt, long_opt, dest, name, **kwargs):
self.min_count = utility.kwargs_get_integer(kwargs, 'min_count', default = 1)
self.max_count = utility.kwargs_get_integer(kwargs, 'max_count', default = 1)
self.default_port = utility.kwargs_get_integer(kwargs, 'default_port', default = 21212)
if self.max_count == 1:
help_msg = 'the %s HOST[:PORT]' % name
else:
help_msg = 'the comma-separated %s HOST[:PORT] list' % name
if self.default_port:
help_msg += ' (default port=%d)' % self.default_port
StringOption.__init__(self, short_opt, long_opt, dest, help_msg, **kwargs)
def postprocess_value(self, value):
hosts = utility.parse_hosts(value,
min_hosts = self.min_count,
max_hosts = self.max_count,
default_port = self.default_port)
if self.max_count == 1:
return hosts[0]
return hosts
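#===============================================================================
def _example_option_postprocessing():
#===============================================================================
    """
    Illustrative sketch (hypothetical option instances, not registered with any
    verb): shows how the list option classes post-process raw string values.
    """
    servers = StringListOption('-s', '--servers', 'servers', 'server name list')
    counts = IntegerListOption('-c', '--counts', 'counts', 'count list')
    # "a, b,c" -> ['a', 'b', 'c'];  "1, 2,3" -> [1, 2, 3]
    return servers.postprocess_value('a, b,c'), counts.postprocess_value('1, 2,3')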
#===============================================================================
class ArgumentException(Exception):
#===============================================================================
pass
#===============================================================================
class BaseArgument(object):
#===============================================================================
def __init__(self, name, help, **kwargs):
self.name = name
self.help = help
self.min_count = kwargs.get('min_count', 1)
self.max_count = kwargs.get('max_count', 1)
# A max_count value of None is interpreted as infinity.
if self.max_count is None:
self.max_count = sys.maxint
def get(self, value):
utility.abort('BaseArgument subclass must implement a get(value) method: %s'
% self.__class__.__name__)
#===============================================================================
class StringArgument(BaseArgument):
#===============================================================================
def __init__(self, name, help, **kwargs):
BaseArgument.__init__(self, name, help, **kwargs)
def get(self, value):
return str(value)
#===============================================================================
class IntegerArgument(BaseArgument):
#===============================================================================
def __init__(self, name, help, **kwargs):
BaseArgument.__init__(self, name, help, **kwargs)
def get(self, value):
try:
return int(value)
except ValueError, e:
raise ArgumentException('%s value is not a valid integer: %s'
% (self.name.upper(), str(value)))
#===============================================================================
class PathArgument(StringArgument):
#===============================================================================
def __init__(self, name, help, **kwargs):
# For now the only intelligence is to check for absolute paths when required.
# TODO: Add options to check for directories, files, attributes, etc..
self.absolute = utility.kwargs_get_boolean(kwargs, 'absolute', default = False)
self.exists = utility.kwargs_get_boolean(kwargs, 'exists', default = False)
requirements = []
help2 = ''
if self.absolute:
requirements.append('absolute path')
if self.exists:
requirements.append('must exist')
if requirements:
help2 = ' (%s)' % ', '.join(requirements)
StringArgument.__init__(self, name, help + help2, **kwargs)
def get(self, value):
svalue = str(value)
if self.absolute and not svalue.startswith('/'):
raise ArgumentException('%s path is not absolute: %s' % (self.name.upper(), svalue))
if self.exists and not os.path.exists(svalue):
raise ArgumentException('%s path does not exist: %s' % (self.name.upper(), svalue))
return svalue
#===============================================================================
class ParsedCommand(object):
#===============================================================================
"""
Holds the result of parsing a CLI command.
"""
def __init__(self, parser, opts, args, verb):
self.opts = opts
self.args = args
self.parser = parser
self.verb = verb
def __str__(self):
return 'ParsedCommand: %s %s %s' % (self.verb.name, self.opts, self.args)
#===============================================================================
class ExtendedHelpOptionParser(optparse.OptionParser):
#===============================================================================
'''
Extends OptionParser in order to support extended help.
'''
def __init__(self, *args, **kwargs):
self.format_epilog_called = False
optparse.OptionParser.__init__(self, *args, **kwargs)
def format_epilog(self, formatter):
"""
OptionParser hook that allows us to append verb descriptions to the
help message.
"""
self.format_epilog_called = True
return self.on_format_epilog()
def print_help(self):
"""
Override OptionParser.print_help() to work around Python 2.4 optparse
not supporting format_epilog().
"""
self.format_epilog_called = False
optparse.OptionParser.print_help(self)
if not self.format_epilog_called:
sys.stdout.write(self.on_format_epilog())
def on_format_epilog(self):
utility.abort('ExtendedHelpOptionParser subclass must override on_format_epilog(): %s'
% self.__class__.__name__)
#===============================================================================
class CLIParser(ExtendedHelpOptionParser):
#===============================================================================
"""
Command/sub-command (verb) argument and option parsing and validation.
"""
def __init__(self, verbs, base_options, usage, description, version):
"""
Command line processor constructor.
"""
self.verb = None
self.verbs = verbs
self.verb_names = verbs.keys()
self.base_options = base_options
self.verb_names.sort()
self.base_options.sort()
optparse.OptionParser.__init__(self,
description = description,
usage = usage,
version = version)
def add_base_options(self):
"""
Add the base options.
"""
for option in self.base_options:
self.add_option(*option.get_option_names(), **option.kwargs)
def add_verb_options(self, verb):
"""
Add options for verb command line.
"""
for option in verb.iter_options():
try:
self.add_option(*option.get_option_names(), **option.kwargs)
except Exception, e:
utility.abort('Exception initializing options for verb "%s".' % verb.name, e)
def process_verb_options(self, verb, opts):
"""
Validate the verb options and post-process the values.
"""
max_width = 0
missing = []
# Post-process the option values, e.g. convert strings to lists as needed.
for o in verb.iter_options():
dest = o.get_dest()
setattr(opts, dest, o.postprocess_value(getattr(opts, dest)))
def process_verb_arguments(self, verb, verb_args, verb_opts):
"""
Validate the verb arguments. Check that required arguments are present
and populate verb_opts attributes with scalar values or lists (for
trailing arguments with max_count > 1).
"""
# Add fixed arguments passed in through the decorator to the verb object.
args = copy.copy(verb_args) + verb.command_arguments
# Set attributes for required arguments.
missing = []
exceptions = []
iarg = 0
nargs = verb.get_argument_count()
for arg in verb.iter_arguments():
# It's missing if we've exhausted all the arguments before
# exhausting all the argument specs, unless it's the last argument
# spec and it's optional.
if iarg > len(args) or (iarg == len(args) and arg.min_count > 0):
missing.append((arg.name, arg.help))
else:
value = None
# The last argument can have repeated arguments. If more than
# one are allowed the values are put into a list.
if iarg == nargs - 1 and arg.max_count > 1:
if len(args) - iarg < arg.min_count:
utility.abort('A minimum of %d %s arguments are required.'
% (arg.min_count, arg.name.upper()))
if len(args) - iarg > arg.max_count:
utility.abort('A maximum of %d %s arguments are allowed.'
% (arg.max_count, arg.name.upper()))
# Pass through argument class get() for validation, conversion, etc..
# Skip bad values and report on them at the end.
value = []
for v in args[iarg:]:
try:
value.append(arg.get(v))
except ArgumentException, e:
exceptions.append(e)
iarg = len(args)
else:
# All other arguments are treated as scalars.
# Pass through argument class get() for validation, conversion, etc..
try:
value = arg.get(args[iarg])
except ArgumentException, e:
exceptions.append(e)
iarg += 1
if value is not None:
setattr(verb_opts, arg.name, value)
# Run the gauntlet of error disclosure. Abort and display usage as appropriate.
had_errors = 0
show_usage = False
if exceptions:
msg = 'Argument value %s:' % utility.pluralize('error', len(exceptions))
utility.error(msg, [e.message for e in exceptions])
had_errors += 1
if iarg < len(args):
self._abort('Extra arguments were provided:', args[iarg:])
had_errors += 1
show_usage = True
if missing:
fmt = '%%-%ds %%s' % max([len(o) for (o, h) in missing])
msg = 'Missing required %s:' % utility.pluralize('argument', len(missing))
utility.error(msg, [fmt % (o.upper(), h) for (o, h) in missing])
had_errors += 1
show_usage = True
if had_errors > 0:
if show_usage:
self._abort()
sys.exit(1)
def initialize_verb(self, verb_name):
"""
Initialize command line options for a specific verb.
"""
# Add the base options that are applicable to all verbs.
self.add_base_options()
# See if we know about the verb.
if verb_name.startswith('-'):
self._abort('The first argument must be a verb, not an option.')
if verb_name not in self.verbs:
self._abort('Unknown verb: %s' % verb_name)
self.verb = self.verbs[verb_name]
# Change the messaging from generic to verb-specific.
self.set_usage(get_verb_usage(self.verb))
self.set_description(self.verb.cli_spec.get_attr('description', 'No description provided'))
# Parse the command-specific options.
self.add_verb_options(self.verb)
def parse(self, *cmdargs):
"""
Parse command line.
"""
# Need something.
if not cmdargs:
self._abort('No verb was specified.')
pre_opts = preprocess_options(self.base_options, cmdargs)
# Support verb-less options like -h, --help and --version.
if cmdargs[0].startswith('-') and (pre_opts.help or pre_opts.version):
opts, args = self.parse_args(list(cmdargs))
return ParsedCommand(self, opts, args, None)
# Initialize options and arguments.
self.initialize_verb(cmdargs[0])
verb_cmdargs = list(cmdargs[1:])
if self.verb.cli_spec.passthrough:
# Provide all options and arguments without processing the options.
# E.g. Java programs want to handle all the options without interference.
verb_args = verb_cmdargs
verb_opts = None
else:
# Parse the verb command line.
verb_opts, verb_parsed_args = self.parse_args(verb_cmdargs)
# Post-process options.
self.process_verb_options(self.verb, verb_opts)
# Post-process arguments.
self.process_verb_arguments(self.verb, verb_parsed_args, verb_opts)
# The arguments should all be attributes in verb_opts now.
verb_args = []
return ParsedCommand(self, verb_opts, verb_args, self.verb)
def get_usage_string(self):
"""
Get usage string.
"""
# Swap stdout with UsageScraper pseudo-file object so that output is captured.
# Necessary because optparse only sends help to stdout.
class UsageScraper(object):
def __init__(self):
self.usage = []
def write(self, s):
self.usage.append(s)
scraper = UsageScraper()
stdout_save = sys.stdout
try:
sys.stdout = scraper
self.print_help()
finally:
sys.stdout = stdout_save
return ''.join(scraper.usage)
def on_format_epilog(self):
if not self.verb:
return self._format_verb_list()
if self.verb.get_argument_count() == 0:
return ''
rows = [(get_argument_usage(a), a.help) for a in self.verb.iter_arguments()]
lines = ['Arguments:', utility.format_table(rows, indent = 2)]
description2 = self.verb.cli_spec.get_attr('description2', None)
if description2:
lines.extend(('', description2))
return '\n%s\n' % ('\n'.join(lines))
def _abort(self, *msgs):
utility.error(*msgs)
sys.stdout.write('\n')
self.print_help()
sys.stdout.write('\n')
sys.exit(1)
def _format_verb_list(self):
rows1 = []
rows2 = []
for verb_name in self.verb_names:
verb = self.verbs[verb_name]
if not verb.cli_spec.hideverb:
if verb.cli_spec.baseverb:
rows2.append((get_verb_usage(verb), verb.cli_spec.description))
else:
rows1.append((get_verb_usage(verb), verb.cli_spec.description))
table1 = utility.format_table(rows1, caption = 'Verb Descriptions', separator = ' ')
table2 = utility.format_table(rows2, caption = 'Common Verbs', separator = ' ')
return '%s\n%s' % (table1, table2)
#===============================================================================
class CLISpec(object):
#===============================================================================
def __init__(self, **kwargs):
self._kwargs = kwargs
# Make sure options and arguments are flat lists.
if 'options' in self._kwargs:
self._kwargs['options'] = utility.flatten_to_list(self._kwargs['options'])
else:
self._kwargs['options'] = []
if 'arguments' in self._kwargs:
self._kwargs['arguments'] = utility.flatten_to_list(self._kwargs['arguments'])
else:
self._kwargs['arguments'] = []
def __getattr__(self, name):
return self._kwargs.get(name, None)
def __str__(self):
s = 'CLISpec: [\n'
keys = self._kwargs.keys()
keys.sort()
for key in keys:
s += ' %s: %s\n' % (key, utility.to_display_string(self._kwargs[key]))
s += ']'
return s
def add_to_list(self, name, *args):
utility.kwargs_merge_list(self._kwargs, name, *args)
def get_attr(self, name, default = None):
return utility.kwargs_get(self._kwargs, name, default = default, remove = False)
def pop_attr(self, name, default = None):
return utility.kwargs_get(self._kwargs, name, default = default, remove = True)
def merge_java_options(self, name, *options):
utility.kwargs_merge_java_options(self._kwargs, name, options)
def set_defaults(self, **kwargs):
utility.kwargs_set_defaults(self._kwargs, **kwargs)
def find_option(self, dest_name):
for o in self._kwargs['options']:
if o.get_dest() == dest_name:
return o
return None
def find_argument(self, dest_name):
for a in self._kwargs['arguments']:
if a.name == dest_name:
return a
return None
#===============================================================================
def get_argument_usage(a):
#===============================================================================
if a.max_count > 1:
ellipsis = ' ...'
else:
ellipsis = ''
if a.min_count == 0:
fmt = '[ %s%s ]'
else:
fmt = '%s%s'
return fmt % (a.name.upper(), ellipsis)
#===============================================================================
def get_verb_usage(verb):
#===============================================================================
"""
Provide the full usage string, including argument names, for a verb.
"""
if verb.cli_spec.usage:
usage2 = ' %s' % verb.cli_spec.usage
else:
usage2 = ''
args = [get_argument_usage(a) for a in verb.iter_arguments()]
if args:
sargs = ' %s' % (' '.join(args))
else:
sargs = ''
return ''.join([verb.name, usage2, sargs])
#===============================================================================
def preprocess_options(base_options, cmdargs):
#===============================================================================
"""
Simplistically parses command line options to allow early option checking.
Allows the parsing process to display debug messages. Returns an object
with attributes set for option values.
"""
class OptionValues(object):
pass
option_values = OptionValues()
# Create a base option dictionary indexed by short and long options.
# Add the built-in optparse help and version options so that they can be
# detected as stand-alone options.
options = {}
builtins = [BooleanOption('-h', '--help', 'help', ''),
BooleanOption(None, '--version', 'version', '')]
for opt in list(base_options) + builtins:
setattr(option_values, opt.get_dest(), opt.get_default())
if opt.short_opt:
options[opt.short_opt] = opt
if opt.long_opt:
options[opt.long_opt] = opt
# Walk through the options and arguments and set option values as attributes.
iopt = 0
while iopt < len(cmdargs):
if cmdargs[iopt].startswith('-'):
if cmdargs[iopt] in options:
opt = options[cmdargs[iopt]]
if opt.has_value():
# Option with argument
setattr(option_values, opt.get_dest(), cmdargs[iopt+1])
iopt += 1
else:
# Boolean option
setattr(option_values, opt.get_dest(), True)
iopt += 1
return option_values
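#===============================================================================
def _example_preprocess_options():
#===============================================================================
    """
    Illustrative sketch (hypothetical call, not used by the framework itself):
    with no extra base options, only the built-in help/version flags are
    recognized by the early scan above; everything else is left for the real
    optparse pass.
    """
    values = preprocess_options([], ('--version', 'somecommand'))
    return values.help, values.version    # -> (None, True)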
|
|
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# This is derived from gmsk2_pkt.py.
#
# Modified by: Thomas Schmid, Leslie Choong, Sanna Leidelof
#
import numpy
from gnuradio import gr, gru
from gnuradio.digital import packet_utils
from gnuradio import ucla
import crc16
import gnuradio.gr.gr_threading as _threading
import ieee802_15_4
import struct
MAX_PKT_SIZE = 128
def make_ieee802_15_4_packet(FCF, seqNr, addressInfo, payload, pad_for_usrp=False, preambleLength=4, SFD=0xA7):
"""
Build a 802_15_4 packet
@param FCF: 2 bytes defining the type of frame.
@type FCF: string
@param seqNr: 1 byte sequence number.
@type seqNr: byte
@param addressInfo: 0 to 20 bytes of address information.
@type addressInfo: string
@param payload: The payload of the packet. The maximal size of the message
can not be larger than 128.
@type payload: string
@param pad_for_usrp: If we should add 0s at the end to pad for the USRP.
@type pad_for_usrp: boolean
    @param preambleLength: Length of the preamble. Currently ignored.
    @type preambleLength: int
    @param SFD: Start of frame delimiter. This is by default set to the IEEE 802.15.4 standard,
but can be changed if required.
@type SFD: byte
"""
if len(FCF) != 2:
raise ValueError, "len(FCF) must be equal to 2"
if seqNr > 255:
raise ValueError, "seqNr must be smaller than 255"
if len(addressInfo) > 20:
raise ValueError, "len(addressInfo) must be in [0, 20]"
if len(payload) > MAX_PKT_SIZE - 5 - len(addressInfo):
raise ValueError, "len(payload) must be in [0, %d]" %(MAX_PKT_SIZE)
SHR = struct.pack("BBBBB", 0x00, 0x00, 0x00, 0x00, SFD)
PHR = struct.pack("B", 3 + len(addressInfo) + len(payload) + 2)
print hex(ord(PHR))
MPDU = FCF + struct.pack("B", seqNr) + addressInfo + payload
crc = crc16.CRC16()
crc.update(MPDU)
FCS = struct.pack("H", crc.intchecksum())
pkt = ''.join((SHR, PHR, MPDU, FCS))
if pad_for_usrp:
# note that we have 16 samples which go over the USB for each bit
pkt = pkt + (_npadding_bytes(len(pkt), 8) * '\x00')+0*'\x00'
return pkt
def _npadding_bytes(pkt_byte_len, spb):
"""
Generate sufficient padding such that each packet ultimately ends
up being a multiple of 512 bytes when sent across the USB. We
send 4-byte samples across the USB (16-bit I and 16-bit Q), thus
we want to pad so that after modulation the resulting packet
is a multiple of 128 samples.
    @param pkt_byte_len: len in bytes of packet, not including padding.
@param spb: samples per baud == samples per bit (1 bit / baud with GMSK)
@type spb: int
@returns number of bytes of padding to append.
"""
modulus = 128
byte_modulus = gru.lcm(modulus/8, spb) / spb
r = pkt_byte_len % byte_modulus
if r == 0:
return 0
return byte_modulus - r
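# Worked example of the padding arithmetic above (illustrative only): with the
# 128-sample modulus and, say, spb=2, byte_modulus = lcm(16, 2) / 2 = 8, so a
# 13-byte packet needs 3 padding bytes to reach the next multiple of 8.
def _example_padding():
    assert _npadding_bytes(13, 2) == 3
    assert _npadding_bytes(16, 2) == 0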
def make_FCF(frameType=1, securityEnabled=0, framePending=0, acknowledgeRequest=0, intraPAN=1, destinationAddressingMode=2, sourceAddressingMode=2):
"""
Build the FCF for the 802_15_4 packet
"""
if frameType >= 2**3:
raise ValueError, "frametype must be < 8"
    if securityEnabled >= 2**1:
        raise ValueError, "securityEnabled must be < 2"
    if framePending >= 2**1:
        raise ValueError, "framePending must be < 2"
    if acknowledgeRequest >= 2**1:
        raise ValueError, "acknowledgeRequest must be < 2"
    if intraPAN >= 2**1:
        raise ValueError, "intraPAN must be < 2"
#if destinationAddressingMode >= 2**2:
# raise ValueError, " must be < "
    if sourceAddressingMode >= 2**2:
        raise ValueError, "sourceAddressingMode must be < 4"
return struct.pack("H", frameType
+ (securityEnabled << 3)
+ (framePending << 4)
+ (acknowledgeRequest << 5)
+ (intraPAN << 6)
+ (destinationAddressingMode << 10)
+ (sourceAddressingMode << 14))
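# Illustrative sketch of combining the helpers in this file (hypothetical
# values, not called anywhere): make_FCF() with its defaults encodes a data
# frame with intra-PAN set and 16-bit addressing, i.e. the 16-bit value 0x8841
# (packed little-endian on the usual hosts).
def _example_build_packet():
    fcf = make_FCF()                      # data frame, intraPAN, 16-bit addresses
    addressInfo = struct.pack("HHHH", 0xFFFF, 0xFFFF, 0x0022, 0x1234)  # example PAN ids/addresses
    return make_ieee802_15_4_packet(fcf, seqNr=0x01, addressInfo=addressInfo,
                                    payload="hello", pad_for_usrp=False)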
class ieee802_15_4_mod_pkts(gr.hier_block2):
"""
IEEE 802.15.4 modulator that is a GNU Radio source.
Send packets by calling send_pkt
"""
def __init__(self, pad_for_usrp=True, *args, **kwargs):
"""
Hierarchical block for the 802_15_4 O-QPSK modulation.
Packets to be sent are enqueued by calling send_pkt.
The output is the complex modulated signal at baseband.
@param msgq_limit: maximum number of messages in message queue
@type msgq_limit: int
@param pad_for_usrp: If true, packets are padded such that they end up a multiple of 128 samples
See 802_15_4_mod for remaining parameters
"""
try:
self.msgq_limit = kwargs.pop('msgq_limit')
except KeyError:
pass
gr.hier_block2.__init__(self, "ieee802_15_4_mod_pkts",
gr.io_signature(0, 0, 0), # Input
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output
self.pad_for_usrp = pad_for_usrp
# accepts messages from the outside world
self.pkt_input = gr.message_source(gr.sizeof_char, self.msgq_limit)
self.ieee802_15_4_mod = ieee802_15_4.ieee802_15_4_mod(self, *args, **kwargs)
self.connect(self.pkt_input, self.ieee802_15_4_mod, self)
def send_pkt(self, seqNr, addressInfo, payload='', eof=False):
"""
Send the payload.
@param seqNr: sequence number of packet
@type seqNr: byte
@param addressInfo: address information for packet
@type addressInfo: string
@param payload: data to send
@type payload: string
"""
if eof:
msg = gr.message(1) # tell self.pkt_input we're not sending any more packets
else:
FCF = make_FCF()
pkt = make_ieee802_15_4_packet(FCF,
seqNr,
addressInfo,
payload,
self.pad_for_usrp)
#print "pkt =", packet_utils.string_to_hex_list(pkt), len(pkt)
msg = gr.message_from_string(pkt)
self.pkt_input.msgq().insert_tail(msg)
class ieee802_15_4_demod_pkts(gr.hier_block2):
"""
802_15_4 demodulator that is a GNU Radio sink.
The input is complex baseband. When packets are demodulated, they are passed to the
app via the callback.
"""
def __init__(self, *args, **kwargs):
"""
Hierarchical block for O-QPSK demodulation.
The input is the complex modulated signal at baseband.
Demodulated packets are sent to the handler.
@param callback: function of two args: ok, payload
@type callback: ok: bool; payload: string
@param threshold: detect access_code with up to threshold bits wrong (-1 -> use default)
@type threshold: int
See ieee802_15_4_demod for remaining parameters.
"""
try:
self.callback = kwargs.pop('callback')
self.threshold = kwargs.pop('threshold')
self.chan_num = kwargs.pop('channel')
except KeyError:
pass
gr.hier_block2.__init__(self, "ieee802_15_4_demod_pkts",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input
gr.io_signature(0, 0, 0)) # Output
self._rcvd_pktq = gr.msg_queue() # holds packets from the PHY
self.ieee802_15_4_demod = ieee802_15_4.ieee802_15_4_demod(self, *args, **kwargs)
self._packet_sink = ucla.ieee802_15_4_packet_sink(self._rcvd_pktq, self.threshold)
self.connect(self,self.ieee802_15_4_demod, self._packet_sink)
self._watcher = _queue_watcher_thread(self._rcvd_pktq, self.callback, self.chan_num)
def carrier_sensed(self):
"""
Return True if we detect carrier.
"""
return self._packet_sink.carrier_sensed()
class _queue_watcher_thread(_threading.Thread):
def __init__(self, rcvd_pktq, callback, chan_num):
_threading.Thread.__init__(self)
self.setDaemon(1)
self.rcvd_pktq = rcvd_pktq
self.callback = callback
self.chan_num = chan_num
self.prev_crc = -1
self.keep_running = True
self.start()
def run(self):
while self.keep_running:
#print "802_15_4_pkt: waiting for packet"
msg = self.rcvd_pktq.delete_head()
ok = 0
payload = msg.to_string()
#print "received packet "
if len(payload) > 2:
crc = crc16.CRC16()
else:
print "too small:", len(payload)
continue
# Calculate CRC skipping over LQI and CRC
crc.update(payload[1:-2])
crc_check = crc.intchecksum()
#print "checksum: %s, received: %s" % (crc_check,
#str(ord(payload[-2]) + ord(payload[-1])*256))
ok = (crc_check == ord(payload[-2]) + ord(payload[-1])*256)
msg_payload = payload
if self.prev_crc != crc_check:
self.prev_crc = crc_check
if self.callback:
self.callback(ok, msg_payload, self.chan_num)
class chan_802_15_4:
chan_map= { 11 : 2405e6,
12 : 2410e6,
13 : 2415e6,
14 : 2420e6,
15 : 2425e6,
16 : 2430e6,
17 : 2435e6,
18 : 2440e6,
19 : 2445e6,
20 : 2450e6,
21 : 2455e6,
22 : 2460e6,
23 : 2465e6,
24 : 2470e6,
25 : 2475e6,
26 : 2480e6}
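# The map above follows the IEEE 802.15.4 2.4 GHz channel plan: centre
# frequency = 2405 MHz + 5 MHz * (channel - 11) for channels 11-26. A tiny
# equivalent helper (illustrative, unused elsewhere):
def _example_chan_freq(channel):
    assert 11 <= channel <= 26
    return 2405e6 + 5e6 * (channel - 11)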
|
|
# -*- test-case-name: txweb2.test.test_util -*-
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, [email protected]
##
"""
Utilities
This API is considered private to static.py and is therefore subject to
change.
"""
__all__ = [
"allDataFromStream",
"davXMLFromStream",
"noDataFromStream",
"normalizeURL",
"joinURL",
"parentForURL",
"unimplemented",
"bindMethods",
]
from urlparse import urlsplit, urlunsplit
import posixpath # Careful; this module is not documented as public API
from twisted.python.failure import Failure
from twisted.internet.defer import succeed
from twext.python.log import Logger
from txweb2.stream import readStream
from txdav.xml.parser import WebDAVDocument
log = Logger()
##
# Reading request body
##
def allDataFromStream(stream, filter=None):
data = []
def gotAllData(_):
if not data:
return None
result = "".join([str(x) for x in data])
if filter is None:
return result
else:
return filter(result)
return readStream(stream, data.append).addCallback(gotAllData)
def davXMLFromStream(stream):
# FIXME:
# This reads the request body into a string and then parses it.
# A better solution would parse directly and incrementally from the
# request stream.
if stream is None:
return succeed(None)
def parse(xml):
try:
doc = WebDAVDocument.fromString(xml)
doc.root_element.validate()
return doc
except ValueError:
log.error("Bad XML:\n%s" % (xml,))
raise
return allDataFromStream(stream, parse)
def noDataFromStream(stream):
def gotData(data):
if data:
raise ValueError("Stream contains unexpected data.")
return readStream(stream, gotData)
##
# URLs
##
def normalizeURL(url):
"""
    Normalize a URL.
@param url: a URL.
@return: the normalized representation of C{url}. The returned URL will
never contain a trailing C{"/"}; it is up to the caller to determine
whether the resource referred to by the URL is a collection and add a
trailing C{"/"} if so.
"""
def cleanup(path):
# For some silly reason, posixpath.normpath doesn't clean up '//' at the
# start of a filename, so let's clean it up here.
if path[0] == "/":
count = 0
for char in path:
if char != "/":
break
count += 1
path = path[count - 1:]
return path.encode("utf-8")
(scheme, host, path, query, fragment) = urlsplit(cleanup(url))
path = cleanup(posixpath.normpath(path))
return urlunsplit((scheme, host, path, query, fragment))
def joinURL(*urls):
"""
Appends URLs in series.
@param urls: URLs to join.
@return: the normalized URL formed by combining each URL in C{urls}. The
returned URL will contain a trailing C{"/"} if and only if the last
given URL contains a trailing C{"/"}.
"""
if len(urls) > 0 and len(urls[-1]) > 0 and urls[-1][-1] == "/":
trailing = "/"
else:
trailing = ""
url = normalizeURL("/".join([url for url in urls]))
if url == "/":
return "/"
else:
return url + trailing
def parentForURL(url):
"""
Extracts the URL of the containing collection resource for the resource
corresponding to a given URL. This removes any query or fragment pieces.
@param url: an absolute (server-relative is OK) URL.
@return: the normalized URL of the collection resource containing the
resource corresponding to C{url}. The returned URL will always contain
a trailing C{"/"}.
"""
(scheme, host, path, _ignore_query, _ignore_fragment) = urlsplit(normalizeURL(url))
index = path.rfind("/")
    if index == 0:
if path == "/":
return None
else:
path = "/"
else:
        if index == -1:
raise ValueError("Invalid URL: %s" % (url,))
else:
path = path[:index] + "/"
return urlunsplit((scheme, host, path, None, None))
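# Small self-check of the URL helpers above (illustrative only; the example
# paths are arbitrary and nothing in this module calls this):
def _exampleURLHelpers():
    assert normalizeURL("/foo//bar/") == "/foo/bar"    # never a trailing "/"
    assert joinURL("/foo/", "bar/") == "/foo/bar/"     # trailing "/" kept from the last part
    assert parentForURL("/foo/bar?x=1") == "/foo/"     # query dropped, trailing "/" added
    assert parentForURL("/") is None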
##
# Python magic
##
def unimplemented(obj):
"""
Throw an exception signifying that the current method is unimplemented
and should not have been invoked.
"""
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError("Method %s is unimplemented in subclass %s" % (caller, obj.__class__))
def bindMethods(module, clazz, prefixes=("preconditions_", "http_", "report_")):
"""
Binds all functions in the given module (as defined by that module's
C{__all__} attribute) which start with any of the given prefixes as methods
of the given class.
@param module: the module in which to search for functions.
@param clazz: the class to bind found functions to as methods.
@param prefixes: a sequence of prefixes to match found functions against.
"""
for submodule_name in module.__all__:
try:
__import__(module.__name__ + "." + submodule_name)
except ImportError:
log.error("Unable to import module %s" % (module.__name__ + "." + submodule_name,))
Failure().raiseException()
submodule = getattr(module, submodule_name)
for method_name in submodule.__all__:
for prefix in prefixes:
if method_name.startswith(prefix):
method = getattr(submodule, method_name)
setattr(clazz, method_name, method)
break
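# Illustrative sketch of bindMethods (contrived module objects; nothing here
# uses them): functions listed in a submodule's __all__ whose names match one
# of the prefixes become methods of the target class.
def _exampleBindMethods():
    import sys, types
    pkg = types.ModuleType("examplepkg")
    sub = types.ModuleType("examplepkg.sub")
    def http_GET(self):
        return "ok"
    sub.http_GET = http_GET
    sub.__all__ = ["http_GET"]
    pkg.sub = sub
    pkg.__all__ = ["sub"]
    sys.modules["examplepkg"] = pkg
    sys.modules["examplepkg.sub"] = sub
    class ExampleResource(object):
        pass
    bindMethods(pkg, ExampleResource)
    return ExampleResource().http_GET()    # -> "ok"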
|
|
# mapping of entity names to utf-8 values
# lists all characters in the "XHTML 1.1 + MathML 2.0" DTD
map_utf8 = {
"CloseCurlyDoubleQuote": "\xe2\x80\x9d",
"CloseCurlyQuote": "\xe2\x80\x99",
"LeftAngleBracket": "\xe2\x8c\xa9",
"LeftCeiling": "\xe2\x8c\x88",
"LeftDoubleBracket": "\xe3\x80\x9a",
"LeftFloor": "\xe2\x8c\x8a",
"OpenCurlyDoubleQuote": "\xe2\x80\x9c",
"OpenCurlyQuote": "\xe2\x80\x98",
"RightAngleBracket": "\xe2\x8c\xaa",
"RightCeiling": "\xe2\x8c\x89",
"RightDoubleBracket": "\xe3\x80\x9b",
"RightFloor": "\xe2\x8c\x8b",
"InvisibleComma": "\xe2\x81\xa3",
"ic": "\xe2\x81\xa3",
"HorizontalLine": "\xe2\x94\x80",
"VerticalLine": "\x7c",
"Assign": "\xe2\x89\x94",
"Because": "\xe2\x88\xb5",
"Therefore": "\xe2\x88\xb4",
"VerticalSeparator": "\xe2\x9d\x98",
"Colon": "\xe2\x88\xb7",
"Colone": "\xe2\xa9\xb4",
"Proportion": "\xe2\x88\xb7",
"SuchThat": "\xe2\x88\x8b",
"DoubleLeftTee": "\xe2\xab\xa4",
"DoubleRightTee": "\xe2\x8a\xa8",
"DownTee": "\xe2\x8a\xa4",
"DownTeeArrow": "\xe2\x86\xa7",
"LeftDownTeeVector": "\xe2\xa5\xa1",
"RightDownTeeVector": "\xe2\xa5\x9d",
"DoubleLeftTee": "\xe2\xab\xa4",
"LeftTee": "\xe2\x8a\xa3",
"LeftTeeArrow": "\xe2\x86\xa4",
"DownLeftTeeVector": "\xe2\xa5\x9e",
"LeftTeeVector": "\xe2\xa5\x9a",
"DoubleRightTee": "\xe2\x8a\xa8",
"RightTee": "\xe2\x8a\xa2",
"RightTeeArrow": "\xe2\x86\xa6",
"DownRightTeeVector": "\xe2\xa5\x9f",
"RightTeeVector": "\xe2\xa5\x9b",
"Implies": "\xe2\x87\x92",
"RoundImplies": "\xe2\xa5\xb0",
"RoundImplies": "\xe2\xa5\xb0",
"Or": "\xe2\xa9\x94",
"And": "\xe2\xa9\x93",
"bNot": "\xe2\xab\xad",
"Not": "\xe2\xab\xac",
"NotCongruent": "\xe2\x89\xa2",
"NotDoubleVerticalBar": "\xe2\x88\xa6",
"NotElement": "\xe2\x88\x89",
"NotEqual": "\xe2\x89\xa0",
"NotExists": "\xe2\x88\x84",
"NotGreater": "\xe2\x89\xaf",
"NotGreaterEqual": "\xe2\x89\xb1",
"NotGreaterLess": "\xe2\x89\xb9",
"NotGreaterTilde": "\xe2\x89\xb5",
"NotLeftTriangle": "\xe2\x8b\xaa",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"NotLess": "\xe2\x89\xae",
"NotLessEqual": "\xe2\x89\xb0",
"NotLessGreater": "\xe2\x89\xb8",
"NotLessTilde": "\xe2\x89\xb4",
"NotPrecedes": "\xe2\x8a\x80",
"NotPrecedesEqual": "\xe2\xaa\xaf",
"NotPrecedesSlantEqual": "\xe2\x8b\xa0",
"NotReverseElement": "\xe2\x88\x8c",
"NotRightTriangle": "\xe2\x8b\xab",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"NotSubset": "\xe2\x8a\x84",
"NotSubsetEqual": "\xe2\x8a\x88",
"NotSucceeds": "\xe2\x8a\x81",
"NotSucceedsSlantEqual": "\xe2\x8b\xa1",
"NotSuperset": "\xe2\x8a\x85",
"NotSupersetEqual": "\xe2\x8a\x89",
"NotTilde": "\xe2\x89\x81",
"NotTildeEqual": "\xe2\x89\x84",
"NotTildeFullEqual": "\xe2\x89\x87",
"NotTildeTilde": "\xe2\x89\x89",
"NotVerticalBar": "\xe2\x88\xa4",
"NotCupCap": "\xe2\x89\xad",
"Exists": "\xe2\x88\x83",
"NotExists": "\xe2\x88\x84",
"ForAll": "\xe2\x88\x80",
"NotExists": "\xe2\x88\x84",
"Element": "\xe2\x88\x88",
"NotElement": "\xe2\x88\x89",
"NotReverseElement": "\xe2\x88\x8c",
"ReverseElement": "\xe2\x88\x8b",
"NotElement": "\xe2\x88\x89",
"NotReverseElement": "\xe2\x88\x8c",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"NotSubset": "\xe2\x8a\x84",
"NotSubsetEqual": "\xe2\x8a\x88",
"NotSubsetEqual": "\xe2\x8a\x88",
"NotSuperset": "\xe2\x8a\x85",
"NotSupersetEqual": "\xe2\x8a\x89",
"NotSupersetEqual": "\xe2\x8a\x89",
"NotReverseElement": "\xe2\x88\x8c",
"ReverseElement": "\xe2\x88\x8b",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"SquareSubset": "\xe2\x8a\x8f",
"SquareSubsetEqual": "\xe2\x8a\x91",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"SquareSubsetEqual": "\xe2\x8a\x91",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"SquareSuperset": "\xe2\x8a\x90",
"SquareSupersetEqual": "\xe2\x8a\x92",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"SquareSupersetEqual": "\xe2\x8a\x92",
"Sub": "\xe2\x8b\x90",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSubset": "\xe2\x8a\x84",
"NotSubsetEqual": "\xe2\x8a\x88",
"SquareSubset": "\xe2\x8a\x8f",
"SquareSubsetEqual": "\xe2\x8a\x91",
"Subset": "\xe2\x8b\x90",
"SubsetEqual": "\xe2\x8a\x86",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSubsetEqual": "\xe2\x8a\x88",
"SquareSubsetEqual": "\xe2\x8a\x91",
"SubsetEqual": "\xe2\x8a\x86",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"NotSuperset": "\xe2\x8a\x85",
"NotSupersetEqual": "\xe2\x8a\x89",
"SquareSuperset": "\xe2\x8a\x90",
"SquareSupersetEqual": "\xe2\x8a\x92",
"Superset": "\xe2\x8a\x83",
"SupersetEqual": "\xe2\x8a\x87",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"NotSupersetEqual": "\xe2\x8a\x89",
"SquareSupersetEqual": "\xe2\x8a\x92",
"SupersetEqual": "\xe2\x8a\x87",
"DoubleLeftArrow": "\xe2\x87\x90",
"DoubleLeftRightArrow": "\xe2\x87\x94",
"DoubleRightArrow": "\xe2\x87\x92",
"DownLeftRightVector": "\xe2\xa5\x90",
"DownLeftTeeVector": "\xe2\xa5\x9e",
"DownLeftVector": "\xe2\x86\xbd",
"DownLeftVectorBar": "\xe2\xa5\x96",
"DownLeftVectorBar": "\xe2\xa5\x96",
"DownRightTeeVector": "\xe2\xa5\x9f",
"DownRightVector": "\xe2\x87\x81",
"DownRightVectorBar": "\xe2\xa5\x97",
"DownRightVectorBar": "\xe2\xa5\x97",
"DoubleLeftArrow": "\xe2\x87\x90",
"DoubleLongLeftArrow": "\xe2\x9f\xb8",
"LeftArrow": "\xe2\x86\x90",
"LeftArrowBar": "\xe2\x87\xa4",
"LeftArrowRightArrow": "\xe2\x87\x86",
"LongLeftArrow": "\xe2\x9f\xb5",
"LowerLeftArrow": "\xe2\x86\x99",
"RightArrowLeftArrow": "\xe2\x87\x84",
"UpperLeftArrow": "\xe2\x86\x96",
"LeftArrowBar": "\xe2\x87\xa4",
"LeftArrowRightArrow": "\xe2\x87\x86",
"DoubleLeftRightArrow": "\xe2\x87\x94",
"DoubleLongLeftRightArrow": "\xe2\x9f\xba",
"LeftRightArrow": "\xe2\x86\x94",
"LongLeftRightArrow": "\xe2\x9f\xb7",
"DownLeftRightVector": "\xe2\xa5\x90",
"LeftRightVector": "\xe2\xa5\x8e",
"LeftTeeArrow": "\xe2\x86\xa4",
"DownLeftTeeVector": "\xe2\xa5\x9e",
"LeftTeeVector": "\xe2\xa5\x9a",
"DownLeftVector": "\xe2\x86\xbd",
"LeftVector": "\xe2\x86\xbc",
"DownLeftVectorBar": "\xe2\xa5\x96",
"LeftVectorBar": "\xe2\xa5\x92",
"DownLeftVectorBar": "\xe2\xa5\x96",
"LeftVectorBar": "\xe2\xa5\x92",
"LowerLeftArrow": "\xe2\x86\x99",
"LowerRightArrow": "\xe2\x86\x98",
"DoubleLeftRightArrow": "\xe2\x87\x94",
"DoubleLongLeftRightArrow": "\xe2\x9f\xba",
"DoubleLongRightArrow": "\xe2\x9f\xb9",
"DoubleRightArrow": "\xe2\x87\x92",
"LeftArrowRightArrow": "\xe2\x87\x86",
"LeftRightArrow": "\xe2\x86\x94",
"LongLeftRightArrow": "\xe2\x9f\xb7",
"LongRightArrow": "\xe2\x9f\xb6",
"LowerRightArrow": "\xe2\x86\x98",
"RightArrow": "\xe2\x86\x92",
"RightArrowBar": "\xe2\x87\xa5",
"RightArrowLeftArrow": "\xe2\x87\x84",
"UpperRightArrow": "\xe2\x86\x97",
"RightArrowBar": "\xe2\x87\xa5",
"RightArrowLeftArrow": "\xe2\x87\x84",
"RightTeeArrow": "\xe2\x86\xa6",
"DownRightTeeVector": "\xe2\xa5\x9f",
"RightTeeVector": "\xe2\xa5\x9b",
"DownRightVector": "\xe2\x87\x81",
"RightVector": "\xe2\x87\x80",
"DownLeftRightVector": "\xe2\xa5\x90",
"DownRightVectorBar": "\xe2\xa5\x97",
"LeftRightVector": "\xe2\xa5\x8e",
"RightVectorBar": "\xe2\xa5\x93",
"DownRightVectorBar": "\xe2\xa5\x97",
"RightVectorBar": "\xe2\xa5\x93",
"UpperLeftArrow": "\xe2\x86\x96",
"UpperRightArrow": "\xe2\x86\x97",
"mumap": "\xe2\x8a\xb8",
"lotimes": "\xe2\xa8\xb4",
"lthree": "\xe2\x8b\x8b",
"ltimes": "\xe2\x8b\x89",
"olt": "\xe2\xa7\x80",
"otimes": "\xe2\x8a\x97",
"Otimes": "\xe2\xa8\xb7",
"otimesas": "\xe2\xa8\xb6",
"rotimes": "\xe2\xa8\xb5",
"timesb": "\xe2\x8a\xa0",
"timesbar": "\xe2\xa8\xb1",
"tritime": "\xe2\xa8\xbb",
"lparlt": "\xe2\xa6\x93",
"ltrPar": "\xe2\xa6\x96",
"nlt": "\xe2\x89\xae",
"nltri": "\xe2\x8b\xaa",
"nltrie": "\xe2\x8b\xac",
"nvlt": "\xe2\x89\xae",
"lltri": "\xe2\x97\xba",
"ultri": "\xe2\x97\xb8",
"ltcc": "\xe2\xaa\xa6",
"ltcir": "\xe2\xa9\xb9",
"ltdot": "\xe2\x8b\x96",
"ltlarr": "\xe2\xa5\xb6",
"ltquest": "\xe2\xa9\xbb",
"ltrie": "\xe2\x8a\xb4",
"rtriltri": "\xe2\xa7\x8e",
"submult": "\xe2\xab\x81",
"supmult": "\xe2\xab\x82",
"vltri": "\xe2\x8a\xb2",
"delta": "\xce\xb4",
"Delta": "\xce\x94",
"times": "\xc3\x97",
"ltri": "\xe2\x97\x83",
"ltrif": "\xe2\x97\x82",
"malt": "\xe2\x9c\xa0",
"hamilt": "\xe2\x84\x8b",
"nabla": "\xe2\x88\x87",
"blacktriangleleft": "\xe2\x97\x82",
"InvisibleTimes": "\xe2\x81\xa2",
"leftthreetimes": "\xe2\x8b\x8b",
"LeftTriangle": "\xe2\x8a\xb2",
"LeftTriangleEqual": "\xe2\x8a\xb4",
"lessdot": "\xe2\x8b\x96",
"maltese": "\xe2\x9c\xa0",
"multimap": "\xe2\x8a\xb8",
"nless": "\xe2\x89\xae",
"NotLeftTriangle": "\xe2\x8b\xaa",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"NotLess": "\xe2\x89\xae",
"ntriangleleft": "\xe2\x8b\xaa",
"ntrianglelefteq": "\xe2\x8b\xac",
"triangleleft": "\xe2\x97\x83",
"trianglelefteq": "\xe2\x8a\xb4",
"vartriangleleft": "\xe2\x8a\xb2",
"it": "\xe2\x81\xa2",
"Congruent": "\xe2\x89\xa1",
"NotCongruent": "\xe2\x89\xa2",
"CupCap": "\xe2\x89\x8d",
"NotCupCap": "\xe2\x89\xad",
"DotEqual": "\xe2\x89\x90",
"DoubleVerticalBar": "\xe2\x88\xa5",
"NotDoubleVerticalBar": "\xe2\x88\xa6",
"DotEqual": "\xe2\x89\x90",
"EqualTilde": "\xe2\x89\x82",
"GreaterEqual": "\xe2\x89\xa5",
"GreaterEqualLess": "\xe2\x8b\x9b",
"GreaterFullEqual": "\xe2\x89\xa7",
"GreaterSlantEqual": "\xe2\xa9\xbe",
"HumpEqual": "\xe2\x89\x8f",
"LeftTriangleEqual": "\xe2\x8a\xb4",
"LessEqualGreater": "\xe2\x8b\x9a",
"LessFullEqual": "\xe2\x89\xa6",
"LessSlantEqual": "\xe2\xa9\xbd",
"NotEqual": "\xe2\x89\xa0",
"NotGreaterEqual": "\xe2\x89\xb1",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"NotLessEqual": "\xe2\x89\xb0",
"NotPrecedesEqual": "\xe2\xaa\xaf",
"NotPrecedesSlantEqual": "\xe2\x8b\xa0",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"NotSubsetEqual": "\xe2\x8a\x88",
"NotSucceedsSlantEqual": "\xe2\x8b\xa1",
"NotSupersetEqual": "\xe2\x8a\x89",
"NotTildeEqual": "\xe2\x89\x84",
"NotTildeFullEqual": "\xe2\x89\x87",
"PrecedesEqual": "\xe2\xaa\xaf",
"PrecedesSlantEqual": "\xe2\x89\xbc",
"RightTriangleEqual": "\xe2\x8a\xb5",
"SquareSubsetEqual": "\xe2\x8a\x91",
"SquareSupersetEqual": "\xe2\x8a\x92",
"SubsetEqual": "\xe2\x8a\x86",
"SucceedsEqual": "\xe2\x89\xbd",
"SucceedsSlantEqual": "\xe2\x89\xbd",
"SupersetEqual": "\xe2\x8a\x87",
"TildeEqual": "\xe2\x89\x83",
"TildeFullEqual": "\xe2\x89\x85",
"Equal": "\xe2\xa9\xb5",
"EqualTilde": "\xe2\x89\x82",
"Equilibrium": "\xe2\x87\x8c",
"ReverseEquilibrium": "\xe2\x87\x8b",
"ReverseUpEquilibrium": "\xe2\xa5\xaf",
"UpEquilibrium": "\xe2\xa5\xae",
"GreaterEqual": "\xe2\x89\xa5",
"GreaterEqualLess": "\xe2\x8b\x9b",
"NotGreaterEqual": "\xe2\x89\xb1",
"GreaterEqualLess": "\xe2\x8b\x9b",
"GreaterFullEqual": "\xe2\x89\xa7",
"NestedGreaterGreater": "\xe2\x89\xab",
"GreaterGreater": "\xe2\xaa\xa2",
"GreaterLess": "\xe2\x89\xb7",
"NotGreaterLess": "\xe2\x89\xb9",
"GreaterSlantEqual": "\xe2\xa9\xbe",
"GreaterTilde": "\xe2\x89\xb3",
"NotGreaterTilde": "\xe2\x89\xb5",
"HumpDownHump": "\xe2\x89\x8e",
"HumpEqual": "\xe2\x89\x8f",
"LeftTriangle": "\xe2\x8a\xb2",
"LeftTriangleEqual": "\xe2\x8a\xb4",
"NotLeftTriangle": "\xe2\x8b\xaa",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"LeftTriangleBar": "\xe2\xa7\x8f",
"LeftTriangleBar": "\xe2\xa7\x8f",
"LeftTriangleEqual": "\xe2\x8a\xb4",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"angzarr": "\xe2\x8d\xbc",
"cirmid": "\xe2\xab\xaf",
"cudarrl": "\xe2\xa4\xb8",
"cularr": "\xe2\x86\xb6",
"cularrp": "\xe2\xa4\xbd",
"dHar": "\xe2\xa5\xa5",
"dharl": "\xe2\x87\x83",
"harr": "\xe2\x86\x94",
"harrcir": "\xe2\xa5\x88",
"harrw": "\xe2\x86\xad",
"lAarr": "\xe2\x87\x9a",
"Larr": "\xe2\x86\x9e",
"larrbfs": "\xe2\xa4\x9f",
"larrfs": "\xe2\xa4\x9d",
"larrhk": "\xe2\x86\xa9",
"larrlp": "\xe2\x86\xab",
"larrpl": "\xe2\xa4\xb9",
"larrsim": "\xe2\xa5\xb3",
"larrtl": "\xe2\x86\xa2",
"latail": "\xe2\xa4\x99",
"lAtail": "\xe2\xa4\x9b",
"lbarr": "\xe2\xa4\x8c",
"lBarr": "\xe2\xa4\x8e",
"ldca": "\xe2\xa4\xb6",
"ldrdhar": "\xe2\xa5\xa7",
"ldrushar": "\xe2\xa5\x8b",
"ldsh": "\xe2\x86\xb2",
"lfisht": "\xe2\xa5\xbc",
"lHar": "\xe2\xa5\xa2",
"lhard": "\xe2\x86\xbd",
"lharu": "\xe2\x86\xbc",
"lharul": "\xe2\xa5\xaa",
"llarr": "\xe2\x87\x87",
"llhard": "\xe2\xa5\xab",
"loarr": "\xe2\x87\xbd",
"lrarr": "\xe2\x87\x86",
"lrhar": "\xe2\x87\x8b",
"lurdshar": "\xe2\xa5\x8a",
"luruhar": "\xe2\xa5\xa6",
"midcir": "\xe2\xab\xb0",
"nharr": "\xe2\x86\xae",
"nlarr": "\xe2\x86\x9a",
"nvHarr": "\xe2\x87\x8e",
"nvlArr": "\xe2\x87\x8d",
"nvrArr": "\xe2\x87\x8f",
"olarr": "\xe2\x86\xba",
"orarr": "\xe2\x86\xbb",
"rAarr": "\xe2\x87\x9b",
"rarrbfs": "\xe2\xa4\xa0",
"rarrfs": "\xe2\xa4\x9e",
"rarrtl": "\xe2\x86\xa3",
"rAtail": "\xe2\xa4\x9c",
"rdldhar": "\xe2\xa5\xa9",
"rdsh": "\xe2\x86\xb3",
"rlarr": "\xe2\x87\x84",
"rlhar": "\xe2\x87\x8c",
"ruluhar": "\xe2\xa5\xa8",
"Uarrocir": "\xe2\xa5\x89",
"uHar": "\xe2\xa5\xa3",
"uharl": "\xe2\x86\xbf",
"xharr": "\xe2\x9f\xb7",
"xhArr": "\xe2\x9f\xba",
"xlarr": "\xe2\x9f\xb5",
"xlArr": "\xe2\x9f\xb8",
"Barwed": "\xe2\x8c\x86",
"Cap": "\xe2\x8b\x92",
"Cup": "\xe2\x8b\x93",
"Dagger": "\xe2\x80\xa1",
"loplus": "\xe2\xa8\xad",
"lotimes": "\xe2\xa8\xb4",
"lthree": "\xe2\x8b\x8b",
"ltimes": "\xe2\x8b\x89",
"oast": "\xe2\x8a\x9b",
"ocir": "\xe2\x8a\x9a",
"odash": "\xe2\x8a\x9d",
"odiv": "\xe2\xa8\xb8",
"odot": "\xe2\x8a\x99",
"odsold": "\xe2\xa6\xbc",
"ofcir": "\xe2\xa6\xbf",
"ogt": "\xe2\xa7\x81",
"ohbar": "\xe2\xa6\xb5",
"olcir": "\xe2\xa6\xbe",
"olt": "\xe2\xa7\x80",
"omid": "\xe2\xa6\xb6",
"ominus": "\xe2\x8a\x96",
"opar": "\xe2\xa6\xb7",
"operp": "\xe2\xa6\xb9",
"oplus": "\xe2\x8a\x95",
"osol": "\xe2\x8a\x98",
"otimes": "\xe2\x8a\x97",
"Otimes": "\xe2\xa8\xb7",
"otimesas": "\xe2\xa8\xb6",
"ovbar": "\xe2\x8c\xbd",
"plusacir": "\xe2\xa8\xa3",
"pluscir": "\xe2\xa8\xa2",
"roplus": "\xe2\xa8\xae",
"rotimes": "\xe2\xa8\xb5",
"sdot": "\xe2\x8b\x85",
"sstarf": "\xe2\x8b\x86",
"tridot": "\xe2\x97\xac",
"triminus": "\xe2\xa8\xba",
"triplus": "\xe2\xa8\xb9",
"trisb": "\xe2\xa7\x8d",
"tritime": "\xe2\xa8\xbb",
"xcirc": "\xe2\x97\xaf",
"xdtri": "\xe2\x96\xbd",
"xodot": "\xe2\x8a\x99",
"xoplus": "\xe2\x8a\x95",
"xotime": "\xe2\x8a\x97",
"xutri": "\xe2\x96\xb3",
"dlcorn": "\xe2\x8c\x9e",
"gtlPar": "\xe2\xa6\x95",
"langd": "\xe2\xa6\x91",
"lbrke": "\xe2\xa6\x8b",
"lbrksld": "\xe2\xa6\x8f",
"lbrkslu": "\xe2\xa6\x8d",
"lceil": "\xe2\x8c\x88",
"lfloor": "\xe2\x8c\x8a",
"lparlt": "\xe2\xa6\x93",
"ltrPar": "\xe2\xa6\x96",
"rangd": "\xe2\xa6\x92",
"ulcorn": "\xe2\x8c\x9c",
"lnap": "\xe2\xaa\x89",
"lne": "\xe2\x89\xa8",
"lnE": "\xe2\x89\xa8",
"lnsim": "\xe2\x8b\xa6",
"nle": "\xe2\x89\xb0",
"nlsim": "\xe2\x89\xb4",
"nlt": "\xe2\x89\xae",
"nltri": "\xe2\x8b\xaa",
"nltrie": "\xe2\x8b\xac",
"npar": "\xe2\x88\xa6",
"nrtri": "\xe2\x8b\xab",
"nrtrie": "\xe2\x8b\xad",
"ntgl": "\xe2\x89\xb9",
"ntlg": "\xe2\x89\xb8",
"nvlt": "\xe2\x89\xae",
"parsim": "\xe2\xab\xb3",
"ang": "\xe2\x88\xa0",
"ange": "\xe2\xa6\xa4",
"angmsd": "\xe2\x88\xa1",
"angmsdaa": "\xe2\xa6\xa8",
"angmsdab": "\xe2\xa6\xa9",
"angmsdac": "\xe2\xa6\xaa",
"angmsdad": "\xe2\xa6\xab",
"angmsdae": "\xe2\xa6\xac",
"angmsdaf": "\xe2\xa6\xad",
"angmsdag": "\xe2\xa6\xae",
"angmsdah": "\xe2\xa6\xaf",
"angrtvbd": "\xe2\xa6\x9d",
"bemptyv": "\xe2\xa6\xb0",
"cemptyv": "\xe2\xa6\xb2",
"cirE": "\xe2\xa7\x83",
"cirscir": "\xe2\xa7\x82",
"comp": "\xe2\x88\x81",
"daleth": "\xe2\x84\xb8",
"demptyv": "\xe2\xa6\xb1",
"emptyv": "\xe2\x88\x85",
"laemptyv": "\xe2\xa6\xb4",
"lltri": "\xe2\x97\xba",
"lrtri": "\xe2\x8a\xbf",
"oS": "\xe2\x93\x88",
"raemptyv": "\xe2\xa6\xb3",
"range": "\xe2\xa6\xa5",
"ultri": "\xe2\x97\xb8",
"urtri": "\xe2\x97\xb9",
"cire": "\xe2\x89\x97",
"Colone": "\xe2\xa9\xb4",
"ecir": "\xe2\x89\x96",
"el": "\xe2\xaa\x99",
"els": "\xe2\x8b\x9c",
"elsdot": "\xe2\xaa\x97",
"equivDD": "\xe2\xa9\xb8",
"esdot": "\xe2\x89\x90",
"gE": "\xe2\x89\xa7",
"gel": "\xe2\x8b\x9b",
"gEl": "\xe2\x8b\x9b",
"gesdotol": "\xe2\xaa\x84",
"gesles": "\xe2\xaa\x94",
"Gg": "\xe2\x8b\x99",
"gl": "\xe2\x89\xb7",
"gla": "\xe2\xaa\xa5",
"glE": "\xe2\xaa\x92",
"glj": "\xe2\xaa\xa4",
"gsiml": "\xe2\xaa\x90",
"gtcir": "\xe2\xa9\xba",
"lap": "\xe2\x89\xb2",
"lE": "\xe2\x89\xa6",
"leg": "\xe2\x8b\x9a",
"lEg": "\xe2\x8b\x9a",
"les": "\xe2\xa9\xbd",
"lescc": "\xe2\xaa\xa8",
"lesdot": "\xe2\xa9\xbf",
"lesdoto": "\xe2\xaa\x81",
"lesdotor": "\xe2\xaa\x83",
"lesges": "\xe2\xaa\x93",
"lg": "\xe2\x89\xb6",
"lgE": "\xe2\xaa\x91",
"Ll": "\xe2\x8b\x98",
"lsim": "\xe2\x89\xb2",
"lsime": "\xe2\xaa\x8d",
"lsimg": "\xe2\xaa\x8f",
"Lt": "\xe2\x89\xaa",
"ltcc": "\xe2\xaa\xa6",
"ltcir": "\xe2\xa9\xb9",
"ltdot": "\xe2\x8b\x96",
"ltlarr": "\xe2\xa5\xb6",
"ltquest": "\xe2\xa9\xbb",
"ltrie": "\xe2\x8a\xb4",
"prurel": "\xe2\x8a\xb0",
"rtrie": "\xe2\x8a\xb5",
"rtriltri": "\xe2\xa7\x8e",
"siml": "\xe2\xaa\x9d",
"simlE": "\xe2\xaa\x9f",
"smile": "\xe2\x8c\xa3",
"smt": "\xe2\xaa\xaa",
"smte": "\xe2\xaa\xac",
"Sub": "\xe2\x8b\x90",
"suplarr": "\xe2\xa5\xbb",
"trie": "\xe2\x89\x9c",
"vltri": "\xe2\x8a\xb2",
"vrtri": "\xe2\x8a\xb3",
"Vvdash": "\xe2\x8a\xaa",
"boxdl": "\xe2\x94\x90",
"boxdL": "\xe2\x95\x95",
"boxDl": "\xe2\x95\x96",
"boxDL": "\xe2\x95\x97",
"boxhd": "\xe2\x94\xac",
"boxhD": "\xe2\x95\xa5",
"boxHd": "\xe2\x95\xa4",
"boxHD": "\xe2\x95\xa6",
"boxhu": "\xe2\x94\xb4",
"boxhU": "\xe2\x95\xa8",
"boxHu": "\xe2\x95\xa7",
"boxHU": "\xe2\x95\xa9",
"boxul": "\xe2\x94\x98",
"boxuL": "\xe2\x95\x9b",
"boxUl": "\xe2\x95\x9c",
"boxUL": "\xe2\x95\x9d",
"boxvl": "\xe2\x94\xa4",
"boxvL": "\xe2\x95\xa1",
"boxVl": "\xe2\x95\xa2",
"boxVL": "\xe2\x95\xa3",
"circ": "\x5e",
"dblac": "\xcb\x9d",
"acirc": "\xc3\xa2",
"Acirc": "\xc3\x82",
"ecirc": "\xc3\xaa",
"Ecirc": "\xc3\x8a",
"icirc": "\xc3\xae",
"Icirc": "\xc3\x8e",
"ocirc": "\xc3\xb4",
"Ocirc": "\xc3\x94",
"oslash": "\xc3\xb8",
"ucirc": "\xc3\xbb",
"Ucirc": "\xc3\x9b",
"ccirc": "\xc4\x89",
"Ccirc": "\xc4\x88",
"gcirc": "\xc4\x9d",
"Gcirc": "\xc4\x9c",
"hcirc": "\xc4\xa5",
"Hcirc": "\xc4\xa4",
"jcirc": "\xc4\xb5",
"Jcirc": "\xc4\xb4",
"lmidot": "\xc5\x80",
"Lmidot": "\xc4\xbf",
"odblac": "\xc5\x91",
"Odblac": "\xc5\x90",
"scirc": "\xc5\x9d",
"Scirc": "\xc5\x9c",
"udblac": "\xc5\xb1",
"Udblac": "\xc5\xb0",
"wcirc": "\xc5\xb5",
"Wcirc": "\xc5\xb4",
"ycirc": "\xc5\xb7",
"Ycirc": "\xc5\xb6",
"ascr": "\xf0\x9d\x92\xb6",
"Ascr": "\xf0\x9d\x92\x9c",
"bscr": "\xf0\x9d\x92\xb7",
"Bscr": "\xe2\x84\xac",
"cscr": "\xf0\x9d\x92\xb8",
"Cscr": "\xf0\x9d\x92\x9e",
"dscr": "\xf0\x9d\x92\xb9",
"Dscr": "\xf0\x9d\x92\x9f",
"escr": "\xe2\x84\xaf",
"Escr": "\xe2\x84\xb0",
"fscr": "\xf0\x9d\x92\xbb",
"Fscr": "\xe2\x84\xb1",
"gscr": "\xe2\x84\x8a",
"Gscr": "\xf0\x9d\x92\xa2",
"hscr": "\xf0\x9d\x92\xbd",
"Hscr": "\xe2\x84\x8b",
"iscr": "\xf0\x9d\x92\xbe",
"Iscr": "\xe2\x84\x90",
"jscr": "\xf0\x9d\x92\xbf",
"Jscr": "\xf0\x9d\x92\xa5",
"kscr": "\xf0\x9d\x93\x80",
"Kscr": "\xf0\x9d\x92\xa6",
"lscr": "\xe2\x84\x93",
"Lscr": "\xe2\x84\x92",
"mscr": "\xf0\x9d\x93\x82",
"Mscr": "\xe2\x84\xb3",
"nscr": "\xf0\x9d\x93\x83",
"Nscr": "\xf0\x9d\x92\xa9",
"oscr": "\xe2\x84\xb4",
"Oscr": "\xf0\x9d\x92\xaa",
"pscr": "\xf0\x9d\x93\x85",
"Pscr": "\xf0\x9d\x92\xab",
"qscr": "\xf0\x9d\x93\x86",
"Qscr": "\xf0\x9d\x92\xac",
"rscr": "\xf0\x9d\x93\x87",
"Rscr": "\xe2\x84\x9b",
"sscr": "\xf0\x9d\x93\x88",
"Sscr": "\xf0\x9d\x92\xae",
"tscr": "\xf0\x9d\x93\x89",
"Tscr": "\xf0\x9d\x92\xaf",
"uscr": "\xf0\x9d\x93\x8a",
"Uscr": "\xf0\x9d\x92\xb0",
"vscr": "\xf0\x9d\x93\x8b",
"Vscr": "\xf0\x9d\x92\xb1",
"wscr": "\xf0\x9d\x93\x8c",
"Wscr": "\xf0\x9d\x92\xb2",
"xscr": "\xf0\x9d\x93\x8d",
"Xscr": "\xf0\x9d\x92\xb3",
"yscr": "\xf0\x9d\x93\x8e",
"Yscr": "\xf0\x9d\x92\xb4",
"zscr": "\xf0\x9d\x93\x8f",
"Zscr": "\xf0\x9d\x92\xb5",
"laquo": "\xc2\xab",
"larr": "\xe2\x86\x90",
"lcub": "\x7b",
"ldquo": "\xe2\x80\x9c",
"lpar": "\x28",
"lsqb": "\x5b",
"lsquo": "\xe2\x80\x98",
"middot": "\xc2\xb7",
"raquo": "\xc2\xbb",
"rdquo": "\xe2\x80\x9d",
"reg": "\xc2\xae",
"rsquo": "\xe2\x80\x99",
"bull": "\xe2\x80\xa2",
"cir": "\xe2\x97\x8b",
"Dagger": "\xe2\x80\xa1",
"dlcrop": "\xe2\x8c\x8d",
"dtri": "\xe2\x96\xbf",
"dtrif": "\xe2\x96\xbe",
"female": "\xe2\x99\x80",
"hybull": "\xe2\x81\x83",
"ldquor": "\xe2\x80\x9e",
"lozf": "\xe2\xa7\xab",
"lsquor": "\xe2\x80\x9a",
"ltri": "\xe2\x97\x83",
"ltrif": "\xe2\x97\x82",
"male": "\xe2\x99\x82",
"mldr": "\xe2\x80\xa6",
"nldr": "\xe2\x80\xa5",
"phone": "\xe2\x98\x8e",
"rect": "\xe2\x96\xad",
"rsquor": "\xe2\x80\x99",
"rtri": "\xe2\x96\xb9",
"rtrif": "\xe2\x96\xb8",
"sext": "\xe2\x9c\xb6",
"squf": "\xe2\x96\xaa",
"starf": "\xe2\x98\x85",
"telrec": "\xe2\x8c\x95",
"ulcrop": "\xe2\x8c\x8f",
"utri": "\xe2\x96\xb5",
"utrif": "\xe2\x96\xb4",
"aleph": "\xe2\x84\xb5",
"andv": "\xe2\xa9\x9a",
"angrt": "\xe2\x88\x9f",
"angsph": "\xe2\x88\xa2",
"apacir": "\xe2\xa9\xaf",
"Cconint": "\xe2\x88\xb0",
"compfn": "\xe2\x88\x98",
"Conint": "\xe2\x88\xaf",
"dwangle": "\xe2\xa6\xa6",
"epar": "\xe2\x8b\x95",
"eparsl": "\xe2\xa7\xa3",
"eqvparsl": "\xe2\xa7\xa5",
"exist": "\xe2\x88\x83",
"iinfin": "\xe2\xa7\x9c",
"Int": "\xe2\x88\xac",
"intlarhk": "\xe2\xa8\x97",
"lang": "\xe2\x8c\xa9",
"Lang": "\xe3\x80\x8a",
"lbbrk": "\xe3\x80\x94",
"le": "\xe2\x89\xa4",
"loang": "\xef\x95\x98",
"lobrk": "\xe3\x80\x9a",
"lopar": "\xe3\x80\x98",
"nhpar": "\xe2\xab\xb2",
"npolint": "\xe2\xa8\x94",
"olcross": "\xe2\xa6\xbb",
"orv": "\xe2\xa9\x9b",
"par": "\xe2\x88\xa5",
"Prime": "\xe2\x80\xb3",
"profalar": "\xe2\x8c\xae",
"profline": "\xe2\x8c\x92",
"profsurf": "\xe2\x8c\x93",
"qint": "\xe2\xa8\x8c",
"qprime": "\xe2\x81\x97",
"rang": "\xe2\x8c\xaa",
"Rang": "\xe3\x80\x8b",
"rppolint": "\xe2\xa8\x92",
"scpolint": "\xe2\xa8\x93",
"smeparsl": "\xe2\xa7\xa4",
"squarf": "\xe2\x96\xaa",
"tint": "\xe2\x88\xad",
"topcir": "\xe2\xab\xb1",
"tprime": "\xe2\x80\xb4",
"uwangle": "\xe2\xa6\xa7",
"vangrt": "\xe2\x8a\xbe",
"angle": "\xe2\x88\xa0",
"bigtriangledown": "\xe2\x96\xbd",
"bigtriangleup": "\xe2\x96\xb3",
"blacktriangle": "\xe2\x96\xb4",
"blacktriangledown": "\xe2\x96\xbe",
"blacktriangleleft": "\xe2\x97\x82",
"blacktriangleright": "\xe2\x96\xb8",
"bullet": "\xe2\x80\xa2",
"Cayleys": "\xe2\x84\xad",
"circlearrowleft": "\xe2\x86\xba",
"circlearrowright": "\xe2\x86\xbb",
"circledast": "\xe2\x8a\x9b",
"circledcirc": "\xe2\x8a\x9a",
"circleddash": "\xe2\x8a\x9d",
"CircleDot": "\xe2\x8a\x99",
"circledR": "\xc2\xae",
"circledS": "\xe2\x93\x88",
"CircleMinus": "\xe2\x8a\x96",
"CirclePlus": "\xe2\x8a\x95",
"CircleTimes": "\xe2\x8a\x97",
"CloseCurlyDoubleQuote": "\xe2\x80\x9d",
"complement": "\xe2\x88\x81",
"complexes": "\xe2\x84\x82",
"curvearrowleft": "\xe2\x86\xb6",
"DiacriticalDoubleAcute": "\xcb\x9d",
"doublebarwedge": "\xe2\x8c\x86",
"DoubleContourIntegral": "\xe2\x88\xaf",
"DoubleDot": "\xc2\xa8",
"DoubleDownArrow": "\xe2\x87\x93",
"DoubleLeftArrow": "\xe2\x87\x90",
"DoubleLeftRightArrow": "\xe2\x87\x94",
"DoubleLeftTee": "\xe2\xab\xa4",
"DoubleLongLeftArrow": "\xe2\x9f\xb8",
"DoubleLongLeftRightArrow": "\xe2\x9f\xba",
"DoubleLongRightArrow": "\xe2\x9f\xb9",
"DoubleRightArrow": "\xe2\x87\x92",
"DoubleRightTee": "\xe2\x8a\xa8",
"DoubleUpArrow": "\xe2\x87\x91",
"DoubleUpDownArrow": "\xe2\x87\x95",
"DoubleVerticalBar": "\xe2\x88\xa5",
"downharpoonleft": "\xe2\x87\x83",
"Element": "\xe2\x88\x88",
"eqslantless": "\xe2\x8b\x9c",
"exponentiale": "\xe2\x85\x87",
"gtreqless": "\xe2\x8b\x9b",
"gtreqqless": "\xe2\x8b\x9b",
"gtrless": "\xe2\x89\xb7",
"Hat": "\xcc\x82",
"hookleftarrow": "\xe2\x86\xa9",
"InvisibleComma": "\xe2\x81\xa3",
"InvisibleTimes": "\xe2\x81\xa2",
"langle": "\xe2\x8c\xa9",
"LeftAngleBracket": "\xe2\x8c\xa9",
"leftarrow": "\xe2\x86\x90",
"leftarrowtail": "\xe2\x86\xa2",
"LeftDoubleBracket": "\xe3\x80\x9a",
"leftharpoondown": "\xe2\x86\xbd",
"leftharpoonup": "\xe2\x86\xbc",
"leftleftarrows": "\xe2\x87\x87",
"leftrightarrow": "\xe2\x86\x94",
"leftrightarrows": "\xe2\x87\x86",
"leftrightharpoons": "\xe2\x87\x8b",
"leftrightsquigarrow": "\xe2\x86\xad",
"LeftTeeArrow": "\xe2\x86\xa4",
"leftthreetimes": "\xe2\x8b\x8b",
"LeftTriangle": "\xe2\x8a\xb2",
"LeftTriangleEqual": "\xe2\x8a\xb4",
"leq": "\xe2\x89\xa4",
"leqq": "\xe2\x89\xa6",
"leqslant": "\xe2\xa9\xbd",
"lessapprox": "\xe2\x89\xb2",
"lessdot": "\xe2\x8b\x96",
"lesseqgtr": "\xe2\x8b\x9a",
"lesseqqgtr": "\xe2\x8b\x9a",
"LessEqualGreater": "\xe2\x8b\x9a",
"lessgtr": "\xe2\x89\xb6",
"lesssim": "\xe2\x89\xb2",
"LessSlantEqual": "\xe2\xa9\xbd",
"Lleftarrow": "\xe2\x87\x9a",
"longleftarrow": "\xe2\x9f\xb5",
"Longleftarrow": "\xe2\x9f\xb8",
"longleftrightarrow": "\xe2\x9f\xb7",
"Longleftrightarrow": "\xe2\x9f\xba",
"looparrowleft": "\xe2\x86\xab",
"measuredangle": "\xe2\x88\xa1",
"nleftarrow": "\xe2\x86\x9a",
"nleftrightarrow": "\xe2\x86\xae",
"nleq": "\xe2\x89\xb0",
"nless": "\xe2\x89\xae",
"NotDoubleVerticalBar": "\xe2\x88\xa6",
"NotElement": "\xe2\x88\x89",
"NotLeftTriangle": "\xe2\x8b\xaa",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"NotLessEqual": "\xe2\x89\xb0",
"NotReverseElement": "\xe2\x88\x8c",
"NotRightTriangle": "\xe2\x8b\xab",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"nparallel": "\xe2\x88\xa6",
"ntriangleleft": "\xe2\x8b\xaa",
"ntrianglelefteq": "\xe2\x8b\xac",
"ntriangleright": "\xe2\x8b\xab",
"ntrianglerighteq": "\xe2\x8b\xad",
"OpenCurlyDoubleQuote": "\xe2\x80\x9c",
"parallel": "\xe2\x88\xa5",
"rangle": "\xe2\x8c\xaa",
"ReverseElement": "\xe2\x88\x8b",
"RightAngleBracket": "\xe2\x8c\xaa",
"RightDoubleBracket": "\xe3\x80\x9b",
"rightleftarrows": "\xe2\x87\x84",
"rightleftharpoons": "\xe2\x87\x8c",
"RightTriangle": "\xe2\x8a\xb3",
"RightTriangleEqual": "\xe2\x8a\xb5",
"SmallCircle": "\xe2\x88\x98",
"triangle": "\xe2\x96\xb5",
"triangledown": "\xe2\x96\xbf",
"triangleleft": "\xe2\x97\x83",
"trianglelefteq": "\xe2\x8a\xb4",
"triangleq": "\xe2\x89\x9c",
"triangleright": "\xe2\x96\xb9",
"trianglerighteq": "\xe2\x8a\xb5",
"TripleDot": "\xe2\x83\x9b",
"twoheadleftarrow": "\xe2\x86\x9e",
"upharpoonleft": "\xe2\x86\xbf",
"vartriangleleft": "\xe2\x8a\xb2",
"vartriangleright": "\xe2\x8a\xb3",
"DownLeftRightVector": "\xe2\xa5\x90",
"DownLeftTeeVector": "\xe2\xa5\x9e",
"DownLeftVectorBar": "\xe2\xa5\x96",
"FilledSmallSquare": "\xe2\x97\xbc",
"FilledVerySmallSquare": "\xe2\x96\xaa",
"ic": "\xe2\x81\xa3",
"larrb": "\xe2\x87\xa4",
"LeftDownTeeVector": "\xe2\xa5\xa1",
"LeftDownVectorBar": "\xe2\xa5\x99",
"LeftRightVector": "\xe2\xa5\x8e",
"LeftTeeVector": "\xe2\xa5\x9a",
"LeftTriangleBar": "\xe2\xa7\x8f",
"LeftUpDownVector": "\xe2\xa5\x91",
"LeftUpTeeVector": "\xe2\xa5\xa0",
"LeftUpVectorBar": "\xe2\xa5\x98",
"LeftVectorBar": "\xe2\xa5\x92",
"mapstoleft": "\xe2\x86\xa4",
"rarrb": "\xe2\x87\xa5",
"RightTriangleBar": "\xe2\xa7\x90",
"RuleDelayed": "\xe2\xa7\xb4",
"LessEqualGreater": "\xe2\x8b\x9a",
"LessFullEqual": "\xe2\x89\xa6",
"LessGreater": "\xe2\x89\xb6",
"NotLessGreater": "\xe2\x89\xb8",
"NestedLessLess": "\xe2\x89\xaa",
"LessLess": "\xe2\xaa\xa1",
"LessSlantEqual": "\xe2\xa9\xbd",
"LessTilde": "\xe2\x89\xb2",
"NotLessTilde": "\xe2\x89\xb4",
"NestedGreaterGreater": "\xe2\x89\xab",
"NestedLessLess": "\xe2\x89\xaa",
"NotCongruent": "\xe2\x89\xa2",
"NotCupCap": "\xe2\x89\xad",
"NotDoubleVerticalBar": "\xe2\x88\xa6",
"NotEqual": "\xe2\x89\xa0",
"NotGreater": "\xe2\x89\xaf",
"NotGreaterEqual": "\xe2\x89\xb1",
"NotGreaterLess": "\xe2\x89\xb9",
"NotGreaterTilde": "\xe2\x89\xb5",
"NotGreaterEqual": "\xe2\x89\xb1",
"NotGreaterLess": "\xe2\x89\xb9",
"NotGreaterTilde": "\xe2\x89\xb5",
"NotLeftTriangle": "\xe2\x8b\xaa",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"NotLeftTriangleEqual": "\xe2\x8b\xac",
"NotLess": "\xe2\x89\xae",
"NotLessEqual": "\xe2\x89\xb0",
"NotLessGreater": "\xe2\x89\xb8",
"NotLessTilde": "\xe2\x89\xb4",
"NotLessEqual": "\xe2\x89\xb0",
"NotLessGreater": "\xe2\x89\xb8",
"NotLessTilde": "\xe2\x89\xb4",
"NotPrecedes": "\xe2\x8a\x80",
"NotPrecedesEqual": "\xe2\xaa\xaf",
"NotPrecedesSlantEqual": "\xe2\x8b\xa0",
"NotPrecedesEqual": "\xe2\xaa\xaf",
"NotPrecedesSlantEqual": "\xe2\x8b\xa0",
"NotRightTriangle": "\xe2\x8b\xab",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"NotSucceeds": "\xe2\x8a\x81",
"NotSucceedsSlantEqual": "\xe2\x8b\xa1",
"NotSucceedsSlantEqual": "\xe2\x8b\xa1",
"NotTilde": "\xe2\x89\x81",
"NotTildeEqual": "\xe2\x89\x84",
"NotTildeFullEqual": "\xe2\x89\x87",
"NotTildeTilde": "\xe2\x89\x89",
"NotTildeEqual": "\xe2\x89\x84",
"NotTildeFullEqual": "\xe2\x89\x87",
"NotTildeTilde": "\xe2\x89\x89",
"NotVerticalBar": "\xe2\x88\xa4",
"NotPrecedes": "\xe2\x8a\x80",
"NotPrecedesEqual": "\xe2\xaa\xaf",
"NotPrecedesSlantEqual": "\xe2\x8b\xa0",
"Precedes": "\xe2\x89\xba",
"PrecedesEqual": "\xe2\xaa\xaf",
"PrecedesSlantEqual": "\xe2\x89\xbc",
"PrecedesTilde": "\xe2\x89\xbe",
"NotPrecedesEqual": "\xe2\xaa\xaf",
"PrecedesEqual": "\xe2\xaa\xaf",
"NotPrecedesSlantEqual": "\xe2\x8b\xa0",
"PrecedesSlantEqual": "\xe2\x89\xbc",
"PrecedesTilde": "\xe2\x89\xbe",
"Proportion": "\xe2\x88\xb7",
"Proportional": "\xe2\x88\x9d",
"Proportional": "\xe2\x88\x9d",
"ReverseEquilibrium": "\xe2\x87\x8b",
"NotRightTriangle": "\xe2\x8b\xab",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"RightTriangle": "\xe2\x8a\xb3",
"RightTriangleEqual": "\xe2\x8a\xb5",
"RightTriangleBar": "\xe2\xa7\x90",
"RightTriangleBar": "\xe2\xa7\x90",
"NotRightTriangleEqual": "\xe2\x8b\xad",
"RightTriangleEqual": "\xe2\x8a\xb5",
"NotSucceeds": "\xe2\x8a\x81",
"NotSucceedsSlantEqual": "\xe2\x8b\xa1",
"Succeeds": "\xe2\x89\xbb",
"SucceedsEqual": "\xe2\x89\xbd",
"SucceedsSlantEqual": "\xe2\x89\xbd",
"SucceedsTilde": "\xe2\x89\xbf",
"SucceedsEqual": "\xe2\x89\xbd",
"NotSucceedsSlantEqual": "\xe2\x8b\xa1",
"SucceedsSlantEqual": "\xe2\x89\xbd",
"SucceedsTilde": "\xe2\x89\xbf",
"DiacriticalTilde": "\xcb\x9c",
"EqualTilde": "\xe2\x89\x82",
"GreaterTilde": "\xe2\x89\xb3",
"LessTilde": "\xe2\x89\xb2",
"NotGreaterTilde": "\xe2\x89\xb5",
"NotLessTilde": "\xe2\x89\xb4",
"NotTilde": "\xe2\x89\x81",
"NotTildeEqual": "\xe2\x89\x84",
"NotTildeFullEqual": "\xe2\x89\x87",
"NotTildeTilde": "\xe2\x89\x89",
"PrecedesTilde": "\xe2\x89\xbe",
"SucceedsTilde": "\xe2\x89\xbf",
"Tilde": "\xe2\x88\xbc",
"TildeEqual": "\xe2\x89\x83",
"TildeFullEqual": "\xe2\x89\x85",
"TildeTilde": "\xe2\x89\x88",
"VerticalTilde": "\xe2\x89\x80",
"NotTildeEqual": "\xe2\x89\x84",
"TildeEqual": "\xe2\x89\x83",
"NotTildeFullEqual": "\xe2\x89\x87",
"TildeFullEqual": "\xe2\x89\x85",
"NotTildeTilde": "\xe2\x89\x89",
"TildeTilde": "\xe2\x89\x88",
"UpTee": "\xe2\x8a\xa5",
"UpTeeArrow": "\xe2\x86\xa5",
"LeftUpTeeVector": "\xe2\xa5\xa0",
"RightUpTeeVector": "\xe2\xa5\x9c",
"DoubleVerticalBar": "\xe2\x88\xa5",
"NotDoubleVerticalBar": "\xe2\x88\xa6",
"NotVerticalBar": "\xe2\x88\xa4",
"VerticalBar": "\xe2\x88\xa3",
"SquareUnion": "\xe2\x8a\x94",
"SquareUnion": "\xe2\x8a\x94",
"Union": "\xe2\x8b\x83",
"UnionPlus": "\xe2\x8a\x8e",
"UnionPlus": "\xe2\x8a\x8e",
"Intersection": "\xe2\x8b\x82",
"SquareIntersection": "\xe2\x8a\x93",
"MinusPlus": "\xe2\x88\x93",
"PlusMinus": "\xc2\xb1",
"SquareIntersection": "\xe2\x8a\x93",
"Vee": "\xe2\x8b\x81",
"CircleMinus": "\xe2\x8a\x96",
"CirclePlus": "\xe2\x8a\x95",
"Sum": "\xe2\x88\x91",
"SquareUnion": "\xe2\x8a\x94",
"Union": "\xe2\x8b\x83",
"UnionPlus": "\xe2\x8a\x8e",
"UnionPlus": "\xe2\x8a\x8e",
"CircleMinus": "\xe2\x8a\x96",
"CirclePlus": "\xe2\x8a\x95",
"ClockwiseContourIntegral": "\xe2\x88\xb2",
"CounterClockwiseContourIntegral": "\xe2\x88\xb3",
"ClockwiseContourIntegral": "\xe2\x88\xb2",
"ContourIntegral": "\xe2\x88\xae",
"CounterClockwiseContourIntegral": "\xe2\x88\xb3",
"DoubleContourIntegral": "\xe2\x88\xaf",
"CounterClockwiseContourIntegral": "\xe2\x88\xb3",
"DoubleContourIntegral": "\xe2\x88\xaf",
"ClockwiseContourIntegral": "\xe2\x88\xb2",
"ContourIntegral": "\xe2\x88\xae",
"CounterClockwiseContourIntegral": "\xe2\x88\xb3",
"DoubleContourIntegral": "\xe2\x88\xaf",
"Integral": "\xe2\x88\xab",
"Cup": "\xe2\x8b\x93",
"CupCap": "\xe2\x89\x8d",
"NotCupCap": "\xe2\x89\xad",
"Cap": "\xe2\x8b\x92",
"CapitalDifferentialD": "\xe2\x85\x85",
"CupCap": "\xe2\x89\x8d",
"NotCupCap": "\xe2\x89\xad",
"VerticalTilde": "\xe2\x89\x80",
"Wedge": "\xe2\x8b\x80",
"CircleTimes": "\xe2\x8a\x97",
"Coproduct": "\xe2\x88\x90",
"Product": "\xe2\x88\x8f",
"Intersection": "\xe2\x8b\x82",
"SquareIntersection": "\xe2\x8a\x93",
"Coproduct": "\xe2\x88\x90",
"Star": "\xe2\x8b\x86",
"CircleDot": "\xe2\x8a\x99",
"InvisibleTimes": "\xe2\x81\xa2",
"CenterDot": "\xc2\xb7",
"CircleTimes": "\xe2\x8a\x97",
"Vee": "\xe2\x8b\x81",
"Wedge": "\xe2\x8b\x80",
"Diamond": "\xe2\x8b\x84",
"Backslash": "\xe2\x88\x96",
"MinusPlus": "\xe2\x88\x93",
"PlusMinus": "\xc2\xb1",
"Cross": "\xe2\xa8\xaf",
"CircleDot": "\xe2\x8a\x99",
"SmallCircle": "\xe2\x88\x98",
"NotSquareSubsetEqual": "\xe2\x8b\xa2",
"NotSquareSupersetEqual": "\xe2\x8b\xa3",
"Square": "\xe2\x96\xa1",
"SquareIntersection": "\xe2\x8a\x93",
"SquareSubset": "\xe2\x8a\x8f",
"SquareSubsetEqual": "\xe2\x8a\x91",
"SquareSuperset": "\xe2\x8a\x90",
"SquareSupersetEqual": "\xe2\x8a\x92",
"SquareUnion": "\xe2\x8a\x94",
"EmptySmallSquare": "\xe2\x97\xbb",
"EmptyVerySmallSquare": "\xe2\x96\xab",
"FilledSmallSquare": "\xe2\x97\xbc",
"FilledVerySmallSquare": "\xe2\x96\xaa",
"Delta": "\xce\x94",
"Del": "\xe2\x88\x87",
"RuleDelayed": "\xe2\xa7\xb4",
"PartialD": "\xe2\x88\x82",
"CapitalDifferentialD": "\xe2\x85\x85",
"CapitalDifferentialD": "\xe2\x85\x85",
"DifferentialD": "\xe2\x85\x86",
"Sqrt": "\xe2\x88\x9a",
"DoubleDownArrow": "\xe2\x87\x93",
"DoubleLongLeftArrow": "\xe2\x9f\xb8",
"DoubleLongLeftRightArrow": "\xe2\x9f\xba",
"DoubleLongRightArrow": "\xe2\x9f\xb9",
"DoubleUpArrow": "\xe2\x87\x91",
"DoubleUpDownArrow": "\xe2\x87\x95",
"DoubleDownArrow": "\xe2\x87\x93",
"DoubleUpDownArrow": "\xe2\x87\x95",
"DownArrow": "\xe2\x86\x93",
"DownArrowUpArrow": "\xe2\x87\xb5",
"UpArrowDownArrow": "\xe2\x87\x85",
"UpDownArrow": "\xe2\x86\x95",
"DownArrowBar": "\xe2\xa4\x93",
"DownArrowBar": "\xe2\xa4\x93",
"DownArrowUpArrow": "\xe2\x87\xb5",
"DownTeeArrow": "\xe2\x86\xa7",
"LeftDownTeeVector": "\xe2\xa5\xa1",
"LeftDownVector": "\xe2\x87\x83",
"LeftDownVectorBar": "\xe2\xa5\x99",
"LeftDownVectorBar": "\xe2\xa5\x99",
"LeftUpDownVector": "\xe2\xa5\x91",
"LeftUpTeeVector": "\xe2\xa5\xa0",
"LeftUpVector": "\xe2\x86\xbf",
"LeftUpVectorBar": "\xe2\xa5\x98",
"LeftUpVectorBar": "\xe2\xa5\x98",
"DoubleLongLeftArrow": "\xe2\x9f\xb8",
"LongLeftArrow": "\xe2\x9f\xb5",
"DoubleLongLeftRightArrow": "\xe2\x9f\xba",
"LongLeftRightArrow": "\xe2\x9f\xb7",
"DoubleLongRightArrow": "\xe2\x9f\xb9",
"LongRightArrow": "\xe2\x9f\xb6",
"ReverseUpEquilibrium": "\xe2\xa5\xaf",
"RightDownTeeVector": "\xe2\xa5\x9d",
"RightDownVector": "\xe2\x87\x82",
"RightDownVectorBar": "\xe2\xa5\x95",
"RightDownVectorBar": "\xe2\xa5\x95",
"RightUpDownVector": "\xe2\xa5\x8f",
"RightUpTeeVector": "\xe2\xa5\x9c",
"RightUpVector": "\xe2\x86\xbe",
"RightUpVectorBar": "\xe2\xa5\x94",
"RightUpVectorBar": "\xe2\xa5\x94",
"DoubleUpArrow": "\xe2\x87\x91",
"DownArrowUpArrow": "\xe2\x87\xb5",
"UpArrow": "\xe2\x86\x91",
"UpArrowDownArrow": "\xe2\x87\x85",
"UpArrowBar": "\xe2\xa4\x92",
"UpArrowBar": "\xe2\xa4\x92",
"UpArrowDownArrow": "\xe2\x87\x85",
"DoubleUpDownArrow": "\xe2\x87\x95",
"UpDownArrow": "\xe2\x86\x95",
"ReverseUpEquilibrium": "\xe2\xa5\xaf",
"UpEquilibrium": "\xe2\xa5\xae",
"UpTeeArrow": "\xe2\x86\xa5",
"ApplyFunction": "\xe2\x81\xa1",
"Breve": "\xcb\x98",
"DownBreve": "\xcc\x91",
"Cedilla": "\xc2\xb8",
"DiacriticalGrave": "\x60",
"DiacriticalDot": "\xcb\x99",
"DiacriticalDoubleAcute": "\xcb\x9d",
"DiacriticalAcute": "\xc2\xb4",
"DiacriticalTilde": "\xcb\x9c",
"DoubleDot": "\xc2\xa8",
"DownBreve": "\xcc\x91",
"Hacek": "\xcb\x87",
"Hat": "\xcc\x82",
"OverBar": "\xc2\xaf",
"OverBrace": "\xef\xb8\xb7",
"OverBracket": "\xe2\x8e\xb4",
"OverParenthesis": "\xef\xb8\xb5",
"TripleDot": "\xe2\x83\x9b",
"UnderBar": "\xcc\xb2",
"UnderBrace": "\xef\xb8\xb8",
"UnderBracket": "\xe2\x8e\xb5",
"UnderParenthesis": "\xef\xb8\xb6",
}
# Decode the raw UTF-8 byte strings above into Unicode characters; skip any
# entry whose bytes are not valid UTF-8 instead of hiding every error behind
# a bare except.
map = {}
for key, val in map_utf8.items():
    try:
        map[key] = val.decode("utf-8")
    except UnicodeDecodeError:
        continue
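
# Illustrative only (not part of the original module): a minimal sketch of how
# the decoded table above might be used to expand HTML-style entity references
# in a piece of text. The helper name `expand_entities` is hypothetical and
# exists purely for this example.
import re

def expand_entities(text, table):
    """Replace &Name; references with their Unicode characters, if known."""
    return re.sub(
        r"&([A-Za-z][A-Za-z0-9]*);",
        lambda m: table.get(m.group(1), m.group(0)),
        text,
    )

# Example (commented out): expand_entities("x &leq; y", map) returns
# "x \u2264 y" once the table has been decoded, because "leq" maps to U+2264.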
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron_lib import constants as n_const
from neutron_lib import exceptions as n_exc
import testtools
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import l3_db
from neutron.extensions import l3
from neutron import manager
from neutron.tests import base
class TestL3_NAT_dbonly_mixin(base.BaseTestCase):
def setUp(self):
super(TestL3_NAT_dbonly_mixin, self).setUp()
self.db = l3_db.L3_NAT_dbonly_mixin()
def test__each_port_having_fixed_ips_none(self):
"""Be sure the method returns an empty list when None is passed"""
filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(None)
self.assertEqual([], list(filtered))
def test__each_port_having_fixed_ips(self):
"""Basic test that ports without fixed ips are filtered out"""
ports = [{'id': 'a', 'fixed_ips': [mock.sentinel.fixedip]},
{'id': 'b'}]
filtered = l3_db.L3_NAT_dbonly_mixin._each_port_having_fixed_ips(ports)
ids = [p['id'] for p in filtered]
self.assertEqual(['a'], ids)
def test__get_subnets_by_network_no_query(self):
"""Basic test that no query is performed if no Ports are passed"""
context = mock.Mock()
with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p:
self.db._get_subnets_by_network_list(context, [])
self.assertFalse(context.session.query.called)
self.assertFalse(get_p.called)
def test__get_subnets_by_network(self):
"""Basic test that the right query is called"""
context = mock.MagicMock()
query = context.session.query().outerjoin().filter()
query.__iter__.return_value = [(mock.sentinel.subnet_db,
mock.sentinel.address_scope_id)]
with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p:
get_p()._make_subnet_dict.return_value = {
'network_id': mock.sentinel.network_id}
subnets = self.db._get_subnets_by_network_list(
context, [mock.sentinel.network_id])
self.assertEqual({
mock.sentinel.network_id: [{
'address_scope_id': mock.sentinel.address_scope_id,
'network_id': mock.sentinel.network_id}]}, subnets)
def test__populate_ports_for_subnets_none(self):
"""Basic test that the method runs correctly with no ports"""
ports = []
with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p:
get_p().get_networks.return_value = []
self.db._populate_mtu_and_subnets_for_ports(mock.sentinel.context,
ports)
self.assertEqual([], ports)
@mock.patch.object(l3_db.L3_NAT_dbonly_mixin,
'_get_subnets_by_network_list')
def test__populate_ports_for_subnets(self, get_subnets_by_network):
cidr = "2001:db8::/64"
subnet = {'id': mock.sentinel.subnet_id,
'cidr': cidr,
'gateway_ip': mock.sentinel.gateway_ip,
'dns_nameservers': mock.sentinel.dns_nameservers,
'ipv6_ra_mode': mock.sentinel.ipv6_ra_mode,
'subnetpool_id': mock.sentinel.subnetpool_id,
'address_scope_id': mock.sentinel.address_scope_id}
get_subnets_by_network.return_value = {'net_id': [subnet]}
ports = [{'network_id': 'net_id',
'id': 'port_id',
'fixed_ips': [{'subnet_id': mock.sentinel.subnet_id}]}]
with mock.patch.object(manager.NeutronManager, 'get_plugin') as get_p:
get_p().get_networks.return_value = [{'id': 'net_id', 'mtu': 1446}]
self.db._populate_mtu_and_subnets_for_ports(mock.sentinel.context,
ports)
keys = ('id', 'cidr', 'gateway_ip', 'ipv6_ra_mode',
'subnetpool_id', 'dns_nameservers')
address_scopes = {4: None, 6: mock.sentinel.address_scope_id}
self.assertEqual([{'extra_subnets': [],
'fixed_ips': [{'subnet_id':
mock.sentinel.subnet_id,
'prefixlen': 64}],
'id': 'port_id',
'mtu': 1446,
'network_id': 'net_id',
'subnets': [{k: subnet[k] for k in keys}],
'address_scopes': address_scopes}], ports)
def test__get_sync_floating_ips_no_query(self):
"""Basic test that no query is performed if no router ids are passed"""
db = l3_db.L3_NAT_dbonly_mixin()
context = mock.Mock()
db._get_sync_floating_ips(context, [])
self.assertFalse(context.session.query.called)
@mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_floatingip_dict')
def test__make_floatingip_dict_with_scope(self, make_fip_dict):
db = l3_db.L3_NAT_dbonly_mixin()
make_fip_dict.return_value = {'id': mock.sentinel.fip_ip}
result = db._make_floatingip_dict_with_scope(
mock.sentinel.floating_ip_db, mock.sentinel.address_scope_id)
self.assertEqual({
'fixed_ip_address_scope': mock.sentinel.address_scope_id,
'id': mock.sentinel.fip_ip}, result)
def test__unique_floatingip_iterator(self):
query = mock.MagicMock()
query.order_by().__iter__.return_value = [
({'id': 'id1'}, 'scope1'),
({'id': 'id1'}, 'scope1'),
({'id': 'id2'}, 'scope2'),
({'id': 'id2'}, 'scope2'),
({'id': 'id2'}, 'scope2'),
({'id': 'id3'}, 'scope3')]
query.reset_mock()
result = list(
l3_db.L3_NAT_dbonly_mixin._unique_floatingip_iterator(query))
query.order_by.assert_called_once_with(l3_db.FloatingIP.id)
self.assertEqual([({'id': 'id1'}, 'scope1'),
({'id': 'id2'}, 'scope2'),
({'id': 'id3'}, 'scope3')], result)
@mock.patch.object(manager.NeutronManager, 'get_plugin')
def test_prevent_l3_port_deletion_port_not_found(self, gp):
# port not found doesn't prevent
gp.return_value.get_port.side_effect = n_exc.PortNotFound(port_id='1')
self.db.prevent_l3_port_deletion(None, None)
@mock.patch.object(manager.NeutronManager, 'get_plugin')
def test_prevent_l3_port_device_owner_not_router(self, gp):
# ignores other device owners
gp.return_value.get_port.return_value = {'device_owner': 'cat'}
self.db.prevent_l3_port_deletion(None, None)
@mock.patch.object(manager.NeutronManager, 'get_plugin')
def test_prevent_l3_port_no_fixed_ips(self, gp):
# without fixed IPs is allowed
gp.return_value.get_port.return_value = {
'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'fixed_ips': [],
'id': 'f'
}
self.db.prevent_l3_port_deletion(None, None)
@mock.patch.object(manager.NeutronManager, 'get_plugin')
def test_prevent_l3_port_no_router(self, gp):
# without router is allowed
gp.return_value.get_port.return_value = {
'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF,
'device_id': '44', 'id': 'f',
'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]}
self.db.get_router = mock.Mock()
self.db.get_router.side_effect = l3.RouterNotFound(router_id='44')
self.db.prevent_l3_port_deletion(mock.Mock(), None)
@mock.patch.object(manager.NeutronManager, 'get_plugin')
def test_prevent_l3_port_existing_router(self, gp):
gp.return_value.get_port.return_value = {
'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF,
'device_id': 'some_router', 'id': 'f',
'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]}
self.db.get_router = mock.Mock()
with testtools.ExpectedException(n_exc.ServicePortInUse):
self.db.prevent_l3_port_deletion(mock.Mock(), None)
@mock.patch.object(manager.NeutronManager, 'get_plugin')
def test_prevent_l3_port_existing_floating_ip(self, gp):
gp.return_value.get_port.return_value = {
'device_owner': n_const.DEVICE_OWNER_FLOATINGIP,
'device_id': 'some_flip', 'id': 'f',
'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]}
self.db.get_floatingip = mock.Mock()
with testtools.ExpectedException(n_exc.ServicePortInUse):
self.db.prevent_l3_port_deletion(mock.Mock(), None)
@mock.patch.object(l3_db, '_notify_subnetpool_address_scope_update')
def test_subscribe_address_scope_of_subnetpool(self, notify):
l3_db.L3RpcNotifierMixin._subscribe_callbacks()
registry.notify(resources.SUBNETPOOL_ADDRESS_SCOPE,
events.AFTER_UPDATE, mock.ANY, context=mock.ANY,
subnetpool_id='fake_id')
notify.assert_called_once_with(resources.SUBNETPOOL_ADDRESS_SCOPE,
events.AFTER_UPDATE, mock.ANY,
context=mock.ANY,
subnetpool_id='fake_id')
class L3_NAT_db_mixin(base.BaseTestCase):
def setUp(self):
super(L3_NAT_db_mixin, self).setUp()
self.db = l3_db.L3_NAT_db_mixin()
def _test_create_router(self, external_gateway_info=None):
router_db = l3_db.Router(id='123')
router_dict = {'id': '123', 'tenant_id': '456',
'external_gateway_info': external_gateway_info}
# Need to use a copy here as the create_router method pops the gateway
# information
router_input = {'router': router_dict.copy()}
with mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_create_router_db',
return_value=router_db) as crd,\
mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict',
return_value=router_dict),\
mock.patch.object(l3_db.L3_NAT_dbonly_mixin,
'_update_router_gw_info') as urgi,\
mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_get_router',
return_value=router_db),\
mock.patch.object(l3_db.L3_NAT_db_mixin, 'notify_router_updated')\
as nru:
self.db.create_router(mock.ANY, router_input)
self.assertTrue(crd.called)
if external_gateway_info:
self.assertTrue(urgi.called)
self.assertTrue(nru.called)
else:
self.assertFalse(urgi.called)
self.assertFalse(nru.called)
def test_create_router_no_gateway(self):
self._test_create_router()
def test_create_router_gateway(self):
ext_gateway_info = {'network_id': 'net-id', 'enable_snat': True,
'external_fixed_ips': [
{'subnet_id': 'subnet-id',
'ip_address': 'ip'}]}
self._test_create_router(ext_gateway_info)
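
# Illustrative only (not from the original test module): a minimal sketch of
# the mock.patch.object pattern used throughout the tests above, relying on
# the `mock` import at the top of this module. `FakePlugin` and `fetch_port`
# are hypothetical names used purely for the demo.
if __name__ == '__main__':
    class FakePlugin(object):
        def get_port(self, context, port_id):
            raise NotImplementedError

    def fetch_port(plugin, port_id):
        return plugin.get_port(None, port_id)

    # Patching the method on the class records the call arguments and returns
    # the canned value without touching the real implementation.
    with mock.patch.object(FakePlugin, 'get_port',
                           return_value={'id': 'f'}) as gp:
        assert fetch_port(FakePlugin(), 'f') == {'id': 'f'}
        gp.assert_called_once_with(None, 'f')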
|
|
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from pycsw import __version__
LOGGER = logging.getLogger(__name__)
class StaticContext(object):
"""core configuration"""
def __init__(self):
"""initializer"""
LOGGER.debug('Initializing static context')
self.version = __version__
self.ogc_schemas_base = 'http://schemas.opengis.net'
self.languages = {
'en': 'english',
'fr': 'french',
'el': 'greek',
}
self.namespaces = {
'atom': 'http://www.w3.org/2005/Atom',
'csw': 'http://www.opengis.net/cat/csw/2.0.2',
'dc': 'http://purl.org/dc/elements/1.1/',
'dct': 'http://purl.org/dc/terms/',
'dif': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/',
'fgdc': 'http://www.opengis.net/cat/csw/csdgm',
'gmd': 'http://www.isotc211.org/2005/gmd',
'gml': 'http://www.opengis.net/gml',
'ogc': 'http://www.opengis.net/ogc',
'os': 'http://a9.com/-/spec/opensearch/1.1/',
'ows': 'http://www.opengis.net/ows',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'sitemap': 'http://www.sitemaps.org/schemas/sitemap/0.9',
'soapenv': 'http://www.w3.org/2003/05/soap-envelope',
'xlink': 'http://www.w3.org/1999/xlink',
'xs': 'http://www.w3.org/2001/XMLSchema',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
self.md_core_model = {
'typename': 'pycsw:CoreMetadata',
'outputschema': 'http://pycsw.org/metadata',
'mappings': {
'pycsw:Identifier': 'identifier',
# CSW typename (e.g. csw:Record, md:MD_Metadata)
'pycsw:Typename': 'typename',
# schema namespace, i.e. http://www.isotc211.org/2005/gmd
'pycsw:Schema': 'schema',
# origin of resource, either 'local', or URL to web service
'pycsw:MdSource': 'mdsource',
# date of insertion
'pycsw:InsertDate': 'insert_date',
# raw XML metadata
'pycsw:XML': 'xml',
# bag of metadata elements and attributes ONLY, no XML tags
'pycsw:AnyText': 'anytext',
'pycsw:Language': 'language',
'pycsw:Title': 'title',
'pycsw:Abstract': 'abstract',
'pycsw:Keywords': 'keywords',
'pycsw:KeywordType': 'keywordstype',
'pycsw:Format': 'format',
'pycsw:Source': 'source',
'pycsw:Date': 'date',
'pycsw:Modified': 'date_modified',
'pycsw:Type': 'type',
# geometry, specified in OGC WKT
'pycsw:BoundingBox': 'wkt_geometry',
'pycsw:CRS': 'crs',
'pycsw:AlternateTitle': 'title_alternate',
'pycsw:RevisionDate': 'date_revision',
'pycsw:CreationDate': 'date_creation',
'pycsw:PublicationDate': 'date_publication',
'pycsw:OrganizationName': 'organization',
'pycsw:SecurityConstraints': 'securityconstraints',
'pycsw:ParentIdentifier': 'parentidentifier',
'pycsw:TopicCategory': 'topicategory',
'pycsw:ResourceLanguage': 'resourcelanguage',
'pycsw:GeographicDescriptionCode': 'geodescode',
'pycsw:Denominator': 'denominator',
'pycsw:DistanceValue': 'distancevalue',
'pycsw:DistanceUOM': 'distanceuom',
'pycsw:TempExtent_begin': 'time_begin',
'pycsw:TempExtent_end': 'time_end',
'pycsw:ServiceType': 'servicetype',
'pycsw:ServiceTypeVersion': 'servicetypeversion',
'pycsw:Operation': 'operation',
'pycsw:CouplingType': 'couplingtype',
'pycsw:OperatesOn': 'operateson',
'pycsw:OperatesOnIdentifier': 'operatesonidentifier',
'pycsw:OperatesOnName': 'operatesoname',
'pycsw:Degree': 'degree',
'pycsw:AccessConstraints': 'accessconstraints',
'pycsw:OtherConstraints': 'otherconstraints',
'pycsw:Classification': 'classification',
'pycsw:ConditionApplyingToAccessAndUse': 'conditionapplyingtoaccessanduse',
'pycsw:Lineage': 'lineage',
'pycsw:ResponsiblePartyRole': 'responsiblepartyrole',
'pycsw:SpecificationTitle': 'specificationtitle',
'pycsw:SpecificationDate': 'specificationdate',
'pycsw:SpecificationDateType': 'specificationdatetype',
'pycsw:Creator': 'creator',
'pycsw:Publisher': 'publisher',
'pycsw:Contributor': 'contributor',
'pycsw:Relation': 'relation',
# links: format "name,description,protocol,url[^,,,[^,,,]]"
'pycsw:Links': 'links',
}
}
self.model = {
'operations': {
'GetCapabilities': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'sections': {
'values': ['ServiceIdentification', 'ServiceProvider',
'OperationsMetadata', 'Filter_Capabilities']
}
}
},
'DescribeRecord': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'schemaLanguage': {
'values': ['http://www.w3.org/XML/Schema',
'http://www.w3.org/TR/xmlschema-1/',
'http://www.w3.org/2001/XMLSchema']
},
'typeName': {
'values': ['csw:Record']
},
'outputFormat': {
'values': ['application/xml', 'application/json']
}
}
},
'GetRecords': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'resultType': {
'values': ['hits', 'results', 'validate']
},
'typeNames': {
'values': ['csw:Record']
},
'outputSchema': {
'values': ['http://www.opengis.net/cat/csw/2.0.2']
},
'outputFormat': {
'values': ['application/xml', 'application/json']
},
'CONSTRAINTLANGUAGE': {
'values': ['FILTER', 'CQL_TEXT']
},
'ElementSetName': {
'values': ['brief', 'summary', 'full']
}
},
'constraints': {
}
},
'GetRecordById': {
'methods': {
'get': True,
'post': True,
},
'parameters': {
'outputSchema': {
'values': ['http://www.opengis.net/cat/csw/2.0.2']
},
'outputFormat': {
'values': ['application/xml', 'application/json']
},
'ElementSetName': {
'values': ['brief', 'summary', 'full']
}
}
},
'GetRepositoryItem': {
'methods': {
'get': True,
'post': False,
},
'parameters': {
}
}
},
'parameters': {
'version': {
'values': ['2.0.2']
},
'service': {
'values': ['CSW']
}
},
'constraints': {
'MaxRecordDefault': {
'values': ['10']
},
'PostEncoding': {
'values': ['XML', 'SOAP']
},
'XPathQueryables': {
'values': ['allowed']
}
},
'typenames': {
'csw:Record': {
'outputschema': 'http://www.opengis.net/cat/csw/2.0.2',
'queryables': {
'SupportedDublinCoreQueryables': {
# map Dublin Core queryables to core metadata model
'dc:title':
{'dbcol': self.md_core_model['mappings']['pycsw:Title']},
'dc:creator':
{'dbcol': self.md_core_model['mappings']['pycsw:Creator']},
'dc:subject':
{'dbcol': self.md_core_model['mappings']['pycsw:Keywords']},
'dct:abstract':
{'dbcol': self.md_core_model['mappings']['pycsw:Abstract']},
'dc:publisher':
{'dbcol': self.md_core_model['mappings']['pycsw:Publisher']},
'dc:contributor':
{'dbcol': self.md_core_model['mappings']['pycsw:Contributor']},
'dct:modified':
{'dbcol': self.md_core_model['mappings']['pycsw:Modified']},
'dc:date':
{'dbcol': self.md_core_model['mappings']['pycsw:Date']},
'dc:type':
{'dbcol': self.md_core_model['mappings']['pycsw:Type']},
'dc:format':
{'dbcol': self.md_core_model['mappings']['pycsw:Format']},
'dc:identifier':
{'dbcol': self.md_core_model['mappings']['pycsw:Identifier']},
'dc:source':
{'dbcol': self.md_core_model['mappings']['pycsw:Source']},
'dc:language':
{'dbcol': self.md_core_model['mappings']['pycsw:Language']},
'dc:relation':
{'dbcol': self.md_core_model['mappings']['pycsw:Relation']},
'dc:rights':
{'dbcol':
self.md_core_model['mappings']['pycsw:AccessConstraints']},
# bbox and full text map to internal fixed columns
'ows:BoundingBox':
{'dbcol': self.md_core_model['mappings']['pycsw:BoundingBox']},
'csw:AnyText':
{'dbcol': self.md_core_model['mappings']['pycsw:AnyText']},
}
}
}
}
}
def gen_domains(self):
"""Generate parameter domain model"""
domain = {}
domain['methods'] = {'get': True, 'post': True}
domain['parameters'] = {'ParameterName': {'values': []}}
for operation in self.model['operations'].keys():
for parameter in self.model['operations'][operation]['parameters']:
domain['parameters']['ParameterName']['values'].append('%s.%s' %
(operation, parameter))
return domain
def refresh_dc(self, mappings):
"""Refresh Dublin Core mappings"""
LOGGER.debug('refreshing Dublin Core mappings with %s' % str(mappings))
defaults = {
'dc:title': 'pycsw:Title',
'dc:creator': 'pycsw:Creator',
'dc:subject': 'pycsw:Keywords',
'dct:abstract': 'pycsw:Abstract',
'dc:publisher': 'pycsw:Publisher',
'dc:contributor': 'pycsw:Contributor',
'dct:modified': 'pycsw:Modified',
'dc:date': 'pycsw:Date',
'dc:type': 'pycsw:Type',
'dc:format': 'pycsw:Format',
'dc:identifier': 'pycsw:Identifier',
'dc:source': 'pycsw:Source',
'dc:language': 'pycsw:Language',
'dc:relation': 'pycsw:Relation',
'dc:rights': 'pycsw:AccessConstraints',
'ows:BoundingBox': 'pycsw:BoundingBox',
'csw:AnyText': 'pycsw:AnyText',
}
for k, val in defaults.items():
self.model['typenames']['csw:Record']['queryables']['SupportedDublinCoreQueryables'][k] = \
{'dbcol': mappings['mappings'][val]}
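
# Illustrative only (not part of pycsw itself): a minimal sketch of how the
# static context above might be inspected. It assumes pycsw is importable so
# that the `__version__` import at the top of this module succeeds; the
# variable names below exist purely for the demo.
if __name__ == '__main__':
    context = StaticContext()
    # Operations advertised in the CSW capabilities model.
    print(sorted(context.model['operations'].keys()))
    # Parameter domain model derived from those operations.
    domains = context.gen_domains()
    print(sorted(domains['parameters']['ParameterName']['values'])[:5])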
|
|
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/check_frontend_test_coverage.py."""
from __future__ import annotations
import builtins
import os
import subprocess
import sys
from core import python_utils
from core.tests import test_utils
from . import check_frontend_test_coverage
class CheckFrontendCoverageTests(test_utils.GenericTestBase):
def setUp(self):
super(CheckFrontendCoverageTests, self).setUp()
self.lcov_items_list = None
self.check_function_calls = {
'open_file_is_called': False,
'exists_is_called': False,
}
self.expected_check_function_calls = {
'open_file_is_called': True,
'exists_is_called': True,
}
self.printed_messages = []
class MockFile:
def __init__(self, lcov_items_list):
self.lcov_items_list = lcov_items_list
def read(self): # pylint: disable=missing-docstring
return self.lcov_items_list
def mock_open_file(
file_name, option
): # pylint: disable=unused-argument
self.check_function_calls['open_file_is_called'] = True
return MockFile(self.lcov_items_list)
def mock_exists(unused_path):
self.check_function_calls['exists_is_called'] = True
return True
def mock_print(message):
self.printed_messages.append(message)
def mock_check_call(command): # pylint: disable=unused-argument
self.check_function_calls['check_call_is_called'] = True
self.open_file_swap = self.swap(
python_utils, 'open_file', mock_open_file
)
self.exists_swap = self.swap(os.path, 'exists', mock_exists)
self.print_swap = self.swap(builtins, 'print', mock_print)
self.check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call
)
def test_get_stanzas_from_lcov_file(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
'SF:/opensource/oppia/file2.ts\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
'SF:/opensource/oppia/file3.ts\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
)
with self.open_file_swap:
stanzas = check_frontend_test_coverage.get_stanzas_from_lcov_file()
self.assertEqual(stanzas[0].file_name, 'file.ts')
self.assertEqual(stanzas[0].total_lines, 10)
self.assertEqual(stanzas[0].covered_lines, 5)
self.assertEqual(stanzas[1].file_name, 'file2.ts')
self.assertEqual(stanzas[1].total_lines, 10)
self.assertEqual(stanzas[1].covered_lines, 5)
self.assertEqual(stanzas[2].file_name, 'file3.ts')
self.assertEqual(stanzas[2].total_lines, 10)
self.assertEqual(stanzas[2].covered_lines, 5)
def test_get_stanzas_from_lcov_file_file_name_exception(self):
self.lcov_items_list = (
'SF:\n'
'LF:10\n'
'LH:5\n'
'end_of_record\n'
)
with self.open_file_swap:
with self.assertRaisesRegexp(
Exception,
'The test path is empty or null. '
'It\'s not possible to diff the test coverage correctly.',
):
check_frontend_test_coverage.get_stanzas_from_lcov_file()
def test_get_stanzas_from_lcov_file_total_lines_exception(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:\n'
'LH:5\n'
'end_of_record\n'
)
with self.open_file_swap:
with self.assertRaisesRegexp(
Exception,
'It wasn\'t possible to get the total lines of file.ts file.'
'It\'s not possible to diff the test coverage correctly.',
):
check_frontend_test_coverage.get_stanzas_from_lcov_file()
def test_get_stanzas_from_lcov_file_covered_lines_exception(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:\n'
'end_of_record\n'
)
with self.open_file_swap:
with self.assertRaisesRegexp(
Exception,
'It wasn\'t possible to get the covered lines of file.ts file.'
'It\'s not possible to diff the test coverage correctly.',
):
check_frontend_test_coverage.get_stanzas_from_lcov_file()
def test_check_coverage_changes(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:/opensource/oppia/file2.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES',
['file.ts', 'file2.ts'],
)
check_function_calls = {'sys_exit_is_called': False}
expected_check_function_calls = {'sys_exit_is_called': False}
def mock_sys_exit(error_message): # pylint: disable=unused-argument
check_function_calls['sys_exit_is_called'] = True
sys_exit_swap = self.swap(sys, 'exit', mock_sys_exit)
with sys_exit_swap, self.exists_swap, self.open_file_swap, self.print_swap: # pylint: disable=line-too-long
with not_fully_covered_files_swap:
check_frontend_test_coverage.check_coverage_changes()
self.assertEqual(
check_function_calls, expected_check_function_calls
)
def test_check_coverage_changes_error(self):
def mock_exists(unused_path):
return False
exists_swap = self.swap(os.path, 'exists', mock_exists)
with exists_swap:
with self.assertRaisesRegexp(
Exception,
'Expected lcov file to be'
r' available at [A-Za-z\._/]+, but the file does not exist.',
):
check_frontend_test_coverage.check_coverage_changes()
def test_check_coverage_changes_for_covered_files(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:node_modules/oppia/anotherfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage, 'NOT_FULLY_COVERED_FILENAMES', []
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap, self.capture_logging() as logs:
with self.assertRaisesRegexp(SystemExit, '1'):
check_frontend_test_coverage.check_coverage_changes()
self.assertEqual(
logs,
[
'\033[1mfile.ts\033[0m seems to be not completely '
'tested. Make sure it\'s fully covered.'
],
)
def test_check_coverage_changes_remove_file(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:10\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES',
['file.ts'],
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap, self.capture_logging() as logs:
with self.assertRaisesRegexp(SystemExit, '1'):
check_frontend_test_coverage.check_coverage_changes()
self.assertEqual(
logs,
[
'\033[1mfile.ts\033[0m seems to be fully covered! '
'Before removing it manually from the denylist '
'in the file '
'scripts/check_frontend_test_coverage.py, please '
'make sure you\'ve followed the unit tests rules '
'correctly on: '
'https://github.com/oppia/oppia/wiki/Frontend-unit'
'-tests-guide#rules'
],
)
def test_check_coverage_changes_when_renaming_file(self):
self.lcov_items_list = (
'SF:/opensource/oppia/newfilename.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES',
['file.ts'],
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap, self.capture_logging() as logs:
with self.assertRaisesRegexp(SystemExit, '1'):
check_frontend_test_coverage.check_coverage_changes()
self.assertEqual(
logs,
[
'\033[1mnewfilename.ts\033[0m seems to be not '
'completely tested. Make sure it\'s fully covered.\n'
'\033[1mfile.ts\033[0m is in the frontend test '
'coverage denylist but it doesn\'t exist anymore. If '
'you have renamed it, please make sure to remove the '
'old file name and add the new file name in the '
'denylist in the file scripts/'
'check_frontend_test_coverage.py.'
],
)
def test_fully_covered_filenames_is_sorted(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:/opensource/oppia/anotherfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:node_modules/oppia/thirdfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES',
['anotherfile.ts', 'file.ts'],
)
check_function_calls = {'sys_exit_is_called': False}
expected_check_function_calls = {'sys_exit_is_called': False}
def mock_sys_exit(error_message): # pylint: disable=unused-argument
check_function_calls['sys_exit_is_called'] = True
sys_exit_swap = self.swap(sys, 'exit', mock_sys_exit)
with sys_exit_swap, self.exists_swap, self.open_file_swap:
with self.print_swap, not_fully_covered_files_swap:
(
check_frontend_test_coverage
.check_not_fully_covered_filenames_list_is_sorted()
)
self.assertEqual(
check_function_calls, expected_check_function_calls
)
def test_fully_covered_filenames_is_not_sorted(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
'SF:/opensource/oppia/anotherfile.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES',
['file.ts', 'anotherfile.ts'],
)
with self.exists_swap, self.open_file_swap, self.print_swap:
with not_fully_covered_files_swap, self.capture_logging() as logs:
with self.assertRaisesRegexp(SystemExit, '1'):
(
check_frontend_test_coverage
.check_not_fully_covered_filenames_list_is_sorted()
)
self.assertEqual(
logs,
[
'The \033[1mNOT_FULLY_COVERED_FILENAMES\033[0m list '
'must be kept in alphabetical order.'
],
)
def test_function_calls(self):
self.lcov_items_list = (
'SF:/opensource/oppia/file.ts\n'
'LF:10\n'
'LH:9\n'
'end_of_record\n'
)
not_fully_covered_files_swap = self.swap(
check_frontend_test_coverage,
'NOT_FULLY_COVERED_FILENAMES',
['file.ts'],
)
with self.check_call_swap, self.exists_swap, self.open_file_swap:
with not_fully_covered_files_swap:
check_frontend_test_coverage.main()
self.assertEqual(
self.check_function_calls, self.expected_check_function_calls
)
|
|
#! /usr/bin/env python
"""Write structured grids to NetCDF files.
Write netcdf
++++++++++++
.. autosummary::
~landlab.io.netcdf.write.write_netcdf
"""
import pathlib
import numpy as np
import xarray as xr
from landlab.io.netcdf._constants import (
_AXIS_COORDINATE_NAMES,
_AXIS_DIMENSION_NAMES,
_NP_TO_NC_TYPE,
)
def _set_netcdf_attributes(root, attrs):
"""Set attributes of a netcdf file.
Set attributes of the netCDF Database object, *root*. Attributes are
given as key/value pairs from *attrs*.
Parameters
----------
root : netcdf_file
A NetCDF file.
attrs : dict
Attributes as key-value pairs.
"""
for (key, val) in attrs.items():
setattr(root, key, val)
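# A minimal usage sketch for _set_netcdf_attributes (illustrative only; assumes
# the netCDF4 package, whose Dataset objects accept global attributes via
# setattr in the same way a scipy netcdf_file does):
#
#     from netCDF4 import Dataset
#     root = Dataset("example.nc", "w")
#     _set_netcdf_attributes(root, {"title": "example run", "source": "landlab"})
#     root.close()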
def _get_dimension_names(shape):
"""Get dimension names.
Parameters
----------
shape : tuple of int
Shape of a structured grid.
Returns
-------
tuple of str
Dimension names for the NetCDF file.
Examples
--------
>>> from landlab.io.netcdf.write import _get_dimension_names
>>> _get_dimension_names((4, ))
['ni']
>>> _get_dimension_names((4, 5))
['nj', 'ni']
>>> _get_dimension_names((4, 5, 6))
['nk', 'nj', 'ni']
"""
names = _AXIS_DIMENSION_NAMES[-1 : -(len(shape) + 1) : -1]
return names[::-1]
def _get_dimension_sizes(shape):
"""Get dimension sizes.
Parameters
----------
shape : tuple of int
Shape of a structured grid.
Returns
-------
dict
Dimension sizes.
Examples
--------
>>> from landlab.io.netcdf.write import _get_dimension_sizes
>>> _get_dimension_sizes((4, ))
{'ni': 4}
>>> sizes = _get_dimension_sizes((4, 5))
>>> sizes['ni'], sizes['nj']
(5, 4)
>>> sizes = _get_dimension_sizes((4, 5, 6))
>>> sizes['ni'], sizes['nj'], sizes['nk']
(6, 5, 4)
"""
names = _AXIS_DIMENSION_NAMES[-1 : -(len(shape) + 1) : -1]
sizes = dict()
for (axis, name) in enumerate(names):
sizes[name] = shape[-(axis + 1)]
return sizes
def _get_axes_names(shape):
"""Get names of the axes.
Parameters
----------
shape : tuple of int
Shape of a structured grid.
Returns
-------
tuple of str
Names of the axes for the NetCDF file.
Examples
--------
>>> from landlab.io.netcdf.write import _get_axes_names
>>> _get_axes_names((2, ))
['x']
>>> _get_axes_names((2, 3))
['y', 'x']
>>> _get_axes_names((2, 3, 4))
['z', 'y', 'x']
"""
names = _AXIS_COORDINATE_NAMES[-1 : -(len(shape) + 1) : -1]
return names[::-1]
def _get_cell_bounds(shape, spacing=(1.0, 1.0), origin=(0.0, 0.0)):
"""Get bounds arrays for square cells.
Parameters
----------
shape : tuple of int
Shape of the grid in cell corners.
spacing : tuple of float
Height and width of cells.
origin : tuple of float
Coordinates of lower-left corner of lower-left cell.
Returns
-------
dict of ndarray
Dictionary with keys *y_bnds* and *x_bnds* giving the *y* and *x*
coordinates of each cell corner (ordered counter-clockwise starting from
lower-right). The shape of each returned array is *(rows, cols, 4)*.
Examples
--------
>>> from landlab.io.netcdf.write import _get_cell_bounds
>>> bounds = _get_cell_bounds((3, 4))
>>> bounds['y_bnds'] # doctest: +NORMALIZE_WHITESPACE
array([[[ 0., 1., 1., 0.], [ 0., 1., 1., 0.], [ 0., 1., 1., 0.]],
[[ 1., 2., 2., 1.], [ 1., 2., 2., 1.], [ 1., 2., 2., 1.]]])
>>> bounds['x_bnds'] # doctest: +NORMALIZE_WHITESPACE
array([[[ 1., 1., 0., 0.], [ 2., 2., 1., 1.], [ 3., 3., 2., 2.]],
[[ 1., 1., 0., 0.], [ 2., 2., 1., 1.], [ 3., 3., 2., 2.]]])
"""
rows = np.arange(shape[0]) * spacing[0] + origin[0]
cols = np.arange(shape[1]) * spacing[1] + origin[1]
corner_y, corner_x = np.meshgrid(rows, cols, indexing="ij")
y_bnds = np.vstack(
(
corner_y[:-1, 1:].flat,
corner_y[1:, 1:].flat,
corner_y[1:, :-1].flat,
corner_y[:-1, :-1].flat,
)
).T
x_bnds = np.vstack(
(
corner_x[:-1, 1:].flat,
corner_x[1:, 1:].flat,
corner_x[1:, :-1].flat,
corner_x[:-1, :-1].flat,
)
).T
return {
"y_bnds": y_bnds.reshape((shape[0] - 1, shape[1] - 1, 4)),
"x_bnds": x_bnds.reshape((shape[0] - 1, shape[1] - 1, 4)),
}
def _set_netcdf_cell_structured_dimensions(root, shape):
"""Set dimensions for a structured grid of cells.
Parameters
----------
root : netcdf_file
A NetCDF file.
shape : tuple of int
Shape of the cell grid (rows of cells, columns of cells).
"""
if len(shape) < 1 or len(shape) > 3:
raise ValueError("grid dimension must be 1, 2, or 3")
dimensions = _get_dimension_sizes(shape)
dims = root.dimensions
if "nt" not in dims:
root.createDimension("nt", None)
for (name, dim_size) in dimensions.items():
if name not in dims:
root.createDimension(name, dim_size - 2)
root.createDimension("nv", 4)
def _set_netcdf_structured_dimensions(root, shape):
"""Set dimensions for a structured grid.
Add dimensions to *root* for a structured grid of size *shape*. The
dimension names will be 'ni', 'nj', and 'nk'. 'ni' is the length of the
fast dimension, followed by 'nj', and then 'nk'.
For example, a grid with shape (3, 4, 5) will have dimensions ni=5,
nj=4, and nk=3. Lower dimension grids simply drop the slowest dimension.
Thus, a grid with shape (3, 4) has dimensions ni=4 and nj=3.
Parameters
----------
root : netcdf_file
A NetCDF file.
shape : tuple of int
Shape of the grid.
"""
if len(shape) < 1 or len(shape) > 3:
raise ValueError("grid dimension must be 1, 2, or 3")
dimensions = _get_dimension_sizes(shape)
dims = root.dimensions
if "nt" not in dims:
root.createDimension("nt", None)
for (name, dim_size) in dimensions.items():
if name not in dims:
root.createDimension(name, dim_size)
def _set_netcdf_variables(root, fields, **kwds):
"""Set the field variables.
First set the variables that define the grid and then the variables
at the grid nodes and cells.
"""
names = kwds.pop("names", None)
_add_spatial_variables(root, fields, **kwds)
_add_variables_at_points(root, fields, names=names)
def _set_netcdf_raster_variables(root, fields, **kwds):
"""Set the field variables for rasters.
First set the variables that define the grid and then the variables
at the grid nodes and cells.
"""
names = kwds.pop("names", None)
_add_raster_spatial_variables(root, fields, **kwds)
_add_variables_at_points(root, fields, names=names)
def _set_netcdf_cell_variables(root, fields, **kwds):
"""Set the cell field variables.
First set the variables that define the grid and then the variables
at the grid nodes and cells.
"""
names = kwds.pop("names", None)
_add_cell_spatial_variables(root, fields, **kwds)
_add_variables_at_cells(root, fields, names=names)
def _add_cell_spatial_variables(root, grid, **kwds):
"""Add the spatial variables that describe the cell grid."""
long_name = kwds.get("long_name", {})
cell_grid_shape = [dim - 1 for dim in grid.shape]
spatial_variable_shape = _get_dimension_names(cell_grid_shape)
bounds = _get_cell_bounds(
cell_grid_shape,
spacing=(grid.dy, grid.dx),
origin=(grid.dy * 0.5, grid.dx * 0.5),
)
shape = spatial_variable_shape + ["nv"]
for name, values in bounds.items():
# var = root.createVariable(name, 'f8', shape)
# var[:] = values
try:
var = root.variables[name]
except KeyError:
var = root.createVariable(name, "f8", shape)
var[:] = values
axis = grid.axis_name.index(name[0])
var.units = str(grid.axis_units[axis])
try:
var.long_name = str(long_name[name])
except KeyError:
var.long_name = str(grid.axis_name[axis])
def _add_spatial_variables(root, grid, **kwds):
"""Add spatial variables to a NetCDF file.
Add the variables to *root* that define the structured grid, *grid*.
Parameters
----------
root : netcdf_file
A NetCDF file.
grid : RasterModelGrid
A structured grid.
long_name : dict, optional
Long name for each spatial variable to add. Keys are grid field
names, values are corresponding long names.
"""
long_name = kwds.get("long_name", {})
netcdf_vars = root.variables
spatial_variable_names = _get_axes_names(grid.shape)
spatial_variable_shape = _get_dimension_names(grid.shape)
for (axis, name) in enumerate(spatial_variable_names):
try:
var = netcdf_vars[name]
except KeyError:
var = root.createVariable(name, "f8", spatial_variable_shape)
coords = grid.node_axis_coordinates(axis=axis).view()
coords.shape = var.shape
var[:] = coords
var.units = grid.axis_units[axis].encode("utf-8")
try:
var.long_name = long_name[name].encode("utf-8")
except KeyError:
var.long_name = grid.axis_name[axis].encode("utf-8")
def _add_raster_spatial_variables(root, grid, **kwds):
"""Add spatial variables to a NetCDF file for rasters.
Add the variables to *root* that define the structured grid, *grid*.
Parameters
----------
root : netcdf_file
A NetCDF file.
grid : RasterModelGrid
A structured grid.
long_name : dict, optional
Long name for each spatial variable to add. Keys are grid field
names, values are corresponding long names.
"""
long_name = kwds.get("long_name", {})
netcdf_vars = root.variables
spatial_variable_names = _get_axes_names(grid.shape)
spatial_variable_shape = _get_dimension_names(grid.shape)
for (axis, name) in enumerate(spatial_variable_names):
try:
var = netcdf_vars[name]
except KeyError:
var = root.createVariable(name, "f8", [spatial_variable_shape[axis]])
coords = grid.node_axis_coordinates(axis=axis).view().reshape(grid.shape)
if axis == 1:
coords = coords[1, :]
elif axis == 0:
coords = coords[:, 1]
else:
raise NotImplementedError("")
coords.shape = var.shape
var[:] = coords
var.units = str(grid.axis_units[axis])
try:
var.long_name = str(long_name[name])
except KeyError:
var.long_name = str(grid.axis_name[axis])
def _add_variables_at_points(root, fields, names=None):
if isinstance(names, str):
names = [names]
names = names or fields["node"].keys()
netcdf_vars = root.variables
spatial_variable_shape = _get_dimension_names(fields.shape)
try:
n_times = len(netcdf_vars["t"]) - 1
except TypeError:
n_times = len(netcdf_vars["t"][:]) - 1
except KeyError:
n_times = 0
node_fields = fields["node"]
for var_name in names:
try:
var = netcdf_vars[var_name]
except KeyError:
var = root.createVariable(
var_name,
_NP_TO_NC_TYPE[str(node_fields[var_name][0].dtype)],
["nt"] + spatial_variable_shape,
)
if node_fields[var_name].size > 1:
data = node_fields[var_name].view()
data.shape = var.shape[1:]
try:
var[n_times, :] = data
except ValueError:
raise
else:
var[n_times] = node_fields[var_name].flat[0]
var.units = node_fields.units[var_name] or "?"
var.long_name = var_name
if hasattr(fields, "grid_mapping"):
setattr(var, "grid_mapping", fields.grid_mapping["name"])
def _add_variables_at_cells(root, fields, names=None):
if isinstance(names, str):
names = [names]
names = names or fields["cell"].keys()
netcdf_vars = root.variables
cell_grid_shape = [dim - 1 for dim in fields.shape]
spatial_variable_shape = _get_dimension_names(cell_grid_shape)
try:
n_times = len(netcdf_vars["t"]) - 1
except KeyError:
n_times = 0
cell_fields = fields["cell"]
for var_name in names:
try:
var = netcdf_vars[var_name]
except KeyError:
var = root.createVariable(
var_name,
_NP_TO_NC_TYPE[str(cell_fields[var_name].dtype)],
["nt"] + spatial_variable_shape,
)
if cell_fields[var_name].size > 1:
data = cell_fields[var_name].view()
data.shape = var.shape[1:]
try:
var[n_times, :] = data
except ValueError:
raise
else:
var[n_times] = cell_fields[var_name].flat[0]
var.units = str(cell_fields.units[var_name] or "?")
var.long_name = str(var_name)
def _add_time_variable(root, time, **kwds):
"""Add a time value to a NetCDF file.
Append a new time value to the time variable of a NetCDF file. If there
is no time variable, create one. The time variable is named ``t``.
Parameters
----------
root : netcdf_file
A NetCDF file.
time : float
The time.
units : str, optional
Time units.
reference : str, optional
Reference time.
"""
units = kwds.get("units", "days")
reference = kwds.get("reference", "00:00:00 UTC")
netcdf_vars = root.variables
try:
time_var = netcdf_vars["t"]
except KeyError:
time_var = root.createVariable("t", "f8", ("nt",))
time_var.units = " ".join([units, "since", reference])
time_var.long_name = "time"
try:
n_times = len(time_var)
except TypeError:
n_times = len(time_var[:])
if time is not None:
time_var[n_times] = time
else:
time_var[n_times] = n_times
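# A minimal usage sketch for _add_time_variable (illustrative only; the file
# name and values are assumptions). The unlimited "nt" dimension must already
# exist, as created by the _set_netcdf_*_dimensions helpers above:
#
#     from netCDF4 import Dataset
#     root = Dataset("example.nc", "w")
#     root.createDimension("nt", None)
#     _add_time_variable(root, 0.0, units="days")  # creates "t", appends 0.0
#     _add_time_variable(root, 1.5)                # appends a second value
#     root.close()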
def _set_netcdf_grid_mapping_variable(root, grid_mapping):
"""Create grid mapping variable, if necessary."""
name = grid_mapping.pop("name")
var = root.createVariable(name, "S1", dimensions=())
for attr in grid_mapping.keys():
setattr(var, attr, grid_mapping[attr])
_VALID_NETCDF_FORMATS = set(
["NETCDF3_CLASSIC", "NETCDF3_64BIT", "NETCDF4_CLASSIC", "NETCDF4"]
)
def _guess_at_location(fields, names):
"""Guess where the values should be located."""
node_fields = set(fields["node"].keys())
cell_fields = set(fields["cell"].keys())
if names is None or len(names) == 0:
if len(node_fields) > 0:
at = "node"
else:
at = "cell"
else:
if node_fields.issuperset(names):
at = "node"
elif cell_fields.issuperset(names):
at = "cell"
else:
at = None
return at
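# Sketch of how _guess_at_location resolves the value location (hypothetical
# field contents; a grid exposes its field groups as grid["node"] and
# grid["cell"], as used by write_netcdf below):
#
#     # grid["node"] -> {"topographic__elevation": ...}, grid["cell"] -> {}
#     _guess_at_location(grid, None)                          # -> "node"
#     _guess_at_location(grid, ("topographic__elevation",))   # -> "node"
#     _guess_at_location(grid, ("some_unknown_field",))       # -> None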
def write_netcdf(
path,
grid,
attrs=None,
append=False,
format="NETCDF3_64BIT",
names=None,
at=None,
time=None,
raster=False,
):
"""Write landlab fields to netcdf.
Write the data and grid information for *grid* to *path* as NetCDF.
If the *append* keyword argument is True, append the data to an existing
file, if it exists. Otherwise, clobber any existing file.
Parameters
----------
path : str
Path to output file.
grid : RasterModelGrid
Landlab RasterModelGrid object that holds a grid and associated values.
append : boolean, optional
Append data to an existing file, otherwise clobber the file.
format : {'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', 'NETCDF4'}
Format of output netcdf file.
attrs : dict
Attributes to add to netcdf file.
names : iterable of str, optional
Names of the fields to include in the netcdf file. If not provided,
write all fields.
at : {'node', 'cell'}, optional
The location where values are defined.
raster : bool, optional
Indicate whether spatial dimensions are written as full value arrays
(default) or just as coordinate dimensions.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.io.netcdf import write_netcdf
Create a uniform rectilinear grid with four rows and 3 columns, and add
some data fields to it.
>>> rmg = RasterModelGrid((4, 3))
>>> rmg.at_node["topographic__elevation"] = np.arange(12.0)
>>> rmg.at_node["uplift_rate"] = 2.0 * np.arange(12.0)
Create a temporary directory to write the netcdf file into.
>>> import tempfile, os
>>> temp_dir = tempfile.mkdtemp()
>>> os.chdir(temp_dir)
Write the grid to a netcdf3 file but only include the *uplift_rate*
data in the file.
>>> write_netcdf("test.nc", rmg, format="NETCDF3_64BIT", names="uplift_rate")
Read the file back in and check its contents.
>>> from scipy.io import netcdf
>>> fp = netcdf.netcdf_file('test.nc', 'r')
>>> 'uplift_rate' in fp.variables
True
>>> 'topographic__elevation' in fp.variables
False
>>> fp.variables['uplift_rate'][:].flatten()
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20.,
22.])
>>> rmg.at_cell["air__temperature"] = np.arange(2.0)
>>> write_netcdf("test-cell.nc", rmg, format="NETCDF3_64BIT",
... names="air__temperature", at="cell")
"""
path = pathlib.Path(path)
if append and not path.exists():
append = False
if at not in (None, "cell", "node"):
raise ValueError("value location not understood")
if isinstance(names, str):
names = (names,)
at = at or _guess_at_location(grid, names) or "node"
if names is None:
names = grid[at].keys()
if not set(grid[at].keys()).issuperset(names):
raise ValueError("values must be on either cells or nodes, not both")
attrs = attrs or {}
dims = ("nt", "nj", "ni")
shape = grid.shape
if at == "cell":
shape = shape[0] - 2, shape[1] - 2
data = {}
if append:
with xr.open_dataset(path) as dataset:
time_varying_names = [
name for name in dataset.variables if "nt" in dataset[name].dims
]
for name in set(time_varying_names) & set(names):
values = getattr(grid, "at_" + at)[name].reshape((1,) + shape)
data[name] = (dims, np.concatenate([dataset[name].values, values]))
if "t" not in dataset.variables:
times = np.arange(len(dataset["nt"]) + 1)
else:
times = np.concatenate((dataset["t"].values, [0.0]))
if time is None:
times[-1] = times[-2] + 1.0
else:
times[-1] = time
data["t"] = (("nt",), times)
if at == "cell":
data["x_bnds"] = (
("nj", "ni", "nv"),
grid.x_of_corner[grid.corners_at_cell].reshape(shape + (4,)),
)
data["y_bnds"] = (
("nj", "ni", "nv"),
grid.y_of_corner[grid.corners_at_cell].reshape(shape + (4,)),
)
else:
if raster:
data["x"] = (("ni"), grid.x_of_node.reshape(shape)[0, :])
data["y"] = (("nj"), grid.y_of_node.reshape(shape)[:, 0])
else:
data["x"] = (("nj", "ni"), grid.x_of_node.reshape(shape))
data["y"] = (("nj", "ni"), grid.y_of_node.reshape(shape))
if not append:
if time is not None:
data["t"] = (("nt",), [time])
for name in names:
data[name] = (
dims,
getattr(grid, "at_" + at)[name].reshape((-1,) + shape),
)
dataset = xr.Dataset(data, attrs=attrs)
dataset.to_netcdf(path, mode="w", format=format, unlimited_dims=("nt",))
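# An append-mode sketch for write_netcdf (illustrative only; file and field
# names mirror the docstring example above):
#
#     write_netcdf("test.nc", rmg, names="uplift_rate", time=0.0)
#     rmg.at_node["uplift_rate"] *= 2.0
#     write_netcdf("test.nc", rmg, names="uplift_rate", time=1.0, append=True)
#     # The second call adds another record along the unlimited "nt" dimension.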
def write_raster_netcdf(
path,
fields,
attrs=None,
append=False,
time=None,
format="NETCDF4",
names=None,
at=None,
):
"""Write Raster Model Grid landlab fields to netcdf.
Write the data and grid information for *fields* to *path* as NetCDF.
This method is for Raster Grids only and takes advantage of regular x and
y spacing to save memory.
Rather than writing the x and y node locations at all (nr x nc) locations,
it writes a 1D array each for x and y.
A more modern version of this might write x and y location as a netcdf
coordinate. However, the original version of this function wrote x and y
as data variables rather than coordinates.
If the *append* keyword argument is True, append the data to an existing
file, if it exists. Otherwise, clobber any existing file.
Parameters
----------
path : str
Path to output file.
fields : field-like
Landlab field object that holds a grid and associated values. This must
be a Raster type.
append : boolean, optional
Append data to an existing file, otherwise clobber the file.
time : float, optional
Add a time to the time variable.
format : {'NETCDF4'}
Format of output netcdf file.
attrs : dict
Attributes to add to netcdf file.
names : iterable of str, optional
Names of the fields to include in the netcdf file. If not provided,
write all fields.
at : {'node'}, optional
The location where values are defined. Presently only implemented for
type 'node'.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.io.netcdf import write_raster_netcdf
Create a uniform rectilinear grid with four rows and 3 columns, and add
some data fields to it.
>>> rmg = RasterModelGrid((4, 3))
>>> rmg.shape
(4, 3)
>>> rmg.at_node["topographic__elevation"] = np.arange(12.0)
>>> rmg.at_node["uplift_rate"] = 2.0 * np.arange(12.0)
Create a temporary directory to write the netcdf file into.
>>> import tempfile, os
>>> temp_dir = tempfile.mkdtemp()
>>> os.chdir(temp_dir)
Write the grid to a netcdf file but only include the *uplift_rate*
data in the file.
>>> write_raster_netcdf(
... "test.nc",
... rmg,
... format="NETCDF3_64BIT",
... names="uplift_rate",
... )
Read the file back in and check its contents.
>>> from scipy.io import netcdf
>>> fp = netcdf.netcdf_file('test.nc', 'r')
>>> 'uplift_rate' in fp.variables
True
>>> 'topographic__elevation' in fp.variables
False
>>> fp.variables['uplift_rate'][:].flatten()
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20.,
22.])
>>> fp.variables['x'][:]
array([ 0., 1., 2.])
>>> fp.variables['y'][:]
array([ 0., 1., 2., 3.])
Read now with read_netcdf
>>> from landlab.io.netcdf import read_netcdf
>>> grid = read_netcdf("test.nc")
>>> grid.shape
(4, 3)
>>> grid.x_of_node
array([ 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.])
>>> grid.y_of_node
array([ 0., 0., 0., 1., 1., 1., 2., 2., 2., 3., 3., 3.])
>>> grid.at_node["uplift_rate"]
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20.,
22.])
"""
return write_netcdf(
path,
fields,
attrs=attrs,
append=append,
format=format,
names=names,
at=at,
time=time,
raster=True,
)
|
|
from __future__ import annotations
from django.db import connection
from django.test import TestCase
from django.test.utils import override_settings
from django_mysql.rewrite_query import rewrite_query
from tests.testapp.utils import CaptureLastQuery
class RewriteQueryTests(TestCase):
def test_it_doesnt_touch_normal_queries(self):
self.check_identity("SELECT 1")
self.check_identity("SELECT col_a, col_b FROM sometable WHERE 1")
def test_bad_rewrites_ignored(self):
assert (
rewrite_query(
"SELECT col_a FROM sometable WHERE (/*QueryRewrite':STRAY_JOIN*/1)"
)
== "SELECT col_a FROM sometable WHERE (1)"
)
assert (
rewrite_query("SELECT col_a FROM sometable WHERE (/*QueryRewrite':*/1)")
== "SELECT col_a FROM sometable WHERE (1)"
)
assert (
rewrite_query(
"UPDATE col_a SET pants='onfire' WHERE "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
== "UPDATE col_a SET pants='onfire' WHERE (1)"
)
def test_non_select_update_deletes_ignored(self):
assert (
rewrite_query("SHOW TABLES /*QueryRewrite':STRAIGHT_JOIN*/")
== "SHOW TABLES "
)
def check_identity(self, query):
assert rewrite_query(query) == query
def test_straight_join(self):
assert rewrite_query(
"SELECT col_a, col_b FROM sometable WHERE nothing() AND "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
) == (
"SELECT STRAIGHT_JOIN col_a, col_b FROM sometable WHERE "
+ "nothing() AND (1)"
)
def test_straight_join_preceeding_whitespace(self):
assert rewrite_query(
" SELECT col_a, col_b FROM sometable WHERE nothing() AND "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
) == (
"SELECT STRAIGHT_JOIN col_a, col_b FROM sometable WHERE "
+ "nothing() AND (1)"
)
def test_straight_join_with_comment(self):
assert (
rewrite_query(
"SELECT /* HI MUM */ col_a FROM sometable WHERE "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
== "SELECT /* HI MUM */ STRAIGHT_JOIN col_a FROM sometable WHERE (1)"
)
def test_straight_join_with_comments(self):
assert (
rewrite_query(
"SELECT /* GOODBYE */ col_a FROM sometable WHERE "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
== "SELECT /* GOODBYE */ STRAIGHT_JOIN col_a FROM sometable WHERE (1)"
)
def test_straight_join_with_repeat_comments(self):
assert rewrite_query(
"SELECT /* A */ /* B */ /* C */ col_a FROM sometable "
+ "WHERE (/*QueryRewrite':STRAIGHT_JOIN*/1)"
) == (
"SELECT /* A */ /* B */ /* C */ STRAIGHT_JOIN col_a FROM "
+ "sometable WHERE (1)"
)
def test_straight_join_with_spaceless_comment(self):
assert (
rewrite_query(
"SELECT/* this*/col_a FROM sometable "
+ "WHERE (/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
== "SELECT /* this*/ STRAIGHT_JOIN col_a FROM sometable WHERE (1)"
)
def test_straight_join_idempotent(self):
assert rewrite_query(
"SELECT col_a, col_b FROM sometable "
+ "WHERE nothing() AND (/*QueryRewrite':STRAIGHT_JOIN*/1) "
+ "AND (/*QueryRewrite':STRAIGHT_JOIN*/1)"
) == (
"SELECT STRAIGHT_JOIN col_a, col_b FROM sometable "
+ "WHERE nothing() AND (1) AND (1)"
)
def test_straight_join_doesnt_affect_distinct(self):
assert (
rewrite_query(
"SELECT DISTINCT col_a FROM sometable WHERE "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
== "SELECT DISTINCT STRAIGHT_JOIN col_a FROM sometable WHERE (1)"
)
def test_straight_join_doesnt_affect_all_and_highpriority(self):
assert (
rewrite_query(
"SELECT ALL HIGH_PRIORITY col_a FROM sometable WHERE "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
== "SELECT ALL HIGH_PRIORITY STRAIGHT_JOIN col_a FROM sometable WHERE (1)"
)
def test_2_straight_joins_dont_affect_all_and_highpriority(self):
assert rewrite_query(
"SELECT ALL HIGH_PRIORITY col_a FROM sometable "
+ "WHERE (/*QueryRewrite':STRAIGHT_JOIN*/1) AND "
+ "(/*QueryRewrite':STRAIGHT_JOIN*/1)"
) == (
"SELECT ALL HIGH_PRIORITY STRAIGHT_JOIN col_a FROM sometable "
+ "WHERE (1) AND (1)"
)
def test_multiple_hints(self):
assert rewrite_query(
"SELECT col_a FROM sometable "
+ "WHERE (/*QueryRewrite':STRAIGHT_JOIN*/1) AND "
+ "(/*QueryRewrite':SQL_NO_CACHE*/1)"
) == (
"SELECT STRAIGHT_JOIN SQL_NO_CACHE col_a FROM sometable "
+ "WHERE (1) AND (1)"
)
def test_mutually_exclusive_latest_wins(self):
assert (
rewrite_query(
"SELECT col_a FROM sometable "
+ "WHERE (/*QueryRewrite':SQL_CACHE*/1) AND "
+ "(/*QueryRewrite':SQL_NO_CACHE*/1)"
)
== "SELECT SQL_NO_CACHE col_a FROM sometable WHERE (1) AND (1)"
)
def test_labelling(self):
assert (
rewrite_query(
"SELECT col_a FROM sometable WHERE (/*QueryRewrite':label=himum*/1)"
)
== "SELECT /*himum*/ col_a FROM sometable WHERE (1)"
)
def test_labelling_mysql_57_hint(self):
assert rewrite_query(
"SELECT col_a FROM t1 "
+ "WHERE (/*QueryRewrite':label=+ NO_RANGE_OPTIMIZATION(t1 "
+ "PRIMARY) */1)"
) == (
"SELECT /*+ NO_RANGE_OPTIMIZATION(t1 PRIMARY) */ col_a FROM "
+ "t1 WHERE (1)"
)
def test_not_case_sensitive(self):
assert (
rewrite_query(
"select col_a from sometable where (/*QueryRewrite':label=himum*/1)"
)
== "select /*himum*/ col_a from sometable where (1)"
)
def test_bad_query_not_rewritten(self):
assert (
rewrite_query(
"SELECTSTRAIGHT_JOIN col_a FROM sometable WHERE "
+ "(/*QueryRewrite':label=hi*/1)"
)
== "SELECTSTRAIGHT_JOIN col_a FROM sometable WHERE (1)"
)
def test_index_hint_use(self):
assert (
rewrite_query(
"SELECT col_a FROM `sometable` WHERE "
+ "(/*QueryRewrite':index=`sometable` USE `col_a_idx`*/1)"
)
== "SELECT col_a FROM `sometable` USE INDEX (`col_a_idx`) WHERE (1)"
)
def test_index_ignore(self):
assert rewrite_query(
"SELECT col_a FROM `sometable` WHERE "
+ "(/*QueryRewrite':index=`sometable` IGNORE `col_a_idx`*/1)"
) == ("SELECT col_a FROM `sometable` IGNORE INDEX (`col_a_idx`) " + "WHERE (1)")
def test_index_force(self):
assert rewrite_query(
"SELECT col_a FROM `sometable` WHERE "
+ "(/*QueryRewrite':index=`sometable` FORCE `col_a_idx`*/1)"
) == ("SELECT col_a FROM `sometable` FORCE INDEX (`col_a_idx`) " + "WHERE (1)")
def test_index_nonsense_does_nothing(self):
assert (
rewrite_query(
"SELECT col_a FROM `sometable` WHERE "
+ "(/*QueryRewrite':index=`sometable` MAHOGANY `col_a_idx`*/1)"
)
== "SELECT col_a FROM `sometable` WHERE (1)"
)
def test_index_hint_use_secondary(self):
assert rewrite_query(
"SELECT col_a, col_b FROM `sometable` INNER JOIN `othertable` "
+ "WHERE (/*QueryRewrite':index=`othertable` USE `myindex`*/1)"
) == (
"SELECT col_a, col_b FROM `sometable` INNER JOIN `othertable` "
+ "USE INDEX (`myindex`) WHERE (1)"
)
def test_index_hint_with_alias(self):
assert rewrite_query(
"SELECT col_a, col_b FROM `sometable` U1 "
+ "WHERE (/*QueryRewrite':index=`sometable` USE `col_a_idx`*/1)"
) == (
"SELECT col_a, col_b FROM `sometable` U1 "
+ "USE INDEX (`col_a_idx`) WHERE (1)"
)
def test_index_hint_multiple_indexes(self):
assert rewrite_query(
"SELECT col_a FROM `tabl` WHERE "
"(/*QueryRewrite':index=`tabl` IGNORE `idx1`,`idx2`*/1)"
) == ("SELECT col_a FROM `tabl` IGNORE INDEX (`idx1`,`idx2`) WHERE " + "(1)")
def test_index_hint_multiple_hints(self):
assert rewrite_query(
"SELECT col_a FROM `sometable` "
+ "WHERE (/*QueryRewrite':index=`sometable` IGNORE `idx1`*/1) "
+ "AND (/*QueryRewrite':index=`sometable` IGNORE `idx2`*/1)"
) == (
"SELECT col_a FROM `sometable` IGNORE INDEX (`idx2`) "
+ "IGNORE INDEX (`idx1`) WHERE (1) AND (1)"
)
def test_index_hint_for_join(self):
assert rewrite_query(
"SELECT `sometable`.col_a, `sometable2`.col_b "
+ "FROM `sometable` NATURAL JOIN `sometable2` "
+ "WHERE (/*QueryRewrite':index=`sometable` IGNORE FOR JOIN "
+ "`idx`*/1)"
) == (
"SELECT `sometable`.col_a, `sometable2`.col_b FROM "
+ "`sometable` IGNORE INDEX FOR JOIN (`idx`) NATURAL JOIN "
+ "`sometable2` WHERE (1)"
)
def test_index_hint_for_group_by(self):
assert rewrite_query(
"SELECT col_a, SUM(col_b) FROM `sometable` "
+ "WHERE (/*QueryRewrite':index=`sometable` FORCE FOR GROUP BY "
+ "`idx`*/1) GROUP BY col_a"
) == (
"SELECT col_a, SUM(col_b) FROM `sometable` FORCE INDEX FOR "
+ "GROUP BY (`idx`) WHERE (1) GROUP BY col_a"
)
def test_index_hint_for_order_by(self):
assert rewrite_query(
"SELECT col_a FROM `sometable` "
+ "WHERE (/*QueryRewrite':index=`sometable` USE FOR ORDER BY "
+ "`idx` */1) ORDER BY col_a"
) == (
"SELECT col_a FROM `sometable` USE INDEX FOR ORDER BY (`idx`) "
+ "WHERE (1) ORDER BY col_a"
)
def test_it_is_instrumented(self):
with CaptureLastQuery() as cap, connection.cursor() as cursor:
cursor.execute(
"SELECT 1 FROM DUAL WHERE (/*QueryRewrite':STRAIGHT_JOIN*/1)"
)
assert cap.query == "SELECT STRAIGHT_JOIN 1 FROM DUAL WHERE (1)"
@override_settings(DJANGO_MYSQL_REWRITE_QUERIES=False)
def test_instrumentation_can_be_disabled(self):
query = "SELECT 1 FROM DUAL WHERE (/*QueryRewrite':STRAIGHT_JOIN*/1)"
with CaptureLastQuery() as cap, connection.cursor() as cursor:
cursor.execute(query)
assert cap.query == query
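# Illustrative sketch of how these hints are normally injected from the ORM
# side (hypothetical model; assumes django_mysql's QuerySet extensions, which
# embed the QueryRewrite comment so rewrite_query can pick it up):
#
#     Author.objects.straight_join().filter(name__startswith="A")
#     # executes roughly: SELECT STRAIGHT_JOIN ... WHERE (1) AND (name LIKE 'A%')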
|
|
"""
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
try:
from cryptography.x509 import UnsupportedExtension
except ImportError:
# UnsupportedExtension is gone in cryptography >= 2.1.0
class UnsupportedExtension(Exception):
pass
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
from ..packages import six
import sys
from .. import util
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
# SNI always works.
HAS_SNI = True
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
_openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
_openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
_openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
log = logging.getLogger(__name__)
def inject_into_urllib3():
"Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
_validate_dependencies_met()
util.SSLContext = PyOpenSSLContext
util.ssl_.SSLContext = PyOpenSSLContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_PYOPENSSL = True
util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
"Undo monkey-patching by :func:`inject_into_urllib3`."
util.SSLContext = orig_util_SSLContext
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_PYOPENSSL = False
util.ssl_.IS_PYOPENSSL = False
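# A minimal activation sketch (assumes this module is importable as
# urllib3.contrib.pyopenssl, as described in the module docstring):
#
#     import urllib3.contrib.pyopenssl as pyopenssl
#     pyopenssl.inject_into_urllib3()    # route urllib3's TLS through PyOpenSSL
#     ...                                # make requests as usual
#     pyopenssl.extract_from_urllib3()   # restore the stdlib ssl implementation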
def _validate_dependencies_met():
"""
Verifies that PyOpenSSL's package-level dependencies have been met.
Throws `ImportError` if they are not met.
"""
# Method added in `cryptography==1.1`; not available in older versions
from cryptography.x509.extensions import Extensions
if getattr(Extensions, "get_extension_for_class", None) is None:
raise ImportError(
"'cryptography' module missing required functionality. "
"Try upgrading to v1.3.4 or newer."
)
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
# attribute is only present on those versions.
from OpenSSL.crypto import X509
x509 = X509()
if getattr(x509, "_x509", None) is None:
raise ImportError(
"'pyOpenSSL' module missing required functionality. "
"Try upgrading to v0.14 or newer."
)
def _dnsname_to_stdlib(name):
"""
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
If the name cannot be idna-encoded then we return None signalling that
the name given should be skipped.
"""
def idna_encode(name):
"""
Borrowed wholesale from the Python Cryptography Project. It turns out
that we can't just safely call `idna.encode`: it can explode for
wildcard names. This avoids that problem.
"""
from pipenv.patched.notpip._vendor import idna
try:
for prefix in [u"*.", u"."]:
if name.startswith(prefix):
name = name[len(prefix) :]
return prefix.encode("ascii") + idna.encode(name)
return idna.encode(name)
except idna.core.IDNAError:
return None
# Don't send IPv6 addresses through the IDNA encoder.
if ":" in name:
return name
name = idna_encode(name)
if name is None:
return None
elif sys.version_info >= (3, 0):
name = name.decode("utf-8")
return name
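# Examples of the conversion described above (illustrative; exact output
# depends on the installed idna package and the Python version):
#
#     _dnsname_to_stdlib(u"m\u00fcnchen.example")  # -> "xn--mnchen-3ya.example" (Python 3)
#     _dnsname_to_stdlib(u"*.example.org")         # wildcard prefix is preserved
#     _dnsname_to_stdlib(u"::1")                   # names containing ":" pass through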
def get_subj_alt_name(peer_cert):
"""
Given a PyOpenSSL certificate, provides all the subject alternative names.
"""
# Pass the cert to cryptography, which has much better APIs for this.
if hasattr(peer_cert, "to_cryptography"):
cert = peer_cert.to_cryptography()
else:
# This is technically using private APIs, but should work across all
# relevant versions before PyOpenSSL got a proper API for this.
cert = _Certificate(openssl_backend, peer_cert._x509)
# We want to find the SAN extension. Ask Cryptography to locate it (it's
# faster than looping in Python)
try:
ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
except x509.ExtensionNotFound:
# No such extension, return the empty list.
return []
except (
x509.DuplicateExtension,
UnsupportedExtension,
x509.UnsupportedGeneralNameType,
UnicodeError,
) as e:
# A problem has been found with the quality of the certificate. Assume
# no SAN field is present.
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
# We want to return dNSName and iPAddress fields. We need to cast the IPs
# back to strings because the match_hostname function wants them as
# strings.
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
# decoded. This is pretty frustrating, but that's what the standard library
# does with certificates, and so we need to attempt to do the same.
# We also want to skip over names which cannot be idna encoded.
names = [
("DNS", name)
for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
if name is not None
]
names.extend(
("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
)
return names
class WrappedSocket(object):
"""API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
"""
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
self._closed = False
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
return b""
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b""
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout("The read operation timed out")
else:
return self.recv(*args, **kwargs)
# TLS 1.3 post-handshake authentication
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("read error: %r" % e)
else:
return data
def recv_into(self, *args, **kwargs):
try:
return self.connection.recv_into(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
return 0
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return 0
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout("The read operation timed out")
else:
return self.recv_into(*args, **kwargs)
# TLS 1.3 post-handshake authentication
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("read error: %r" % e)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
if not util.wait_for_write(self.socket, self.socket.gettimeout()):
raise timeout()
continue
except OpenSSL.SSL.SysCallError as e:
raise SocketError(str(e))
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self._send_until_done(
data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
)
total_sent += sent
def shutdown(self):
# FIXME rethrow compatible exceptions should we ever use this
self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
try:
self._closed = True
return self.connection.close()
except OpenSSL.SSL.Error:
return
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
return {
"subject": ((("commonName", x509.get_subject().CN),),),
"subjectAltName": get_subj_alt_name(x509),
}
def version(self):
return self.connection.get_protocol_version_name()
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
makefile = backport_makefile
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
"""
I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
for translating the interface of the standard library ``SSLContext`` object
to calls into PyOpenSSL.
"""
def __init__(self, protocol):
self.protocol = _openssl_versions[protocol]
self._ctx = OpenSSL.SSL.Context(self.protocol)
self._options = 0
self.check_hostname = False
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._ctx.set_options(value)
@property
def verify_mode(self):
return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
@verify_mode.setter
def verify_mode(self, value):
self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
def set_default_verify_paths(self):
self._ctx.set_default_verify_paths()
def set_ciphers(self, ciphers):
if isinstance(ciphers, six.text_type):
ciphers = ciphers.encode("utf-8")
self._ctx.set_cipher_list(ciphers)
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
if cafile is not None:
cafile = cafile.encode("utf-8")
if capath is not None:
capath = capath.encode("utf-8")
self._ctx.load_verify_locations(cafile, capath)
if cadata is not None:
self._ctx.load_verify_locations(BytesIO(cadata))
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._ctx.use_certificate_chain_file(certfile)
if password is not None:
if not isinstance(password, six.binary_type):
password = password.encode("utf-8")
self._ctx.set_passwd_cb(lambda *_: password)
self._ctx.use_privatekey_file(keyfile or certfile)
def wrap_socket(
self,
sock,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None,
):
cnx = OpenSSL.SSL.Connection(self._ctx, sock)
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
server_hostname = server_hostname.encode("utf-8")
if server_hostname is not None:
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(sock, sock.gettimeout()):
raise timeout("select timed out")
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("bad handshake: %r" % e)
break
return WrappedSocket(cnx, sock)
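# A minimal usage sketch for PyOpenSSLContext (hypothetical host; mirrors how
# urllib3 itself drives this wrapper):
#
#     import socket
#     import ssl
#     ctx = PyOpenSSLContext(util.PROTOCOL_TLS)
#     ctx.set_default_verify_paths()
#     ctx.verify_mode = ssl.CERT_REQUIRED
#     sock = socket.create_connection(("example.org", 443))
#     wrapped = ctx.wrap_socket(sock, server_hostname="example.org")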
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# gen_oxml_classes.py
#
"""
Generate class definitions for Open XML elements based on a set of
declarative properties.
"""
import sys
from string import Template
class ElementDef(object):
"""
Schema-related definition of an Open XML element
"""
instances = []
def __init__(self, tag, classname):
self.instances.append(self)
self.tag = tag
tagparts = tag.split(':')
self.nsprefix = tagparts[0]
self.tagname = tagparts[1]
self.classname = classname
self.children = []
self.attributes = []
def __getitem__(self, key):
return self.__getattribute__(key)
def add_child(self, tag, cardinality='?'):
self.children.append(ChildDef(self, tag, cardinality))
def add_attribute(self, name, required=False, default=None):
self.attributes.append(AttributeDef(self, name, required, default))
def add_attributes(self, *names):
for name in names:
self.attributes.append(AttributeDef(self, name, False, None))
@property
def indent(self):
indent_len = 12 - len(self.tagname)
if indent_len < 0:
indent_len = 0
return ' ' * indent_len
@property
def max_attr_name_len(self):
return max([len(attr.name) for attr in self.attributes])
@property
def max_child_tagname_len(self):
return max([len(child.tagname) for child in self.children])
@property
def optional_children(self):
return [child for child in self.children if not child.is_required]
@property
def required_attributes(self):
return [attr for attr in self.attributes if attr.is_required]
@property
def required_children(self):
return [child for child in self.children if child.is_required]
class AttributeDef(object):
"""
Attribute definition
"""
def __init__(self, element, name, required, default):
self.element = element
self.name = name
self.required = required
self.default = default
self.varname = name.replace(':', '_')
def __getitem__(self, key):
return self.__getattribute__(key)
@property
def padding(self):
return ' ' * (self.element.max_attr_name_len - len(self.name))
@property
def indent(self):
return ' ' * self.element.max_attr_name_len
@property
def is_required(self):
return self.required
class ChildDef(object):
"""
Child element definition
"""
def __init__(self, element, tag, cardinality):
self.element = element
self.tag = tag
self.cardinality = cardinality
if ':' not in tag:
tmpl = "missing namespace prefix in tag: '%s'"
raise ValueError(tmpl % tag)
tagparts = tag.split(':')
self.nsprefix = tagparts[0]
self.tagname = tagparts[1]
def __getitem__(self, key):
return self.__getattribute__(key)
@property
def indent(self):
indent_len = self.element.max_child_tagname_len - len(self.tagname)
return ' ' * indent_len
@property
def is_required(self):
return self.cardinality in '1+'
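# Illustrative sketch of the declarative API defined above (mirrors the
# commented-out binding specs further down; 'a:off' is one of those specs):
#
#     off = ElementDef('a:off', 'CT_Point2D')
#     off.add_attribute('x', required=True, default='0')
#     off.add_attribute('y', required=True, default='0')
#     # class_template(off), defined below, renders the CT_Point2D class source.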
# ============================================================================
# Code templates
# ============================================================================
class tmpl(object):
attr_accessor = Template(" $varname$padding = property(lambda self: se"
"lf.get('$name'),\n$indent lambda self, value: self.se"
"t('$name', value))\n")
class_def_head = Template('''class $classname(ElementBase):\n """<$n'''
'''sprefix:$tagname> custom element class"""\n''')
class_mapping = Template(" , '$tagname'$indent : $classname\n\n")
ns_attr_accessor = Template(
" $varname$padding = property(lambda self: self.get(_qtag('$name')"
"),\n$indent lambda self, value: self.set(_qtag('$name"
"'), value))\n")
ns_reqd_attribute_constructor = Template(" _required_attribute(sel"
"f, _qtag('$name'), default='$default')\n")
optional_child_accessor = Template(" $tagname$indent = property(lambda"
" self: _get_child_or_append(self, '$tag'))\n")
reqd_attr_constructor = Template(" _required_attribute(self, '$nam"
"e', default='$default')\n")
reqd_child_accessor = Template(" $tagname$indent = property(lambda sel"
"f: _child(self, '$tag'))\n")
reqd_child_constructor = Template(" _required_child(self, '$tag')"
"\n")
# ============================================================================
# binding specs
# ============================================================================
# sldMaster = ElementDef('p:sldMaster', 'CT_SlideMaster')
# sldMaster.add_child('p:cSld' , cardinality='1')
# sldMaster.add_child('p:clrMap' , cardinality='1')
# sldMaster.add_child('p:sldLayoutIdLst', cardinality='?')
# sldMaster.add_child('p:transition' , cardinality='?')
# sldMaster.add_child('p:timing' , cardinality='?')
# sldMaster.add_child('p:hf' , cardinality='?')
# sldMaster.add_child('p:txStyles' , cardinality='?')
# sldMaster.add_child('p:extLst' , cardinality='?')
# sldMaster.add_attributes('preserve')
def class_template(element):
out = ''
out += tmpl.class_mapping.substitute(element)
out += tmpl.class_def_head.substitute(element)
if element.required_children or element.required_attributes:
out += ' def _init(self):\n'
for child in element.required_children:
out += tmpl.reqd_child_constructor.substitute(child)
for attribute in element.required_attributes:
if ':' in attribute.name:
out += tmpl.ns_reqd_attribute_constructor.substitute(attribute)
else:
out += tmpl.reqd_attr_constructor.substitute(attribute)
out += '\n'
if element.children:
out += ' # child accessors -----------------\n'
for child in element.required_children:
out += tmpl.reqd_child_accessor.substitute(child)
for child in element.optional_children:
out += tmpl.optional_child_accessor.substitute(child)
out += '\n'
if element.attributes:
out += ' # attribute accessors -------------\n'
for attribute in element.attributes:
if ':' in attribute.name:
out += tmpl.ns_attr_accessor.substitute(attribute)
else:
out += tmpl.attr_accessor.substitute(attribute)
out += '\n'
out += '\n'
return out
elements = ElementDef.instances
out = '\n'
for element in elements:
out += class_template(element)
print out
sys.exit()
# ============================================================================
# Element definitions
# ============================================================================
# blip = ElementDef('a:blip', 'CT_Blip')
# blip.add_child('a:alphaBiLevel' , cardinality='?')
# blip.add_child('a:alphaCeiling' , cardinality='?')
# blip.add_child('a:alphaFloor' , cardinality='?')
# blip.add_child('a:alphaInv' , cardinality='?')
# blip.add_child('a:alphaMod' , cardinality='?')
# blip.add_child('a:alphaModFix' , cardinality='?')
# blip.add_child('a:alphaRepl' , cardinality='?')
# blip.add_child('a:biLevel' , cardinality='?')
# blip.add_child('a:blur' , cardinality='?')
# blip.add_child('a:clrChange' , cardinality='?')
# blip.add_child('a:clrRepl' , cardinality='?')
# blip.add_child('a:duotone' , cardinality='?')
# blip.add_child('a:fillOverlay' , cardinality='?')
# blip.add_child('a:grayscl' , cardinality='?')
# blip.add_child('a:hsl' , cardinality='?')
# blip.add_child('a:lum' , cardinality='?')
# blip.add_child('a:tint' , cardinality='?')
# blip.add_child('a:extLst' , cardinality='?')
# blip.add_attributes('r_embed', 'r_link', 'cstate')
# blipFill = ElementDef('p:blipFill', 'CT_BlipFillProperties')
# blipFill.add_child('a:blip' , cardinality='?')
# blipFill.add_child('a:srcRect' , cardinality='?')
# blipFill.add_child('a:tile' , cardinality='?')
# blipFill.add_child('a:stretch' , cardinality='?')
# blipFill.add_attributes('dpi', 'rotWithShape')
# bodyPr = ElementDef('a:bodyPr', 'CT_TextBodyProperties')
# bodyPr.add_child('a:spAutoFit', cardinality='?')
# bodyPr.add_attribute('wrap')
# bodyPr.add_attribute('rtlCol')
# bodyPr.add_attribute('anchor')
# bodyPr.add_attribute('anchorCtr')
# cNvPr = ElementDef('p:cNvPr', 'CT_NonVisualDrawingProps')
# cNvPr.add_child('a:hlinkClick' , cardinality='?')
# cNvPr.add_child('a:hlinkHover' , cardinality='?')
# cNvPr.add_child('a:extLst' , cardinality='?')
# cNvPr.add_attribute('id', required=True, default='0')
# cNvPr.add_attribute('name', required=True, default='Unnamed')
# cNvPr.add_attributes('descr', 'hidden', 'title')
# cNvSpPr = ElementDef('p:cNvSpPr', 'CT_NonVisualDrawingShapeProps')
# cNvSpPr.add_child('a:spLocks', cardinality='?')
# cNvSpPr.add_child('a:extLst' , cardinality='?')
# cNvSpPr.add_attributes('txBox')
# cSld = ElementDef('p:cSld', 'CT_CommonSlideData')
# cSld.add_child('p:bg' , cardinality='?')
# cSld.add_child('p:spTree' , cardinality='1')
# cSld.add_child('p:custDataLst', cardinality='?')
# cSld.add_child('p:controls' , cardinality='?')
# cSld.add_child('p:extLst' , cardinality='?')
# cSld.add_attributes('name')
# defRPr = ElementDef('a:defRPr', 'CT_TextCharacterProperties')
# defRPr.add_child('a:ln' , cardinality='?')
# defRPr.add_child('a:noFill' , cardinality='?')
# defRPr.add_child('a:solidFill' , cardinality='?')
# defRPr.add_child('a:gradFill' , cardinality='?')
# defRPr.add_child('a:blipFill' , cardinality='?')
# defRPr.add_child('a:pattFill' , cardinality='?')
# defRPr.add_child('a:grpFill' , cardinality='?')
# defRPr.add_child('a:effectLst' , cardinality='?')
# defRPr.add_child('a:effectDag' , cardinality='?')
# defRPr.add_child('a:highlight' , cardinality='?')
# defRPr.add_child('a:uLnTx' , cardinality='?')
# defRPr.add_child('a:uLn' , cardinality='?')
# defRPr.add_child('a:uFillTx' , cardinality='?')
# defRPr.add_child('a:uFill' , cardinality='?')
# defRPr.add_child('a:latin' , cardinality='?')
# defRPr.add_child('a:ea' , cardinality='?')
# defRPr.add_child('a:cs' , cardinality='?')
# defRPr.add_child('a:sym' , cardinality='?')
# defRPr.add_child('a:hlinkClick' , cardinality='?')
# defRPr.add_child('a:hlinkMouseOver', cardinality='?')
# defRPr.add_child('a:rtl' , cardinality='?')
# defRPr.add_child('a:extLst' , cardinality='?')
# defRPr.add_attributes('kumimoji', 'lang', 'altLang', 'sz', 'b', 'i', 'u',
# 'strike', 'kern', 'cap', 'spc', 'normalizeH', 'baseline', 'noProof',
# 'dirty', 'err', 'smtClean', 'smtId', 'bmk')
# nvGrpSpPr = ElementDef('p:nvGrpSpPr', 'CT_GroupShapeNonVisual')
# nvGrpSpPr.add_child('p:cNvPr' , cardinality='1')
# nvGrpSpPr.add_child('p:cNvGrpSpPr', cardinality='1')
# nvGrpSpPr.add_child('p:nvPr' , cardinality='1')
# nvPicPr = ElementDef('p:nvPicPr', 'CT_PictureNonVisual')
# nvPicPr.add_child('p:cNvPr' , cardinality='1')
# nvPicPr.add_child('p:cNvPicPr' , cardinality='1')
# nvPicPr.add_child('p:nvPr' , cardinality='1')
# nvPr = ElementDef('p:nvPr', 'CT_ApplicationNonVisualDrawingProps')
# nvPr.add_child('p:ph' , cardinality='?')
# nvPr.add_child('p:audioCd' , cardinality='?')
# nvPr.add_child('p:wavAudioFile' , cardinality='?')
# nvPr.add_child('p:audioFile' , cardinality='?')
# nvPr.add_child('p:videoFile' , cardinality='?')
# nvPr.add_child('p:quickTimeFile', cardinality='?')
# nvPr.add_child('p:custDataLst' , cardinality='?')
# nvPr.add_child('p:extLst' , cardinality='?')
# nvPr.add_attributes('isPhoto', 'userDrawn')
# nvSpPr = ElementDef('p:nvSpPr', 'CT_ShapeNonVisual')
# nvSpPr.add_child('p:cNvPr' , cardinality='1')
# nvSpPr.add_child('p:cNvSpPr', cardinality='1')
# nvSpPr.add_child('p:nvPr' , cardinality='1')
# off = ElementDef('a:off', 'CT_Point2D')
# off.add_attribute('x', required=True, default='0')
# off.add_attribute('y', required=True, default='0')
# p = ElementDef('a:p', 'CT_TextParagraph')
# p.add_child('a:pPr' , cardinality='?')
# p.add_child('a:r' , cardinality='*')
# p.add_child('a:br' , cardinality='*')
# p.add_child('a:fld' , cardinality='*')
# p.add_child('a:endParaRPr', cardinality='?')
# ph = ElementDef('p:ph', 'CT_Placeholder')
# ph.add_child('p:extLst', cardinality='?')
# ph.add_attributes('type', 'orient', 'sz', 'idx', 'hasCustomPrompt')
# pic = ElementDef('p:pic', 'CT_Picture')
# pic.add_child('p:nvPicPr' , cardinality='1')
# pic.add_child('p:blipFill' , cardinality='1')
# pic.add_child('p:spPr' , cardinality='1')
# pic.add_child('p:style' , cardinality='?')
# pic.add_child('p:extLst' , cardinality='?')
# pPr = ElementDef('a:pPr', 'CT_TextParagraphProperties')
# pPr.add_child('a:lnSpc' , cardinality='?')
# pPr.add_child('a:spcBef' , cardinality='?')
# pPr.add_child('a:spcAft' , cardinality='?')
# pPr.add_child('a:buClrTx' , cardinality='?')
# pPr.add_child('a:buClr' , cardinality='?')
# pPr.add_child('a:buSzTx' , cardinality='?')
# pPr.add_child('a:buSzPct' , cardinality='?')
# pPr.add_child('a:buSzPts' , cardinality='?')
# pPr.add_child('a:buFontTx' , cardinality='?')
# pPr.add_child('a:buFont' , cardinality='?')
# pPr.add_child('a:buNone' , cardinality='?')
# pPr.add_child('a:buAutoNum', cardinality='?')
# pPr.add_child('a:buChar' , cardinality='?')
# pPr.add_child('a:buBlip' , cardinality='?')
# pPr.add_child('a:tabLst' , cardinality='?')
# pPr.add_child('a:defRPr' , cardinality='?')
# pPr.add_child('a:extLst' , cardinality='?')
# pPr.add_attributes('marL', 'marR', 'lvl', 'indent', 'algn', 'defTabSz',
# 'rtl', 'eaLnBrk', 'fontAlgn', 'latinLnBrk', 'hangingPunct')
# presentation = ElementDef('p:presentation', 'CT_Presentation')
# presentation.add_child('p:sldMasterIdLst' , cardinality='?')
# presentation.add_child('p:notesMasterIdLst' , cardinality='?')
# presentation.add_child('p:handoutMasterIdLst', cardinality='?')
# presentation.add_child('p:sldIdLst' , cardinality='?')
# presentation.add_child('p:sldSz' , cardinality='?')
# presentation.add_child('p:notesSz' , cardinality='1')
# presentation.add_child('p:smartTags' , cardinality='?')
# presentation.add_child('p:embeddedFontLst' , cardinality='?')
# presentation.add_child('p:custShowLst' , cardinality='?')
# presentation.add_child('p:photoAlbum' , cardinality='?')
# presentation.add_child('p:custDataLst' , cardinality='?')
# presentation.add_child('p:kinsoku' , cardinality='?')
# presentation.add_child('p:defaultTextStyle' , cardinality='?')
# presentation.add_child('p:modifyVerifier' , cardinality='?')
# presentation.add_child('p:extLst' , cardinality='?')
# presentation.add_attributes('serverZoom', 'firstSlideNum',
# 'showSpecialPlsOnTitleSld', 'rtl', 'removePersonalInfoOnSave',
# 'compatMode', 'strictFirstAndLastChars', 'embedTrueTypeFonts',
# 'saveSubsetFonts', 'autoCompressPictures', 'bookmarkIdSeed',
# 'conformance')
# rPr = ElementDef('a:rPr', 'CT_TextCharacterProperties')
# rPr.add_child('a:ln', cardinality='?')
# rPr.add_attribute('sz')
# rPr.add_attribute('b')
# rPr.add_attribute('i')
# rPr.add_attribute('u')
# rPr.add_attribute('strike')
# rPr.add_attribute('kern')
# rPr.add_attribute('cap')
# rPr.add_attribute('spc')
# rPr.add_attribute('baseline')
# sld = ElementDef('p:sld', 'CT_Slide')
# sld.add_child('p:cSld' , cardinality='1')
# sld.add_child('p:clrMapOvr' , cardinality='?')
# sld.add_child('p:transition' , cardinality='?')
# sld.add_child('p:timing' , cardinality='?')
# sld.add_child('p:extLst' , cardinality='?')
# sld.add_attributes('showMasterSp', 'showMasterPhAnim', 'show')
# sldId = ElementDef('p:sldId', 'CT_SlideIdListEntry')
# sldId.add_child('p:extLst', cardinality='?')
# sldId.add_attribute('id', required=True, default='')
# sldId.add_attribute('r:id', required=True, default='')
# sldIdLst = ElementDef('p:sldIdLst', 'CT_SlideIdList')
# sldIdLst.add_child('p:sldId', cardinality='*')
# sldLayout = ElementDef('p:sldLayout', 'CT_SlideLayout')
# sldLayout.add_child('p:cSld' , cardinality='1')
# sldLayout.add_child('p:clrMapOvr' , cardinality='?')
# sldLayout.add_child('p:transition', cardinality='?')
# sldLayout.add_child('p:timing' , cardinality='?')
# sldLayout.add_child('p:hf' , cardinality='?')
# sldLayout.add_child('p:extLst' , cardinality='?')
# sldLayout.add_attributes('showMasterSp', 'showMasterPhAnim', 'matchingName', 'type', 'preserve', 'userDrawn')
# spLocks = ElementDef('a:spLocks', 'CT_ShapeLocking')
# spLocks.add_child('a:extLst', cardinality='?')
# spLocks.add_attributes('noGrp', 'noSelect', 'noRot', 'noChangeAspect',
# 'noMove', 'noResize', 'noEditPoints', 'noAdjustHandles',
# 'noChangeArrowheads', 'noChangeShapeType', 'noTextEdit')
# spPr = ElementDef('p:spPr', 'CT_ShapeProperties')
# spPr.add_child('a:xfrm' , cardinality='?')
# spPr.add_child('a:custGeom' , cardinality='?')
# spPr.add_child('a:prstGeom' , cardinality='?')
# spPr.add_child('a:noFill' , cardinality='?')
# spPr.add_child('a:solidFill' , cardinality='?')
# spPr.add_child('a:gradFill' , cardinality='?')
# spPr.add_child('a:blipFill' , cardinality='?')
# spPr.add_child('a:pattFill' , cardinality='?')
# spPr.add_child('a:grpFill' , cardinality='?')
# spPr.add_child('a:ln' , cardinality='?')
# spPr.add_child('a:effectLst' , cardinality='?')
# spPr.add_child('a:effectDag' , cardinality='?')
# spPr.add_child('a:scene3d' , cardinality='?')
# spPr.add_child('a:sp3d' , cardinality='?')
# spPr.add_child('a:extLst' , cardinality='?')
# spPr.add_attribute('bwMode')
# spTree = ElementDef('p:spTree', 'CT_GroupShape')
# spTree.add_child('p:nvGrpSpPr' , cardinality='1')
# spTree.add_child('p:grpSpPr' , cardinality='1')
# spTree.add_child('p:sp' , cardinality='?')
# spTree.add_child('p:grpSp' , cardinality='?')
# spTree.add_child('p:graphicFrame', cardinality='?')
# spTree.add_child('p:cxnSp' , cardinality='?')
# spTree.add_child('p:pic' , cardinality='?')
# spTree.add_child('p:contentPart' , cardinality='?')
# spTree.add_child('p:extLst' , cardinality='?')
# stretch = ElementDef('a:stretch', 'CT_StretchInfoProperties')
# stretch.add_child('a:fillRect' , cardinality='?')
# txBody = ElementDef('p:txBody', 'CT_TextBody')
# txBody.add_child('a:bodyPr' , cardinality='1')
# txBody.add_child('a:lstStyle' , cardinality='?')
# txBody.add_child('a:p' , cardinality='+')
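# The commented-out definitions above exercise a small schema-description DSL
# (ElementDef with add_child / add_attribute / add_attributes). The real
# ElementDef class is not included in this file; the following is a minimal,
# hypothetical sketch of what such a class could look like, kept purely for
# illustration. Names and behaviour are assumptions, not the actual API.
class _ElementDefSketch(object):
    """Illustrative only: records the child elements and attributes of an XML type."""

    def __init__(self, tag, schema_type):
        self.tag = tag
        self.schema_type = schema_type
        self.children = []    # list of (child tag, cardinality) tuples
        self.attributes = []  # list of (name, required, default) tuples

    def add_child(self, tag, cardinality='?'):
        self.children.append((tag, cardinality))

    def add_attribute(self, name, required=False, default=None):
        self.attributes.append((name, required, default))

    def add_attributes(self, *names):
        for name in names:
            self.add_attribute(name)

# Assumed usage, mirroring the commented-out definitions above:
#   sld = _ElementDefSketch('p:sld', 'CT_Slide')
#   sld.add_child('p:cSld', cardinality='1')
#   sld.add_attributes('showMasterSp', 'showMasterPhAnim', 'show')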
|
|
import logging
from urllib import quote
from urlparse import urlparse
from pylons import config
import ckan.lib.base as base
import ckan.model as model
import ckan.lib.helpers as h
import ckan.new_authz as new_authz
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.lib.captcha as captcha
import ckan.lib.mailer as mailer
import ckan.lib.navl.dictization_functions as dictization_functions
import ckan.plugins as p
from ckan.common import _, c, g, request
log = logging.getLogger(__name__)
abort = base.abort
render = base.render
validate = base.validate
check_access = logic.check_access
get_action = logic.get_action
NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
DataError = dictization_functions.DataError
unflatten = dictization_functions.unflatten
class UserController(base.BaseController):
def __before__(self, action, **env):
base.BaseController.__before__(self, action, **env)
try:
context = {'model': model, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
check_access('site_read', context)
except NotAuthorized:
if c.action not in ('login', 'request_reset', 'perform_reset',):
abort(401, _('Not authorized to see this page'))
## hooks for subclasses
new_user_form = 'user/new_user_form.html'
edit_user_form = 'user/edit_user_form.html'
def _new_form_to_db_schema(self):
return schema.user_new_form_schema()
def _db_to_new_form_schema(self):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
def _edit_form_to_db_schema(self):
return schema.user_edit_form_schema()
def _db_to_edit_form_schema(self):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
def _setup_template_variables(self, context, data_dict):
c.is_sysadmin = new_authz.is_sysadmin(c.user)
try:
user_dict = get_action('user_show')(context, data_dict)
except NotFound:
abort(404, _('User not found'))
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
c.user_dict = user_dict
c.is_myself = user_dict['name'] == c.user
c.about_formatted = h.render_markdown(user_dict['about'])
## end hooks
def _get_repoze_handler(self, handler_name):
'''Returns the URL that repoze.who will respond to and perform a
login or logout.'''
return getattr(request.environ['repoze.who.plugins']['friendlyform'],
handler_name)
def index(self):
LIMIT = 20
page = int(request.params.get('page', 1))
c.q = request.params.get('q', '')
c.order_by = request.params.get('order_by', 'name')
context = {'return_query': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'q': c.q,
'order_by': c.order_by}
try:
check_access('user_list', context, data_dict)
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
users_list = get_action('user_list')(context, data_dict)
c.page = h.Page(
collection=users_list,
page=page,
url=h.pager_url,
item_count=users_list.count(),
items_per_page=LIMIT
)
return render('user/list.html')
def read(self, id=None):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'for_view': True}
data_dict = {'id': id,
'user_obj': c.userobj}
context['with_related'] = True
self._setup_template_variables(context, data_dict)
# The legacy templates have the user's activity stream on the user
# profile page, new templates do not.
if h.asbool(config.get('ckan.legacy_templates', False)):
c.user_activity_stream = get_action('user_activity_list_html')(
context, {'id': c.user_dict['id']})
return render('user/read.html')
def me(self, locale=None):
if not c.user:
h.redirect_to(locale=locale, controller='user', action='login',
id=None)
user_ref = c.userobj.get_reference_preferred_for_uri()
h.redirect_to(locale=locale, controller='user', action='dashboard',
id=user_ref)
def register(self, data=None, errors=None, error_summary=None):
context = {'model': model, 'session': model.Session, 'user': c.user,
'auth_user_obj': c.userobj}
try:
check_access('user_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to register as a user.'))
return self.new(data, errors, error_summary)
def new(self, data=None, errors=None, error_summary=None):
        '''GET to display a form for registering a new user,
        or POST the form data to actually do the user registration.
'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj,
'schema': self._new_form_to_db_schema(),
'save': 'save' in request.params}
try:
check_access('user_create', context)
except NotAuthorized:
abort(401, _('Unauthorized to create a user'))
if context['save'] and not data:
return self._save_new(context)
if c.user and not data:
# #1799 Don't offer the registration form if already logged in
return render('user/logout_first.html')
data = data or {}
errors = errors or {}
error_summary = error_summary or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
c.is_sysadmin = new_authz.is_sysadmin(c.user)
c.form = render(self.new_user_form, extra_vars=vars)
return render('user/new.html')
def delete(self, id):
'''Delete user with id passed as parameter'''
context = {'model': model,
'session': model.Session,
'user': c.user,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('user_delete')(context, data_dict)
user_index = h.url_for(controller='user', action='index')
h.redirect_to(user_index)
except NotAuthorized:
msg = _('Unauthorized to delete user with id "{user_id}".')
abort(401, msg.format(user_id=id))
def _save_new(self, context):
try:
data_dict = logic.clean_dict(unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
captcha.check_recaptcha(request)
user = get_action('user_create')(context, data_dict)
except NotAuthorized:
abort(401, _('Unauthorized to create user %s') % '')
except NotFound, e:
abort(404, _('User not found'))
except DataError:
abort(400, _(u'Integrity Error'))
except captcha.CaptchaError:
error_msg = _(u'Bad Captcha. Please try again.')
h.flash_error(error_msg)
return self.new(data_dict)
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.new(data_dict, errors, error_summary)
if not c.user:
# Redirect to a URL picked up by repoze.who which performs the
# login
login_url = self._get_repoze_handler('login_handler_path')
# We need to pass the logged in URL as came_from parameter
# otherwise we lose the language setting
came_from = h.url_for(controller='user', action='logged_in',
__ckan_no_root=True)
redirect_url = '{0}?login={1}&password={2}&came_from={3}'
h.redirect_to(redirect_url.format(
login_url,
str(data_dict['name']),
quote(data_dict['password1'].encode('utf-8')),
came_from))
else:
# #1799 User has managed to register whilst logged in - warn user
# they are not re-logged in as new user.
h.flash_success(_('User "%s" is now registered but you are still '
'logged in as "%s" from before') %
(data_dict['name'], c.user))
return render('user/logout_first.html')
def edit(self, id=None, data=None, errors=None, error_summary=None):
context = {'save': 'save' in request.params,
'schema': self._edit_form_to_db_schema(),
'model': model, 'session': model.Session,
'user': c.user, 'auth_user_obj': c.userobj
}
if id is None:
if c.userobj:
id = c.userobj.id
else:
abort(400, _('No user specified'))
data_dict = {'id': id}
try:
check_access('user_update', context, data_dict)
except NotAuthorized:
abort(401, _('Unauthorized to edit a user.'))
if (context['save']) and not data:
return self._save_edit(id, context)
try:
old_data = get_action('user_show')(context, data_dict)
schema = self._db_to_edit_form_schema()
if schema:
old_data, errors = validate(old_data, schema)
c.display_name = old_data.get('display_name')
c.user_name = old_data.get('name')
data = data or old_data
except NotAuthorized:
abort(401, _('Unauthorized to edit user %s') % '')
except NotFound:
abort(404, _('User not found'))
user_obj = context.get('user_obj')
if not (new_authz.is_sysadmin(c.user)
or c.user == user_obj.name):
abort(401, _('User %s not authorized to edit %s') %
(str(c.user), id))
errors = errors or {}
vars = {'data': data, 'errors': errors, 'error_summary': error_summary}
self._setup_template_variables({'model': model,
'session': model.Session,
'user': c.user or c.author},
data_dict)
c.is_myself = True
c.show_email_notifications = h.asbool(
config.get('ckan.activity_streams_email_notifications'))
c.form = render(self.edit_user_form, extra_vars=vars)
return render('user/edit.html')
def _save_edit(self, id, context):
try:
data_dict = logic.clean_dict(unflatten(
logic.tuplize_dict(logic.parse_params(request.params))))
context['message'] = data_dict.get('log_message', '')
data_dict['id'] = id
# MOAN: Do I really have to do this here?
if 'activity_streams_email_notifications' not in data_dict:
data_dict['activity_streams_email_notifications'] = False
user = get_action('user_update')(context, data_dict)
h.flash_success(_('Profile updated'))
h.redirect_to(controller='user', action='read', id=user['name'])
except NotAuthorized:
abort(401, _('Unauthorized to edit user %s') % id)
except NotFound, e:
abort(404, _('User not found'))
except DataError:
abort(400, _(u'Integrity Error'))
except ValidationError, e:
errors = e.error_dict
error_summary = e.error_summary
return self.edit(id, data_dict, errors, error_summary)
def login(self, error=None):
# Do any plugin login stuff
for item in p.PluginImplementations(p.IAuthenticator):
item.login()
if 'error' in request.params:
h.flash_error(request.params['error'])
if request.environ['SCRIPT_NAME'] and g.openid_enabled:
# #1662 restriction
log.warn('Cannot mount CKAN at a URL and login with OpenID.')
g.openid_enabled = False
if not c.user:
came_from = request.params.get('came_from')
if not came_from:
came_from = h.url_for(controller='user', action='logged_in',
__ckan_no_root=True)
c.login_handler = h.url_for(
self._get_repoze_handler('login_handler_path'),
came_from=came_from)
if error:
vars = {'error_summary': {'': error}}
else:
vars = {}
#return render('user/login.html', extra_vars=vars)
# LAit customization
return render('_login.html')
else:
# LAit customization
return render('_login.html')
def logged_in(self):
# redirect if needed
came_from = request.params.get('came_from', '')
if self._sane_came_from(came_from):
return h.redirect_to(str(came_from))
if c.user:
context = None
data_dict = {'id': c.user}
user_dict = get_action('user_show')(context, data_dict)
h.flash_success(_("%s is now logged in") %
user_dict['display_name'])
# LAit customization
return render('_home.html')
else:
err = _('Login failed. Bad username or password.')
if g.openid_enabled:
err += _(' (Or if using OpenID, it hasn\'t been associated '
'with a user account.)')
if h.asbool(config.get('ckan.legacy_templates', 'false')):
h.flash_error(err)
h.redirect_to(controller='user',
action='login', came_from=came_from)
else:
return self.login(error=err)
def logout(self):
# Do any plugin logout stuff
for item in p.PluginImplementations(p.IAuthenticator):
item.logout()
url = h.url_for(controller='user', action='logged_out_page',
__ckan_no_root=True)
h.redirect_to(self._get_repoze_handler('logout_handler_path') +
'?came_from=' + url)
def logged_out(self):
# redirect if needed
came_from = request.params.get('came_from', '')
if self._sane_came_from(came_from):
return h.redirect_to(str(came_from))
h.redirect_to(controller='user', action='logged_out_page')
def logged_out_page(self):
# LAit customization
return render('_home.html')
def request_reset(self):
context = {'model': model, 'session': model.Session, 'user': c.user,
'auth_user_obj': c.userobj}
data_dict = {'id': request.params.get('user')}
try:
check_access('request_reset', context)
except NotAuthorized:
abort(401, _('Unauthorized to request reset password.'))
if request.method == 'POST':
id = request.params.get('user')
context = {'model': model,
'user': c.user}
data_dict = {'id': id}
user_obj = None
try:
user_dict = get_action('user_show')(context, data_dict)
user_obj = context['user_obj']
except NotFound:
# Try searching the user
del data_dict['id']
data_dict['q'] = id
if id and len(id) > 2:
user_list = get_action('user_list')(context, data_dict)
if len(user_list) == 1:
# This is ugly, but we need the user object for the
# mailer,
# and user_list does not return them
del data_dict['q']
data_dict['id'] = user_list[0]['id']
user_dict = get_action('user_show')(context, data_dict)
user_obj = context['user_obj']
elif len(user_list) > 1:
h.flash_error(_('"%s" matched several users') % (id))
else:
h.flash_error(_('No such user: %s') % id)
else:
h.flash_error(_('No such user: %s') % id)
if user_obj:
try:
mailer.send_reset_link(user_obj)
h.flash_success(_('Please check your inbox for '
'a reset code.'))
h.redirect_to('/')
except mailer.MailerException, e:
h.flash_error(_('Could not send reset link: %s') %
unicode(e))
return render('user/request_reset.html')
def perform_reset(self, id):
# FIXME 403 error for invalid key is a non helpful page
# FIXME We should reset the reset key when it is used to prevent
# reuse of the url
context = {'model': model, 'session': model.Session,
'user': id,
'keep_email': True}
try:
check_access('user_reset', context)
except NotAuthorized:
abort(401, _('Unauthorized to reset password.'))
try:
data_dict = {'id': id}
user_dict = get_action('user_show')(context, data_dict)
user_obj = context['user_obj']
except NotFound, e:
abort(404, _('User not found'))
c.reset_key = request.params.get('key')
if not mailer.verify_reset_link(user_obj, c.reset_key):
h.flash_error(_('Invalid reset key. Please try again.'))
abort(403)
if request.method == 'POST':
try:
context['reset_password'] = True
new_password = self._get_form_password()
user_dict['password'] = new_password
user_dict['reset_key'] = c.reset_key
user_dict['state'] = model.State.ACTIVE
user = get_action('user_update')(context, user_dict)
h.flash_success(_("Your password has been reset."))
h.redirect_to('/')
except NotAuthorized:
h.flash_error(_('Unauthorized to edit user %s') % id)
except NotFound, e:
h.flash_error(_('User not found'))
except DataError:
h.flash_error(_(u'Integrity Error'))
except ValidationError, e:
h.flash_error(u'%r' % e.error_dict)
except ValueError, ve:
h.flash_error(unicode(ve))
c.user_dict = user_dict
return render('user/perform_reset.html')
def _get_form_password(self):
password1 = request.params.getone('password1')
password2 = request.params.getone('password2')
if (password1 is not None and password1 != ''):
if not len(password1) >= 4:
raise ValueError(_('Your password must be 4 '
'characters or longer.'))
elif not password1 == password2:
raise ValueError(_('The passwords you entered'
' do not match.'))
return password1
raise ValueError(_('You must provide a password'))
def followers(self, id=None):
context = {'for_view': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'id': id, 'user_obj': c.userobj}
self._setup_template_variables(context, data_dict)
f = get_action('user_follower_list')
try:
c.followers = f(context, {'id': c.user_dict['id']})
except NotAuthorized:
abort(401, _('Unauthorized to view followers %s') % '')
return render('user/followers.html')
def activity(self, id, offset=0):
'''Render this user's public activity stream page.'''
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'for_view': True}
data_dict = {'id': id, 'user_obj': c.userobj}
try:
check_access('user_show', context, data_dict)
except NotAuthorized:
abort(401, _('Not authorized to see this page'))
self._setup_template_variables(context, data_dict)
c.user_activity_stream = get_action('user_activity_list_html')(
context, {'id': c.user_dict['id'], 'offset': offset})
return render('user/activity_stream.html')
def _get_dashboard_context(self, filter_type=None, filter_id=None, q=None):
'''Return a dict needed by the dashboard view to determine context.'''
def display_name(followee):
'''Return a display name for a user, group or dataset dict.'''
display_name = followee.get('display_name')
fullname = followee.get('fullname')
title = followee.get('title')
name = followee.get('name')
return display_name or fullname or title or name
if (filter_type and filter_id):
context = {
'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'for_view': True
}
data_dict = {'id': filter_id}
followee = None
action_functions = {
'dataset': 'package_show',
'user': 'user_show',
'group': 'group_show'
}
action_function = logic.get_action(
action_functions.get(filter_type))
# Is this a valid type?
if action_function is None:
abort(404, _('Follow item not found'))
try:
followee = action_function(context, data_dict)
except NotFound:
abort(404, _('{0} not found').format(filter_type))
except NotAuthorized:
abort(401, _('Unauthorized to read {0} {1}').format(
filter_type, id))
if followee is not None:
return {
'filter_type': filter_type,
'q': q,
'context': display_name(followee),
'selected_id': followee.get('id'),
'dict': followee,
}
return {
'filter_type': filter_type,
'q': q,
'context': _('Everything'),
'selected_id': False,
'dict': None,
}
def dashboard(self, id=None, offset=0):
context = {'model': model, 'session': model.Session,
'user': c.user or c.author, 'auth_user_obj': c.userobj,
'for_view': True}
data_dict = {'id': id, 'user_obj': c.userobj, 'offset': offset}
self._setup_template_variables(context, data_dict)
q = request.params.get('q', u'')
filter_type = request.params.get('type', u'')
filter_id = request.params.get('name', u'')
c.followee_list = get_action('followee_list')(
context, {'id': c.userobj.id, 'q': q})
c.dashboard_activity_stream_context = self._get_dashboard_context(
filter_type, filter_id, q)
c.dashboard_activity_stream = h.dashboard_activity_stream(
c.userobj.id, filter_type, filter_id, offset
)
# Mark the user's new activities as old whenever they view their
# dashboard page.
get_action('dashboard_mark_activities_old')(context, {})
return render('user/dashboard.html')
def dashboard_datasets(self):
context = {'for_view': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'user_obj': c.userobj}
self._setup_template_variables(context, data_dict)
return render('user/dashboard_datasets.html')
def dashboard_organizations(self):
context = {'for_view': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'user_obj': c.userobj}
self._setup_template_variables(context, data_dict)
return render('user/dashboard_organizations.html')
def dashboard_groups(self):
context = {'for_view': True, 'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'user_obj': c.userobj}
self._setup_template_variables(context, data_dict)
return render('user/dashboard_groups.html')
def follow(self, id):
'''Start following this user.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('follow_user')(context, data_dict)
user_dict = get_action('user_show')(context, data_dict)
h.flash_success(_("You are now following {0}").format(
user_dict['display_name']))
except ValidationError as e:
error_message = (e.extra_msg or e.message or e.error_summary
or e.error_dict)
h.flash_error(error_message)
except NotAuthorized as e:
h.flash_error(e.extra_msg)
h.redirect_to(controller='user', action='read', id=id)
def unfollow(self, id):
'''Stop following this user.'''
context = {'model': model,
'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj}
data_dict = {'id': id}
try:
get_action('unfollow_user')(context, data_dict)
user_dict = get_action('user_show')(context, data_dict)
h.flash_success(_("You are no longer following {0}").format(
user_dict['display_name']))
except (NotFound, NotAuthorized) as e:
error_message = e.extra_msg or e.message
h.flash_error(error_message)
except ValidationError as e:
error_message = (e.error_summary or e.message or e.extra_msg
or e.error_dict)
h.flash_error(error_message)
h.redirect_to(controller='user', action='read', id=id)
def _sane_came_from(self, url):
'''Returns True if came_from is local'''
if not url or (len(url) >= 2 and url.startswith('//')):
return False
parsed = urlparse(url)
if parsed.scheme:
domain = urlparse(h.url_for('/', qualified=True)).netloc
if domain != parsed.netloc:
return False
return True
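# Minimal standalone sketch (not CKAN code) of the came_from check implemented
# by _sane_came_from() above: reject protocol-relative URLs and absolute URLs
# whose host differs from this site. 'site_netloc' is a hypothetical stand-in
# for the netloc of h.url_for('/', qualified=True).
def _is_local_redirect(url, site_netloc):
    if not url or url.startswith('//'):
        return False
    parsed = urlparse(url)
    if parsed.scheme and parsed.netloc != site_netloc:
        return False
    return True

# e.g. _is_local_redirect('/dataset', 'demo.ckan.org')               -> True
#      _is_local_redirect('http://evil.example/x', 'demo.ckan.org')  -> False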
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import collections
import contextlib
import ctypes
import logging
import os
import platform
import re
import socket
import struct
import subprocess
import sys
import time
import zipfile
from telemetry.core import exceptions
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import platform_backend
from telemetry.core.platform.power_monitor import msr_power_monitor
from telemetry.core import util
from telemetry import decorators
from telemetry.util import cloud_storage
from telemetry.util import path
try:
import pywintypes # pylint: disable=F0401
import win32api # pylint: disable=F0401
from win32com.shell import shell # pylint: disable=F0401,E0611
from win32com.shell import shellcon # pylint: disable=F0401,E0611
import win32con # pylint: disable=F0401
import win32gui # pylint: disable=F0401
import win32process # pylint: disable=F0401
import win32security # pylint: disable=F0401
except ImportError:
pywintypes = None
shell = None
shellcon = None
win32api = None
win32con = None
win32gui = None
win32process = None
win32security = None
def _InstallWinRing0():
"""WinRing0 is used for reading MSRs."""
executable_dir = os.path.dirname(sys.executable)
python_is_64_bit = sys.maxsize > 2 ** 32
dll_file_name = 'WinRing0x64.dll' if python_is_64_bit else 'WinRing0.dll'
dll_path = os.path.join(executable_dir, dll_file_name)
os_is_64_bit = platform.machine().endswith('64')
driver_file_name = 'WinRing0x64.sys' if os_is_64_bit else 'WinRing0.sys'
driver_path = os.path.join(executable_dir, driver_file_name)
# Check for WinRing0 and download if needed.
if not (os.path.exists(dll_path) and os.path.exists(driver_path)):
win_binary_dir = os.path.join(
path.GetTelemetryDir(), 'bin', 'win', 'AMD64')
zip_path = os.path.join(win_binary_dir, 'winring0.zip')
cloud_storage.GetIfChanged(zip_path, bucket=cloud_storage.PUBLIC_BUCKET)
try:
with zipfile.ZipFile(zip_path, 'r') as zip_file:
error_message = (
'Failed to extract %s into %s. If python claims that '
'the zip file is locked, this may be a lie. The problem may be '
'that python does not have write permissions to the destination '
'directory.'
)
# Install DLL.
if not os.path.exists(dll_path):
try:
zip_file.extract(dll_file_name, executable_dir)
except:
logging.error(error_message % (dll_file_name, executable_dir))
raise
# Install kernel driver.
if not os.path.exists(driver_path):
try:
zip_file.extract(driver_file_name, executable_dir)
except:
logging.error(error_message % (driver_file_name, executable_dir))
raise
finally:
os.remove(zip_path)
def TerminateProcess(process_handle):
if not process_handle:
return
if win32process.GetExitCodeProcess(process_handle) == win32con.STILL_ACTIVE:
win32process.TerminateProcess(process_handle, 0)
process_handle.close()
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
def __init__(self):
super(WinPlatformBackend, self).__init__()
self._msr_server_handle = None
self._msr_server_port = None
self._power_monitor = msr_power_monitor.MsrPowerMonitor(self)
@classmethod
def IsPlatformBackendForHost(cls):
return sys.platform == 'win32'
def __del__(self):
self.close()
def close(self):
self.CloseMsrServer()
def CloseMsrServer(self):
if not self._msr_server_handle:
return
TerminateProcess(self._msr_server_handle)
self._msr_server_handle = None
self._msr_server_port = None
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
performance_info = self._GetPerformanceInfo()
return performance_info.CommitTotal * performance_info.PageSize / 1024
@decorators.Cache
def GetSystemTotalPhysicalMemory(self):
performance_info = self._GetPerformanceInfo()
return performance_info.PhysicalTotal * performance_info.PageSize / 1024
def GetCpuStats(self, pid):
cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
# Convert 100 nanosecond units to seconds
cpu_time = (cpu_info['UserTime'] / 1e7 +
cpu_info['KernelTime'] / 1e7)
return {'CpuProcessTime': cpu_time}
def GetCpuTimestamp(self):
"""Return current timestamp in seconds."""
return {'TotalTime': time.time()}
def GetMemoryStats(self, pid):
memory_info = self._GetWin32ProcessInfo(
win32process.GetProcessMemoryInfo, pid)
return {'VM': memory_info['PagefileUsage'],
'VMPeak': memory_info['PeakPagefileUsage'],
'WorkingSetSize': memory_info['WorkingSetSize'],
'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
def KillProcess(self, pid, kill_process_tree=False):
    # os.kill is only available on Windows from Python 2.7, so shell out to
    # taskkill instead.
cmd = ['taskkill', '/F', '/PID', str(pid)]
if kill_process_tree:
cmd.append('/T')
subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
def GetSystemProcessInfo(self):
# [3:] To skip 2 blank lines and header.
lines = subprocess.Popen(
['wmic', 'process', 'get',
'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
'/format:csv'],
stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
process_info = []
for line in lines:
if not line:
continue
parts = line.split(',')
pi = {}
pi['ProcessId'] = int(parts[-1])
pi['ParentProcessId'] = int(parts[-2])
pi['Name'] = parts[-3]
creation_date = None
if parts[-4]:
creation_date = float(re.split('[+-]', parts[-4])[0])
pi['CreationDate'] = creation_date
pi['CommandLine'] = ','.join(parts[1:-4])
process_info.append(pi)
return process_info
def GetChildPids(self, pid):
"""Retunds a list of child pids of |pid|."""
ppid_map = collections.defaultdict(list)
creation_map = {}
for pi in self.GetSystemProcessInfo():
ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
if pi['CreationDate']:
creation_map[pi['ProcessId']] = pi['CreationDate']
def _InnerGetChildPids(pid):
if not pid or pid not in ppid_map:
return []
ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
for child in ret:
if child == pid:
continue
ret.extend(_InnerGetChildPids(child))
return ret
return _InnerGetChildPids(pid)
def GetCommandLine(self, pid):
for pi in self.GetSystemProcessInfo():
if pid == pi['ProcessId']:
return pi['CommandLine']
raise exceptions.ProcessGoneException()
@decorators.Cache
def GetArchName(self):
return platform.machine()
def GetOSName(self):
return 'win'
@decorators.Cache
def GetOSVersionName(self):
os_version = platform.uname()[3]
if os_version.startswith('5.1.'):
return platform_backend.XP
if os_version.startswith('6.0.'):
return platform_backend.VISTA
if os_version.startswith('6.1.'):
return platform_backend.WIN7
if os_version.startswith('6.2.'):
return platform_backend.WIN8
raise NotImplementedError('Unknown win version %s.' % os_version)
def CanFlushIndividualFilesFromSystemCache(self):
return True
def _GetWin32ProcessInfo(self, func, pid):
mask = (win32con.PROCESS_QUERY_INFORMATION |
win32con.PROCESS_VM_READ)
handle = None
try:
handle = win32api.OpenProcess(mask, False, pid)
return func(handle)
except pywintypes.error, e:
errcode = e[0]
if errcode == 87:
raise exceptions.ProcessGoneException()
raise
finally:
if handle:
win32api.CloseHandle(handle)
def _GetPerformanceInfo(self):
class PerformanceInfo(ctypes.Structure):
"""Struct for GetPerformanceInfo() call
http://msdn.microsoft.com/en-us/library/ms683210
"""
_fields_ = [('size', ctypes.c_ulong),
('CommitTotal', ctypes.c_size_t),
('CommitLimit', ctypes.c_size_t),
('CommitPeak', ctypes.c_size_t),
('PhysicalTotal', ctypes.c_size_t),
('PhysicalAvailable', ctypes.c_size_t),
('SystemCache', ctypes.c_size_t),
('KernelTotal', ctypes.c_size_t),
('KernelPaged', ctypes.c_size_t),
('KernelNonpaged', ctypes.c_size_t),
('PageSize', ctypes.c_size_t),
('HandleCount', ctypes.c_ulong),
('ProcessCount', ctypes.c_ulong),
('ThreadCount', ctypes.c_ulong)]
def __init__(self):
self.size = ctypes.sizeof(self)
# pylint: disable=bad-super-call
super(PerformanceInfo, self).__init__()
performance_info = PerformanceInfo()
ctypes.windll.psapi.GetPerformanceInfo(
ctypes.byref(performance_info), performance_info.size)
return performance_info
def IsCurrentProcessElevated(self):
if self.GetOSVersionName() < platform_backend.VISTA:
# TOKEN_QUERY is not defined before Vista. All processes are elevated.
return True
handle = win32process.GetCurrentProcess()
with contextlib.closing(
win32security.OpenProcessToken(handle, win32con.TOKEN_QUERY)) as token:
return bool(win32security.GetTokenInformation(
token, win32security.TokenElevation))
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
"""Launch an application. Returns a PyHANDLE object."""
parameters = ' '.join(parameters) if parameters else ''
if elevate_privilege and not self.IsCurrentProcessElevated():
# Use ShellExecuteEx() instead of subprocess.Popen()/CreateProcess() to
# elevate privileges. A new console will be created if the new process has
# different permissions than this process.
proc_info = shell.ShellExecuteEx(
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS | shellcon.SEE_MASK_NO_CONSOLE,
lpVerb='runas' if elevate_privilege else '',
lpFile=application,
lpParameters=parameters,
nShow=win32con.SW_HIDE)
if proc_info['hInstApp'] <= 32:
raise Exception('Unable to launch %s' % application)
return proc_info['hProcess']
else:
handle, _, _, _ = win32process.CreateProcess(
None, application + ' ' + parameters, None, None, False,
win32process.CREATE_NO_WINDOW, None, None, win32process.STARTUPINFO())
return handle
def CanMonitorPower(self):
return self._power_monitor.CanMonitorPower()
def CanMeasurePerApplicationPower(self):
return self._power_monitor.CanMeasurePerApplicationPower()
def StartMonitoringPower(self, browser):
self._power_monitor.StartMonitoringPower(browser)
def StopMonitoringPower(self):
return self._power_monitor.StopMonitoringPower()
def _StartMsrServerIfNeeded(self):
if self._msr_server_handle:
return
_InstallWinRing0()
self._msr_server_port = util.GetUnreservedAvailableLocalPort()
# It might be flaky to get a port number without reserving it atomically,
# but if the server process chooses a port, we have no way of getting it.
# The stdout of the elevated process isn't accessible.
parameters = (
os.path.join(os.path.dirname(__file__), 'msr_server_win.py'),
str(self._msr_server_port),
)
self._msr_server_handle = self.LaunchApplication(
sys.executable, parameters, elevate_privilege=True)
# Wait for server to start.
try:
socket.create_connection(('127.0.0.1', self._msr_server_port), 5).close()
except socket.error:
self.CloseMsrServer()
atexit.register(TerminateProcess, self._msr_server_handle)
def ReadMsr(self, msr_number, start=0, length=64):
self._StartMsrServerIfNeeded()
if not self._msr_server_handle:
raise OSError('Unable to start MSR server.')
sock = socket.create_connection(('127.0.0.1', self._msr_server_port), 0.1)
try:
sock.sendall(struct.pack('I', msr_number))
response = sock.recv(8)
finally:
sock.close()
return struct.unpack('Q', response)[0] >> start & ((1 << length) - 1)
def IsCooperativeShutdownSupported(self):
return True
def CooperativelyShutdown(self, proc, app_name):
pid = proc.pid
# http://timgolden.me.uk/python/win32_how_do_i/
# find-the-window-for-my-subprocess.html
#
# It seems that intermittently this code manages to find windows
# that don't belong to Chrome -- for example, the cmd.exe window
# running slave.bat on the tryservers. Try to be careful about
# finding only Chrome's windows. This works for both the browser
# and content_shell.
#
# It seems safest to send the WM_CLOSE messages after discovering
# all of the sub-process's windows.
def find_chrome_windows(hwnd, hwnds):
_, win_pid = win32process.GetWindowThreadProcessId(hwnd)
if (pid == win_pid and
win32gui.IsWindowVisible(hwnd) and
win32gui.IsWindowEnabled(hwnd) and
win32gui.GetClassName(hwnd).lower().startswith(app_name)):
hwnds.append(hwnd)
return True
hwnds = []
win32gui.EnumWindows(find_chrome_windows, hwnds)
if hwnds:
for hwnd in hwnds:
win32gui.SendMessage(hwnd, win32con.WM_CLOSE, 0, 0)
return True
else:
logging.info('Did not find any windows owned by target process')
return False
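# Worked example (illustrative only) of the bit-field extraction performed at
# the end of ReadMsr() above: shift the 64-bit MSR value right by 'start' bits,
# then mask it down to 'length' bits.
def _extract_msr_field(value, start=0, length=64):
    return value >> start & ((1 << length) - 1)

# e.g. bits [8, 16) of 0x12345678:
#   _extract_msr_field(0x12345678, start=8, length=8) == 0x56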
|
|
from collections import defaultdict
from traceback import format_exc
from struct import unpack
from errno import EINTR
import socket
from clusto.services.config import conf, get_logger
log = get_logger('clusto.dhcp', 'INFO')
import logging
runtime = logging.getLogger('scapy.runtime')
runtime.setLevel(logging.ERROR)
loading = logging.getLogger('scapy.loading')
loading.setLevel(logging.ERROR)
from scapy.all import BOOTP, DHCP, DHCPTypes, DHCPOptions, DHCPRevOptions
from IPy import IP
import clusto
extra = conf('dhcp.extra_options')
extra = dict([(int(k), str(v)) for k, v in extra.items()])
DHCPOptions.update(extra)
for k,v in DHCPOptions.iteritems():
if type(v) is str:
n = v
v = None
else:
n = v.name
DHCPRevOptions[n] = (k,v)
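# Illustrative note: after the loop above, DHCPRevOptions maps an option name
# back to a (code, field) pair, e.g. roughly DHCPRevOptions['message-type']
# -> (53, <field>), while the plain-string extra options map to (code, None).
# A tiny helper sketch (not used by the server) for looking up an option code:
def _option_code(name):
    code, _field = DHCPRevOptions[name]
    return code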
class DHCPRequest(object):
def __init__(self, packet):
self.packet = packet
self.parse()
def parse(self):
options = self.packet[DHCP].options
hwaddr = ':'.join(['%02x' % ord(x) for x in self.packet.chaddr[:6]])
mac = None
options = dict([x for x in options if isinstance(x, tuple)])
if 'client_id' in options:
# This format is completely nonstandard
try:
mac = unpack('>6s', options['client_id'][1:])[0]
options['client_id'] = ':'.join(['%02x' % ord(x) for x in mac]).lower()
except:
log.warning('Unable to parse client_id from %s, ignoring', hwaddr)
self.type = DHCPTypes[options['message-type']]
self.hwaddr = hwaddr
self.options = options
class DHCPResponse(object):
def __init__(self, type, offerip=None, options={}, request=None):
self.type = type
self.offerip = offerip
self.serverip = socket.gethostbyname(socket.gethostname())
self.options = options
self.request = request
def set_type(self, type):
self.type = type
def build(self):
options = [
('message-type', self.type)
]
pxelinux = False
for k, v in self.options.items():
if k == 'enabled': continue
if not k in DHCPRevOptions:
log.warning('Unknown DHCP option: %s' % k)
continue
if k.startswith('pxelinux'):
pxelinux = True
if isinstance(v, unicode):
v = v.encode('ascii', 'ignore')
options.append((k, v))
if pxelinux:
options.append(('pxelinux-magic', '\xf1\x00\x75\x7e'))
bootp_options = {
'op': 2,
'xid': self.request.packet.xid,
'ciaddr': '0.0.0.0',
'yiaddr': self.offerip,
'chaddr': self.request.packet.chaddr,
}
if 'tftp_server' in self.options:
bootp_options['siaddr'] = self.options['tftp_server']
if 'tftp_filename' in self.options:
bootp_options['file'] = self.options['tftp_filename']
for k, v in bootp_options.items():
if isinstance(v, unicode):
bootp_options[k] = v.encode('ascii', 'ignore')
pkt = BOOTP(**bootp_options)/DHCP(options=options)
#pkt.show()
return pkt.build()
class DHCPServer(object):
def __init__(self, bind_address=('0.0.0.0', 67)):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(bind_address)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_id = socket.gethostbyname(socket.gethostname())
def run(self):
while True:
try:
data, address = self.sock.recvfrom(4096)
except KeyboardInterrupt:
break
except socket.error, e:
if e.args[0] == EINTR:
continue
log.error(format_exc())
break
packet = BOOTP(data)
request = DHCPRequest(packet)
log.debug('%s %s' % (request.type, request.hwaddr))
methodname = 'handle_%s' % request.type
if hasattr(self, methodname):
method = getattr(self, methodname)
try:
method(request)
except:
log.error(format_exc())
continue
def send(self, address, data):
while data:
bytes = self.sock.sendto(str(data), 0, (address, 68))
data = data[bytes:]
class ClustoDHCPServer(DHCPServer):
def __init__(self):
DHCPServer.__init__(self)
self.offers = {}
log.info('Clusto DHCP server starting')
def handle_request(self, request):
chaddr = request.packet.chaddr
if not chaddr in self.offers:
log.warning('Got a request before sending an offer from %s' % request.hwaddr)
return
response = self.offers[chaddr]
response.type = 'ack'
self.send('255.255.255.255', response.build())
def handle_discover(self, request):
if conf('dhcp.update_ipmi'):
self.update_ipmi(request)
attrs = [{
'key': 'port-nic-eth',
'subkey': 'mac',
'number': 1,
'value': request.hwaddr,
}]
server = clusto.get_entities(attrs=attrs)
if not server:
return
if len(server) > 1:
log.warning('More than one server with address %s: %s' % (request.hwaddr, ', '.join([x.name for x in server])))
return
server = server[0]
enabled = server.attrs(key='dhcp', subkey='enabled', merge_container_attrs=True)
if not enabled or not enabled[0].value:
log.info('DHCP not enabled for %s' % server.name)
return
ips = defaultdict(dict)
for attr in server.attrs(key='ip'):
ips[attr.number][attr.subkey] = attr.value
for num, ip in ips.items():
if IP(ip['ipstring']).iptype() != 'PRIVATE':
del ips[num]
if not ips:
log.info('No private IP assigned for %s' % server.name)
return
ip = ips.values().pop()
ipman = dict([(x.key, x.value) for x in ip['manager'].attrs(subkey='property')])
#ipman = dict([(x['key'], x['value']) for x in clusto.get_ip_manager(ip).attrs() if x['subkey'] == 'property'])
ipy = IP('%s/%s' % (ip['ipstring'], ipman['netmask']), make_net=True)
options = {
'server_id': self.server_id,
'lease_time': 3600,
'renewal_time': 1600,
'subnet_mask': ipman['netmask'],
'broadcast_address': ipy.broadcast().strNormal(),
'router': ipman['gateway'],
'hostname': server.hostname,
}
log.info('Sending offer to %s, options: %s' % (server.name, options))
for attr in server.attrs(key='dhcp', merge_container_attrs=True):
options[attr.subkey] = attr.value
response = DHCPResponse(type='offer', offerip=ip['ipstring'], options=options, request=request)
self.offers[request.packet.chaddr] = response
self.send('255.255.255.255', response.build())
def update_ipmi(self, request):
attrs = [{
'key': 'bootstrap',
'subkey': 'mac',
'value': request.hwaddr,
}, {
'key': 'port-nic-eth',
'subkey': 'mac',
'number': 1,
'value': request.hwaddr,
}]
server = clusto.get_entities(attrs=attrs)
if not server:
return
try:
server = server[0]
if request.options.get('vendor_class_id', None) == 'udhcp 0.9.9-pre':
# This is an IPMI request
#logging.debug('Associating IPMI %s %s' % (request.hwaddr, server.name))
server.set_port_attr('nic-eth', 1, 'ipmi-mac', request.hwaddr)
else:
                #logging.debug('Associating physical %s %s' % (request.hwaddr, server.name))
server.set_port_attr('nic-eth', 1, 'mac', request.hwaddr)
except:
log.error('Error updating server MAC: %s' % format_exc())
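# Illustrative sketch (values made up) of the subnet math in handle_discover()
# above: IPy derives the network and broadcast address for the offered IP from
# the netmask stored on the IP manager.
def _example_offer_network():
    ipy = IP('10.2.3.17/255.255.255.0', make_net=True)  # network 10.2.3.0/24
    return ipy.broadcast().strNormal()                   # -> '10.2.3.255'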
|
|
import pyopencl as cl
import numpy as np
import scipy as sp
from scipy import misc
from scipy import ndimage
import random as r
import datetime as date
import time
from PIL import Image
import os
import tkFileDialog as tfd
from Tkinter import Tk
#Don't truncate printed arrays
np.set_printoptions(threshold=np.nan)
class CL:
def __init__(self):
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
self.tickState = False
#Load kernel file and load as internal program
def loadProgram(self, filename):
#read in the OpenCL source file as a string
f = open(filename, 'r')
fstr = "".join(f.readlines())
f.close()
#print fstr
#create the program
return cl.Program(self.ctx, fstr).build()
#Load last configuration
def loadConfig(self, filename):
#read
f = open(filename, 'r')
fstr = "".join(f.readlines())
return fstr
#initialize host side (CPU) arrays
def initHostArrays(self, res_expo):
#Use ar_ySize below to increase the worldspace, should be a power of 2
self.ar_ySize = np.int32(2**res_expo)
#-------------------------------------------
self.a = np.ones((self.ar_ySize,self.ar_ySize), dtype=np.int32)
#Create the cl buffers
def initBuffers(self):
mf = cl.mem_flags
#create OpenCL buffers
self.a_buf = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.a)
self.b_buf = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.a)
self.dest_buf = cl.Buffer(self.ctx, mf.WRITE_ONLY, self.a.nbytes)
#Run Kernel, swapping/transitioning cell states between buffer a & b
def execute(self):
if self.tickState == False:
self.kAutomata.RunAutomata(self.queue, self.a.shape, None, self.ar_ySize, self.a_buf, self.b_buf)
else:
self.kAutomata.RunAutomata(self.queue, self.a.shape, None, self.ar_ySize, self.b_buf, self.a_buf)
self.tickState = not self.tickState
#Read from GPU buffer to host's array (self.a)
def getData(self):
if self.tickState == False:
self.kUtil.GetWorld(self.queue, self.a.shape, None, self.ar_ySize, self.b_buf, self.dest_buf)
cl.enqueue_read_buffer(self.queue, self.dest_buf, self.a).wait()
else:
self.kUtil.GetWorld(self.queue, self.a.shape, None, self.ar_ySize, self.a_buf, self.dest_buf)
cl.enqueue_read_buffer(self.queue, self.dest_buf, self.a).wait()
#Seed, fill buffer
def seed(self, seedRand):
#select random seed for the fill
np.random.seed(r.randint(0,100000))
seedAr = np.random.randint(seedRand, size=(self.ar_ySize, self.ar_ySize))
#Limit it to 1s and 0s, then assign it
seedAr = np.where(seedAr==1, 1, 0)
self.a = np.int32(seedAr)
#Seed from image
def loadImg(self, seedImage):
img = np.array(Image.open(seedImage))
img2 = np.where(img != 0, 1, 0)
self.a = np.int32(img2)
#Print the output array
def render(self):
print self.a
#Write Bitmap File Render
def bitRender(self, rn, zoom):
name = "Out/"+"image" + str(rn)
sp.misc.imsave(name + '.bmp', sp.ndimage.zoom(self.a, zoom, order=0))
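# Illustrative note (not used by the program): execute() above ping-pongs
# between the a/b device buffers so that each generation reads the buffer the
# previous generation wrote. A minimal CPU-side sketch of the same pattern:
def _ping_pong_demo(step, state_a, state_b, ticks):
    for _ in range(ticks):
        step(state_a, state_b)               # read state_a, write state_b
        state_a, state_b = state_b, state_a  # swap roles for the next tick
    return state_a                           # holds the most recent state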
#----------------------------------------------------------
#-------------------- Entry Point -------------------------
#----------------------------------------------------------
if __name__ == "__main__":
MainCL = CL()
MainCL.kUtil = MainCL.loadProgram("KernelUtils.cl")
#For hiding unused GUI
fauxgui = Tk()
fauxgui.withdraw()
#--- Preset Configuration -------------
res_expo = 10
seed_strength = 3
iterations = 100
renderEvery = 1
image_magnification = 1
#----------------------------------------------------------
#--------------USER INPUT & CONFIG-------------------------
#----------------------------------------------------------
last_config = MainCL.loadConfig("Last_Config")
vetoConfig = False
list = [item for item in last_config.split(',') if item.strip()]
#replay last config?
uinput = raw_input(" > Replay last custom configuration? (Y/N): ")
if uinput != "" and uinput != "n" and uinput != "N":
	#Override the defaults
res_expo = int(list[0])
ruleFName = list[1]
seedImageFile = list[2]
seed_strength = int(list[3])
iterations = int(list[4])
renderEvery = int(list[5])
image_magnification = int(list[6])
vetoConfig = True
if(not vetoConfig):
uinput = raw_input(" > (Int) [2 ^ " + str(res_expo) + " = " + str(2**res_expo) + "] Resolution: 2 to the power of: ")
if uinput != "":
res_expo = int(uinput)
#Set the resolution
MainCL.initHostArrays(res_expo)
if(not vetoConfig):
#Have the user select one of the kernel automata rules
ruleFName = tfd.askopenfilename(initialdir="./RuleKernels", title="Select Kernel Rule (*.cl)")
usePreset = raw_input(" > Use preset configuration? (Y/N): ")
#Load the selected kernel
print " > LOADING KERNEL"
MainCL.kAutomata = MainCL.loadProgram(ruleFName)
#Randomly seed host array
MainCL.seed(seed_strength)
if(not vetoConfig):
if usePreset == "N" or usePreset == "n":
#Query user about seeding the initial cell configurations
SeedType = raw_input(" > Seed from bitmap file? (Y/N): ")
if SeedType != "" and SeedType != "n" and SeedType != "N":
#Seed from image
MainCL.loadImg(tfd.askopenfilename(initialdir="./SeedImages", title="Select Seeding-Image File (*.bmp)"))
else:
#Seed Strength
uinput = raw_input(" > (Int) [" + str(seed_strength) + "] Enter random seed strength (1/x): ")
if uinput != "":
MainCL.seed(int(uinput))
#number of frames to calculate
uinput = raw_input(" > (Int) [" + str(iterations) + "] Enter number of frames to calculate: ")
if uinput != "":
iterations = int(uinput)
#render every x frames
uinput = raw_input(" > (Int) [" + str(renderEvery) + "] Render every x frames: ")
if uinput != "":
renderEvery = int(uinput)
uinput = raw_input(" > (Int) [" + str(image_magnification) + "] Magnify rendered pixels by: ")
if uinput != "":
image_magnification = int(uinput)
uinput = raw_input(" > Save configuration? (Y/N): ")
if uinput != "" and uinput != "n" and uinput != "N":
#Save presets
sOut = str(res_expo) + ","
sOut += ruleFName + ","
sOut += "null" + ","
sOut += str(seed_strength) + ","
sOut += str(iterations) + ","
sOut += str(renderEvery) + ","
sOut += str(image_magnification)
config_file = open("Last_Config", "w")
config_file.write(sOut)
config_file.close()
#----------------------------------------------------------
#-------------- END USER INPUT & CONFIG--------------------
#----------------------------------------------------------
#-----
# Begin main program loop
#-----
MainCL.initBuffers()
#Diagnostics
total_cells = iterations * MainCL.ar_ySize*MainCL.ar_ySize
renderNum = 0
#Run the loop
print "Running:", "{:,}".format(MainCL.ar_ySize), "x", "{:,}".format(MainCL.ar_ySize), "for", "{:,}".format(iterations), "iterations,", "{:,}".format(total_cells), "total cells"
time1=time.clock()
for i in range(iterations):
MainCL.execute()
if i % renderEvery == 0:
MainCL.getData()
MainCL.bitRender(renderNum, image_magnification)
time2=time.clock()
if i != 0:
print "Img:", renderNum+1, "/", "{:,}".format(iterations/renderEvery), " - ", (float(renderNum+1)/float(iterations/renderEvery))*100, "%", "of", "{:,}".format(total_cells)
print "ETA:", int((total_cells/(renderEvery*MainCL.ar_ySize*MainCL.ar_ySize/(time2-time1)))*(float(renderNum+1)/float(iterations/renderEvery))), "/", unicode(int(total_cells/(renderEvery*MainCL.ar_ySize*MainCL.ar_ySize/(time2-time1)))), "s. Cells/Sec.", "{:,}".format(int(renderEvery*MainCL.ar_ySize*MainCL.ar_ySize/(time2-time1)))
time1 = time2
renderNum += 1
#Write the output to the terminal (for testing)
if MainCL.ar_ySize <= 100:
print " > Begin CPU Render"
MainCL.render()
else:
print " > Array size must be <= 100 to attempt a terminal render"
print " > DONE!"
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.policy_templates.writers.reg_writer'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
from grit.format.policy_templates.writers import writer_unittest_common
class RegWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests for RegWriter.'''
NEWLINE = '\r\n'
def CompareOutputs(self, output, expected_output):
'''Compares the output of the reg_writer with its expected output.
Args:
output: The output of the reg writer as returned by grit.
expected_output: The expected output.
Raises:
AssertionError: if the two strings are not equivalent.
'''
self.assertEquals(
output.strip(),
expected_output.strip())
def testEmpty(self):
# Test the handling of an empty policy list.
grd = self.PrepareTest(
'{'
' "policy_definitions": [],'
' "placeholders": [],'
' "messages": {}'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium': '1', }, 'reg', 'en')
expected_output = 'Windows Registry Editor Version 5.00'
self.CompareOutputs(output, expected_output)
def testMainPolicy(self):
# Tests a policy group with a single policy of type 'main'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "MainPolicy",'
' "type": "main",'
' "features": { "can_be_recommended": True },'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": True'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome]',
'"MainPolicy"=dword:00000001',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome\\Recommended]',
'"MainPolicy"=dword:00000001'])
self.CompareOutputs(output, expected_output)
def testRecommendedMainPolicy(self):
# Tests a policy group with a single policy of type 'main'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "MainPolicy",'
' "type": "main",'
' "features": {'
' "can_be_recommended": True,'
' "can_be_mandatory": False '
' },'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": True'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome\\Recommended]',
'"MainPolicy"=dword:00000001'])
self.CompareOutputs(output, expected_output)
def testStringPolicy(self):
# Tests a policy group with a single policy of type 'string'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "StringPolicy",'
' "type": "string",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": "hello, world! \\\" \\\\"'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
'"StringPolicy"="hello, world! \\\" \\\\"'])
self.CompareOutputs(output, expected_output)
def testIntPolicy(self):
# Tests a policy group with a single policy of type 'int'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "IntPolicy",'
' "type": "int",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": 26'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
'"IntPolicy"=dword:0000001a'])
self.CompareOutputs(output, expected_output)
def testIntEnumPolicy(self):
# Tests a policy group with a single policy of type 'int-enum'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "EnumPolicy",'
' "type": "int-enum",'
' "caption": "",'
' "desc": "",'
' "items": ['
' {"name": "ProxyServerDisabled", "value": 0, "caption": ""},'
' {"name": "ProxyServerAutoDetect", "value": 1, "caption": ""},'
' ],'
' "supported_on": ["chrome.win:8-"],'
' "example_value": 1'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome]',
'"EnumPolicy"=dword:00000001'])
self.CompareOutputs(output, expected_output)
def testStringEnumPolicy(self):
# Tests a policy group with a single policy of type 'string-enum'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "EnumPolicy",'
' "type": "string-enum",'
' "caption": "",'
' "desc": "",'
' "items": ['
' {"name": "ProxyServerDisabled", "value": "one",'
' "caption": ""},'
' {"name": "ProxyServerAutoDetect", "value": "two",'
' "caption": ""},'
' ],'
' "supported_on": ["chrome.win:8-"],'
' "example_value": "two"'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome]',
'"EnumPolicy"="two"'])
self.CompareOutputs(output, expected_output)
def testListPolicy(self):
# Tests a policy group with a single policy of type 'list'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "ListPolicy",'
' "type": "list",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.linux:8-"],'
' "example_value": ["foo", "bar"]'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium\\ListPolicy]',
'"1"="foo"',
'"2"="bar"'])
def testStringEnumListPolicy(self):
# Tests a policy group with a single policy of type 'string-enum-list'.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "ListPolicy",'
' "type": "string-enum-list",'
' "caption": "",'
' "desc": "",'
' "items": ['
' {"name": "ProxyServerDisabled", "value": "foo",'
' "caption": ""},'
' {"name": "ProxyServerAutoDetect", "value": "bar",'
' "caption": ""},'
' ],'
' "supported_on": ["chrome.linux:8-"],'
' "example_value": ["foo", "bar"]'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium\\ListPolicy]',
'"1"="foo"',
'"2"="bar"'])
def testDictionaryPolicy(self):
# Tests a policy group with a single policy of type 'dict'.
example = {
'bool': True,
'dict': {
'a': 1,
'b': 2,
},
'int': 10,
'list': [1, 2, 3],
'string': 'abc',
}
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "DictionaryPolicy",'
' "type": "dict",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": ' + str(example) +
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
'"DictionaryPolicy"="{"bool": true, "dict": {"a": 1, '
'"b": 2}, "int": 10, "list": [1, 2, 3], "string": "abc"}"'])
self.CompareOutputs(output, expected_output)
def testNonSupportedPolicy(self):
# Tests a policy that is not supported on Windows, so it shouldn't
# be included in the .REG file.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "NonWindowsPolicy",'
' "type": "list",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.mac:8-"],'
' "example_value": ["a"]'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00'])
self.CompareOutputs(output, expected_output)
def testPolicyGroup(self):
    # Tests a policy group that has more than one policy.
grd = self.PrepareTest(
'{'
' "policy_definitions": ['
' {'
' "name": "Group1",'
' "type": "group",'
' "caption": "",'
' "desc": "",'
' "policies": [{'
' "name": "Policy1",'
' "type": "list",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": ["a", "b"]'
' },{'
' "name": "Policy2",'
' "type": "string",'
' "caption": "",'
' "desc": "",'
' "supported_on": ["chrome.win:8-"],'
' "example_value": "c"'
' }],'
' },'
' ],'
' "placeholders": [],'
' "messages": {},'
'}')
output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
expected_output = self.NEWLINE.join([
'Windows Registry Editor Version 5.00',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
'"Policy2"="c"',
'',
'[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium\\Policy1]',
'"1"="a"',
'"2"="b"'])
self.CompareOutputs(output, expected_output)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
EMC_ROOT = 'root/emc'
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
INFO_SRC_V3 = 3
ACTIVATESNAPVX = 4
DEACTIVATESNAPVX = 19
SNAPSYNCTYPE = 7
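# SMI-S numeric codes used below: SyncType 7 requests a snapshot replica
# (see create_element_replica), and ModifyReplicaSynchronization operation
# 4 (ACTIVATESNAPVX) activates a SnapVX session while 19 (DEACTIVATESNAPVX)
# returns its resources to the pool.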
class EMCVMAXProvisionV3(object):
"""Provisioning Class for SMI-S based EMC volume drivers.
This Provisioning class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
self.protocol = prtcl
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
def delete_volume_from_pool(
self, conn, storageConfigservice, volumeInstanceName, volumeName,
extraSpecs):
"""Given the volume instance remove it from the pool.
:param conn: connection to the ecom server
        :param storageConfigservice: the storage configuration service
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param extraSpecs: additional info
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
startTime = time.time()
if isinstance(volumeInstanceName, list):
theElements = volumeInstanceName
volumeName = 'Bulk Delete'
else:
theElements = [volumeInstanceName]
rc, job = conn.InvokeMethod(
'ReturnElementsToStoragePool', storageConfigservice,
TheElements=theElements)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Delete Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod ReturnElementsToStoragePool took: "
"%(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc
def create_volume_from_sg(
self, conn, storageConfigService, volumeName,
sgInstanceName, volumeSize, extraSpecs):
"""Create the volume and associate it with a storage group.
We use EMCCollections parameter to supply a Device Masking Group
to contain a newly created storage volume.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service
:param volumeName: the volume name (String)
:param sgInstanceName: the storage group instance name
associated with an SLO
:param volumeSize: volume size (String)
:param extraSpecs: additional info
:returns: dict -- volumeDict - the volume dict
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
try:
storageGroupInstance = conn.GetInstance(sgInstanceName)
except Exception:
exceptionMessage = (_(
"Unable to get the name of the storage group"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
@lockutils.synchronized(storageGroupInstance['ElementName'],
"emc-sg-", True)
def do_create_volume_from_sg():
startTime = time.time()
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, ElementName=volumeName,
EMCCollections=[sgInstanceName],
ElementType=self.utils.get_num(THINPROVISIONING, '16'),
Size=self.utils.get_num(volumeSize, '64'))
LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.",
{'volumename': volumeName,
'rc': rc})
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
# Find the newly created volume.
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
return do_create_volume_from_sg()
def _find_new_storage_group(
            self, conn, maskingGroupDict, storageGroupName):
        """After creating a new storage group, find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:param storageGroupName: storage group name (String)
:returns: maskingGroupDict['MaskingGroup'] or None
"""
foundStorageGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundStorageGroupInstanceName
def get_volume_dict_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return self.create_volume_dict(associators[0].path)
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def get_volume_from_job(self, conn, jobInstance):
"""Given the jobInstance determine the volume Instance.
:param conn: the ecom connection
:param jobInstance: the instance of a job
        :returns: the volume instance
"""
associators = conn.Associators(
jobInstance,
ResultClass='EMC_StorageVolume')
if len(associators) > 0:
return associators[0]
else:
exceptionMessage = (_(
"Unable to get storage volume from job."))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
def create_volume_dict(self, volumeInstanceName):
"""Create volume dictionary
:param volumeInstanceName: the instance of a job
:returns: dict -- volumeDict - an instance of a volume
"""
volpath = volumeInstanceName
volumeDict = {}
volumeDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
volumeDict['keybindings'] = keys
return volumeDict
def create_element_replica(
self, conn, repServiceInstanceName,
cloneName, syncType, sourceInstance, extraSpecs,
targetInstance=None, rsdInstance=None):
"""Make SMI-S call to create replica for source element.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param extraSpecs: additional info
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
startTime = time.time()
LOG.debug("Create replica: %(clone)s "
"syncType: %(syncType)s Source: %(source)s.",
{'clone': cloneName,
'syncType': syncType,
'source': sourceInstance.path})
storageSystemName = sourceInstance['SystemName']
__, __, sgInstanceName = (
self.utils.get_v3_default_sg_instance_name(
conn, extraSpecs[self.utils.POOL],
extraSpecs[self.utils.SLO],
extraSpecs[self.utils.WORKLOAD], storageSystemName))
try:
storageGroupInstance = conn.GetInstance(sgInstanceName)
except Exception:
exceptionMessage = (_(
"Unable to get the name of the storage group"))
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
@lockutils.synchronized(storageGroupInstance['ElementName'],
"emc-sg-", True)
def do_create_element_replica():
if targetInstance is None and rsdInstance is None:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=self.utils.get_num(syncType, '16'),
SourceElement=sourceInstance.path,
Collections=[sgInstanceName])
else:
rc, job = self._create_element_replica_extra_params(
conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance,
sgInstanceName)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Cloned Volume: %(cloneName)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'cloneName': cloneName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateElementReplica "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
return do_create_element_replica()
def _create_element_replica_extra_params(
self, conn, repServiceInstanceName, cloneName, syncType,
sourceInstance, targetInstance, rsdInstance, sgInstanceName):
"""CreateElementReplica using extra parameters.
:param conn: the connection to the ecom server
:param repServiceInstanceName: replication service
:param cloneName: clone volume name
:param syncType: 7=snapshot, 8=clone
:param sourceInstance: source volume instance
:param targetInstance: Target volume instance. Default None
:param rsdInstance: replication settingdata instance. Default None
        :param sgInstanceName: the storage group instance name
:returns: int -- rc - return code
:returns: job - job object of the replica creation operation
"""
syncType = self.utils.get_num(syncType, '16')
if targetInstance and rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path,
ReplicationSettingData=rsdInstance)
elif targetInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
TargetElement=targetInstance.path)
elif rsdInstance:
rc, job = conn.InvokeMethod(
'CreateElementReplica', repServiceInstanceName,
ElementName=cloneName,
SyncType=syncType,
SourceElement=sourceInstance.path,
ReplicationSettingData=rsdInstance,
Collections=[sgInstanceName])
return rc, job
def break_replication_relationship(
self, conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs, force=False):
"""Deletes the relationship between the clone/snap and source volume.
Makes an SMI-S call to break clone relationship between the clone
volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
:param extraSpecs: additional info
:param force: force to break replication relationship if True
:returns: rc - return code
:returns: job - job object of the replica creation operation
"""
LOG.debug("Break replication relationship: %(sv)s "
"operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs, force)
def create_storage_group_v3(self, conn, controllerConfigService,
                                groupName, srp, slo, workload, extraSpecs):
        """Create a storage group for the specified SRP, SLO and workload.
:param conn: the connection information to the ecom server
:param controllerConfigService: the controller configuration service
:param groupName: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extraSpecs: additional info
:returns: storageGroupInstanceName - storage group instance name
"""
startTime = time.time()
@lockutils.synchronized(groupName, "emc-sg-", True)
def do_create_storage_group_v3():
if slo and workload:
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'),
EMCSRP=srp,
EMCSLO=slo,
EMCWorkload=workload)
else:
rc, job = conn.InvokeMethod(
'CreateGroup',
controllerConfigService,
GroupName=groupName,
Type=self.utils.get_num(4, '16'))
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, job, extraSpecs)
if rc != 0:
LOG.error(_LE(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s."),
{'groupName': groupName,
'rc': rc,
'error': errordesc})
                    raise exception.VolumeBackendAPIException(
                        data=errordesc)
LOG.debug("InvokeMethod CreateGroup "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
foundStorageGroupInstanceName = self._find_new_storage_group(
conn, job, groupName)
return foundStorageGroupInstanceName
return do_create_storage_group_v3()
def get_storage_pool_capability(self, conn, poolInstanceName):
"""Get the pool capability.
:param conn: the connection information to the ecom server
:param poolInstanceName: the pool instance
:returns: the storage pool capability instance. None if not found
"""
storagePoolCapability = None
associators = (
conn.AssociatorNames(poolInstanceName,
ResultClass='Symm_StoragePoolCapabilities'))
if len(associators) > 0:
storagePoolCapability = associators[0]
return storagePoolCapability
def get_storage_pool_setting(
self, conn, storagePoolCapability, slo, workload):
"""Get the pool setting for pool capability.
:param conn: the connection information to the ecom server
:param storagePoolCapability: the storage pool capability instance
:param slo: the slo string e.g Bronze
:param workload: the workload string e.g DSS_REP
:returns: the storage pool setting instance
"""
foundStoragePoolSetting = None
storagePoolSettings = (
conn.AssociatorNames(storagePoolCapability,
ResultClass='CIM_storageSetting'))
for storagePoolSetting in storagePoolSettings:
settingInstanceID = storagePoolSetting['InstanceID']
matchString = ("%(slo)s:%(workload)s"
% {'slo': slo,
'workload': workload})
if matchString in settingInstanceID:
foundStoragePoolSetting = storagePoolSetting
break
if foundStoragePoolSetting is None:
exceptionMessage = (_(
"The array does not support the storage pool setting "
"for SLO %(slo)s and workload %(workload)s. Please "
"check the array for valid SLOs and workloads.")
% {'slo': slo,
'workload': workload})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return foundStoragePoolSetting
def _get_supported_size_range_for_SLO(
self, conn, storageConfigService,
srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs):
"""Gets available performance capacity per SLO.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage configuration service instance
:param srpPoolInstanceName: the SRP storage pool instance
:param storagePoolSettingInstanceName: the SLO type, e.g Bronze
:param extraSpecs: additional info
:returns: dict -- supportedSizeDict - the supported size dict
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, supportedSizeDict = conn.InvokeMethod(
'GetSupportedSizeRange',
srpPoolInstanceName,
ElementType=self.utils.get_num(3, '16'),
Goal=storagePoolSettingInstanceName)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(
conn, supportedSizeDict, extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Cannot get supported size range for %(sps)s "
"Return code: %(rc)lu. Error: %(error)s.")
% {'sps': storagePoolSettingInstanceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod GetSupportedSizeRange "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return supportedSizeDict
def get_volume_range(
self, conn, storageConfigService, poolInstanceName, slo, workload,
extraSpecs):
"""Get upper and lower range for volume for slo/workload combination.
:param conn: the connection information to the ecom server
:param storageConfigService: the storage config service
:param poolInstanceName: the pool instance
:param slo: slo string e.g Bronze
:param workload: workload string e.g DSS
:param extraSpecs: additional info
:returns: supportedSizeDict
"""
supportedSizeDict = {}
storagePoolCapabilityInstanceName = self.get_storage_pool_capability(
conn, poolInstanceName)
if storagePoolCapabilityInstanceName:
storagePoolSettingInstanceName = self.get_storage_pool_setting(
conn, storagePoolCapabilityInstanceName, slo, workload)
supportedSizeDict = self._get_supported_size_range_for_SLO(
conn, storageConfigService, poolInstanceName,
storagePoolSettingInstanceName, extraSpecs)
return supportedSizeDict
def activate_snap_relationship(
self, conn, repServiceInstanceName, syncInstanceName, extraSpecs):
"""Activate snap relationship and start copy operation.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param extraSpecs: additional info
:returns: int -- return code
:returns: job object of the replica creation operation
"""
# Operation 4: activate the snapVx.
operation = ACTIVATESNAPVX
LOG.debug("Activate snap: %(sv)s operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs)
def return_to_resource_pool(self, conn, repServiceInstanceName,
syncInstanceName, extraSpecs):
"""Return the snap target resources back to the pool.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
:param extraSpecs: additional info
:returns: rc - return code
:returns: job object of the replica creation operation
"""
        # Operation 19: deactivate the snapVx.
operation = DEACTIVATESNAPVX
LOG.debug("Return snap resource back to pool: "
"%(sv)s operation: %(operation)s.",
{'sv': syncInstanceName, 'operation': operation})
return self._modify_replica_synchronization(
conn, repServiceInstanceName, syncInstanceName, operation,
extraSpecs)
def _modify_replica_synchronization(
self, conn, repServiceInstanceName, syncInstanceName,
operation, extraSpecs, force=False):
"""Modify the relationship between the clone/snap and source volume.
Helper function that makes an SMI-S call to break clone relationship
between the clone volume and the source.
:param conn: the connection to the ecom server
:param repServiceInstanceName: instance name of the replication service
:param syncInstanceName: instance name of the
SE_StorageSynchronized_SV_SV object
:param operation: operation code
:param extraSpecs: additional info
:param force: force to modify replication synchronization if True
:returns: int -- return code
:returns: job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, job = conn.InvokeMethod(
'ModifyReplicaSynchronization', repServiceInstanceName,
Operation=self.utils.get_num(operation, '16'),
Synchronization=syncInstanceName,
Force=force)
LOG.debug("_modify_replica_synchronization: %(sv)s "
"operation: %(operation)s Return code: %(rc)lu.",
{'sv': syncInstanceName, 'operation': operation, 'rc': rc})
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error modify replica synchronization: %(sv)s "
"operation: %(operation)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'sv': syncInstanceName, 'operation': operation,
'rc': rc, 'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod ModifyReplicaSynchronization "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
return rc, job
def create_group_replica(
self, conn, replicationService,
srcGroupInstanceName, tgtGroupInstanceName, relationName,
extraSpecs):
"""Make SMI-S call to create replica for source group.
:param conn: the connection to the ecom server
:param replicationService: replication service
:param srcGroupInstanceName: source group instance name
:param tgtGroupInstanceName: target group instance name
:param relationName: replica relationship name
:param extraSpecs: additional info
:returns: int -- return code
:returns: job object of the replica creation operation
:raises: VolumeBackendAPIException
"""
LOG.debug(
"Creating CreateGroupReplica V3: "
"replicationService: %(replicationService)s "
"RelationName: %(relationName)s "
"sourceGroup: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'replicationService': replicationService,
'relationName': relationName,
'srcGroup': srcGroupInstanceName,
'tgtGroup': tgtGroupInstanceName})
rc, job = conn.InvokeMethod(
'CreateGroupReplica',
replicationService,
RelationshipName=relationName,
SourceGroup=srcGroupInstanceName,
TargetGroup=tgtGroupInstanceName,
SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'))
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMsg = (_("Error CreateGroupReplica: "
"source: %(source)s target: %(target)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'source': srcGroupInstanceName,
'target': tgtGroupInstanceName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMsg)
raise exception.VolumeBackendAPIException(data=exceptionMsg)
return rc, job
def get_srp_pool_stats(self, conn, arrayInfo):
"""Get the totalManagedSpace, remainingManagedSpace.
:param conn: the connection to the ecom server
:param arrayInfo: the array dict
:returns: totalCapacityGb
:returns: remainingCapacityGb
"""
totalCapacityGb = -1
remainingCapacityGb = -1
storageSystemInstanceName = self.utils.find_storageSystem(
conn, arrayInfo['SerialNumber'])
srpPoolInstanceNames = conn.AssociatorNames(
storageSystemInstanceName,
ResultClass='Symm_SRPStoragePool')
for srpPoolInstanceName in srpPoolInstanceNames:
poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName)
if six.text_type(arrayInfo['PoolName']) == (
six.text_type(poolnameStr)):
try:
                    # Check that the pool hasn't suddenly been deleted.
srpPoolInstance = conn.GetInstance(srpPoolInstanceName)
propertiesList = srpPoolInstance.properties.items()
for properties in propertiesList:
if properties[0] == 'TotalManagedSpace':
cimProperties = properties[1]
totalManagedSpace = cimProperties.value
totalCapacityGb = self.utils.convert_bits_to_gbs(
totalManagedSpace)
elif properties[0] == 'RemainingManagedSpace':
cimProperties = properties[1]
remainingManagedSpace = cimProperties.value
remainingCapacityGb = (
self.utils.convert_bits_to_gbs(
remainingManagedSpace))
except Exception:
pass
remainingSLOCapacityGb = (
self._get_remaining_slo_capacity_wlp(
conn, srpPoolInstanceName, arrayInfo,
storageSystemInstanceName['Name']))
if remainingSLOCapacityGb != -1:
remainingCapacityGb = remainingSLOCapacityGb
else:
LOG.warning(_LW(
"Remaining capacity %(remainingCapacityGb)s "
"GBs is determined from SRP pool capacity "
"and not the SLO capacity. Performance may "
"not be what you expect."),
{'remainingCapacityGb': remainingCapacityGb})
return totalCapacityGb, remainingCapacityGb
def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName,
arrayInfo, systemName):
"""Get the remaining SLO capacity.
This is derived from the WLP portion of Unisphere. Please
see the SMIProvider doc and the readme doc for details.
:param conn: the connection to the ecom server
:param srpPoolInstanceName: SRP instance name
:param arrayInfo: the array dict
:param systemName: the system name
:returns: remainingCapacityGb
"""
remainingCapacityGb = -1
storageConfigService = (
self.utils.find_storage_configuration_service(
conn, systemName))
supportedSizeDict = (
self.get_volume_range(
conn, storageConfigService, srpPoolInstanceName,
arrayInfo['SLO'], arrayInfo['Workload'],
None))
try:
# Information source is V3.
if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3:
remainingCapacityGb = self.utils.convert_bits_to_gbs(
supportedSizeDict['EMCRemainingSLOCapacity'])
LOG.debug("Received remaining SLO Capacity "
"%(remainingCapacityGb)s GBs for SLO "
"%(SLO)s and workload %(workload)s.",
{'remainingCapacityGb': remainingCapacityGb,
'SLO': arrayInfo['SLO'],
'workload': arrayInfo['Workload']})
except KeyError:
pass
return remainingCapacityGb
def extend_volume_in_SG(
self, conn, storageConfigService, volumeInstanceName,
volumeName, volumeSize, extraSpecs):
"""Extend a volume instance.
:param conn: connection to the ecom server
        :param storageConfigService: the storage configuration service
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param volumeSize: the volume size
:param extraSpecs: additional info
:returns: volumeDict
:returns: int -- return code
:raises: VolumeBackendAPIException
"""
startTime = time.time()
rc, job = conn.InvokeMethod(
'CreateOrModifyElementFromStoragePool',
storageConfigService, TheElement=volumeInstanceName,
Size=self.utils.get_num(volumeSize, '64'))
LOG.debug("Extend Volume: %(volumename)s. Return code: %(rc)lu.",
{'volumename': volumeName,
'rc': rc})
if rc != 0:
rc, error_desc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Extend Volume: %(volumeName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'volumeName': volumeName,
'rc': rc,
'error': error_desc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(startTime,
time.time())})
# Find the newly created volume.
volumeDict = self.get_volume_dict_from_job(conn, job['Job'])
return volumeDict, rc
|
|
"""
User settings/preference dialog
===============================
"""
import sys
import logging
from .. import config
from ..utils.settings import SettingChangedEvent
from ..utils.propertybindings import (
AbstractBoundProperty, PropertyBinding, BindingManager
)
from AnyQt.QtWidgets import (
QWidget, QMainWindow, QComboBox, QCheckBox, QListView, QTabWidget,
QToolBar, QAction, QStackedWidget, QVBoxLayout, QHBoxLayout,
QFormLayout, QSizePolicy, QLineEdit,
)
from AnyQt.QtCore import (
Qt, QEventLoop, QAbstractItemModel, QModelIndex
)
log = logging.getLogger(__name__)
class UserDefaultsPropertyBinding(AbstractBoundProperty):
"""
A Property binding for a setting in a
    :class:`Orange.canvas.utils.settings.Settings` instance.
"""
def __init__(self, obj, propertyName, parent=None):
AbstractBoundProperty.__init__(self, obj, propertyName, parent)
obj.installEventFilter(self)
def get(self):
return self.obj.get(self.propertyName)
def set(self, value):
self.obj[self.propertyName] = value
def eventFilter(self, obj, event):
if event.type() == SettingChangedEvent.SettingChanged and \
event.key() == self.propertyName:
self.notifyChanged()
return AbstractBoundProperty.eventFilter(self, obj, event)
class UserSettingsModel(QAbstractItemModel):
"""
    An item model for user settings, presenting a list of key/value
    entries along with their status and type.
"""
def __init__(self, parent=None, settings=None):
QAbstractItemModel.__init__(self, parent)
self.__settings = settings
self.__headers = ["Name", "Status", "Type", "Value"]
def setSettings(self, settings):
        if self.__settings != settings:
            self.beginResetModel()
            self.__settings = settings
            self.endResetModel()
def settings(self):
return self.__settings
def rowCount(self, parent=QModelIndex()):
if parent.isValid():
return 0
elif self.__settings:
return len(self.__settings)
else:
return 0
def columnCount(self, parent=QModelIndex()):
if parent.isValid():
return 0
else:
return len(self.__headers)
def parent(self, index):
return QModelIndex()
def index(self, row, column=0, parent=QModelIndex()):
if parent.isValid() or \
column < 0 or column >= self.columnCount() or \
row < 0 or row >= self.rowCount():
return QModelIndex()
return self.createIndex(row, column, row)
def headerData(self, section, orientation, role=Qt.DisplayRole):
if section >= 0 and section < 4 and orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return self.__headers[section]
return QAbstractItemModel.headerData(self, section, orientation, role)
def data(self, index, role=Qt.DisplayRole):
if self._valid(index):
key = self._keyFromIndex(index)
column = index.column()
if role == Qt.DisplayRole:
if column == 0:
return key
elif column == 1:
default = self.__settings.isdefault(key)
return "Default" if default else "User"
elif column == 2:
return type(self.__settings.get(key)).__name__
elif column == 3:
return self.__settings.get(key)
return None
def flags(self, index):
if self._valid(index):
flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if index.column() == 3:
return Qt.ItemIsEditable | flags
else:
return flags
return Qt.NoItemFlags
def setData(self, index, value, role=Qt.EditRole):
if self._valid(index) and index.column() == 3:
key = self._keyFromIndex(index)
try:
self.__settings[key] = value
except (TypeError, ValueError) as ex:
log.error("Failed to set value (%r) for key %r", value, key,
exc_info=True)
else:
self.dataChanged.emit(index, index)
return True
return False
def _valid(self, index):
row = index.row()
return row >= 0 and row < self.rowCount()
def _keyFromIndex(self, index):
row = index.row()
return list(self.__settings.keys())[row]
def container_widget_helper(orientation=Qt.Vertical, spacing=None, margin=0):
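    """Return a QWidget wrapping an empty box layout in the given orientation,
    applying the requested spacing and contents margins.
    """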
widget = QWidget()
if orientation == Qt.Vertical:
layout = QVBoxLayout()
widget.setSizePolicy(QSizePolicy.Fixed,
QSizePolicy.MinimumExpanding)
else:
layout = QHBoxLayout()
if spacing is not None:
layout.setSpacing(spacing)
    if margin is not None:
        layout.setContentsMargins(margin, margin, margin, margin)
widget.setLayout(layout)
return widget
class UserSettingsDialog(QMainWindow):
"""
A User Settings/Defaults dialog.
"""
MAC_UNIFIED = True
def __init__(self, parent=None, **kwargs):
QMainWindow.__init__(self, parent, **kwargs)
self.setWindowFlags(Qt.Dialog)
self.setWindowModality(Qt.ApplicationModal)
self.layout().setSizeConstraint(QVBoxLayout.SetFixedSize)
self.__macUnified = sys.platform == "darwin" and self.MAC_UNIFIED
self._manager = BindingManager(self,
submitPolicy=BindingManager.AutoSubmit)
self.__loop = None
self.__settings = config.settings()
self.__setupUi()
def __setupUi(self):
"""Set up the UI.
"""
if self.__macUnified:
self.tab = QToolBar()
self.addToolBar(Qt.TopToolBarArea, self.tab)
self.setUnifiedTitleAndToolBarOnMac(True)
# This does not seem to work
self.setWindowFlags(self.windowFlags() & \
~Qt.MacWindowToolBarButtonHint)
self.tab.actionTriggered[QAction].connect(
self.__macOnToolBarAction
)
central = QStackedWidget()
central.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
else:
self.tab = central = QTabWidget(self)
self.stack = central
self.setCentralWidget(central)
# General Tab
tab = QWidget()
self.addTab(tab, self.tr("General"),
toolTip=self.tr("General Options"))
form = QFormLayout()
tab.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
nodes = QWidget(self, objectName="nodes")
nodes.setLayout(QVBoxLayout())
nodes.layout().setContentsMargins(0, 0, 0, 0)
cb_anim = QCheckBox(
self.tr("Enable node animations"),
objectName="enable-node-animations",
toolTip=self.tr("Enable shadow and ping animations for nodes "
"in the workflow.")
)
self.bind(cb_anim, "checked", "schemeedit/enable-node-animations")
nodes.layout().addWidget(cb_anim)
form.addRow(self.tr("Nodes"), nodes)
links = QWidget(self, objectName="links")
links.setLayout(QVBoxLayout())
links.layout().setContentsMargins(0, 0, 0, 0)
cb_show = QCheckBox(
self.tr("Show channel names between widgets"),
objectName="show-channel-names",
toolTip=self.tr("Show source and sink channel names "
"over the links.")
)
self.bind(cb_show, "checked", "schemeedit/show-channel-names")
links.layout().addWidget(cb_show)
form.addRow(self.tr("Links"), links)
quickmenu = QWidget(self, objectName="quickmenu-options")
quickmenu.setLayout(QVBoxLayout())
quickmenu.layout().setContentsMargins(0, 0, 0, 0)
cb1 = QCheckBox(self.tr("On double click"),
toolTip=self.tr("Open quick menu on a double click "
"on an empty spot in the canvas"))
cb2 = QCheckBox(self.tr("On right click"),
toolTip=self.tr("Open quick menu on a right click "
"on an empty spot in the canvas"))
cb3 = QCheckBox(self.tr("On space key press"),
                        toolTip=self.tr("On Space key press while the mouse "
"is hovering over the canvas."))
cb4 = QCheckBox(self.tr("On any key press"),
                        toolTip=self.tr("On any key press while the mouse "
"is hovering over the canvas."))
self.bind(cb1, "checked", "quickmenu/trigger-on-double-click")
self.bind(cb2, "checked", "quickmenu/trigger-on-right-click")
self.bind(cb3, "checked", "quickmenu/trigger-on-space-key")
self.bind(cb4, "checked", "quickmenu/trigger-on-any-key")
quickmenu.layout().addWidget(cb1)
quickmenu.layout().addWidget(cb2)
quickmenu.layout().addWidget(cb3)
quickmenu.layout().addWidget(cb4)
form.addRow(self.tr("Open quick menu on"), quickmenu)
startup = QWidget(self, objectName="startup-group")
startup.setLayout(QVBoxLayout())
startup.layout().setContentsMargins(0, 0, 0, 0)
cb_splash = QCheckBox(self.tr("Show splash screen"), self,
objectName="show-splash-screen")
cb_welcome = QCheckBox(self.tr("Show welcome screen"), self,
objectName="show-welcome-screen")
self.bind(cb_splash, "checked", "startup/show-splash-screen")
self.bind(cb_welcome, "checked", "startup/show-welcome-screen")
startup.layout().addWidget(cb_splash)
startup.layout().addWidget(cb_welcome)
form.addRow(self.tr("On startup"), startup)
toolbox = QWidget(self, objectName="toolbox-group")
toolbox.setLayout(QVBoxLayout())
toolbox.layout().setContentsMargins(0, 0, 0, 0)
exclusive = QCheckBox(self.tr("Only one tab can be open at a time"))
self.bind(exclusive, "checked", "mainwindow/toolbox-dock-exclusive")
toolbox.layout().addWidget(exclusive)
form.addRow(self.tr("Tool box"), toolbox)
tab.setLayout(form)
# Output Tab
tab = QWidget()
self.addTab(tab, self.tr("Output"),
toolTip="Output Redirection")
form = QFormLayout()
box = QWidget()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
combo = QComboBox()
combo.addItems([self.tr("Critical"),
self.tr("Error"),
self.tr("Warn"),
self.tr("Info"),
self.tr("Debug")])
self.bind(combo, "currentIndex", "logging/level")
layout.addWidget(combo)
box.setLayout(layout)
form.addRow(self.tr("Logging"), box)
box = QWidget()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
cb1 = QCheckBox(self.tr("Open in external browser"),
objectName="open-in-external-browser")
self.bind(cb1, "checked", "help/open-in-external-browser")
layout.addWidget(cb1)
box.setLayout(layout)
form.addRow(self.tr("Help window"), box)
tab.setLayout(form)
# Error Reporting Tab
tab = QWidget()
self.addTab(tab, self.tr("Error Reporting"),
toolTip="Settings related to error reporting")
form = QFormLayout()
line_edit_mid = QLineEdit()
self.bind(line_edit_mid, "text", "error-reporting/machine-id")
form.addRow("Machine ID:", line_edit_mid)
tab.setLayout(form)
if self.__macUnified:
# Need some sensible size otherwise mac unified toolbar 'takes'
# the space that should be used for layout of the contents
self.adjustSize()
def addTab(self, widget, text, toolTip=None, icon=None):
if self.__macUnified:
action = QAction(text, self)
if toolTip:
action.setToolTip(toolTip)
if icon:
                action.setIcon(icon)
action.setData(len(self.tab.actions()))
self.tab.addAction(action)
self.stack.addWidget(widget)
else:
i = self.tab.addTab(widget, text)
if toolTip:
self.tab.setTabToolTip(i, toolTip)
if icon:
self.tab.setTabIcon(i, icon)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.hide()
self.deleteLater()
def bind(self, source, source_property, key, transformer=None):
target = UserDefaultsPropertyBinding(self.__settings, key)
source = PropertyBinding(source, source_property)
source.set(target.get())
self._manager.bind(target, source)
def commit(self):
self._manager.commit()
def revert(self):
self._manager.revert()
def reset(self):
for target, source in self._manager.bindings():
try:
source.reset()
except NotImplementedError:
# Cannot reset.
pass
except Exception:
                log.error("Error resetting %r", source.propertyName,
exc_info=True)
def exec_(self):
self.__loop = QEventLoop()
self.show()
status = self.__loop.exec_()
self.__loop = None
return status
def hideEvent(self, event):
QMainWindow.hideEvent(self, event)
if self.__loop is not None:
self.__loop.exit(0)
self.__loop = None
def __macOnToolBarAction(self, action):
self.stack.setCurrentIndex(action.data())
|
|
#from django.shortcuts import render
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from django.db.models import Q
import json
import requests
from collections import Counter
from random import randrange
from datetime import date
from depot.models import Station, Area, State
from depot.forms import EntryForm, APISearchForm, APIStationForm
from booking.forms import BookingForm
from booking import models as booking_models
from insure.models import Device
from insure import forms as insure_forms
from drugshare import forms as drug_forms
from drugshare import models as drug_models
from api.sms import send_message
def add_station(request):
form = APIStationForm(request.GET)
if form.is_valid():
brand_name = form.cleaned_data['brand'].upper()
address = form.cleaned_data['address']
state_tag = form.cleaned_data['state'].strip().lower()
try:
state = State.objects.get(tag__iexact=state_tag)
except State.DoesNotExist:
return HttpResponse('wrong state')
#try:
# brand = Brand.objects.get(name=brand_name)
#except Brand.DoesNotExist:
# brand = Brand.objects.create(name=brand_name)
# Does the station already exist?
try:
station = Station.objects.get(
brand=brand_name, address=address, state=state)
return HttpResponse('station already exists')
except Station.DoesNotExist:
station = Station.objects.create(
brand=brand_name, address=address, state=state)
for name in form.cleaned_data['area']:
area, _ = Area.objects.get_or_create(name=name)
station.area.add(area)
return HttpResponse('success')
return HttpResponse('error')
def get_stations(request):
stations = Station.objects.all()
form = APISearchForm(request.GET)
if form.is_valid():
name = form.cleaned_data['name']
state = form.cleaned_data['state']
stations = stations.filter(
area__name__icontains=name,
state__tag__iexact=state)
output = []
for stn in stations:
data = {
'station_id': stn.id,
'name': stn.brand,
'address': stn.address,
'num_cars': 'N/A',
'fuel_price': 'N/A',
'kegs': 'N/A',
'time': 'N/A'
}
recent = stn.recent
if recent:
#kegs = 'Yes' if recent.kegs else 'No'
data.update({
'num_cars': recent.get_num_cars_display(),
'fuel_price': str(recent.fuel_price) or 'N/A',
'kegs': 'Yes' if recent.kegs else 'No',
'time': recent.current_time.strftime('%Y-%m-%d %H:%M:%S')
})
output.append(data)
return HttpResponse(json.dumps(output))
@csrf_exempt
def make_entry(request, station_id):
station = Station.objects.get(pk=station_id)
#import pdb;pdb.set_trace()
if request.method == 'GET':
form = EntryForm(request.GET)
if form.is_valid():
entry = form.save(commit=False)
entry.station = station
entry.save()
return HttpResponse('Success')
return HttpResponse('Error')
def booking(request, resident_id):
resident = get_object_or_404(booking_models.Resident, pk=resident_id)
estate = resident.estate
form = BookingForm(request.GET)
#import pdb;pdb.set_trace()
if form.is_valid():
obj = form.save(commit=False)
code = randrange(10002321, 99221025)
obj.code = code
obj.resident = resident
obj.save()
        msg = ("You have been booked by {resident.name} into "
               "{resident.estate.name} with code: {code}").format(
                   resident=resident, code=code)
phone = '234{}'.format(obj.phone[-10:])
#payload = {
# 'sender': 'V LOGIN',
# 'to': '234{}'.format(obj.phone[-10:]),
# 'msg': msg
#}
send_message(phone, msg)
#sms_url = 'http://shoutinsms.bayo.webfactional.com/api/sendmsg/'
#requests.get(sms_url, params=payload)
booking_models.SentMessage.objects.create(resident=resident)
estate.balance -= 1
estate.save()
return HttpResponse('A message has been sent to your visitor.')
    return HttpResponseBadRequest('An error occurred. Please try again')
def book_profile(request):
uuid = request.GET.get('uuid')
device = get_object_or_404(
booking_models.Device, uuid=uuid, resident__isnull=False)
out = {
'device_id': device.id,
'resident_id': device.resident.id,
'estate_id': device.resident.estate.id
}
    print(out)
return HttpResponse(json.dumps(out))
def book_phone(request):
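    """Send an SMS verification code to a registered resident's phone;
    book_code later exchanges that code for a registered Device.
    """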
phone = request.GET.get('phone')
uuid = request.GET.get('uuid')
try:
booking_models.Resident.objects.get(phone=phone)
except booking_models.Resident.DoesNotExist:
return HttpResponseBadRequest('Sorry you have not been registered')
else:
try:
booking_models.Token.objects.get(msisdn=phone)
except booking_models.Token.DoesNotExist:
code = randrange(100321, 992125)
booking_models.Token.objects.create(
code=code, msisdn=phone, uuid=uuid)
#payload = {
# 'sender': 'V LOGIN',
# 'to': phone,
# 'msg': 'This is your verification code: {}'.format(code)
#}
msg = 'This is your verification code: {}'.format(code)
send_message(phone, msg)
#sms_url = 'http://shoutinsms.bayo.webfactional.com/api/sendmsg/'
#requests.get(sms_url, params=payload)
return HttpResponse('The verification code has been sent to you.')
else:
return HttpResponseBadRequest(
'A verification code has already been sent')
return HttpResponseBadRequest(
        'An unfortunate error occurred, please contact the admin')
def book_code(request):
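    """Check the verification code issued by book_phone, activate the
    resident, register the device and return the new ids as JSON.
    """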
code = request.GET.get('code')
uuid = request.GET.get('uuid')
try:
token = booking_models.Token.objects.get(code=code, uuid=uuid)
except booking_models.Token.DoesNotExist:
return HttpResponseBadRequest('The code you sent is invalid')
else:
resident = booking_models.Resident.objects.get(phone=token.msisdn)
resident.active = True
resident.save()
device = booking_models.Device.objects.create(
uuid=uuid, resident=resident)
out = {
'device_id': device.id,
'resident_id': device.resident.id,
'estate_id': device.resident.estate.id
}
return HttpResponse(json.dumps(out))
return HttpResponseBadRequest('Error, please contact the admin')
@csrf_exempt
def insure(request):
form = insure_forms.EntryForm(request.POST, request.FILES)
# import pdb;pdb.set_trace()
if form.is_valid():
obj = form.save(commit=False)
uuid = request.POST.get('uuid', '')
if uuid:
device, _ = Device.objects.get_or_create(uuid=uuid)
obj.device = device
obj.save()
return HttpResponse("Saved building information.")
return HttpResponseBadRequest("Error")
def add_device(request):
form = drug_forms.TokenForm(request.GET)
if form.is_valid():
code = form.cleaned_data['code']
uuid = form.cleaned_data['uuid']
try:
token = drug_models.Token.objects.get(
code=code, when=date.today())
except drug_models.Token.DoesNotExist:
return HttpResponseBadRequest('Token not valid')
else:
pharmacy = token.pharmacy
device = drug_models.Device.objects.create(
pharmacy=pharmacy, uuid=uuid)
out = {
'name': pharmacy.name,
'pharmacist': pharmacy.pharmacist,
'phone': pharmacy.phone,
'email': pharmacy.email,
'id': pharmacy.id,
'device_id': device.id,
}
outlets = []
for outlet in drug_models.Outlet.objects.filter(
pharmacy=pharmacy, active=True):
outlets.append({
'id': outlet.id,
'phone': outlet.phone,
'address': outlet.address,
'state': outlet.state.name
})
out['outlets'] = outlets
return HttpResponse(json.dumps(out))
return HttpResponseBadRequest("Error in adding device")
def make_token(request, device_id):
device = get_object_or_404(drug_models.Device, pk=device_id)
if not device.active:
return HttpResponseBadRequest('Inactive device')
pharm = device.pharmacy
token = randrange(100000, 999999)
drug_models.Token.objects.create(pharmacy=pharm, code=token)
return HttpResponse('{}'.format(token))
def register_pharm(request):
form = drug_forms.RegisterForm(request.GET)
#import pdb;pdb.set_trace()
if form.is_valid():
uuid = form.cleaned_data['uuid']
try:
drug_models.Device.objects.get(uuid=uuid)
except drug_models.Device.DoesNotExist:
pharmacy = drug_models.Pharmacy.objects.create(
name=form.cleaned_data['pharmacy'],
pharmacist=form.cleaned_data['pharmacist'],
phone=form.cleaned_data['phone'],
email=form.cleaned_data['email'])
device = drug_models.Device.objects.create(
pharmacy=pharmacy, uuid=uuid)
out = {
'name': pharmacy.name,
'pharmacist': pharmacy.pharmacist,
'phone': pharmacy.phone,
'email': pharmacy.email,
'id': pharmacy.id,
'device_id': device.id,
'outlets': []
}
return HttpResponse(json.dumps(out))
return HttpResponseBadRequest('Unable to register')
def get_profile(request):
form = drug_forms.DeviceForm(request.GET)
if form.is_valid():
device = form.cleaned_data['uuid']
if not device.active:
return HttpResponseBadRequest('Inactive device')
out = {
'name': device.pharmacy.name,
'pharmacist': device.pharmacy.pharmacist,
'phone': device.pharmacy.phone,
'email': device.pharmacy.email,
'id': device.pharmacy.id,
'device_id': device.id
}
outlets = []
for outlet in drug_models.Outlet.objects.filter(
pharmacy=device.pharmacy, active=True):
outlets.append({
'id': outlet.id,
'phone': outlet.phone,
'address': outlet.address,
'state': outlet.state.name
})
out['outlets'] = outlets
        print(out)
return HttpResponse(json.dumps(out))
return HttpResponseBadRequest("Error")
def delete_outlet(request, id):
outlet = get_object_or_404(drug_models.Outlet, pk=id)
outlet.active = False
outlet.save()
return HttpResponse('Successfully deleted outlet')
def update_pharm(request, device_id):
    device = get_object_or_404(drug_models.Device, pk=device_id)
if not device.active:
return HttpResponseBadRequest('Inactive device')
pharmacy = device.pharmacy
#pharmacy = get_object_or_404(drug_models.Pharmacy, id=id)
form = drug_forms.PharmacyForm(request.GET, instance=pharmacy)
#import pdb;pdb.set_trace()
if form.is_valid():
form.save()
#pharmacy.name = form.cleaned_data['name']
#pharmacy.phamacist = form.cleaned_data['pharmacist']
#pharmacy.phone = form.cleaned_data['phone']
#pharmacy.email = form.cleaned_data['email']
#pharmacy.save()
return HttpResponse("Saved Pharmacy")
return HttpResponseBadRequest('Unable to save Pharmacy')
def add_outlet(request, device_id):
device = get_object_or_404(drug_models.Device, id=device_id)
#import pdb;pdb.set_trace()
form = drug_forms.OutletForm(request.GET)
if form.is_valid():
_state = request.GET.get('state')
state = drug_models.State.objects.get(name__iexact=_state)
outlet = form.save(commit=False)
outlet.pharmacy = device.pharmacy
outlet.state = state
outlet.active = True
outlet.save()
out = {
'address': outlet.address,
'state': outlet.state.name,
'phone': outlet.phone,
'id': outlet.id
}
return HttpResponse(json.dumps(out))
return HttpResponseBadRequest('Unable to save Outlet')
def list_generic_drugs(request):
output = []
drugs = drug_models.Drug.objects.distinct('name')
for item in drugs:
output.append({
'id': item.id,
'name': item.name
})
    print(output)
return HttpResponse(json.dumps(output))
def add_drug(request):
form = drug_forms.DrugForm(request.GET)
#import pdb;pdb.set_trace()
if form.is_valid():
form.save()
return HttpResponse("Drug added")
return HttpResponseBadRequest('Unable to add the drug')
def search_drug(request, device_id):
device = get_object_or_404(drug_models.Device, pk=device_id)
if not device.active:
return HttpResponseBadRequest('Inactive device')
form = drug_forms.SearchForm(request.GET)
drugs = []
if form.is_valid():
item = form.cleaned_data['name'].title()
drug_models.Search.objects.create(pharmacy=device.pharmacy, name=item)
for drug in drug_models.Drug.objects.valid_drugs().filter(
Q(name__icontains=item) | Q(brand_name__icontains=item)
).order_by('-expiry_date'):
item = {
'id': drug.id,
'name': drug.name,
'brand': drug.brand_name,
'state': drug.outlet.state.name,
'cost': '{}'.format(drug.cost),
'expiry': drug.expiry_date.strftime('%Y-%m-%d'),
'quantity': drug.quantity,
'packsize': drug.pack_size
}
drugs.append(item)
return HttpResponse(json.dumps(drugs))
def stock_drug(request, device_id):
device = get_object_or_404(drug_models.Device, pk=device_id)
if not device.active:
return HttpResponseBadRequest('Inactive device')
pharmacy = device.pharmacy
#pharmacy = get_object_or_404(drug_models.Pharmacy, pk=id)
drugs = []
for drug in drug_models.Drug.objects.valid_drugs().filter(
outlet__pharmacy=pharmacy):
item = {
'id': drug.id,
'name': drug.name,
'cost': '{}'.format(drug.cost),
'packsize': drug.pack_size,
'expiry': drug.expiry_date.strftime('%Y-%m-%d'),
'quantity': drug.quantity,
'address': drug.outlet.address
}
drugs.append(item)
return HttpResponse(json.dumps(drugs))
def remove_drug(request, id):
drug = get_object_or_404(drug_models.Drug, pk=id)
drug.delete()
return HttpResponse("Deleted successfully")
def edit_drug(request, id):
drug = get_object_or_404(drug_models.Drug, pk=id)
form = drug_forms.QtyForm(request.GET)
if form.is_valid():
drug.quantity = form.cleaned_data['quantity']
drug.save()
return HttpResponse("Updated successfully")
return HttpResponseBadRequest("Error trying to update drug")
def recent_drugs(request, count):
drugs = drug_models.Search.objects.values_list('name', flat=True)
output = []
for item in Counter(drugs).most_common(int(count)):
output.append({
'name': item[0],
'count': item[1]})
    print(output)
return HttpResponse(json.dumps(output))
def wishlist_drug(request, device_id):
device = get_object_or_404(drug_models.Device, pk=device_id)
if not device.active:
return HttpResponseBadRequest('Inactive device')
pharmacy = device.pharmacy
#pharmacy = get_object_or_404(drug_models.Pharmacy, pk=pharm_id)
    print("pharmacy %s" % pharmacy)
today = date.today()
drugs = []
for item in drug_models.DrugRequest.objects.filter(
outlet__pharmacy=pharmacy,
drug__expiry_date__gt=today).filter(
Q(status=drug_models.DrugRequest.PENDING)
| Q(status=drug_models.DrugRequest.ACCEPTED)):
if item.status == drug_models.DrugRequest.PENDING:
status = "Pending"
else:
status = "Accepted"
drugs.append({
'id': item.id,
'name': item.drug.name,
'brand': item.drug.brand_name,
'outlet': item.outlet.address,
'quantity': item.quantity,
'cost': "{}".format(item.drug.cost),
'packsize': item.drug.pack_size,
'expiry': item.drug.expiry_date.strftime('%Y-%m-%d'),
'status': status,
'total_cost': "{}".format(item.total_cost)
})
return HttpResponse(json.dumps(drugs))
def request_drug(request, drug_id):
drug = get_object_or_404(drug_models.Drug, pk=drug_id)
form = drug_forms.DrugRequestForm(request.GET)
if form.is_valid():
outlet = form.cleaned_data['outlet']
quantity = form.cleaned_data['quantity']
drug_models.DrugRequest.objects.create(
drug=drug, outlet=outlet, quantity=quantity)
return HttpResponse('Successfully added request')
return HttpResponseBadRequest('Error creating request')
def pending_requests(request, device_id):
device = get_object_or_404(drug_models.Device, pk=device_id)
if not device.active:
return HttpResponseBadRequest('Inactive device')
pharmacy = device.pharmacy
    print("pharmacy %s" % pharmacy)
today = date.today()
output = []
for item in drug_models.DrugRequest.objects.filter(
drug__outlet__pharmacy=pharmacy,
drug__expiry_date__gt=today,
status=drug_models.DrugRequest.PENDING):
output.append({
'id': item.id,
'name': item.drug.name,
'cost': '{}'.format(item.drug.cost),
'expiry': item.drug.expiry_date.strftime('%Y-%m-%d'),
'quantity': item.quantity,
'date': item.posted_on.strftime('%Y-%m-%d'),
'state': item.drug.outlet.state.name
})
    print(output)
return HttpResponse(json.dumps(output))
def accept(request, request_id):
drug_request = get_object_or_404(drug_models.DrugRequest, pk=request_id)
drug_request.status = drug_models.DrugRequest.ACCEPTED
drug_request.save()
return HttpResponse("Accepted successfully")
def reject(request, request_id):
drug_request = get_object_or_404(drug_models.DrugRequest, pk=request_id)
drug_request.status = drug_models.DrugRequest.CANCELLED
drug_request.save()
return HttpResponse("Rejected successfully")
def feedback(request, id):
drug_request = get_object_or_404(drug_models.DrugRequest, pk=id)
form = drug_forms.FeedbackForm(request.GET)
if form.is_valid():
drug_models.RequestFeedback.objects.create(
request=drug_request,
request_status=drug_request.status,
message=form.cleaned_data['message'])
return HttpResponse("Feedback added successfully")
return HttpResponseBadRequest('Error adding feedback')
|
|
import os
import logging
import datetime
import math
import re
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlencode
import aniso8601
from flask import Flask, json, render_template
from flask_ask import Ask, request, session, question, statement
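# NOAA CO-OPS "data getter" web service, queried for the tide predictions below.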
ENDPOINT = "https://tidesandcurrents.noaa.gov/api/datagetter"
SESSION_CITY = "city"
SESSION_DATE = "date"
# NOAA station codes
STATION_CODE_SEATTLE = "9447130"
STATION_CODE_SAN_FRANCISCO = "9414290"
STATION_CODE_MONTEREY = "9413450"
STATION_CODE_LOS_ANGELES = "9410660"
STATION_CODE_SAN_DIEGO = "9410170"
STATION_CODE_BOSTON = "8443970"
STATION_CODE_NEW_YORK = "8518750"
STATION_CODE_VIRGINIA_BEACH = "8638863"
STATION_CODE_WILMINGTON = "8658163"
STATION_CODE_CHARLESTON = "8665530"
STATION_CODE_BEAUFORT = "8656483"
STATION_CODE_MYRTLE_BEACH = "8661070"
STATION_CODE_MIAMI = "8723214"
STATION_CODE_TAMPA = "8726667"
STATION_CODE_NEW_ORLEANS = "8761927"
STATION_CODE_GALVESTON = "8771341"
STATIONS = {}
STATIONS["seattle"] = STATION_CODE_SEATTLE
STATIONS["san francisco"] = STATION_CODE_SAN_FRANCISCO
STATIONS["monterey"] = STATION_CODE_MONTEREY
STATIONS["los angeles"] = STATION_CODE_LOS_ANGELES
STATIONS["san diego"] = STATION_CODE_SAN_DIEGO
STATIONS["boston"] = STATION_CODE_BOSTON
STATIONS["new york"] = STATION_CODE_NEW_YORK
STATIONS["virginia beach"] = STATION_CODE_VIRGINIA_BEACH
STATIONS["wilmington"] = STATION_CODE_WILMINGTON
STATIONS["charleston"] = STATION_CODE_CHARLESTON
STATIONS["beaufort"] = STATION_CODE_BEAUFORT
STATIONS["myrtle beach"] = STATION_CODE_MYRTLE_BEACH
STATIONS["miami"] = STATION_CODE_MIAMI
STATIONS["tampa"] = STATION_CODE_TAMPA
STATIONS["new orleans"] = STATION_CODE_NEW_ORLEANS
STATIONS["galveston"] = STATION_CODE_GALVESTON
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
class TideInfo(object):
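    """Simple container for one day's tide extremes: the first high tide,
    the intervening low tide and the second high tide (a time and a height
    for each).
    """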
def __init__(self):
self.first_high_tide_time = None
self.first_high_tide_height = None
self.low_tide_time = None
self.low_tide_height = None
self.second_high_tide_time = None
self.second_high_tide_height = None
@ask.launch
def launch():
welcome_text = render_template('welcome')
help_text = render_template('help')
return question(welcome_text).reprompt(help_text)
@ask.intent('OneshotTideIntent',
mapping={'city': 'City', 'date': 'Date'},
convert={'date': 'date'},
default={'city': 'seattle', 'date': datetime.date.today })
def one_shot_tide(city, date):
if city.lower() not in STATIONS:
return supported_cities()
return _make_tide_request(city, date)
@ask.intent('DialogTideIntent',
mapping={'city': 'City', 'date': 'Date'},
convert={'date': 'date'})
def dialog_tide(city, date):
if city is not None:
if city.lower() not in STATIONS:
return supported_cities()
if SESSION_DATE not in session.attributes:
session.attributes[SESSION_CITY] = city
return _dialog_date(city)
date = aniso8601.parse_date(session.attributes[SESSION_DATE])
return _make_tide_request(city, date)
elif date is not None:
if SESSION_CITY not in session.attributes:
session.attributes[SESSION_DATE] = date.isoformat()
return _dialog_city(date)
city = session.attributes[SESSION_CITY]
return _make_tide_request(city, date)
else:
return _dialog_no_slot()
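# Dialog flow: DialogTideIntent fills one slot per turn. If only the city was heard it
# is stashed in session.attributes and the skill asks for a date (and vice versa); once
# the missing slot arrives on the next turn, the stored value is read back and the
# normal tide request is made. With neither slot, _dialog_no_slot() re-prompts.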
@ask.intent('SupportedCitiesIntent')
def supported_cities():
cities = ", ".join(sorted(STATIONS.keys()))
list_cities_text = render_template('list_cities', cities=cities)
list_cities_reprompt_text = render_template('list_cities_reprompt')
return question(list_cities_text).reprompt(list_cities_reprompt_text)
@ask.intent('AMAZON.HelpIntent')
def help():
help_text = render_template('help')
list_cities_reprompt_text = render_template('list_cities_reprompt')
return question(help_text).reprompt(list_cities_reprompt_text)
@ask.intent('AMAZON.StopIntent')
def stop():
bye_text = render_template('bye')
return statement(bye_text)
@ask.intent('AMAZON.CancelIntent')
def cancel():
bye_text = render_template('bye')
return statement(bye_text)
@ask.session_ended
def session_ended():
return "{}", 200
@app.template_filter()
def humanize_date(dt):
# http://stackoverflow.com/a/20007730/1163855
    ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
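    # The slice above reads a suffix out of "tsnrhtdd" with stride 4: index 0 -> "th",
    # 1 -> "st", 2 -> "nd", 3 -> "rd". The index is n % 10 when the last digit is 1-3
    # and the tens digit is not 1, and 0 otherwise, so ordinal(1) == "1st",
    # ordinal(2) == "2nd", ordinal(11) == "11th" and ordinal(24) == "24th".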
month_and_day_of_week = dt.strftime('%A %B')
day_of_month = ordinal(dt.day)
year = dt.year if dt.year != datetime.datetime.now().year else ""
formatted_date = "{} {} {}".format(month_and_day_of_week, day_of_month, year)
    formatted_date = re.sub(r'\s+', ' ', formatted_date)
return formatted_date
@app.template_filter()
def humanize_time(dt):
morning_threshold = 12
afternoon_threshold = 17
evening_threshold = 20
hour_24 = dt.hour
if hour_24 < morning_threshold:
period_of_day = "in the morning"
elif hour_24 < afternoon_threshold:
period_of_day = "in the afternoon"
elif hour_24 < evening_threshold:
period_of_day = "in the evening"
else:
period_of_day = " at night"
the_time = dt.strftime('%I:%M')
formatted_time = "{} {}".format(the_time, period_of_day)
return formatted_time
@app.template_filter()
def humanize_height(height):
round_down_threshold = 0.25
round_to_half_threshold = 0.75
is_negative = False
if height < 0:
height = abs(height)
is_negative = True
remainder = height % 1
if remainder < round_down_threshold:
remainder_text = ""
feet = int(math.floor(height))
elif remainder < round_to_half_threshold:
remainder_text = "and a half"
feet = int(math.floor(height))
else:
remainder_text = ""
        feet = int(math.ceil(height))
if is_negative:
feet *= -1
formatted_height = "{} {} feet".format(feet, remainder_text)
    formatted_height = re.sub(r'\s+', ' ', formatted_height)
return formatted_height
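# Worked examples for humanize_height: 4.1 -> "4 feet" (fractions under a quarter foot
# are dropped), 4.6 -> "4 and a half feet", 4.8 -> "5 feet" (rounded up to the next
# foot), and -1.6 -> "-1 and a half feet" (the sign is restored after rounding).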
def _dialog_no_slot():
if SESSION_CITY in session.attributes:
date_dialog2_text = render_template('date_dialog2')
return question(date_dialog2_text).reprompt(date_dialog2_text)
else:
return supported_cities()
def _dialog_date(city):
date_dialog_text = render_template('date_dialog', city=city)
date_dialog_reprompt_text = render_template('date_dialog_reprompt')
return question(date_dialog_text).reprompt(date_dialog_reprompt_text)
def _dialog_city(date):
session.attributes[SESSION_DATE] = date
session.attributes_encoder = _json_date_handler
city_dialog_text = render_template('city_dialog', date=date)
city_dialog_reprompt_text = render_template('city_dialog_reprompt')
return question(city_dialog_text).reprompt(city_dialog_reprompt_text)
def _json_date_handler(obj):
if isinstance(obj, datetime.date):
return obj.isoformat()
def _make_tide_request(city, date):
station = STATIONS.get(city.lower())
noaa_api_params = {
'station': station,
'product': 'predictions',
'datum': 'MLLW',
'units': 'english',
'time_zone': 'lst_ldt',
'format': 'json'
}
if date == datetime.date.today():
noaa_api_params['date'] = 'today'
else:
noaa_api_params['begin_date'] = date.strftime('%Y%m%d')
noaa_api_params['range'] = 24
url = ENDPOINT + "?" + urlencode(noaa_api_params)
resp_body = urlopen(url).read()
if len(resp_body) == 0:
statement_text = render_template('noaa_problem')
else:
noaa_response_obj = json.loads(resp_body)
predictions = noaa_response_obj['predictions']
tideinfo = _find_tide_info(predictions)
statement_text = render_template('tide_info', date=date, city=city, tideinfo=tideinfo)
return statement(statement_text).simple_card("Tide Pooler", statement_text)
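# For illustration, a request for today's Seattle predictions goes to roughly the
# following URL (query parameter order may vary):
#   https://tidesandcurrents.noaa.gov/api/datagetter?station=9447130&product=predictions
#       &datum=MLLW&units=english&time_zone=lst_ldt&format=json&date=today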
def _find_tide_info(predictions):
"""
Algorithm to find the 2 high tides for the day, the first of which is smaller and occurs
mid-day, the second of which is larger and typically in the evening.
"""
last_prediction = None
first_high_tide = None
second_high_tide = None
low_tide = None
first_tide_done = False
for prediction in predictions:
if last_prediction is None:
last_prediction = prediction
continue
if last_prediction['v'] < prediction['v']:
if not first_tide_done:
first_high_tide = prediction
else:
second_high_tide = prediction
else: # we're decreasing
if not first_tide_done and first_high_tide is not None:
first_tide_done = True
elif second_high_tide is not None:
break # we're decreasing after having found the 2nd tide. We're done.
if first_tide_done:
low_tide = prediction
last_prediction = prediction
fmt = '%Y-%m-%d %H:%M'
parse = datetime.datetime.strptime
tideinfo = TideInfo()
tideinfo.first_high_tide_time = parse(first_high_tide['t'], fmt)
tideinfo.first_high_tide_height = float(first_high_tide['v'])
tideinfo.second_high_tide_time = parse(second_high_tide['t'], fmt)
tideinfo.second_high_tide_height = float(second_high_tide['v'])
tideinfo.low_tide_time = parse(low_tide['t'], fmt)
tideinfo.low_tide_height = float(low_tide['v'])
return tideinfo
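# Abridged sketch of the prediction series _find_tide_info expects (the real NOAA feed
# has a reading every few minutes):
#   predictions = [{"t": "2016-07-04 00:00", "v": "1.204"},
#                  {"t": "2016-07-04 06:12", "v": "8.901"},   # first high tide
#                  {"t": "2016-07-04 12:30", "v": "-0.342"},  # low tide
#                  {"t": "2016-07-04 18:48", "v": "9.514"}]   # second high tide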
if __name__ == '__main__':
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True)
|
|
from . import bindings
from .utils.conversion import boolean_t
from .utils import six
NV_UNIQUE_NAME = bindings['NV_UNIQUE_NAME']
NV_UNIQUE_NAME_TYPE = bindings['NV_UNIQUE_NAME_TYPE']
data_type_t = bindings['data_type_t']
def data_type_t_safe(id):
try:
return data_type_t(id)
except ValueError:
return None
ffi = bindings.ffi
libzfs = bindings.libzfs
NO_DEFAULT = object()
HOLDER_TYPE = 'uint_t *'
def _split_dict(base, keys_to_split):
a, b = {}, {}
for key, value in base.items():
(b if key in keys_to_split else a)[key] = value
return a, b
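# e.g. _split_dict({'free': True, 'skip_unknown': True}, ['free']) returns
# ({'skip_unknown': True}, {'free': True}).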
def nvlist_to_dict(nvlist, **kwargs):
kwargs.setdefault('skip_unknown', True)
with nvlist:
return dict(nvlist.items(**kwargs))
def ptr_to_dict(ptr, **kwargs):
dict_kwargs, nvlist_kwargs = _split_dict(kwargs, ['free', 'alloc', 'flags'])
nvlist = NVList.from_nvlist_ptr(ptr, **nvlist_kwargs)
return nvlist_to_dict(nvlist, **dict_kwargs)
def hdl_to_dict(hdl, **kwargs):
dict_kwargs, nvlist_kwargs = _split_dict(kwargs, ['free', 'alloc', 'flags'])
nvlist = NVList.from_nvlist_hdl(hdl, **nvlist_kwargs)
return nvlist_to_dict(nvlist, **dict_kwargs)
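# A minimal usage sketch, assuming `raw` is an `nvlist_t *` obtained from a libzfs call
# through cffi:
#
#   props = ptr_to_dict(raw, free=False, skip_unknown=True)
#
# wraps the pointer in an NVList without re-allocating or freeing it and returns the
# contents as a plain dict.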
class UnknownDataType(Exception):
pass
class NVPairMixIn(object):
@classmethod
def _detect_type(cls, data_type, default=NO_DEFAULT):
info = NVLIST_HANDLERS.get(data_type)
if not info and default is NO_DEFAULT:
raise UnknownDataType("Unknown data type: %r" % data_type)
elif not info:
info = default
return info
@classmethod
def _from_python(cls, data_type, data):
info = cls._detect_type(data_type)
return info.convert_from_python(data)
@classmethod
def _to_python(cls, data_type, data, count=None):
info = cls._detect_type(data_type)
return info.convert_to_python(data, count)
class NVList(NVPairMixIn):
@classmethod
def from_nvlist_ptr(cls, ptr, **kwargs):
hdl = ffi.new('nvlist_t **')
hdl[0] = ptr
if 'hdl' in kwargs:
del kwargs['hdl']
return cls.from_nvlist_hdl(hdl, **kwargs)
@classmethod
def from_nvlist_hdl(cls, hdl, **kwargs):
kwargs['alloc'] = False
return cls(hdl=hdl, **kwargs)
def __init__(self, flags=NV_UNIQUE_NAME, hdl=None, alloc=True, free=True):
self._flags = flags
self._hdl = hdl
self._alloc = alloc
self._alloced = None
self._free = free
def alloc(self):
if self._hdl is None:
self._hdl = ffi.new('nvlist_t **')
if self._alloc is True:
self._alloced = libzfs.nvlist_alloc(self._hdl, int(self._flags), 0)
return self._alloced
__enter__ = alloc
def free(self, exc_type=None, exc_val=None, exc_tb=None):
if self._hdl and self._free and self._alloced:
libzfs.nvlist_free(self.ptr)
self._alloced = None
__exit__ = free
@property
def ptr(self):
if self._hdl:
return self._hdl[0]
return None
@property
def hdl(self):
return self._hdl
def add(self, key, data_type, value):
info = self._detect_type(data_type)
value = info.from_python(value)
return not bool(info.nvlist_add(self.ptr, key, value))
def lookup(self, key, data_type, default=NO_DEFAULT):
info = self._detect_type(data_type)
holder = info.new()
countholder = None
if info.is_array:
countholder = ffi.new(HOLDER_TYPE)
val = info.nvlist_lookup(self.ptr, key, holder, countholder)
else:
val = info.nvlist_lookup(self.ptr, key, holder)
if not bool(val):
return info.to_python(holder, countholder)
elif default is not NO_DEFAULT:
return default
raise KeyError(key)
def _lookup_type(self, key, default=NO_DEFAULT):
holder = ffi.new('nvpair_t **')
val = libzfs.nvlist_lookup_nvpair(self.ptr, key, holder)
if bool(val):
raise KeyError(key)
typeid = libzfs.nvpair_type(holder[0])
data_type = data_type_t_safe(typeid)
if data_type is None and default is NO_DEFAULT:
raise UnknownDataType("Unknown data id: %r" % typeid)
elif data_type is None:
data_type = default
return data_type, typeid, holder
def lookup_type(self, key):
data_type, typeid, holder = self._lookup_type(key)
return data_type
def exists(self, key):
return self._lookup_type(key, default=False)
def lookup_smart(self, key, default=NO_DEFAULT):
data_type = holder = None
try:
data_type, typeid, holder = self._lookup_type(key)
        except (KeyError, UnknownDataType):
pass
if not data_type:
if default is NO_DEFAULT:
raise KeyError(key)
return default
info = self._detect_type(data_type)
valholder = info.new()
countholder = None
if info.is_array:
countholder = ffi.new(HOLDER_TYPE)
val = info.nvpair_value(holder[0], valholder, countholder)
else:
val = info.nvpair_value(holder[0], valholder)
if not bool(val):
return info.to_python(valholder, countholder)
elif default is NO_DEFAULT:
raise KeyError(key)
return default
def _iter_nvlist(self, skip_unknown=False):
pair = libzfs.nvlist_next_nvpair(self.ptr, ffi.NULL)
while pair != ffi.NULL:
key = ffi.string(libzfs.nvpair_name(pair))
typeid = libzfs.nvpair_type(pair)
data_type = data_type_t_safe(typeid)
info = self._detect_type(data_type, default=None)
if (data_type and info) or skip_unknown is False:
yield pair, key, typeid, data_type, info
pair = libzfs.nvlist_next_nvpair(self.ptr, pair)
def items(self, skip_unknown=False, deep=20, extended=False):
def y(k, t, v):
if extended:
return (k, t, v)
return (k, v)
        for pair, key, typeid, data_type, info in self._iter_nvlist(skip_unknown=skip_unknown):
            if data_type is None or info is None:
                yield y(key, data_type, None)
                continue
valholder = info.new()
countholder = None
if info.is_array:
countholder = ffi.new(HOLDER_TYPE)
val = info.nvpair_value(pair, valholder, countholder)
else:
val = info.nvpair_value(pair, valholder)
if not bool(val):
value = info.to_python(valholder, countholder)
def _iter(item):
item._free = self._free
with item:
valtype = list if extended else dict
dictval = valtype(item.items(skip_unknown=skip_unknown, deep=deep - 1, extended=extended))
return dictval
if deep > 0 and isinstance(value, NVList):
value = _iter(value)
elif deep > 0 and isinstance(value, list) and isinstance(value[0], NVList):
value = [_iter(x) for x in value]
yield y(key, data_type, value)
pair = libzfs.nvlist_next_nvpair(self.ptr, pair)
class NVListHandler(object):
def __init__(self, funcname, typename, converter, add_converter = None, is_array = False):
self._funcname = funcname
self._typename = typename
self._converter = converter
self._add_converter = add_converter
self._is_array = is_array
def new(self):
return ffi.new(self._typename)
def to_python(self, x, count = None):
if self._converter:
if self.is_array:
return self._converter(x, count)
return self._converter(x)
return x
def from_python(self, x):
if callable(self._add_converter):
return self._add_converter(x)
if self._add_converter is False:
raise Exception("Unable to convert type")
return x
def _get_libzfs_func(self, prefix):
return getattr(libzfs, '%s_%s' % (prefix, self._funcname))
@property
def nvlist_add(self):
return self._get_libzfs_func('nvlist_add')
@property
def nvlist_lookup(self):
return self._get_libzfs_func('nvlist_lookup')
@property
def nvpair_value(self):
return self._get_libzfs_func('nvpair_value')
@property
def is_array(self):
return self._is_array
def _array_converter(converter):
def _inner(x, count):
items = []
for i in range(count[0]):
items.append(converter(x[0][i]))
return items
return _inner
def _to_int(hdl):
if isinstance(hdl, six.integer_types):
return int(hdl)
return int(hdl[0])
#
# Key: configuration
# - add func
# - lookup func
# - lookup holder type
# - add converter
# - lookup converter
#
NVLIST_HANDLERS = {
data_type_t.DATA_TYPE_BOOLEAN: NVListHandler('boolean_value', 'boolean_t *', lambda x: bool(x[0]), boolean_t),
data_type_t.DATA_TYPE_BOOLEAN_VALUE: NVListHandler('boolean_value', 'boolean_t *', lambda x: bool(x[0]), boolean_t),
data_type_t.DATA_TYPE_BYTE: NVListHandler('byte', 'uchar_t *', _to_int, None),
data_type_t.DATA_TYPE_INT8: NVListHandler('int8', 'int8_t *', _to_int, None),
data_type_t.DATA_TYPE_UINT8: NVListHandler('uint8', 'uint8_t *', _to_int, None),
data_type_t.DATA_TYPE_INT16: NVListHandler('int16', 'int16_t *', _to_int, None),
data_type_t.DATA_TYPE_UINT16: NVListHandler('uint16', 'uint16_t *', _to_int, None),
data_type_t.DATA_TYPE_INT32: NVListHandler('int32', 'int32_t *', _to_int, None),
data_type_t.DATA_TYPE_UINT32: NVListHandler('uint32', 'uint32_t *', _to_int, None),
data_type_t.DATA_TYPE_INT64: NVListHandler('int64', 'int64_t *', _to_int, None),
data_type_t.DATA_TYPE_UINT64: NVListHandler('uint64', 'uint64_t *', _to_int, None),
data_type_t.DATA_TYPE_STRING: NVListHandler('string', 'char **', lambda x: ffi.string(x[0]), None),
data_type_t.DATA_TYPE_NVLIST: NVListHandler('nvlist', 'nvlist_t **', NVList.from_nvlist_hdl, False),
    data_type_t.DATA_TYPE_BYTE_ARRAY: NVListHandler('byte_array', 'uchar_t **', _array_converter(_to_int), None, True),
data_type_t.DATA_TYPE_INT8_ARRAY: NVListHandler('int8_array', 'int8_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_UINT8_ARRAY: NVListHandler('uint8_array', 'uint8_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_INT16_ARRAY: NVListHandler('int16_array', 'int16_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_UINT16_ARRAY: NVListHandler('uint16_array', 'uint16_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_INT32_ARRAY: NVListHandler('int32_array', 'int32_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_UINT32_ARRAY: NVListHandler('uint32_array', 'uint32_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_INT64_ARRAY: NVListHandler('int64_array', 'int64_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_UINT64_ARRAY: NVListHandler('uint64_array', 'uint64_t **', _array_converter(_to_int), False, True),
data_type_t.DATA_TYPE_NVLIST_ARRAY: NVListHandler('nvlist_array', 'nvlist_t ***',
_array_converter(NVList.from_nvlist_ptr), False, True),
data_type_t.DATA_TYPE_STRING_ARRAY: NVListHandler('string_array', 'char ***',
_array_converter(lambda x: ffi.string(x)), False, True),
}
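# A minimal round-trip sketch, assuming libzfs/libnvpair are available at runtime:
#
#   nvl = NVList()
#   with nvl:
#       nvl.add("guid", data_type_t.DATA_TYPE_UINT64, 12345)
#       assert nvl.lookup("guid", data_type_t.DATA_TYPE_UINT64) == 12345
#       print(dict(nvl.items()))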
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
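# For example, _sparsify(np.array([[0.2, 0.8], [0.6, 0.1]])) zeroes entries below the
# 0.5 threshold and returns a SparseTensor with indices [[0, 1], [1, 0]], values
# [0.8, 0.6] and dense_shape (2, 2), together with the 1-D values array [0.8, 0.6].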
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
"Incompatible shapes|Dimensions must be equal"):
f(x.astype(t), y.astype(t))
class LogicalOpTest(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with test_util.device(use_gpu=use_gpu):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@test_util.run_deprecated_v1
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = constant_op.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = constant_op.constant(3)
y = constant_op.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = constant_op.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
class SelectOpTest(test.TestCase):
def _compare(self, fn, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with test_util.device(use_gpu=use_gpu):
out = fn(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self,
fn,
c,
x,
y,
numeric_gradient_type=None,
x_init_value=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
if x_init_value is None:
x_init_value = x
if x.shape != y.shape:
x_init_value = np.broadcast_to(y, x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x_init_value)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=x, delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testScalar(self, fn):
c = True
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalar(self):
self._testScalar(array_ops.where)
self._testScalar(array_ops.where_v2)
def _testScalarBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalarBroadcast(self):
c = True
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
def _testBasic(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasic(self):
self._testBasic(array_ops.where)
self._testBasic(array_ops.where_v2)
def _testBasicBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasicBroadcast(self):
c0 = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
c1 = np.random.randint(0, 2, 2).astype(np.bool).reshape(1, 1, 2)
c2 = np.random.randint(0, 2, 3).astype(np.bool).reshape(1, 3, 1)
c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)
for c in [c0, c1, c2, c3]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
def _testGradients(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(fn, c, xt, yt, np.float)
self._compareGradientY(fn, c, xt, yt, np.float)
else:
self._compareGradientX(fn, c, xt, yt)
self._compareGradientY(fn, c, xt, yt)
@test_util.run_deprecated_v1
def testGradients(self):
self._testGradients(array_ops.where)
self._testGradients(array_ops.where_v2)
@test_util.run_deprecated_v1
def testGradientsBroadcast(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for t in [np.float32, np.float64]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
def _testShapeMismatch(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
fn(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
self._testShapeMismatch(array_ops.where)
self._testShapeMismatch(array_ops.where_v2)
def _testEmptyTensor(self, fn):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.cached_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = fn(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
@test_util.run_deprecated_v1
def testEmptyTensor(self):
self._testEmptyTensor(array_ops.where)
self._testEmptyTensor(array_ops.where_v2)
def _testNan(self, fn):
with self.cached_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = fn(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
@test_util.run_deprecated_v1
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
self._testNan(array_ops.where)
self._testNan(array_ops.where_v2)
class BatchSelectOpTest(test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with test_util.device(use_gpu=use_gpu):
out = array_ops.where(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
@test_util.run_deprecated_v1
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
array_ops.where(c, xt, yt)
class MinMaxOpTest(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
tf_min, tf_max = self.evaluate([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1).item() * 100. # should broadcast
# dropped np.float64, int64 because TF automatically converts to 32 bit
for t in [np.float32, np.int32]:
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
self._compareGradientX(math_ops.maximum, x, y)
self._compareGradientY(math_ops.maximum, x, y)
self._compareGradientX(math_ops.minimum, x, y)
self._compareGradientY(math_ops.minimum, x, y)
class MathOpsOverloadTest(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with test_util.force_cpu():
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return self.evaluate(z)
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
self.assertAllClose(np_ans,
self._computeTensorAndLiteral(x, y, dtype, tf_func))
self.assertAllClose(np_ans,
self._computeLiteralAndTensor(x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with test_util.force_cpu():
self.assertAllClose(
np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))
def testOverload(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
dtypes_lib.complex64,
dtypes_lib.complex128,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
if dtype in (dtypes_lib.complex64,
dtypes_lib.complex128) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [dtypes_lib.int32, dtypes_lib.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
(np.logical_xor, _XOR), (np.equal, math_ops.equal),
(np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False], dtypes_lib.bool, np_func,
tf_func)
self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
inx), math_ops.is_nan(inx)
tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
self.assertAllEqual(np_finite, tf_finite)
self.assertShapeEqual(np_inf, oinf)
self.assertShapeEqual(np_nan, onan)
self.assertShapeEqual(np_finite, ofinite)
def _testDtype(self, dtype):
fi = np.finfo(dtype)
data = np.array([
0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
np.inf, np.nan
]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testHalf(self):
self._testDtype(np.float16)
def testFloat(self):
self._testDtype(np.float32)
def testDouble(self):
self._testDtype(np.float64)
def testSqrt(self):
for dtype in [np.float16, np.float32, np.float64]:
fi = np.finfo(dtype)
for size in [1, 3, 4, 7, 8, 63, 64, 65]:
# For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
# It is not accurate for very large arguments, so we test for
# fi.max/100 instead of fi.max here.
for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
with test_util.use_gpu():
tf_y = math_ops.sqrt(x)
tf_nan = math_ops.is_nan(tf_y)
if value < 0:
self.assertAllEqual(np_nan, self.evaluate(tf_nan))
else:
self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
class RoundingTest(test.TestCase):
def _compare_values(self, x, y=None):
y = np.rint(x) if y is None else np.asarray(y)
tf_rint = math_ops.rint(x)
np_rint = self.evaluate(tf_rint)
self.assertAllEqual(y, np_rint)
self.assertShapeEqual(y, tf_rint)
def _compare(self, x):
np_floor, np_ceil = np.floor(x), np.ceil(x)
inx = ops.convert_to_tensor(x)
ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = self.evaluate([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
self.assertShapeEqual(np_ceil, oceil)
def _testDtype(self, dtype):
data = (np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)
self._compare(data)
# TODO: rint op is not supported for float16
if dtype is np.float16:
return
self._compare_values(data)
x = [0.5, 0.5000001]
y = [0.0, 1.0]
self._compare_values(x, y=y)
# numpy example
x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
y = [-2., -2., -0., 0., 2., 2., 2.]
self._compare_values(x, y=y)
def testTypes(self):
self.skipTest("b/131162241")
for dtype in [np.float16, np.float32, np.float64]:
self._testDtype(dtype)
class ComplexMakeRealImagTest(test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with test_util.device(use_gpu=use_gpu):
real = ops.convert_to_tensor(real)
imag = ops.convert_to_tensor(imag)
tf_ans = math_ops.complex(real, imag)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def testMake(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
def testRealImagNumericType(self):
for use_gpu in [True, False]:
for value in [1., 1j, 1. + 1j]:
np_real, np_imag = np.real(value), np.imag(value)
with test_util.device(use_gpu=use_gpu):
tf_real = math_ops.real(value)
tf_imag = math_ops.imag(value)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
np_zeros = np_real * 0
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_real = math_ops.real(inx)
tf_imag = math_ops.imag(inx)
tf_real_real = math_ops.real(tf_real)
tf_imag_real = math_ops.imag(tf_real)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
self.assertAllEqual(np_real, self.evaluate(tf_real_real))
self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
def testRealImag64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def testRealImag128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def _compareAngle(self, cplx, use_gpu):
np_angle = np.angle(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_angle = math_ops.angle(inx)
tf_angle_val = self.evaluate(tf_angle)
self.assertAllClose(np_angle, tf_angle_val)
self.assertShapeEqual(np_angle, tf_angle)
def testAngle64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
def testAngle(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testRealReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.real(x)
self.assertEqual(x, y)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_conj = math_ops.conj(inx)
tf_ans = self.evaluate(tf_conj)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
def testConj64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def testConj128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testConjReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
dtypes_lib.float32, dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.conj(x)
self.assertEqual(x, y)
@test_util.run_deprecated_v1
def testConjString(self):
x = array_ops.placeholder(dtypes_lib.string)
with self.assertRaisesRegexp(TypeError,
r"Expected numeric or variant tensor"):
math_ops.conj(x)
def _compareGradient(self, x):
# x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
# complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
# * real) + sum(imag * imag). We just want to make sure the
# gradient function is checked.
with self.cached_session():
inx = ops.convert_to_tensor(x)
real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
cplx = math_ops.complex(real, imag)
cplx = math_ops.conj(cplx)
loss = math_ops.reduce_sum(math_ops.square(
math_ops.real(cplx))) + math_ops.reduce_sum(
math_ops.square(math_ops.imag(cplx)))
epsilon = 1e-3
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def _compareBroadcastGradient(self, x):
x_ = ops.convert_to_tensor(x)
epsilon = 1e-3
with self.cached_session():
for args in [(x_, 0.), (0., x_)]:
z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
jacob_t, jacob_n = gradient_checker.compute_gradient(
x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testGradient(self):
# complex64
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
self._compareGradient(data)
self._compareBroadcastGradient(data)
# complex128
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
self._compareGradient(data)
def _compareMulGradient(self, data):
# data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.cached_session():
inp = ops.convert_to_tensor(data)
xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)
def vec(x): # Reshape to a vector
return array_ops.reshape(x, [-1])
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
return math_ops.complex(r, i)
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
# Defines the loss function as the sum of all coefficients of z.
loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
epsilon = 0.005
jacob_t, jacob_n = gradient_checker.compute_gradient(
inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testMulGradient(self):
data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
self._compareMulGradient(data)
class PolyvalTest(test.TestCase):
def _runtest(self, dtype, degree):
x = np.random.rand(2, 2).astype(dtype)
coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testSimple(self):
for dtype in [
np.int32, np.float32, np.float64, np.complex64, np.complex128
]:
for degree in range(5):
self._runtest(dtype, degree)
def testBroadcast(self):
dtype = np.float32
degree = 3
shapes = [(1,), (2, 1), (1, 2), (2, 2)]
for x_shape in shapes:
for coeff_shape in shapes:
x = np.random.rand(*x_shape).astype(dtype)
coeffs = [
np.random.rand(*coeff_shape).astype(dtype)
for _ in range(degree + 1)
]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testEmpty(self):
x = np.random.rand(2, 2).astype(np.float32)
coeffs = []
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def test_coeffs_raise(self):
x = np.random.rand(2, 2).astype(np.float32)
coeffs = {}
with self.assertRaisesRegexp(ValueError, "Argument coeffs must be list"):
math_ops.polyval(coeffs, x)
class SingularGradientOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientAtSingularity(self):
if not compat.forward_compatible(2020, 3, 14):
self.skipTest("Skipping test for future functionality.")
ops_and_singularity = [
(gen_math_ops.reciprocal, (0.,)),
(gen_math_ops.rsqrt, (0.,)),
(gen_math_ops.sqrt, (0.,)),
(gen_math_ops.sqrt_grad, (
0.,
0.,
)),
(gen_math_ops.reciprocal_grad, (
1.,
0.,
)),
(gen_math_ops.tan, (np.pi / 2,)),
(gen_math_ops.log, (0.,)),
(gen_math_ops.log1p, (-1.,)),
(gen_math_ops.acosh, (0.,)),
(gen_math_ops.asin, (1.,)),
(gen_math_ops.acos, (1.,)),
(gen_math_ops.atan2, (0., 0.)),
(gen_math_ops.div, (1., 0.)),
(gen_math_ops.div_no_nan, (1., 0.)),
(gen_math_ops.real_div, (1., 0.)),
(math_ops.pow, (0., -1.)),
]
for op, singularity in ops_and_singularity:
for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128):
if dtype.is_complex and op in [
gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2
]:
continue
if dtype == dtypes_lib.half and op in [
gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,
gen_math_ops.atan2
]:
continue
with self.cached_session():
print("op = ", op, ", singularity = ", singularity, ", type = ",
dtype)
args = [constant_op.constant(s, dtype=dtype) for s in singularity]
grad_y = constant_op.constant(0, dtype=dtype)
y = op(*args)
g = gradients_impl.gradients(y, args, grad_ys=grad_y)
g_val = self.evaluate(g)
self.assertAllEqual(g_val, np.zeros(len(singularity)))
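# Editor's note (illustrative): the test above passes grad_ys=0 so that, with
# the forward-compatible gradient kernels, each op's gradient at its singular
# input (e.g. log at 0, asin at 1) evaluates to exactly 0 rather than NaN/Inf.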
if __name__ == "__main__":
test.main()
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modules/certificate/."""
__author__ = 'John Orr ([email protected])'
import actions
from controllers import sites
from models import courses
from models import models
from models import student_work
from modules.certificate import certificate
from modules.certificate import custom_criteria
from modules.review import domain
from modules.review import peer
from modules.review import review as review_module
from google.appengine.api import namespace_manager
from google.appengine.ext import db
class MockHandler(object):
def gettext(self, text):
return text
class CertificateHandlerTestCase(actions.TestBase):
"""Tests for the handler which presents the certificate."""
def setUp(self):
super(CertificateHandlerTestCase, self).setUp()
# Mock the module's student_is_qualified method
self.is_qualified = True
self.original_student_is_qualified = certificate.student_is_qualified
certificate.student_is_qualified = (
lambda student, course: self.is_qualified)
def tearDown(self):
certificate.student_is_qualified = self.original_student_is_qualified
super(CertificateHandlerTestCase, self).tearDown()
def test_student_must_be_enrolled(self):
# If student not in session, expect redirect
response = self.get('/certificate')
self.assertEquals(302, response.status_code)
# If student is not enrolled, expect redirect
actions.login('[email protected]')
response = self.get('/certificate')
self.assertEquals(302, response.status_code)
self.assertEquals(
'http://localhost/preview', response.headers['Location'])
# If the student is enrolled, expect certificate
models.Student.add_new_student_for_current_user('Test User', None, self)
response = self.get('/certificate')
self.assertEquals(200, response.status_code)
def test_student_must_be_qualified(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Test User', None, self)
# If student is not qualified, expect redirect to home page
self.is_qualified = False
response = self.get('/certificate')
self.assertEquals(302, response.status_code)
self.assertEquals('http://localhost/', response.headers['Location'])
# If student is qualified, expect certificate
self.is_qualified = True
response = self.get('/certificate')
self.assertEquals(200, response.status_code)
def test_certificate_should_have_student_nickname(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Jane Doe', None, self)
response = self.get('/certificate')
self.assertEquals(200, response.status_code)
self.assertIn('Jane Doe', response.body)
def test_download_pdf(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Test User', None, self)
response = self.get('/certificate.pdf')
self.assertEqual('application/pdf', response.headers['Content-Type'])
self.assertEqual(
'attachment; filename=certificate.pdf',
response.headers['Content-Disposition'])
self.assertIn('/Title (Course Builder Certificate)', response.body)
def test_certificate_table_entry(self):
actions.login('[email protected]')
models.Student.add_new_student_for_current_user('Test User', None, self)
student = models.Student.get_by_email('[email protected]')
all_courses = sites.get_all_courses()
app_context = all_courses[0]
course = courses.Course(None, app_context=app_context)
# If the student is qualified, a link is shown
self.is_qualified = True
mock_handler = MockHandler()
table_entry = certificate.get_certificate_table_entry(
mock_handler, student, course)
self.assertEquals('Certificate', table_entry[0])
link = str(table_entry[1])
self.assertEquals(
'<a href="certificate">Click for certificate</a> '
'| <a href="certificate.pdf">Download PDF</a>', link)
# If the student is not qualified, a message is shown
self.is_qualified = False
table_entry = certificate.get_certificate_table_entry(
mock_handler, student, course)
self.assertEquals('Certificate', table_entry[0])
self.assertIn(
'You have not yet met the course requirements', table_entry[1])
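# Editor's note (illustrative): the tests below exercise 'certificate_criteria'
# entries of the following shapes, as used later in this file:
# {'assessment_id': <unit_id>, 'pass_percent': 70.0} for machine-graded
# assessments, {'assessment_id': <unit_id>} for peer-graded ones, and
# {'custom_criteria': '<registered function name>'} for custom criteria.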
class CertificateCriteriaTestCase(actions.TestBase):
"""Tests the different certificate criteria configurations."""
COURSE_NAME = 'certificate_criteria'
STUDENT_EMAIL = '[email protected]'
ADMIN_EMAIL = '[email protected]'
ANALYTICS_URL = ('/' + COURSE_NAME +
'/dashboard?action=analytics&tab=certificates_earned')
def setUp(self):
super(CertificateCriteriaTestCase, self).setUp()
self.base = '/' + self.COURSE_NAME
context = actions.simple_add_course(
self.COURSE_NAME, self.ADMIN_EMAIL, 'Certificate Criteria')
self.old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace('ns_%s' % self.COURSE_NAME)
self.course = courses.Course(None, context)
self.course.save()
actions.login(self.STUDENT_EMAIL)
actions.register(self, self.STUDENT_EMAIL)
self.student = (
models.StudentProfileDAO.get_enrolled_student_by_email_for(
self.STUDENT_EMAIL, context))
# Override course.yaml settings by patching app_context.
self.get_environ_old = sites.ApplicationContext.get_environ
self.certificate_criteria = []
def get_environ_new(app_context):
environ = self.get_environ_old(app_context)
environ['certificate_criteria'] = self.certificate_criteria
return environ
sites.ApplicationContext.get_environ = get_environ_new
def tearDown(self):
# Clean up app_context.
sites.ApplicationContext.get_environ = self.get_environ_old
namespace_manager.set_namespace(self.old_namespace)
super(CertificateCriteriaTestCase, self).tearDown()
def _assert_redirect_to_course_landing_page(self, response):
self.assertEquals(302, response.status_code)
self.assertEquals('http://localhost/' + self.COURSE_NAME + '/', (
response.headers['Location']))
def test_no_criteria(self):
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
def _run_analytic_and_expect(self, expected_students,
expected_active_students,
expected_certificates):
actions.login(self.ADMIN_EMAIL)
response = self.get(self.ANALYTICS_URL)
self.submit(response.forms['gcb-run-visualization-certificates_earned'],
response)
self.execute_all_deferred_tasks()
dom = self.parse_html_string(self.get(self.ANALYTICS_URL).body)
total_students = int(
dom.find('.//span[@id="total_students"]').text)
total_active_students = int(
dom.find('.//span[@id="total_active_students"]').text)
total_certificates = int(
dom.find('.//span[@id="total_certificates"]').text)
self.assertEquals(expected_students, total_students)
self.assertEquals(expected_active_students, total_active_students)
self.assertEquals(expected_certificates, total_certificates)
actions.login(self.STUDENT_EMAIL)
def test_no_criteria_analytic(self):
self._run_analytic_and_expect(1, 0, 0)
def test_machine_graded(self):
assessment = self.course.add_assessment()
assessment.title = 'Assessment'
assessment.html_content = 'assessment content'
assessment.now_available = True
self.course.save()
self.certificate_criteria.append(
{'assessment_id': assessment.unit_id, 'pass_percent': 70.0})
        # Student has not yet completed the assessment; expect redirect to the
        # course landing page
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
self._run_analytic_and_expect(1, 0, 0) # 1 student, 0 active, no cert.
# Submit assessment with low score
actions.submit_assessment(
self,
assessment.unit_id,
{'answers': '', 'score': 50.0,
'assessment_type': assessment.unit_id},
presubmit_checks=False
)
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
self._run_analytic_and_expect(1, 1, 0) # 1 student, 1 active, no cert
# Submit assessment with expected score
actions.submit_assessment(
self,
assessment.unit_id,
{'answers': '', 'score': 70,
'assessment_type': assessment.unit_id},
presubmit_checks=False
)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
self._run_analytic_and_expect(1, 1, 1) # 1 student, 1 active, 1 cert
def _submit_review(self, assessment):
"""Submits a review by the current student.
Creates a new user that completes the assessment as well,
so that the student can review it.
Args:
assessment: The assessment to review.
"""
reviewer_key = self.student.get_key()
reviewee = models.Student(key_name='[email protected]')
reviewee_key = reviewee.put()
submission_key = db.Key.from_path(
student_work.Submission.kind(),
student_work.Submission.key_name(
reviewee_key=reviewee_key, unit_id=str(assessment.unit_id)))
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=reviewee_key,
submission_key=submission_key, unit_id=str(assessment.unit_id)
).put()
review_key = student_work.Review(
contents='old_contents', reviewee_key=reviewee_key,
reviewer_key=reviewer_key, unit_id=str(assessment.unit_id)).put()
step_key = peer.ReviewStep(
assigner_kind=domain.ASSIGNER_KIND_HUMAN,
review_key=review_key, review_summary_key=summary_key,
reviewee_key=reviewee_key, reviewer_key=reviewer_key,
submission_key=submission_key,
state=domain.REVIEW_STATE_ASSIGNED, unit_id=str(assessment.unit_id)
).put()
updated_step_key = review_module.Manager.write_review(
step_key, 'new_contents')
self.assertEqual(step_key, updated_step_key)
def test_peer_graded(self):
assessment = self.course.add_assessment()
assessment.title = 'Assessment'
assessment.html_content = 'assessment content'
assessment.workflow_yaml = (
'{grader: human,'
'matcher: peer,'
'review_due_date: \'2034-07-01 12:00\','
'review_min_count: 1,'
'review_window_mins: 20,'
'submission_due_date: \'2034-07-01 12:00\'}')
assessment.now_available = True
self.course.save()
self.certificate_criteria.append(
{'assessment_id': assessment.unit_id})
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
actions.submit_assessment(
self,
assessment.unit_id,
{'answers': '', 'assessment_type': assessment.unit_id},
presubmit_checks=False
)
# Submitting assessment without doing required reviews is not enough
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
# Submitting assessment together with required reviews is enough
self._submit_review(assessment)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
def test_custom_criteria(self):
def test_custom_criterion(unused_student, unused_course):
return True
CRITERION = 'test_custom_criterion'
self.certificate_criteria.append(
{'custom_criteria': CRITERION})
setattr(custom_criteria, CRITERION, test_custom_criterion)
custom_criteria.registration_table.append(CRITERION)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
def test_combination(self):
# Add machine graded assessment
machine_graded = self.course.add_assessment()
machine_graded.title = 'Machine Graded'
machine_graded.html_content = 'assessment content'
machine_graded.now_available = True
# Add peer graded assessment
peer_graded = self.course.add_assessment()
peer_graded.title = 'Peer Graded'
peer_graded.html_content = 'assessment content'
peer_graded.workflow_yaml = (
'{grader: human,'
'matcher: peer,'
'review_due_date: \'2034-07-01 12:00\','
'review_min_count: 1,'
'review_window_mins: 20,'
'submission_due_date: \'2034-07-01 12:00\'}')
peer_graded.now_available = True
self.course.save()
self.certificate_criteria.extend([
{'assessment_id': machine_graded.unit_id, 'pass_percent': 30},
{'assessment_id': peer_graded.unit_id}])
# Confirm that meeting one criterion is not sufficient
actions.submit_assessment(
self,
machine_graded.unit_id,
{'answers': '', 'score': 40,
'assessment_type': machine_graded.unit_id},
presubmit_checks=False
)
response = self.get('certificate')
self._assert_redirect_to_course_landing_page(response)
# Confirm that meeting both criteria is sufficient
actions.submit_assessment(
self,
peer_graded.unit_id,
{'answers': '', 'assessment_type': peer_graded.unit_id},
presubmit_checks=False
)
self._submit_review(peer_graded)
response = self.get('certificate')
self.assertEquals(200, response.status_code)
|
|
# Copyright 2010 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import socket
from xml.dom import minidom
from webob import exc
import webob
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
from nova import exception
from nova import flags
from nova import log as logging
from nova.rpc import common as rpc_common
from nova.scheduler import api as scheduler_api
from nova import utils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return 'security_groups' in datum
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
if detailed:
elem.set('userId', 'user_id')
elem.set('tenantId', 'tenant_id')
elem.set('updated')
elem.set('created')
elem.set('hostId')
elem.set('accessIPv4')
elem.set('accessIPv6')
elem.set('status')
elem.set('progress')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
# Attach security groups node
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector='security_groups')
secgrp.set('name')
xmlutil.make_links(elem, 'links')
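# Editor's sketch (illustrative, attribute list abridged): a detailed server
# rendered through this template has roughly the following shape:
#
#   <server id="..." name="..." status="..." userId="..." tenantId="...">
#     <image id="..."><atom:link .../></image>
#     <flavor id="..."><atom:link .../></flavor>
#     <fault>...</fault>
#     <metadata>...</metadata>
#     <addresses>...</addresses>
#     <security_groups><security_group name="..."/></security_groups>
#     <atom:link .../>
#   </server>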
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""
Common deserializer to handle xml-formatted server create
requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request"""
node = self.find_first_child_named(server_node, "personality")
if node is not None:
personality = []
for file_node in self.find_children_named(node, "file"):
item = {}
if file_node.hasAttribute("path"):
item["path"] = file_node.getAttribute("path")
item["contents"] = self.extract_text(file_node)
personality.append(item)
return personality
else:
return None
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request"""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "imageRef", "flavorRef", "adminPass",
"accessIPv4", "accessIPv6"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
metadata_node = self.find_first_child_named(server_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
personality = self._extract_personality(server_node)
if personality is not None:
server["personality"] = personality
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
security_groups = self._extract_security_groups(server_node)
if security_groups is not None:
server["security_groups"] = security_groups
auto_disk_config = server_node.getAttribute('auto_disk_config')
if auto_disk_config:
server['auto_disk_config'] = utils.bool_from_str(auto_disk_config)
return server
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request"""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
networks.append(item)
return networks
else:
return None
def _extract_security_groups(self, server_node):
"""Marshal the security_groups attribute of a parsed request"""
node = self.find_first_child_named(server_node, "security_groups")
if node is not None:
security_groups = []
for sg_node in self.find_children_named(node, "security_group"):
item = {}
name_node = self.find_first_child_named(sg_node, "name")
if name_node:
item["name"] = self.extract_text(name_node)
security_groups.append(item)
return security_groups
else:
return None
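# Editor's sketch (illustrative XML, values hypothetical): for a create request
# body such as
#
#   <server name="vm-1" imageRef="..." flavorRef="1" accessIPv4="1.2.3.4">
#     <metadata><meta key="k">v</meta></metadata>
#     <personality><file path="/etc/motd">aGVsbG8=</file></personality>
#   </server>
#
# _extract_server returns a dict along the lines of
# {'name': 'vm-1', 'imageRef': '...', 'flavorRef': '1', 'accessIPv4': '1.2.3.4',
#  'metadata': {'k': 'v'},
#  'personality': [{'path': '/etc/motd', 'contents': 'aGVsbG8='}]}.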
class ActionDeserializer(CommonDeserializer):
"""
Deserializer to handle xml-formatted server action requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
dom = minidom.parseString(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_deserializer = {
'createImage': self._action_create_image,
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'rebuild': self._action_rebuild,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
}.get(action_name, super(ActionDeserializer, self).default)
action_data = action_deserializer(action_node)
return {'body': {action_name: action_data}}
def _action_create_image(self, node):
return self._deserialize_image_action(node, ('name',))
def _action_change_password(self, node):
if not node.hasAttribute("adminPass"):
raise AttributeError("No adminPass was specified in request")
return {"adminPass": node.getAttribute("adminPass")}
def _action_reboot(self, node):
if not node.hasAttribute("type"):
raise AttributeError("No reboot type was specified in request")
return {"type": node.getAttribute("type")}
def _action_rebuild(self, node):
rebuild = {}
if node.hasAttribute("name"):
rebuild['name'] = node.getAttribute("name")
if node.hasAttribute("auto_disk_config"):
rebuild['auto_disk_config'] = node.getAttribute("auto_disk_config")
metadata_node = self.find_first_child_named(node, "metadata")
if metadata_node is not None:
rebuild["metadata"] = self.extract_metadata(metadata_node)
personality = self._extract_personality(node)
if personality is not None:
rebuild["personality"] = personality
if not node.hasAttribute("imageRef"):
raise AttributeError("No imageRef was specified in request")
rebuild["imageRef"] = node.getAttribute("imageRef")
return rebuild
def _action_resize(self, node):
resize = {}
if node.hasAttribute("flavorRef"):
resize["flavorRef"] = node.getAttribute("flavorRef")
else:
raise AttributeError("No flavorRef was specified in request")
if node.hasAttribute("auto_disk_config"):
resize['auto_disk_config'] = node.getAttribute("auto_disk_config")
return resize
def _action_confirm_resize(self, node):
return None
def _action_revert_resize(self, node):
return None
def _deserialize_image_action(self, node, allowed_attributes):
data = {}
for attribute in allowed_attributes:
value = node.getAttribute(attribute)
if value:
data[attribute] = value
metadata_node = self.find_first_child_named(node, 'metadata')
if metadata_node is not None:
metadata = self.metadata_deserializer.extract_metadata(
metadata_node)
data['metadata'] = metadata
return data
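# Editor's sketch (illustrative): an action document such as
# '<reboot type="HARD"/>' is deserialized by the mapping above into
# {'body': {'reboot': {'type': 'HARD'}}}; unknown action names fall through to
# the parent deserializer's default().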
class CreateDeserializer(CommonDeserializer):
"""
Deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
"""Deserialize an xml-formatted server create request"""
dom = minidom.parseString(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
class Controller(wsgi.Controller):
""" The Server API base controller class for the OpenStack API """
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = link[0]['href']
# Convenience return
return robj
def __init__(self, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
""" Returns a list of server names and ids for a given user """
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=str(err))
except exception.NotFound:
raise exc.HTTPNotFound()
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
""" Returns a list of server details for a given user """
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=str(err))
except exception.NotFound as err:
raise exc.HTTPNotFound()
return servers
def _get_block_device_mapping(self, data):
"""Get block_device_mapping from 'server' dictionary.
Overridden by volumes controller.
"""
return None
def _add_instance_faults(self, ctxt, instances):
faults = self.compute_api.get_instance_faults(ctxt, instances)
if faults is not None:
for instance in instances:
faults_list = faults.get(instance['uuid'], [])
try:
instance['fault'] = faults_list[0]
except IndexError:
pass
return instances
def _get_servers(self, req, is_detail):
"""Returns a list of servers, taking into account any search
options specified.
"""
search_opts = {}
search_opts.update(req.str_GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Convert local_zone_only into a boolean
search_opts['local_zone_only'] = utils.bool_from_str(
search_opts.get('local_zone_only', False))
# If search by 'status', we need to convert it to 'vm_state'
# to pass on to child zones.
if 'status' in search_opts:
status = search_opts['status']
state = common.vm_state_from_status(status)
if state is None:
reason = _('Invalid server status: %(status)s') % locals()
raise exception.InvalidInput(reason=reason)
search_opts['vm_state'] = state
if 'changes-since' in search_opts:
try:
parsed = utils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
        # should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
# NOTE(dprince) This prevents computes' get_all() from returning
        # instances from multiple tenants when an admin account is used.
# By default non-admin accounts are always limited to project/user
# both here and in the compute API.
if not context.is_admin or (context.is_admin and 'all_tenants'
not in search_opts):
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
instance_list = self.compute_api.get_all(context,
search_opts=search_opts)
limited_list = self._limit_items(instance_list, req)
if is_detail:
self._add_instance_faults(context, limited_list)
return self._view_builder.detail(req, limited_list)
else:
return self._view_builder.index(req, limited_list)
def _get_server(self, context, instance_uuid):
"""Utility function for looking up an instance by uuid"""
try:
return self.compute_api.routing_get(context, instance_uuid)
except exception.NotFound:
raise exc.HTTPNotFound()
def _handle_quota_error(self, error):
"""
Reraise quota errors as api-specific http exceptions
"""
code_mappings = {
"OnsetFileLimitExceeded":
_("Personality file limit exceeded"),
"OnsetFilePathLimitExceeded":
_("Personality file path too long"),
"OnsetFileContentLimitExceeded":
_("Personality file content too long"),
# NOTE(bcwaldon): expose the message generated below in order
# to better explain how the quota was exceeded
"InstanceLimitExceeded": error.message,
}
expl = code_mappings.get(error.kwargs['code'], error.message)
raise exc.HTTPRequestEntityTooLarge(explanation=expl,
headers={'Retry-After': 0})
def _validate_server_name(self, value):
if not isinstance(value, basestring):
msg = _("Server name is not a string or unicode")
raise exc.HTTPBadRequest(explanation=msg)
if value.strip() == '':
msg = _("Server name is an empty string")
raise exc.HTTPBadRequest(explanation=msg)
def _get_injected_files(self, personality):
"""
Create a list of injected files from the personality attribute
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
try:
contents = base64.b64decode(contents)
except TypeError:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
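    # Editor's note (illustrative, hypothetical values): a personality entry of
    # {'path': '/etc/motd', 'contents': 'aGVsbG8='} becomes the injected-file
    # pair ('/etc/motd', 'hello') after the base64 decode above.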
def _get_requested_networks(self, requested_networks):
"""
Create a list of requested networks from the networks attribute
"""
networks = []
for network in requested_networks:
try:
network_uuid = network['uuid']
if not utils.is_uuid_like(network_uuid):
msg = _("Bad networks format: network uuid is not in"
" proper format (%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
                # The fixed IP address is optional; if it is not provided,
                # one of the available IP addresses from the network is used.
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ipv4(address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = _("Duplicate networks (%s) are not allowed")\
% network_uuid
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
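    # Editor's note (illustrative, hypothetical values): a request list such as
    # [{'uuid': '<net uuid>', 'fixed_ip': '10.0.0.5'}, {'uuid': '<other uuid>'}]
    # is validated and flattened to
    # [('<net uuid>', '10.0.0.5'), ('<other uuid>', None)].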
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly"""
if not user_data:
return
try:
user_data = base64.b64decode(user_data)
except TypeError:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
try:
socket.inet_aton(address)
except socket.error:
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
            expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
@exception.novaclient_converter
@scheduler_api.redirect_handler
def show(self, req, id):
""" Returns server details by server id """
try:
context = req.environ['nova.context']
instance = self.compute_api.routing_get(context, id)
self._add_instance_faults(context, [instance])
return self._view_builder.show(req, instance)
except exception.NotFound:
raise exc.HTTPNotFound()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
""" Creates a new server for a given user """
if not body:
raise exc.HTTPUnprocessableEntity()
if not 'server' in body:
raise exc.HTTPUnprocessableEntity()
body['server']['key_name'] = self._get_key_name(req, body)
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if not 'name' in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
image_href = self._image_ref_from_req_data(body)
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_href = image_href.split('/').pop()
if not utils.is_uuid_like(str(image_href)):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
personality = server_dict.get('personality')
config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
sg_names = []
security_groups = server_dict.get('security_groups')
if security_groups is not None:
sg_names = [sg['name'] for sg in security_groups if sg.get('name')]
if not sg_names:
sg_names.append('default')
sg_names = list(set(sg_names))
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
        access_ip_v4 = server_dict.get('accessIPv4')
if access_ip_v4 is not None:
self._validate_access_ipv4(access_ip_v4)
        access_ip_v6 = server_dict.get('accessIPv6')
if access_ip_v6 is not None:
self._validate_access_ipv6(access_ip_v6)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
zone_blob = server_dict.get('blob')
# optional openstack extensions:
key_name = server_dict.get('key_name')
user_data = server_dict.get('user_data')
self._validate_user_data(user_data)
availability_zone = server_dict.get('availability_zone')
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
block_device_mapping = self._get_block_device_mapping(server_dict)
# Only allow admins to specify their own reservation_ids
# This is really meant to allow zones to work.
reservation_id = server_dict.get('reservation_id')
if all([reservation_id is not None,
reservation_id != '',
not context.is_admin]):
reservation_id = None
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count')
max_count = server_dict.get('max_count')
# min_count and max_count are optional. If they exist, they come
# in as strings. We want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = int(min_count) if min_count else 1
max_count = int(max_count) if max_count else min_count
if min_count > max_count:
min_count = max_count
auto_disk_config = server_dict.get('auto_disk_config')
scheduler_hints = server_dict.get('scheduler_hints', {})
try:
inst_type = \
instance_types.get_instance_type_by_flavor_id(flavor_id)
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_href,
display_name=name,
display_description=name,
key_name=key_name,
metadata=server_dict.get('metadata', {}),
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
reservation_id=reservation_id,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints)
except exception.QuotaError as error:
self._handle_quota_error(error)
except exception.InstanceTypeMemoryTooSmall as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
except exception.InstanceTypeDiskTooSmall as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.SecurityGroupNotFound as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
except rpc_common.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % \
{'err_type': err.exc_type, 'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
# Let the caller deal with unhandled exceptions.
# If the caller wanted a reservation_id, return it
if ret_resv_id:
return {'reservation_id': resv_id}
server = self._view_builder.create(req, instances[0])
if '_is_precooked' in server['server'].keys():
del server['server']['_is_precooked']
else:
if FLAGS.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _delete(self, context, id):
instance = self._get_server(context, id)
if FLAGS.reclaim_instance_interval:
self.compute_api.soft_delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
@scheduler_api.redirect_handler
def update(self, req, id, body):
"""Update server then pass on to version-specific controller"""
if len(req.body) == 0:
raise exc.HTTPUnprocessableEntity()
if not body:
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = access_ipv4.strip()
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = access_ipv6.strip()
if 'auto_disk_config' in body['server']:
auto_disk_config = utils.bool_from_str(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
instance = self.compute_api.routing_get(ctxt, id)
try:
self.compute_api.update(ctxt, instance, **update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
instance.update(update_dict)
self._add_instance_faults(ctxt, [instance])
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
@exception.novaclient_converter
@scheduler_api.redirect_handler
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
        except Exception as e:
LOG.exception(_("Error in confirm-resize %s"), e)
raise exc.HTTPBadRequest()
return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
@exception.novaclient_converter
@scheduler_api.redirect_handler
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
        except Exception as e:
LOG.exception(_("Error in revert-resize %s"), e)
raise exc.HTTPBadRequest()
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
@exception.novaclient_converter
@scheduler_api.redirect_handler
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
            if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
        except Exception as e:
LOG.exception(_("Error in reboot %s"), e)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameSize:
msg = _("Resize requires a change in size.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
return webob.Response(status_int=202)
@wsgi.response(204)
@exception.novaclient_converter
@scheduler_api.redirect_handler
def delete(self, req, id):
""" Destroys a server """
try:
self._delete(req.environ['nova.context'], id)
except exception.NotFound:
raise exc.HTTPNotFound()
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _get_key_name(self, req, body):
if 'server' in body:
try:
return body['server'].get('key_name')
except AttributeError:
msg = _("Malformed server entity")
raise exc.HTTPBadRequest(explanation=msg)
def _image_ref_from_req_data(self, data):
try:
return unicode(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
@exception.novaclient_converter
@scheduler_api.redirect_handler
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not 'changePassword' in body
or not 'adminPass' in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = body['changePassword']['adminPass']
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
server = self._get_server(context, id)
self.compute_api.set_admin_password(context, server, password)
return webob.Response(status_int=202)
def _limit_items(self, items, req):
return common.limited_by_marker(items, req)
def _validate_metadata(self, metadata):
"""Ensure that we can work with the metadata given."""
try:
metadata.iteritems()
except AttributeError as ex:
msg = _("Unable to parse metadata key/value pairs.")
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
@exception.novaclient_converter
@scheduler_api.redirect_handler
def _action_resize(self, req, id, body):
""" Resizes a given instance to the flavor size requested """
try:
flavor_ref = body["resize"]["flavorRef"]
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
@exception.novaclient_converter
@scheduler_api.redirect_handler
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes"""
try:
body = body['rebuild']
except (KeyError, TypeError):
raise exc.HTTPBadRequest(_("Invalid request body"))
try:
image_href = body["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
try:
password = body['adminPass']
except (KeyError, TypeError):
password = utils.generate_password(FLAGS.password_length)
context = req.environ['nova.context']
instance = self._get_server(context, id)
attr_map = {
'personality': 'files_to_inject',
'name': 'display_name',
'accessIPv4': 'access_ip_v4',
'accessIPv6': 'access_ip_v6',
'metadata': 'metadata',
'auto_disk_config': 'auto_disk_config',
}
if 'accessIPv4' in body:
self._validate_access_ipv4(body['accessIPv4'])
if 'accessIPv6' in body:
self._validate_access_ipv6(body['accessIPv6'])
kwargs = {}
for request_attribute, instance_attribute in attr_map.items():
try:
kwargs[instance_attribute] = body[request_attribute]
except (KeyError, TypeError):
pass
self._validate_metadata(kwargs.get('metadata', {}))
if 'files_to_inject' in kwargs:
personality = kwargs['files_to_inject']
kwargs['files_to_inject'] = self._get_injected_files(personality)
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**kwargs)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.ImageNotFound as error:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, id)
self._add_instance_faults(context, [instance])
view = self._view_builder.show(req, instance)
# Add on the adminPass attribute since the view doesn't do it
# unless instance passwords are disabled
if FLAGS.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
@exception.novaclient_converter
@scheduler_api.redirect_handler
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_quota_limit(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, id)
try:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage')
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = os.path.join(req.application_url,
context.project_id,
'images',
image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
""" Determine the admin password for a server on creation """
password = server.get('adminPass')
if password is None:
return utils.generate_password(FLAGS.password_length)
if not isinstance(password, basestring) or password == '':
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
return password
def _get_server_search_options(self):
"""Return server search options allowed by non-admin"""
return ('reservation_id', 'name', 'local_zone_only',
'status', 'image', 'flavor', 'changes-since')
def create_resource():
return wsgi.Resource(Controller())
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context"""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
unk_opt_str = ", ".join(unknown_options)
log_msg = _("Removing options '%(unk_opt_str)s' from query") % locals()
LOG.debug(log_msg)
for opt in unknown_options:
search_options.pop(opt, None)
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import OrderedDict
from operator import itemgetter
from flask import session
from sqlalchemy.orm import contains_eager, joinedload, load_only, undefer
from sqlalchemy.sql.expression import nullslast
from indico.core import signals
from indico.core.auth import multipass
from indico.core.db import db
from indico.core.db.sqlalchemy.custom.unaccent import unaccent_match
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.util.queries import escape_like
from indico.modules.categories import Category
from indico.modules.categories.models.principals import CategoryPrincipal
from indico.modules.events import Event
from indico.modules.users import User, logger
from indico.modules.users.models.affiliations import UserAffiliation
from indico.modules.users.models.emails import UserEmail
from indico.modules.users.models.favorites import favorite_user_table
from indico.modules.users.models.suggestions import SuggestedCategory
from indico.util.event import truncate_path
from indico.util.string import crc32, remove_accents
# colors for user-specific avatar bubbles
user_colors = ['#e06055', '#ff8a65', '#e91e63', '#f06292', '#673ab7', '#ba68c8', '#7986cb', '#3f51b5', '#5e97f6',
'#00a4e4', '#4dd0e1', '#0097a7', '#d4e157', '#aed581', '#57bb8a', '#4db6ac', '#607d8b', '#795548',
'#a1887f', '#fdd835', '#a3a3a3']
def get_admin_emails():
"""Get the email addresses of all Indico admins"""
return {u.email for u in User.query.filter_by(is_admin=True, is_deleted=False)}
def get_related_categories(user, detailed=True):
"""Gets the related categories of a user for the dashboard"""
favorites = set()
if user.favorite_categories:
favorites = set(Category.query
.filter(Category.id.in_(c.id for c in user.favorite_categories))
.options(undefer('chain_titles'))
.all())
managed = set(Category.query
.filter(Category.acl_entries.any(db.and_(CategoryPrincipal.type == PrincipalType.user,
CategoryPrincipal.user == user,
CategoryPrincipal.has_management_permission())),
~Category.is_deleted)
.options(undefer('chain_titles')))
if not detailed:
return favorites | managed
res = {}
for categ in favorites | managed:
res[(categ.title, categ.id)] = {
'categ': categ,
'favorite': categ in favorites,
'managed': categ in managed,
'path': truncate_path(categ.chain_titles[:-1], chars=50)
}
return OrderedDict(sorted(res.items(), key=itemgetter(0)))
def get_suggested_categories(user):
"""Gets the suggested categories of a user for the dashboard"""
related = set(get_related_categories(user, detailed=False))
res = []
category_strategy = contains_eager('category')
category_strategy.subqueryload('acl_entries')
category_strategy.undefer('chain_titles')
query = (user.suggested_categories
.filter_by(is_ignored=False)
.join(SuggestedCategory.category)
.options(category_strategy))
for suggestion in query:
category = suggestion.category
if (category.is_deleted or category in related or category.suggestions_disabled or
any(p.suggestions_disabled for p in category.parent_chain_query)):
user.suggested_categories.remove(suggestion)
continue
if not category.can_access(user):
continue
res.append({
'score': suggestion.score,
'categ': category,
'path': truncate_path(category.chain_titles[:-1], chars=50)
})
return res
def get_linked_events(user, dt, limit=None):
"""Get the linked events and the user's roles in them
:param user: A `User`
:param dt: Only include events taking place on/after that date
:param limit: Max number of events
"""
from indico.modules.events.abstracts.util import (get_events_with_abstract_reviewer_convener,
get_events_with_abstract_persons)
from indico.modules.events.contributions.util import get_events_with_linked_contributions
from indico.modules.events.papers.util import get_events_with_paper_roles
from indico.modules.events.registration.util import get_events_registered
from indico.modules.events.sessions.util import get_events_with_linked_sessions
from indico.modules.events.surveys.util import get_events_with_submitted_surveys
from indico.modules.events.util import (get_events_managed_by, get_events_created_by,
get_events_with_linked_event_persons)
links = OrderedDict()
for event_id in get_events_registered(user, dt):
links.setdefault(event_id, set()).add('registration_registrant')
for event_id in get_events_with_submitted_surveys(user, dt):
links.setdefault(event_id, set()).add('survey_submitter')
for event_id in get_events_managed_by(user, dt):
links.setdefault(event_id, set()).add('conference_manager')
for event_id in get_events_created_by(user, dt):
links.setdefault(event_id, set()).add('conference_creator')
for event_id, principal_roles in get_events_with_linked_sessions(user, dt).iteritems():
links.setdefault(event_id, set()).update(principal_roles)
for event_id, principal_roles in get_events_with_linked_contributions(user, dt).iteritems():
links.setdefault(event_id, set()).update(principal_roles)
for event_id, role in get_events_with_linked_event_persons(user, dt).iteritems():
links.setdefault(event_id, set()).add(role)
for event_id, roles in get_events_with_abstract_reviewer_convener(user, dt).iteritems():
links.setdefault(event_id, set()).update(roles)
for event_id, roles in get_events_with_abstract_persons(user, dt).iteritems():
links.setdefault(event_id, set()).update(roles)
for event_id, roles in get_events_with_paper_roles(user, dt).iteritems():
links.setdefault(event_id, set()).update(roles)
if not links:
return OrderedDict()
query = (Event.query
.filter(~Event.is_deleted,
Event.id.in_(links))
.options(joinedload('series'),
load_only('id', 'category_id', 'title', 'start_dt', 'end_dt',
'series_id', 'series_pos', 'series_count'))
.order_by(Event.start_dt, Event.id))
if limit is not None:
query = query.limit(limit)
return OrderedDict((event, links[event.id]) for event in query)
def serialize_user(user):
"""Serialize user to JSON-like object"""
return {
'id': user.id,
'title': user.title,
'identifier': user.identifier,
'name': user.display_full_name,
'familyName': user.last_name,
'firstName': user.first_name,
'affiliation': user.affiliation,
'phone': user.phone,
'email': user.email,
'_type': 'Avatar'
}
def _build_name_search(name_list):
text = remove_accents('%{}%'.format('%'.join(escape_like(name) for name in name_list)))
return db.or_(db.func.indico.indico_unaccent(db.func.concat(User.first_name, ' ', User.last_name)).ilike(text),
db.func.indico.indico_unaccent(db.func.concat(User.last_name, ' ', User.first_name)).ilike(text))
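# Editor's note (illustrative): for name_list ['john', 'doe'] the helper above
# builds the unaccented pattern '%john%doe%' and matches it case-insensitively
# against both "first_name last_name" and "last_name first_name", so either
# word order in the query finds the same user.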
def build_user_search_query(criteria, exact=False, include_deleted=False, include_pending=False,
favorites_first=False):
unspecified = object()
query = User.query.distinct(User.id).options(db.joinedload(User._all_emails))
if not include_pending:
query = query.filter(~User.is_pending)
if not include_deleted:
query = query.filter(~User.is_deleted)
affiliation = criteria.pop('affiliation', unspecified)
if affiliation is not unspecified:
query = query.join(UserAffiliation).filter(unaccent_match(UserAffiliation.name, affiliation, exact))
email = criteria.pop('email', unspecified)
if email is not unspecified:
query = query.join(UserEmail).filter(unaccent_match(UserEmail.email, email, exact))
# search on any of the name fields (first_name OR last_name)
name = criteria.pop('name', unspecified)
if name is not unspecified:
if exact:
raise ValueError("'name' is not compatible with 'exact'")
if 'first_name' in criteria or 'last_name' in criteria:
raise ValueError("'name' is not compatible with (first|last)_name")
query = query.filter(_build_name_search(name.replace(',', '').split()))
for k, v in criteria.iteritems():
query = query.filter(unaccent_match(getattr(User, k), v, exact))
# wrap as subquery so we can apply order regardless of distinct-by-id
query = query.from_self()
if favorites_first:
query = (query.outerjoin(favorite_user_table, db.and_(favorite_user_table.c.user_id == session.user.id,
favorite_user_table.c.target_id == User.id))
.order_by(nullslast(favorite_user_table.c.user_id)))
query = query.order_by(db.func.lower(db.func.indico.indico_unaccent(User.first_name)),
db.func.lower(db.func.indico.indico_unaccent(User.last_name)),
User.id)
return query
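# A minimal usage sketch (illustrative only; the criteria key and limit below are
# made up): the object returned above is a regular SQLAlchemy query, so callers can
# slice or paginate it before iterating, e.g.
#
#     query = build_user_search_query({'last_name': 'smith'}, exact=False)
#     first_matches = query.limit(10).all()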
def search_users(exact=False, include_deleted=False, include_pending=False, external=False, allow_system_user=False,
**criteria):
"""Searches for users.
:param exact: Indicates if only exact matches should be returned.
                  This is MUCH faster than a non-exact search,
especially when searching external users.
:param include_deleted: Indicates if also users marked as deleted
should be returned.
:param include_pending: Indicates if also users who are still
pending should be returned.
:param external: Indicates if identity providers should be searched
for matching users.
:param allow_system_user: Whether the system user may be returned
in the search results.
:param criteria: A dict containing any of the following keys:
name, first_name, last_name, email, affiliation, phone,
address
:return: A set of matching users. If `external` was set, it may
contain both :class:`~flask_multipass.IdentityInfo` objects
for external users not yet in Indico and :class:`.User`
objects for existing users.
"""
criteria = {key: value.strip() for key, value in criteria.iteritems() if value.strip()}
if not criteria:
return set()
query = (build_user_search_query(
dict(criteria),
exact=exact,
include_deleted=include_deleted,
include_pending=include_pending)
.options(db.joinedload(User.identities),
db.joinedload(User.merged_into_user)))
found_emails = {}
found_identities = {}
system_user = set()
for user in query:
for identity in user.identities:
found_identities[(identity.provider, identity.identifier)] = user
for email in user.all_emails:
found_emails[email] = user
if user.is_system and not user.all_emails and allow_system_user:
system_user = {user}
# external user providers
if external:
identities = multipass.search_identities(exact=exact, **criteria)
for ident in identities:
if not ident.data.get('email'):
# Skip users with no email
continue
if ((ident.provider.name, ident.identifier) not in found_identities and
ident.data['email'].lower() not in found_emails):
found_emails[ident.data['email'].lower()] = ident
found_identities[(ident.provider, ident.identifier)] = ident
return set(found_emails.viewvalues()) | system_user
def get_user_by_email(email, create_pending=False):
"""finds a user based on his email address.
:param email: The email address of the user.
:param create_pending: If True, this function searches for external
users and creates a new pending User in case
no existing user was found.
:return: A :class:`.User` instance or ``None`` if not exactly one
user was found.
"""
email = email.lower().strip()
if not email:
return None
if not create_pending:
res = User.query.filter(~User.is_deleted, User.all_emails == email).all()
else:
res = search_users(exact=True, include_pending=True, external=True, email=email)
if len(res) != 1:
return None
user_or_identity = next(iter(res))
if isinstance(user_or_identity, User):
return user_or_identity
elif not create_pending:
return None
# Create a new pending user
data = user_or_identity.data
user = User(first_name=data.get('first_name') or '', last_name=data.get('last_name') or '', email=email,
address=data.get('address', ''), phone=data.get('phone', ''),
affiliation=data.get('affiliation', ''), is_pending=True)
db.session.add(user)
db.session.flush()
return user
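# Usage sketch (illustrative; assumes an active database session / request context):
#
#     user = get_user_by_email('someone@example.com', create_pending=True)
#     if user is None:
#         pass  # zero or multiple matches, or no usable external identity found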
def merge_users(source, target, force=False):
"""Merge two users together, unifying all related data
:param source: source user (will be set as deleted)
:param target: target user (final)
"""
if source.is_deleted and not force:
raise ValueError('Source user {} has been deleted. Merge aborted.'.format(source))
if target.is_deleted:
raise ValueError('Target user {} has been deleted. Merge aborted.'.format(target))
# Move emails to the target user
primary_source_email = source.email
logger.info("Target %s initial emails: %s", target, ', '.join(target.all_emails))
logger.info("Source %s emails to be linked to target %s: %s", source, target, ', '.join(source.all_emails))
UserEmail.query.filter_by(user_id=source.id).update({
UserEmail.user_id: target.id,
UserEmail.is_primary: False
})
# Make sure we don't have stale data after the bulk update we just performed
db.session.expire_all()
# Update favorites
target.favorite_users |= source.favorite_users
target.favorite_of |= source.favorite_of
target.favorite_categories |= source.favorite_categories
# Update category suggestions
SuggestedCategory.merge_users(target, source)
# Merge identities
for identity in set(source.identities):
identity.user = target
# Notify signal listeners about the merge
signals.users.merged.send(target, source=source)
db.session.flush()
# Mark source as merged
source.merged_into_user = target
source.is_deleted = True
db.session.flush()
# Restore the source user's primary email
source.email = primary_source_email
db.session.flush()
logger.info("Successfully merged %s into %s", source, target)
def get_color_for_username(username):
return user_colors[crc32(username) % len(user_colors)]
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
from oslo.utils import strutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import image_service as service
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(rameshg87): This file now registers some of the opts in the pxe group.
# This is acceptable for now as a future refactoring into
# separate boot and deploy interfaces is planned, and moving config
# options twice is not recommended. Hence we would move the parameters
# to the appropriate place in the final refactoring.
pxe_opts = [
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot.'),
cfg.StrOpt('default_ephemeral_format',
default='ext4',
help='Default file system format for ephemeral partition, '
'if one is created.'),
cfg.StrOpt('images_path',
default='/var/lib/ironic/images/',
help='Directory where images are stored on disk.'),
cfg.StrOpt('instance_master_path',
default='/var/lib/ironic/master_images',
help='Directory where master instance images are stored on '
'disk.'),
cfg.IntOpt('image_cache_size',
default=20480,
help='Maximum size (in MiB) of cache for master images, '
'including those in use.'),
# 10080 here is 1 week - 60*24*7. It is entirely arbitrary in the absence
# of a facility to disable the ttl entirely.
cfg.IntOpt('image_cache_ttl',
default=10080,
help='Maximum TTL (in minutes) for old master images in '
'cache.'),
cfg.StrOpt('disk_devices',
default='cciss/c0d0,sda,hda,vda',
help='The disk devices to scan while doing the deploy.'),
]
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
@image_cache.cleanup(priority=50)
class InstanceImageCache(image_cache.ImageCache):
def __init__(self, image_service=None):
super(self.__class__, self).__init__(
CONF.pxe.instance_master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60,
image_service=image_service)
def _get_image_dir_path(node_uuid):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.pxe.images_path, node_uuid)
def _get_image_file_path(node_uuid):
"""Generate the full path for an instances disk."""
return os.path.join(_get_image_dir_path(node_uuid), 'disk')
def parse_instance_info(node):
"""Gets the instance specific Node deployment info.
This method validates whether the 'instance_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
info = node.instance_info
i_info = {}
i_info['image_source'] = info.get('image_source')
i_info['root_gb'] = info.get('root_gb')
error_msg = _("Cannot validate iSCSI deploy")
deploy_utils.check_for_missing_params(i_info, error_msg)
# Internal use only
i_info['deploy_key'] = info.get('deploy_key')
i_info['swap_mb'] = info.get('swap_mb', 0)
i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
i_info['ephemeral_format'] = info.get('ephemeral_format')
err_msg_invalid = _("Cannot validate parameter for iSCSI deploy. "
"Invalid parameter %(param)s. Reason: %(reason)s")
for param in ('root_gb', 'swap_mb', 'ephemeral_gb'):
try:
int(i_info[param])
except ValueError:
reason = _("'%s' is not an integer value.") % i_info[param]
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': param, 'reason': reason})
if i_info['ephemeral_gb'] and not i_info['ephemeral_format']:
i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = strutils.bool_from_string(
preserve_ephemeral, strict=True)
except ValueError as e:
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': 'preserve_ephemeral', 'reason': e})
return i_info
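# Illustrative example (not part of the driver): the smallest instance_info that
# passes the validation above only needs 'image_source' and 'root_gb'; the other
# keys are optional and default as shown in the code, e.g.
#
#     node.instance_info = {'image_source': '<glance-image-uuid>',  # placeholder
#                           'root_gb': 10,
#                           'swap_mb': 512,        # optional, defaults to 0
#                           'ephemeral_gb': 0}     # optional, defaults to 0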
def check_image_size(task):
"""Check if the requested image is larger than the root partition size.
:param task: a TaskManager instance containing the node to act on.
:raises: InstanceDeployFailure if size of the image is greater than root
partition.
"""
i_info = parse_instance_info(task.node)
image_path = _get_image_file_path(task.node.uuid)
image_mb = deploy_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. '
'Image size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
def cache_instance_image(ctx, node):
"""Fetch the instance's image from Glance
    This method pulls the instance image and writes it to the appropriate place
on local disk.
:param ctx: context
:param node: an ironic node object
:returns: a tuple containing the uuid of the image and the path in
the filesystem where image is cached.
"""
i_info = parse_instance_info(node)
fileutils.ensure_tree(_get_image_dir_path(node.uuid))
image_path = _get_image_file_path(node.uuid)
uuid = i_info['image_source']
LOG.debug("Fetching image %(ami)s for node %(uuid)s",
{'ami': uuid, 'uuid': node.uuid})
deploy_utils.fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)])
return (uuid, image_path)
def destroy_images(node_uuid):
"""Delete instance's image file.
:param node_uuid: the uuid of the ironic node.
"""
utils.unlink_without_raise(_get_image_file_path(node_uuid))
utils.rmtree_without_raise(_get_image_dir_path(node_uuid))
InstanceImageCache().clean_up()
def get_deploy_info(node, **kwargs):
"""Returns the information required for doing iSCSI deploy in a
dictionary.
:param node: ironic node object
:param kwargs: the keyword args passed from the conductor node.
:raises: MissingParameterValue, if some required parameters were not
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
deploy_key = kwargs.get('key')
i_info = parse_instance_info(node)
if i_info['deploy_key'] != deploy_key:
raise exception.InvalidParameterValue(_("Deploy key does not match"))
params = {'address': kwargs.get('address'),
'port': kwargs.get('port', '3260'),
'iqn': kwargs.get('iqn'),
'lun': kwargs.get('lun', '1'),
'image_path': _get_image_file_path(node.uuid),
'root_mb': 1024 * int(i_info['root_gb']),
'swap_mb': int(i_info['swap_mb']),
'ephemeral_mb': 1024 * int(i_info['ephemeral_gb']),
'preserve_ephemeral': i_info['preserve_ephemeral'],
'node_uuid': node.uuid,
}
missing = [key for key in params if params[key] is None]
if missing:
raise exception.MissingParameterValue(_(
"Parameters %s were not passed to ironic"
" for deploy.") % missing)
# ephemeral_format is nullable
params['ephemeral_format'] = i_info.get('ephemeral_format')
return params
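# Illustrative call (values are placeholders): the conductor receives these kwargs
# from the deploy ramdisk callback and combines them with the node's instance_info.
#
#     get_deploy_info(node, address='10.0.0.5', port='3260', iqn='iqn-<node-uuid>',
#                     lun='1', key='<deploy_key previously sent to the ramdisk>')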
def set_failed_state(task, msg):
"""Sets the deploy status as failed with relevant messages.
    This method sets the deployment as failed with the given message.
It sets node's provision_state to DEPLOYFAIL and updates last_error
with the given error message. It also powers off the baremetal node.
:param task: a TaskManager instance containing the node to act on.
:param msg: the message to set in last_error of the node.
"""
node = task.node
node.provision_state = states.DEPLOYFAIL
node.target_provision_state = states.NOSTATE
node.save()
try:
manager_utils.node_power_action(task, states.POWER_OFF)
except Exception:
msg2 = (_('Node %s failed to power off while handling deploy '
'failure. This may be a serious condition. Node '
'should be removed from Ironic or put in maintenance '
'mode until the problem is resolved.') % node.uuid)
LOG.exception(msg2)
finally:
# NOTE(deva): node_power_action() erases node.last_error
# so we need to set it again here.
node.last_error = msg
node.save()
def continue_deploy(task, **kwargs):
"""Resume a deployment upon getting POST data from deploy ramdisk.
This method raises no exceptions because it is intended to be
invoked asynchronously as a callback from the deploy ramdisk.
:param task: a TaskManager instance containing the node to act on.
:param kwargs: the kwargs to be passed to deploy.
:returns: UUID of the root partition or None on error.
"""
node = task.node
node.provision_state = states.DEPLOYING
node.save()
params = get_deploy_info(node, **kwargs)
ramdisk_error = kwargs.get('error')
if ramdisk_error:
LOG.error(_LE('Error returned from deploy ramdisk: %s'),
ramdisk_error)
set_failed_state(task, _('Failure in deploy ramdisk.'))
destroy_images(node.uuid)
return
LOG.info(_LI('Continuing deployment for node %(node)s, params %(params)s'),
{'node': node.uuid, 'params': params})
root_uuid = None
try:
root_uuid = deploy_utils.deploy(**params)
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
set_failed_state(task, _('Failed to continue iSCSI deployment.'))
destroy_images(node.uuid)
return root_uuid
def build_deploy_ramdisk_options(node):
"""Build the ramdisk config options for a node
This method builds the ramdisk options for a node,
given all the required parameters for doing iscsi deploy.
:param node: a single Node.
:returns: A dictionary of options to be passed to ramdisk for performing
the deploy.
"""
# NOTE: we should strip '/' from the end because this is intended for
    # a hardcoded ramdisk script
ironic_api = (CONF.conductor.api_url or
keystone.get_service_url()).rstrip('/')
deploy_key = utils.random_alnum(32)
i_info = node.instance_info
i_info['deploy_key'] = deploy_key
node.instance_info = i_info
node.save()
deploy_options = {
'deployment_id': node['uuid'],
'deployment_key': deploy_key,
'iscsi_target_iqn': "iqn-%s" % node.uuid,
'ironic_api_url': ironic_api,
'disk': CONF.pxe.disk_devices,
}
return deploy_options
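# For reference (values are placeholders), the dict returned above looks like:
#
#     {'deployment_id': '<node uuid>',
#      'deployment_key': '<32-character random string>',
#      'iscsi_target_iqn': 'iqn-<node uuid>',
#      'ironic_api_url': 'http://ironic-api.example.com:6385',  # hypothetical URL
#      'disk': 'cciss/c0d0,sda,hda,vda'}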
def validate_glance_image_properties(ctx, deploy_info, properties):
"""Validate the image in Glance.
    Check if the image exists in Glance and if it contains the
properties passed.
:param ctx: security context
:param deploy_info: the deploy_info to be validated
:param properties: the list of image meta-properties to be validated.
:raises: InvalidParameterValue if connection to glance failed or
authorization for accessing image failed or if image doesn't exist.
:raises: MissingParameterValue if the glance image doesn't contain
the mentioned properties.
"""
image_id = deploy_info['image_source']
try:
glance_service = service.Service(version=1, context=ctx)
image_props = glance_service.show(image_id)['properties']
except (exception.GlanceConnectionFailed,
exception.ImageNotAuthorized,
exception.Invalid):
raise exception.InvalidParameterValue(_(
"Failed to connect to Glance to get the properties "
"of the image %s") % image_id)
except exception.ImageNotFound:
raise exception.InvalidParameterValue(_(
"Image %s not found in Glance") % image_id)
missing_props = []
for prop in properties:
if not image_props.get(prop):
missing_props.append(prop)
if missing_props:
props = ', '.join(missing_props)
raise exception.MissingParameterValue(_(
"Image %(image)s is missing the following properties: "
"%(properties)s") % {'image': image_id, 'properties': props})
def validate(task):
"""Validates the pre-requisites for iSCSI deploy.
    Validates whether the node in the task has any ports enrolled.
    This method also validates whether the conductor API URL is available either
    from the CONF file or from keystone.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if no ports are enrolled for the given node.
"""
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.InvalidParameterValue(_("Node %s does not have "
"any port associated with it.") % node.uuid)
try:
# TODO(lucasagomes): Validate the format of the URL
CONF.conductor.api_url or keystone.get_service_url()
except (exception.CatalogFailure,
exception.CatalogNotFound,
exception.CatalogUnauthorized):
raise exception.InvalidParameterValue(_(
"Couldn't get the URL of the Ironic API service from the "
"configuration file or keystone catalog."))
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PrepaymentRecharge(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, id_product=None, amount=None, type=None, date_add=None, date_upd=None, active=None):
"""
PrepaymentRecharge - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'id_product': 'int',
'amount': 'float',
'type': 'str',
'date_add': 'str',
'date_upd': 'str',
'active': 'bool'
}
self.attribute_map = {
'id': 'id',
'id_product': 'id_product',
'amount': 'amount',
'type': 'type',
'date_add': 'date_add',
'date_upd': 'date_upd',
'active': 'active'
}
self._id = id
self._id_product = id_product
self._amount = amount
self._type = type
self._date_add = date_add
self._date_upd = date_upd
self._active = active
@property
def id(self):
"""
Gets the id of this PrepaymentRecharge.
:return: The id of this PrepaymentRecharge.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this PrepaymentRecharge.
:param id: The id of this PrepaymentRecharge.
:type: int
"""
self._id = id
@property
def id_product(self):
"""
Gets the id_product of this PrepaymentRecharge.
:return: The id_product of this PrepaymentRecharge.
:rtype: int
"""
return self._id_product
@id_product.setter
def id_product(self, id_product):
"""
Sets the id_product of this PrepaymentRecharge.
:param id_product: The id_product of this PrepaymentRecharge.
:type: int
"""
self._id_product = id_product
@property
def amount(self):
"""
Gets the amount of this PrepaymentRecharge.
:return: The amount of this PrepaymentRecharge.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this PrepaymentRecharge.
:param amount: The amount of this PrepaymentRecharge.
:type: float
"""
self._amount = amount
@property
def type(self):
"""
Gets the type of this PrepaymentRecharge.
:return: The type of this PrepaymentRecharge.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this PrepaymentRecharge.
:param type: The type of this PrepaymentRecharge.
:type: str
"""
self._type = type
@property
def date_add(self):
"""
Gets the date_add of this PrepaymentRecharge.
:return: The date_add of this PrepaymentRecharge.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this PrepaymentRecharge.
:param date_add: The date_add of this PrepaymentRecharge.
:type: str
"""
self._date_add = date_add
@property
def date_upd(self):
"""
Gets the date_upd of this PrepaymentRecharge.
:return: The date_upd of this PrepaymentRecharge.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this PrepaymentRecharge.
:param date_upd: The date_upd of this PrepaymentRecharge.
:type: str
"""
self._date_upd = date_upd
@property
def active(self):
"""
Gets the active of this PrepaymentRecharge.
:return: The active of this PrepaymentRecharge.
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this PrepaymentRecharge.
:param active: The active of this PrepaymentRecharge.
:type: bool
"""
self._active = active
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
|
from common_fixtures import * # NOQA
from cattle import ApiError
def _get_agent_for_container(container):
agent = None
for map in container.hosts()[0].instanceHostMaps():
c = map.instance()
if c.agentId is not None:
agent = c.agent()
assert agent is not None
return agent
def _get_agent_client(agent):
creds = agent.account().credentials()
    api_keys = [x for x in creds if x.kind == 'agentApiKey']
    assert len(api_keys) > 0
    api_key = api_keys[0]
return api_client(api_key.publicValue, api_key.secretValue)
def test_health_check_create_instance(super_client, context):
container = context.create_container(healthCheck={
'port': 80,
})
assert container.healthCheck.port == 80
container = super_client.reload(container)
hci = find_one(container.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(container)
assert hcihm.healthState == 'healthy'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
se = super_client.wait_success(se)
assert se.state == 'created'
assert se.accountId == container.accountId
assert se.instanceId == container.id
assert se.healthcheckInstanceId == hci.id
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
assert hcihm.externalTimestamp == ts
check = lambda: super_client.reload(container).healthState == 'healthy'
wait_for(check, timeout=5)
def test_health_check_create_service(super_client, context, client):
env = client.create_environment(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, environmentId=env.id, scale=1)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
expose_map = find_one(service.serviceExposeMaps)
container = super_client.reload(expose_map.instance())
hci = find_one(container.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(container)
assert hcihm.healthState == 'healthy'
assert container.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
assert container.healthState == 'initializing'
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='UP',
healthcheckUuid=hcihm.uuid)
super_client.wait_success(se)
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'healthy'
check = lambda: super_client.reload(container).healthState == 'healthy'
wait_for(check, timeout=5)
ts = int(time.time())
client = _get_agent_client(agent)
se = client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
se = super_client.wait_success(se)
assert se.state == 'created'
assert se.accountId == container.accountId
assert se.instanceId == container.id
assert se.healthcheckInstanceId == hci.id
hcihm = super_client.wait_success(super_client.reload(hcihm))
assert hcihm.healthState == 'unhealthy'
assert hcihm.externalTimestamp == ts
check = lambda: super_client.reload(container).healthState == 'unhealthy'
wait_for(check, timeout=5)
wait_for(lambda: len(service.serviceExposeMaps()) > 1)
def test_health_check_bad_external_timestamp(super_client, context, client):
env = client.create_environment(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, environmentId=env.id, scale=1)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
expose_map = find_one(service.serviceExposeMaps)
container = super_client.reload(expose_map.instance())
hci = find_one(container.healthcheckInstances)
hcihm = find_one(hci.healthcheckInstanceHostMaps)
agent = _get_agent_for_container(container)
agent_client = _get_agent_client(agent)
assert hcihm.healthState == 'healthy'
with pytest.raises(ApiError) as e:
agent_client.create_service_event(reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
assert e.value.error.code == 'MissingRequired'
assert e.value.error.fieldName == 'externalTimestamp'
def test_health_check_bad_agent(super_client, context, client):
# Create another host to get the agent from that host
host2 = super_client.reload(register_simulated_host(context))
env = client.create_environment(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, environmentId=env.id, scale=1)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
expose_map = find_one(service.serviceExposeMaps)
container = super_client.reload(expose_map.instance())
hci = find_one(container.healthcheckInstances)
hcihm = None
for h in hci.healthcheckInstanceHostMaps():
if h.hostId != host2.id:
hcihm = h
break
assert hcihm.hostId != host2.id
agent_client = _get_agent_client(host2.agent())
assert hcihm.healthState == 'healthy'
ts = int(time.time())
with pytest.raises(ApiError) as e:
agent_client.create_service_event(externalTimestamp=ts,
reportedHealth='Something Bad',
healthcheckUuid=hcihm.uuid)
assert e.value.error.code == 'CantVerifyHealthcheck'
def test_health_check_host_remove(super_client, context, client):
    # create 4 hosts for the healthcheck as one of them will be removed later
super_client.reload(register_simulated_host(context))
super_client.reload(register_simulated_host(context))
super_client.reload(register_simulated_host(context))
super_client.reload(register_simulated_host(context))
env = client.create_environment(name='env-' + random_str())
service = client.create_service(name='test', launchConfig={
'imageUuid': context.image_uuid,
'healthCheck': {
'port': 80,
}
}, environmentId=env.id, scale=1)
service = client.wait_success(client.wait_success(service).activate())
assert service.state == 'active'
expose_map = find_one(service.serviceExposeMaps)
container = super_client.reload(expose_map.instance())
hci = find_one(container.healthcheckInstances)
assert len(hci.healthcheckInstanceHostMaps()) == 3
hcihm = hci.healthcheckInstanceHostMaps()[0]
hosts = super_client.list_host(uuid=hcihm.host().uuid)
assert len(hosts) == 1
host = hosts[0]
# remove the host
host = super_client.wait_success(host.deactivate())
host = super_client.wait_success(super_client.delete(host))
assert host.state == 'removed'
# verify that new hostmap was created for the instance
hci = find_one(container.healthcheckInstances)
assert len(hci.healthcheckInstanceHostMaps()) == 3
hcim = None
for h in hci.healthcheckInstanceHostMaps():
if h.hostId == host.id:
            hcim = h
break
assert hcim is None
|
|
import numpy as np
import matplotlib.pyplot as plt
import struct
import os, sys
import re
import copy
class Matrix:
"""
    Class to read and handle Matrix files
"""
def __init__(self,Path): # Give the Path of the folder containing all the mtrx files
# Read PATH and open file
self.Path = Path
self.fp = None # file variable
for x in os.listdir(Path): # List the folder and look for the _0001.mtrx file
if x[-10:] == "_0001.mtrx":
self.fp = open(self.Path+"/"+x, "rb")
        if self.fp is None:
print("Matrix file not found!")
sys.exit(1)
if self.fp.read(8) != b"ONTMATRX": # header of the file
print("Unknown header! Wrong Matrix file format")
sys.exit(2)
self.version = self.fp.read(4) # should be 0101
self.IDs = {}
self.params = {} # dictionary to list all the parameters
self.images = {} # images[x] are the parameters used during the record for file named x
# Parse the file and read the block
while True: # While not EOF scan files and read block
r = self.read_block()
if r == False:
break
def read_string(self):
"""
Strings are stored as UTF-16. First 32-bits is the string length
"""
N = struct.unpack("<L", self.fp.read(4))[0] # string length
if N == 0:
return ""
s = self.fp.read(N*2).decode('utf-16')
return s
def plotSTS(self, ID, num=1): # plot STS file called xxx--ID_num.I(V)_mtrx
x, y = self.getSTS(ID, num)
plt.plot(x, y)
plt.show()
def getUpDown(self, X, Y, NPTS):
"""
        Split the data into up and down sweeps, pad them with NaN if necessary
        and return them with the bias axis in increasing order.
        The returned values are X, [Y_up, Y_down].
        If the up or down sweep is missing, an empty array is returned in its place.
"""
if len(Y) < NPTS: # Missing data
            Y = np.pad(Y, (0, NPTS - len(Y)), 'constant', constant_values=np.nan)
elif len(Y) > NPTS: # Forward and backward scans
if len(Y) < 2*NPTS: # Missing data
                Y = np.pad(Y, (0, 2*NPTS - len(Y)), 'constant', constant_values=np.nan)
if X[NPTS-1] < X[0]:
return X[NPTS:], [Y[NPTS:], Y[NPTS-1::-1]]
else:
return X[:NPTS], [Y[:NPTS], Y[-1:NPTS-1:-1]]
if X[-1] < X[0]:
            return X[::-1], [np.empty(NPTS), Y[::-1]]
return X, [Y, np.empty(NPTS)]
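    # Illustrative example (shapes only, not real data): for a forward/backward
    # sweep with NPTS=3 recorded as X=[0,1,2,2,1,0] and Y=[y0..y5], getUpDown
    # returns X=[0,1,2] together with [Y_up, Y_down] = [[y0,y1,y2], [y5,y4,y3]],
    # i.e. both traces end up on the same increasing bias axis.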
def getSTSData(self, ID, nums=[1]):
if not ID in self.IDs or len(nums) < 1:
return None
# retrieve the spectroscopy data (V, I and an object IM containing the parameters)
V, I, IM = self.getSTS(ID, nums[0], params=True)
NPTS = int(IM['Spectroscopy']['Device_1_Points']['value'])
hasDI = self.IDs[ID]['hasDI']
# Call the function to split and flip data if it's UP/Down measurements
V, I = self.getUpDown(V, I, NPTS)
for num in nums[1:]: # Skip first num as it's already parsed above
X, Y = self.getUpDown(*self.getSTS(ID, num), NPTS=NPTS)
if not np.array_equal(V, X):
raise Exception("Bias axis differs between measurements?!?")
for i in range(2): # i=0: Up scan, i=1: Down scan
I[i] = np.vstack((I[i], Y[i]))
Im = [np.nan]*2 # Store the mean of I
Ims = [np.nan]*2 # Store StDev of I
for i in range(2): # i=0: Up scan, i=1: Down scan
Im[i] = I[i].mean(axis=0)
Ims[i] = I[i].std(axis=0)
if hasDI:
X, dI = self.getUpDown(*self.getDIDV(ID, nums[0]), NPTS=NPTS)
for num in nums[1:]:
X, Y = self.getUpDown(*self.getDIDV(ID, num), NPTS=NPTS)
if not np.array_equal(V, X):
raise Exception("Bias axis differs between measurements?!?")
for i in range(2): # i=0: Up scan, i=1: Down scan
dI[i] = np.vstack((dI[i], Y[i]))
dIm = [np.nan]*2 # Store the mean of dI/dV
dIms = [np.nan]*2 # Store the StdDev of dI/dV
for i in range(2): # i=0: Up scan, i=1: Down scan
dIm[i] = dI[i].mean(axis=0)
dIms[i] = dI[i].std(axis=0)
return {'nums':nums, 'V':V, 'I':I, 'dI':dI, 'Imean':Im, 'Istd':Ims, 'dImean':dIm, 'dIstd':dIms}
def getDIDV(self, ID, num=1):
"""
The dI/dV measurements are stored the same way as the I(V), but with file extension Aux2(V).
"""
return self.getSTS(ID, num, ext='Aux2')
def getSTSparams(self, ID, num=1, ext='I'):
if not ID in self.IDs:
return None, None
I = u"%s--%i_%i.%s(V)_mtrx"%(self.IDs[ID]['root'], ID, num, ext)
if not I in self.images:
return None
return self.images[I]
def getSTS(self, ID, num=1, ext='I', params=False):
"""
Get a spectroscopy file xxxx-ID_num.I(V)_mtrx
"""
IM = self.getSTSparams(ID,num,ext)
        if IM is None:
return None
v1 = IM['Spectroscopy']['Device_1_Start']['value'] # Get the start voltage used for the scan
v2 = IM['Spectroscopy']['Device_1_End']['value'] # Get the end voltage for the scan
I = u"%s--%i_%i.%s(V)_mtrx"%(self.IDs[ID]['root'], ID, num, ext)
ImagePath = self.Path+"/"+I
if not os.path.exists(ImagePath):
return None
ff = open(ImagePath, "rb") # read the STS file
if ff.read(8) != b"ONTMATRX":
print("ERROR: Invalid STS format")
sys.exit(1)
if ff.read(4) != b"0101":
print("ERROR: Invalid STS version")
sys.exit(2)
t = ff.read(4) # TLKB header
ff.read(8) # timestamp
        ff.read(8) # Skip 8 bytes (??? unknown data. Usually it's = 00 00 00 00 00 00 00 00)
t = ff.read(4) # CSED header
ss = struct.unpack('<15L', ff.read(60)) # 15 uint32. ss[6] and ss[7] store the size of the points. ([6] is what was planned and [7] what was actually recorded)
# ss[6] should be used to reconstruct the X-axis and ss[7] to read the binary data
if ff.read(4) != b'ATAD':
print("ERROR: Data should be here, but aren't. Please debug script")
sys.exit(3)
ff.read(4)
        data = np.array(struct.unpack("<%il"%(ss[7]), ff.read(ss[7]*4))) # The data are stored as little-endian signed 32-bit integers
        # Reconstruct the x-axis. Take the start and end voltage (v1,v2) with the correct number of points and pad it to the data length. Padding is in 'reflect' mode in the case of Forward/backward scans.
X = np.linspace(v1, v2, int(IM['Spectroscopy']['Device_1_Points']['value']))
if len(X) < ss[6]:
X = np.concatenate((X, X[::-1]))
if len(data) < len(X):
data = np.concatenate((data, [np.nan]*(len(X)-len(data))))
if params:
return X, data, IM
return X, data
def read_value(self):
"""
Values are stored with a specific header for each data type
"""
t = self.fp.read(4)
if t == b"BUOD":
# double
v = struct.unpack("<d", self.fp.read(8))[0]
elif t == b"GNOL":
# uint32
v = struct.unpack("<L", self.fp.read(4))[0]
elif t == b"LOOB":
# bool32
v = struct.unpack("<L", self.fp.read(4))[0] > 0
elif t == b"GRTS":
v = self.read_string()
else:
v = t
return v
def getUI(self):
"""
Read an unsigned int from the file
"""
return struct.unpack("<L", self.fp.read(4))[0]
def read_block(self, sub=False):
indent = self.fp.read(4) # 4bytes forming the header. Those are capital letters between A-Z
if len(indent) < 4: # EOF reached?
return False
bs = struct.unpack("<L", self.fp.read(4))[0]+[8, 0][sub] # Size of the block
r = {"ID":indent, "bs":bs} # Store the parameters found in the block
p = self.fp.tell() # store the file position of the block
if indent == b"DOMP": # Block storing parameters changed during an experiment
self.fp.read(12)
inst = self.read_string()
prop = self.read_string()
unit = self.read_string()
self.fp.read(4)
value =self.read_value()
r.update({'inst':inst, 'prop':prop, 'unit':unit, 'value':value})
            self.params[inst][prop].update({'unit':unit, 'value':value}) # Update the parameters information stored in self.params
elif indent == b"CORP": # Processor of scanning window. Useless in this script for the moment
self.fp.read(12)
a = self.read_string()
b = self.read_string()
r.update({'a':a, 'b':b})
elif indent == b"FERB": # A file was stored
self.fp.read(12)
a = self.read_string() # Filename
r['filename'] = a
            self.images[a] = copy.deepcopy(self.params) # Store the parameters used to record the file named a
            # Create a catalogue to avoid scanning all images later
res = re.search(r'^(.*?)--([0-9]*)_([0-9]*)\.([^_]+)_mtrx$', a)
ID = int(res.group(2))
num = int(res.group(3))
_type = res.group(4)
if not ID in self.IDs:
self.IDs[ID] = {'nums':[], 'root':res.group(1)}
if _type in ["Aux2(V)"]:
self.IDs[ID]['hasDI'] = True
if _type in ["I(V)"]:
self.IDs[ID]['nums'].append(num)
elif indent == b"SPXE": # Initial configuration
self.fp.read(12) # ??? useless 12 bytes
r['LNEG'] = self.read_block(True) # read subblock
r['TSNI'] = self.read_block(True) # read subblock
r['SXNC'] = self.read_block(True) # read subblock
elif indent == b"LNEG":
r.update({'a':self.read_string(), 'b':self.read_string(), 'c':self.read_string()})
elif indent == b"TSNI":
anz = self.getUI()
rr = []
for ai in range(anz):
a = self.read_string()
b = self.read_string()
c = self.read_string()
count = self.getUI()
pa = []
for i in range(count):
x = self.read_string()
y = self.read_string()
pa.append({'a':x, 'b':y})
rr.append({'a':a, 'b':b, 'c':c, 'content':pa})
elif indent == b"SXNC":
count = self.getUI()
r['count'] = count
rr = []
for i in range(count):
a = self.read_string()
b = self.read_string()
k = self.getUI()
kk = []
for j in range(k):
x = self.read_string()
y = self.read_string()
kk.append((x, y))
rr.append((a, b, i, kk))
r['content'] = rr
elif indent == b"APEE": # Store the configurations
self.fp.read(12) # ??? useless 12bytes
num = self.getUI() # Number of parameters class
r['num'] = num
for i in range(num):
inst = self.read_string() # Parameter class name
grp = self.getUI() # Number of parameters in this class
kk = {}
for j in range(grp): # Scan for each parameter, value and unit
prop = self.read_string() # parameter name
unit = self.read_string() # parameter unit
self.fp.read(4) # ???
value = self.read_value() # parameter value
kk[prop] = {"unit":unit, "value":value}
r[inst] = kk
            self.params = r # Store this information as initial values for the parameters
# print(self.params['Spectroscopy'])
self.fp.seek(p) # go back to the beginning of the block
        self.fp.read(bs) # go to the next block by skipping the block-size bytes
        return r # return the information collected
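# Minimal usage sketch (the folder path and measurement ID below are hypothetical):
#
#     m = Matrix('/path/to/experiment_folder')    # folder containing the *_0001.mtrx file
#     data = m.getSTSData(42, nums=[1, 2, 3])     # average several I(V) curves of ID 42
#     if data is not None:
#         V, I_up_mean = data['V'], data['Imean'][0]   # index 0 = up sweep, 1 = down sweep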
|
|
import sys
import os
import glob
import tempfile
import shutil
import time
import urllib2
import netrc
import json
from urlparse import urlparse, urljoin
from subprocess import Popen, PIPE, check_call
from w3lib.form import encode_multipart
from scrapy.command import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.http import basic_auth_header
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.conf import get_config, closest_scrapy_cfg
_SETUP_PY_TEMPLATE = \
"""# Automatically created by: scrapy deploy
from setuptools import setup, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = %(settings)s']},
)
"""
class Command(ScrapyCommand):
requires_project = True
def syntax(self):
return "[options] [ [target] | -l | -L <target> ]"
def short_desc(self):
return "Deploy project in Scrapyd target"
def long_desc(self):
return "Deploy the current project into the given Scrapyd server " \
"(known as target)"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-p", "--project",
help="the project name in the target")
parser.add_option("-v", "--version",
help="the version to deploy. Defaults to current timestamp")
parser.add_option("-l", "--list-targets", action="store_true", \
help="list available targets")
parser.add_option("-L", "--list-projects", metavar="TARGET", \
help="list available projects on TARGET")
parser.add_option("--egg", metavar="FILE",
help="use the given egg, instead of building it")
parser.add_option("--build-egg", metavar="FILE",
help="only build the egg, don't deploy it")
def run(self, args, opts):
try:
import setuptools
except ImportError:
raise UsageError("setuptools not installed")
if opts.list_targets:
for name, target in _get_targets().items():
print "%-20s %s" % (name, target['url'])
return
if opts.list_projects:
target = _get_target(opts.list_projects)
req = urllib2.Request(_url(target, 'listprojects.json'))
_add_auth_header(req, target)
f = urllib2.urlopen(req)
projects = json.loads(f.read())['projects']
print os.linesep.join(projects)
return
tmpdir = None
if opts.build_egg: # build egg only
egg, tmpdir = _build_egg()
_log("Writing egg to %s" % opts.build_egg)
shutil.copyfile(egg, opts.build_egg)
        else: # build egg and deploy
target_name = _get_target_name(args)
target = _get_target(target_name)
project = _get_project(target, opts)
version = _get_version(target, opts)
if opts.egg:
_log("Using egg: %s" % opts.egg)
egg = opts.egg
else:
_log("Building egg of %s-%s" % (project, version))
egg, tmpdir = _build_egg()
_upload_egg(target, egg, project, version)
if tmpdir:
shutil.rmtree(tmpdir)
def _log(message):
sys.stderr.write(message + os.linesep)
def _get_target_name(args):
    if len(args) > 1:
        raise UsageError("Too many arguments: %s" % ' '.join(args))
    elif args:
        return args[0]
    else:
        return 'default'
def _get_project(target, opts):
project = opts.project or target.get('project')
if not project:
raise UsageError("Missing project")
return project
def _get_option(section, option, default=None):
cfg = get_config()
return cfg.get(section, option) if cfg.has_option(section, option) \
else default
def _get_targets():
cfg = get_config()
baset = dict(cfg.items('deploy')) if cfg.has_section('deploy') else {}
targets = {}
if 'url' in baset:
targets['default'] = baset
for x in cfg.sections():
if x.startswith('deploy:'):
t = baset.copy()
t.update(cfg.items(x))
targets[x[7:]] = t
return targets
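# For reference (illustrative values): targets are read from scrapy.cfg sections like
#
#     [deploy]
#     url = http://localhost:6800/
#     project = myproject
#
#     [deploy:production]
#     url = http://scrapyd.example.com:6800/
#     username = scrapy
#     password = secret
#
# The bare [deploy] section becomes the 'default' target and every [deploy:<name>]
# section inherits its values and overrides them.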
def _get_target(name):
try:
return _get_targets()[name]
except KeyError:
raise UsageError("Unknown target: %s" % name)
def _url(target, action):
return urljoin(target['url'], action)
def _get_version(target, opts):
version = opts.version or target.get('version')
if version == 'HG':
p = Popen(['hg', 'tip', '--template', '{rev}'], stdout=PIPE)
return 'r%s' % p.communicate()[0]
elif version == 'GIT':
p = Popen(['git', 'describe', '--always'], stdout=PIPE)
return '%s' % p.communicate()[0].strip('\n')
elif version:
return version
else:
return str(int(time.time()))
def _upload_egg(target, eggpath, project, version):
with open(eggpath, 'rb') as f:
eggdata = f.read()
data = {
'project': project,
'version': version,
'egg': ('project.egg', eggdata),
}
body, boundary = encode_multipart(data)
url = _url(target, 'addversion.json')
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Content-Length': str(len(body)),
}
req = urllib2.Request(url, body, headers)
_add_auth_header(req, target)
_log("Deploying %s-%s to %s" % (project, version, url))
_http_post(req)
def _add_auth_header(request, target):
if 'username' in target:
u, p = target.get('username'), target.get('password', '')
request.add_header('Authorization', basic_auth_header(u, p))
else: # try netrc
try:
host = urlparse(target['url']).hostname
a = netrc.netrc().authenticators(host)
request.add_header('Authorization', basic_auth_header(a[0], a[2]))
except (netrc.NetrcParseError, IOError, TypeError):
pass
def _http_post(request):
try:
f = urllib2.urlopen(request)
_log("Server response (%s):" % f.code)
print f.read()
except urllib2.HTTPError, e:
_log("Deploy failed (%s):" % e.code)
print e.read()
except urllib2.URLError, e:
_log("Deploy failed: %s" % e)
def _build_egg():
closest = closest_scrapy_cfg()
os.chdir(os.path.dirname(closest))
if not os.path.exists('setup.py'):
settings = get_config().get('settings', 'default')
_create_default_setup_py(settings=settings)
d = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=d)
retry_on_eintr(check_call, [sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d], stdout=f)
egg = glob.glob(os.path.join(d, '*.egg'))[0]
return egg, d
def _create_default_setup_py(**kwargs):
with open('setup.py', 'w') as f:
f.write(_SETUP_PY_TEMPLATE % kwargs)
|
|
#
# Copyright 2014 China Mobile Limited
#
# Author: Gangyi Luo <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from stevedore import driver
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer import utils
import time
import os
from time import sleep
from ovirtga.guestagent import GuestAgent
from ovirtga.vmchannels import Listener
#from ovirtga.vmgreenchannels import Listener
LOG = log.getLogger(__name__)
_VMCHANNEL_DEVICE_NAME = 'com.redhat.rhevm.vdsm'
# This device name is used as default both in the qemu-guest-agent
# service/daemon and in libvirtd (to be used with the quiesce flag).
_QEMU_GA_DEVICE_NAME = 'org.qemu.guest_agent.0'
_QEMU_GA_DEVICE_DIR = '/var/lib/libvirt/qemu/'
# Named tuple representing disk usage.
#
# mount_point: mount point of a disk device
# usage: disk space usage of the mount point
#
DiskUsage = collections.namedtuple('DiskUsage',
['mount_point', 'usage'])
class OGAInspector(object):
INSPECTOR_TIMEOUT = 120
def __init__(self):
self.oga_dict = {}
self.channelListener = Listener()
self.channelListener.settimeout(30)
self.channelListener.start()
#self._prepare_socket_dir()
def _get_agent(self, instance_name):
if instance_name in self.oga_dict:
return self.oga_dict[instance_name]
guestSocketFile = self._make_channel_path(_VMCHANNEL_DEVICE_NAME,
instance_name)
if os.path.exists(guestSocketFile):
guest_agent = GuestAgent(guestSocketFile, self.channelListener)
guest_agent.connect()
self.oga_dict[instance_name] = guest_agent
return guest_agent
else:
LOG.error("Instance %s socket file %s does not exist!" %
(instance_name, guestSocketFile))
return None
def clear_outdated_agent(self):
del_keys = []
for instance_name in self.oga_dict:
guest_agent = self.oga_dict[instance_name]
if (time.time() - guest_agent.update_time()
> self.INSPECTOR_TIMEOUT):
guest_agent.stop()
del_keys.append(instance_name)
for key in del_keys:
del self.oga_dict[key]
def _prepare_socket_dir(self):
chmod_dir_cmd = ['chmod', '-R', 'g+rwx', _QEMU_GA_DEVICE_DIR]
utils.execute(*chmod_dir_cmd, run_as_root=True)
def _make_channel_path(self, deviceName, instance_name):
return "/var/lib/libvirt/qemu/%s.%s.sock" % (deviceName,
instance_name)
def inspect_mem(self, instance_name):
"""Inspect the CPU statistics for an instance.
:param instance_name: the name of the target instance
:return: the number of CPUs and cumulative CPU time
"""
agt = self._get_agent(instance_name)
while(True):
print agt.getGuestInfo()
if(agt.getGuestInfo().get("memoryStats") is not None and
agt.getGuestInfo().get("memoryStats").get("mem_total")):
print agt.getGuestInfo()["memoryStats"]["mem_total"]
self.inspect_sys(instance_name)
sleep(1)
def inspect_sys(self, instance_name):
"""Inspect the system information for an instance.
:param instance_name: the name of the target instance
:return: the dict of system information
"""
agt = self._get_agent(instance_name)
if agt is None:
return None
sys_dict = {}
for attr in ["netIfaces", "guestFQDN", "lastLogin",
"guestOs", "guestIPs"]:
val = agt.getGuestInfo().get(attr)
if val is not None and val != '':
sys_dict[attr] = val
print attr, val
return sys_dict
def inspect_mem_total(self, instance_name):
"""Inspect the Total Memory for an instance.
:param instance_name: the name of the target instance
:return: the size of the total memory or -1 if none data retrieved
"""
agt = self._get_agent(instance_name)
if agt is None:
return -1
guestInfo = agt.getGuestInfo()
if (guestInfo is not None and
guestInfo.get("memoryStats") is not None and
guestInfo.get("memoryStats").get("mem_total") is not None):
return guestInfo.get("memoryStats").get("mem_total")
else:
return -1
def inspect_mem_unused(self, instance_name):
"""Inspect the unused Memory for an instance.
:param instance_name: the name of the target instance
:return: the size of the unused memory or -1 if none data retrieved
"""
agt = self._get_agent(instance_name)
if agt is None:
return -1
guestInfo = agt.getGuestInfo()
if (guestInfo is not None and
guestInfo.get("memoryStats") is not None and
guestInfo.get("memoryStats").get("mem_unused") is not None):
return guestInfo.get("memoryStats").get("mem_unused")
else:
return -1
def inspect_mem_cached(self, instance_name):
"""Inspect the cached Memory for an instance.
:param instance_name: the name of the target instance
:return: the size of the cached memory or -1 if none data retrieved
"""
agt = self._get_agent(instance_name)
if agt is None:
return -1
guestInfo = agt.getGuestInfo()
if (guestInfo is not None and
guestInfo.get("memoryStats") is not None and
guestInfo.get("memoryStats").get("mem_cached") is not None):
return guestInfo.get("memoryStats").get("mem_cached")
else:
return -1
def inspect_mem_swap(self, instance_name):
"""Inspect the swap Memory for an instance.
:param instance_name: the name of the target instance
:return: the size of the swap memory or -1 if none data retrieved
"""
agt = self._get_agent(instance_name)
if agt is None:
return -1
guestInfo = agt.getGuestInfo()
if (guestInfo is not None and
guestInfo.get("memoryStats") is not None and
guestInfo.get("memoryStats").get("swap_total") is not None):
return guestInfo.get("memoryStats").get("swap_total")
else:
return -1
def inspect_mem_buffer(self, instance_name):
"""Inspect the buffer Memory for an instance.
:param instance_name: the name of the target instance
:return: the size of the buffer memory or -1 if none data retrieved
"""
agt = self._get_agent(instance_name)
if agt is None:
return -1
guestInfo = agt.getGuestInfo()
if (guestInfo is not None and
guestInfo.get("memoryStats") is not None and
guestInfo.get("memoryStats").get("mem_buffers") is not None):
return guestInfo.get("memoryStats").get("mem_buffers")
else:
return -1
def inspect_disk_usage(self, instance_name):
"""Inspect the disk_usage for an instance.
:param instance_name: the name of the target instance
:return: the list of disk usage or none if no data retrieved
"""
agt = self._get_agent(instance_name)
if agt is None:
return None
guestInfo = agt.getGuestInfo()
if (guestInfo is not None and
guestInfo.get("disksUsage") is not None):
usage_list = []
for per_disk in guestInfo["disksUsage"]:
used = per_disk["used"]
total = per_disk["total"]
path = per_disk["path"]
usage = float(used)/float(total)
usage = round(usage, 3)
usage = usage * 100
disk_usage = DiskUsage(mount_point=path,
usage=usage)
usage_list.append(disk_usage)
return usage_list
else:
return None
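# For reference (field names taken from the accessors above; values are made up):
# getGuestInfo() from the guest agent is expected to return a dict roughly like
#
#     {'memoryStats': {'mem_total': 2048, 'mem_unused': 512, 'mem_cached': 256,
#                      'mem_buffers': 64, 'swap_total': 1024},
#      'disksUsage': [{'path': '/', 'used': 10, 'total': 40}],
#      'netIfaces': [...], 'guestFQDN': 'vm.example.org', 'guestIPs': '10.0.0.5'}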
def get_oga_inspector():
try:
namespace = 'ceilometer.compute.virt'
mgr = driver.DriverManager(namespace,
"oga_inspector",
invoke_on_load=True)
return mgr.driver
except ImportError as e:
LOG.error(_("Unable to load the "
"Ovirt Geuest Agent inspector: %s") % e)
return None
if __name__ == '__main__':
inspector = OGAInspector()
inspector.inspect_mem("instance-00000005")
|
|
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
"""
    This file parses messages using functions defined in the template's
parser.py
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Parsing",)
import sys
from gluon import current
# =============================================================================
class S3Parsing(object):
"""
Core Message Parsing Framework
- reusable functions
"""
# -------------------------------------------------------------------------
@staticmethod
def parser(function_name, message_id, **kwargs):
"""
1st Stage Parser
- called by msg.parse()
Sets the appropriate Authorisation level and then calls the
parser function from the template
"""
reply = None
s3db = current.s3db
# Retrieve Message
table = s3db.msg_message
message = current.db(table.message_id == message_id).select(limitby=(0, 1)
).first()
from_address = message.from_address
if "<" in from_address:
from_address = from_address.split("<")[1].split(">")[0]
email = S3Parsing.is_session_alive(from_address)
if email:
current.auth.s3_impersonate(email)
else:
(email, password) = S3Parsing.parse_login(message)
if email and password:
current.auth.login_bare(email, password)
expiration = current.session.auth["expiration"]
table = s3db.msg_session
table.insert(email = email,
expiration_time = expiration,
from_address = from_address)
reply = "Login succesful"
# The message may have multiple purposes
#return reply
# Load the Parser template for this deployment
template = current.deployment_settings.get_msg_parser()
module_name = "applications.%s.modules.templates.%s.parser" \
% (current.request.application, template)
__import__(module_name)
mymodule = sys.modules[module_name]
S3Parser = mymodule.S3Parser()
# Pass the message to the parser
try:
fn = getattr(S3Parser, function_name)
except:
current.log.error("Parser not found: %s" % function_name)
return None
reply = fn(message, **kwargs) or reply
if not reply:
return None
# Send Reply
current.msg.send(from_address, reply)
# -------------------------------------------------------------------------
@staticmethod
def parse_login(message):
"""
Authenticate a login request
"""
if not message:
return None, None
words = message.body.split(" ")
login = False
email = None
password = None
if "LOGIN" in [word.upper() for word in words]:
login = True
if len(words) == 2 and login:
password = words[1]
elif len(words) == 3 and login:
email = words[1]
password = words[2]
if login:
if password and not email:
email = message.from_address
return email, password
else:
return None, None
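    # Illustrative message bodies accepted by parse_login (values are placeholders):
    #   "LOGIN secretpass"                      -> email taken from the sender address
    #   "LOGIN user@example.org secretpass"     -> explicit email and password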
# ---------------------------------------------------------------------
@staticmethod
def is_session_alive(from_address):
"""
Check whether there is an alive session from the same sender
"""
email = None
now = current.request.utcnow
stable = current.s3db.msg_session
query = (stable.is_expired == False) & \
(stable.from_address == from_address)
records = current.db(query).select(stable.id,
stable.created_datetime,
stable.expiration_time,
stable.email,
)
for record in records:
time = record.created_datetime
time = time - now
time = time.total_seconds()
if time < record.expiration_time:
email = record.email
break
else:
record.update_record(is_expired = True)
return email
# ---------------------------------------------------------------------
@staticmethod
def lookup_person(address):
"""
Lookup a Person from an Email Address
"""
s3db = current.s3db
if "<" in address:
address = address.split("<")[1].split(">")[0]
ptable = s3db.pr_person
ctable = s3db.pr_contact
query = (ctable.value == address) & \
(ctable.contact_method == "EMAIL") & \
(ctable.pe_id == ptable.pe_id) & \
(ptable.deleted == False) & \
(ctable.deleted == False)
possibles = current.db(query).select(ptable.id,
limitby=(0, 2))
if len(possibles) == 1:
return possibles.first().id
return None
# ---------------------------------------------------------------------
@staticmethod
def lookup_human_resource(address):
"""
Lookup a Human Resource from an Email Address
"""
db = current.db
s3db = current.s3db
if "<" in address:
address = address.split("<")[1].split(">")[0]
hrtable = s3db.hrm_human_resource
ptable = db.pr_person
ctable = s3db.pr_contact
query = (ctable.value == address) & \
(ctable.contact_method == "EMAIL") & \
(ctable.pe_id == ptable.pe_id) & \
(ptable.id == hrtable.person_id) & \
(ctable.deleted == False) & \
(ptable.deleted == False) & \
(hrtable.deleted == False)
possibles = db(query).select(hrtable.id,
limitby=(0, 2))
if len(possibles) == 1:
return possibles.first().id
return None
# END =========================================================================
|
|
# VS SIMULATE
from shared_functions import *
from net_designs import *
import pandas as ps
import json
import numpy as np
import pickle
import networkx as nx
import scipy.stats as sct
from scipy import stats as sc
import random
import h5py
import os
from copy import deepcopy
import matplotlib.pyplot as plt
def random_policy(data):
return np.random.randint(1,13,data.shape[0])
def propagate(data, regressor, policy, prices, periods=12, num_actions=3, orig_actions=None):
# Initializing arrays to hold output
customers = np.zeros((periods+1,data.shape[0],data.shape[1],data.shape[2]), dtype = np.float32)
customers[0] = data
actions = np.zeros((periods,data.shape[0],data.shape[1],num_actions), dtype = np.float32)
quantity = np.zeros((periods,data.shape[0],data.shape[1]), dtype = np.float32)
amount = np.zeros((periods,data.shape[0],data.shape[1]), dtype = np.float32)
for t in xrange(periods):
# SELECTING ACTIONS - IF A TENSOR OF ORIGINAL ACTIONS IS PROVIDED, IGNORE POLICY
if isinstance(orig_actions, (np.ndarray)):
actions[t] = orig_actions[t]
# OTHERWISE, USE POLICY
else:
actions[t] = policy(customers[t])
inp = np.append(customers[t],actions[t],axis = 2).astype(np.float32)
inp = inp.reshape(inp.shape[0],-1)
# PROPAGATING CUSTOMERS
quantity_pred = np.rint(regressor.predict(inp)).astype(np.float32)
quantity_pred[quantity_pred < 0] = 0
quantity_sim = np.random.poisson(quantity_pred,quantity_pred.shape)
quantity[t] = quantity_sim
amount[t] = quantity[t] * np.tile(prices, (quantity[t].shape[0],1))
# UPDATING CUSTOMER STATE
# Recency
customers[t+1,:,:,0] = (customers[t,:,:,0] + 1)*(quantity[t] == 0)
# Frequency
customers[t+1,:,:,1] = customers[t,:,:,1] + quantity[t]
# Avg. Past Donation
customers[t+1,:,:,2] = (customers[t,:,:,2] * customers[t,:,:,1] + amount[t]) / (customers[t+1,:,:,1] + 1*(customers[t+1,:,:,1]==0))
# Offer Recency
customers[t+1,:,:,3] = (customers[t,:,:,3] + 1)*(actions[t,:,:,0] == 0) # Null action 1
# Offer Frequency
customers[t+1,:,:,4] = customers[t,:,:,4] + actions[t,:,:,0]
return customers, actions, quantity, amount
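# Illustrative state update (hypothetical numbers for a single customer/category, not from the
# original script): if customers[t,...] = [recency=3, frequency=2, avg_value=10.0, offer_recency=1,
# offer_frequency=4] and the simulated quantity for period t is 0, recency becomes 4 (3 + 1);
# if the quantity is 1, recency resets to 0, frequency becomes 3, and avg_value is re-averaged
# over the enlarged purchase history.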
record = dict()
# LOAD MODEL
print('Loading model')
regressor = VSRegressor()
regressor.load_weights("../results/vs_propagation_quantity_best_cat.h5")
RANDOM_SEED = 999
# LOAD DATA
print('Loading data')
h5f = h5py.File('../kaggle_valued_shoppers/temp_data_cat.h5','r')
data = h5f['temp_data'][:]
h5f.close()
columns = ['transaction_recency','transaction_frequency','avg_past_transaction_value',
'offer_recency','offer_frequency','number_of_offers','offer_goods_quantity_per_offer','offer_value_per_offer',
'purchased_goods_quantity','purchased_item_price']
cols_X = [0,1,2,3,4,5,6,7]
cols_S = [0,1,2,3,4]
cols_A = [5,6,7]
cols_Y = 8
cols_Z = 9
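# Restating the column layout above: cols_S are the five customer-state features
# (transaction_recency ... offer_frequency), cols_A the three offer/action features
# (number_of_offers, offer_goods_quantity_per_offer, offer_value_per_offer),
# cols_Y the purchased quantity and cols_Z the purchased item price.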
# LOAD AVG PRICES PER CATEGORY
print('Loading avg. prices')
prices = load('../kaggle_valued_shoppers/vs_cat_avg_prices.p')
# PREPARE TEST DATA
print('Preparing data')
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
rand_ind = np.random.permutation(data.shape[0])
test_customers = rand_ind[216000:217000] # 1000 customers from test set
test_data = data[test_customers].T
del data
# EXTRACT ARRAYS WITH ORIGINAL DATA
orig_S = test_data[cols_S].T
orig_A = test_data[cols_A].T
orig_Q = test_data[cols_Y].T
orig_P = test_data[cols_Z].T * orig_Q
del test_data
orig_S = np.transpose(orig_S, (1, 0, 2, 3))
orig_A = np.transpose(orig_A, (1, 0, 2, 3))
orig_Q = np.transpose(orig_Q, (1, 0, 2))
orig_P = np.transpose(orig_P, (1, 0, 2))
# SIMULATE DATA
T = 17
# INPUT IS THE STARTING STATE ARRAY, NET, TESTED POLICY [IGNORED BECAUSE WE PROVIDE ACTIONS FOR EVERY TIME PERIOD AS ORIG ACTIONS],
# AVG. PRICES FOR EACH CATEGORY, PERIODS FOR SIMULATION, NUMBER OF VARIABLES DESCRIBING THE ACTION, [OPTIONAL] TENSOR OF ORIGINAL ACTIONS
S, A, Q, P = propagate(orig_S[0], regressor, random_policy, prices,
periods=16, num_actions=3, orig_actions=orig_A)
# PLOT CUMULATIVE PURCHASES BY CATEGORY OVER TIME
plt.figure(num=None, figsize=(8, 6), dpi=150, facecolor='w', edgecolor='w')
for i in range(Q.shape[2]):
plt.plot(range(orig_Q.shape[0]),np.cumsum(orig_P.mean(1),0)[:,i], linewidth=2, alpha=0.3, color="green")
plt.plot(range(Q.shape[0]),np.cumsum(P.mean(1),0)[:,i], linewidth=2, alpha=0.5, color="brown",linestyle='--')
plt.xlim(0,15)
plt.ylim(0,50)
line_green, = plt.plot([],[], label='Actual Data', color="green")
line_brown, = plt.plot([],[], label='Simulated Data', color="brown",linestyle='--')
#plt.legend(handles=[line_green, line_brown], fontsize=20)
plt.xlabel("Campaign Period", fontsize=20, labelpad=15)
plt.ylabel("Mean Cumulative Purchases", fontsize=20, labelpad=15)
plt.tick_params(axis='both', which='major', labelsize=15)
plt.tick_params(axis='both', which='minor', labelsize=15)
plt.savefig("../results/vs_cumulative_purchase.pdf", bbox_inches='tight')
plt.close()
# PLOT HISTOGRAM OF TOTAL PURCHASES
orig_purchases = np.sum(orig_P.sum(0),1).squeeze()
sim_purchases = np.sum(P.sum(0),1).squeeze()
plot_validate(orig_purchases, sim_purchases, xlab="Total Purchase Amount", ylab="Probability Mass", name="../results/vs_total_purchase.pdf",
n_bins=7, x_range=(0,525), y_range=(0,0.5), font = 20, legend=True, bar_width=15)
# MAKE A RECORD OF KEY PURCHASE METRICS
record['KL_divergence_deeplearning_purchases'] = str(KL_validate(orig_purchases, sim_purchases,
n_bins=7, x_range=(0,525)))
record['orig_mean_deeplearning_purchases'] = str(np.mean(orig_purchases))
record['sim_mean_deeplearning_purchases'] = str(np.mean(sim_purchases))
record['orig_std_deeplearning_purchases'] = str(np.std(orig_purchases))
record['sim_std_deeplearning_purchases'] = str(np.std(sim_purchases))
# CALCULATE THE NUMBER OF SIGNIFICANTLY DIFFERENT SIMULATED TOTAL PURCHASE HISTOGRAMS - BY CATEGORY
g = 0
for i in range(20):
a = KL_validate(orig_P.sum(0)[:,i].squeeze(), P.sum(0)[:,i].squeeze(), n_bins=7, x_range=(0,525))
record['KL_purchases_'+str(i)] = str(a)
g+= 1*(a[2]<0.05)
record['KL_purchases_purchases_significant'] = g
# PLOT MEAN RECENCY OVER TIME
plt.figure(num=None, figsize=(8, 6), dpi=150, facecolor='w', edgecolor='w')
for i in range(S.shape[2]):
plt.plot(range(orig_S.shape[0]),orig_S.mean(1)[:,i,0], linewidth=2, alpha=0.3, color="green")
plt.plot(range(S.shape[0]),S.mean(1)[:,i,0], linewidth=2, alpha=0.5, color="brown",linestyle='--')
plt.xlim(0,15)
plt.ylim(0,20)
line_green, = plt.plot([],[], label='Actual Data', color="green")
line_brown, = plt.plot([],[], label='Simulated Data', color="brown",linestyle='--')
plt.legend(handles=[line_green, line_brown], fontsize=20)
plt.xlabel("Campaign Period", fontsize=20, labelpad=15)
plt.ylabel("Mean Transaction Recency", fontsize=20, labelpad=15)
plt.tick_params(axis='both', which='major', labelsize=15)
plt.tick_params(axis='both', which='minor', labelsize=15)
plt.savefig("../results/vs_recency.pdf", bbox_inches='tight')
plt.close()
# PLOT HISTOGRAM OF END-PERIOD RECENCY
orig_recen = np.mean(orig_S[16],1)[:,0].squeeze()
sim_recen = np.mean(S[16],1)[:,0].squeeze()
plot_validate(orig_recen,sim_recen, xlab="End-Period Recency", ylab="Probability Mass", name="../results/vs_endperiod_recency.pdf",
n_bins=5, x_range=(0,20), y_range=(0,0.5), font = 20, legend=True, bar_width=1)
# MAKE A RECORD OF KEY RECENCY METRICS
record['KL_divergence_deeplearning_recen'] = str(KL_validate(orig_recen, sim_recen, n_bins=5, x_range=(0,20)))
record['orig_mean_deeplearning_recen'] = str(np.mean(orig_recen))
record['sim_mean_deeplearning_recen'] = str(np.mean(sim_recen))
record['orig_std_deeplearning_recen'] = str(np.std(orig_recen))
record['sim_std_deeplearning_recen'] = str(np.std(sim_recen))
# SAVE RECORD
save_json(record,'../results/vs_record_simulate.json')
print(record)
|
|
# encoding: utf-8
import hashlib
import os
import stat
import json
import mimetypes
import urllib2
import logging
from math import ceil
import posixpath
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponse, HttpResponseBadRequest, Http404, \
HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.views.decorators.http import condition
import seaserv
from seaserv import get_repo, get_commits, is_valid_filename, \
seafserv_threaded_rpc, seafserv_rpc, is_repo_owner, check_permission, \
is_passwd_set, get_file_size, get_group, get_session_info, get_commit, \
MAX_DOWNLOAD_DIR_SIZE, send_message, ccnet_threaded_rpc, \
get_personal_groups_by_user, seafile_api
from pysearpc import SearpcError
from seahub.avatar.util import get_avatar_file_storage
from seahub.auth.decorators import login_required, login_required_ajax
from seahub.auth import login as auth_login
from seahub.auth import get_backends
from seahub.base.accounts import User
from seahub.base.decorators import user_mods_check, require_POST
from seahub.base.models import UserStarredFiles, ClientLoginToken
from seahub.contacts.models import Contact
from seahub.options.models import UserOptions, CryptoOptionNotSetError
from seahub.profile.models import Profile
from seahub.share.models import FileShare, UploadLinkShare
from seahub.utils import render_permission_error, render_error, list_to_string, \
get_fileserver_root, gen_shared_upload_link, is_org_context, \
gen_dir_share_link, gen_file_share_link, get_repo_last_modify, \
calculate_repos_last_modify, get_file_type_and_ext, get_user_repos, \
EMPTY_SHA1, normalize_file_path, gen_file_upload_url, \
get_file_revision_id_size, get_ccnet_server_addr_port, \
gen_file_get_url, string2list, MAX_INT, IS_EMAIL_CONFIGURED, \
EVENTS_ENABLED, get_user_events, get_org_user_events, show_delete_days, \
TRAFFIC_STATS_ENABLED, get_user_traffic_stat, new_merge_with_no_conflict, \
user_traffic_over_limit, send_perm_audit_msg, get_origin_repo_info, \
get_max_upload_file_size, is_pro_version, FILE_AUDIT_ENABLED, \
is_org_repo_creation_allowed
from seahub.utils.paginator import get_page_range
from seahub.utils.star import get_dir_starred_files
from seahub.utils.timeutils import utc_to_local
from seahub.views.modules import MOD_PERSONAL_WIKI, enable_mod_for_user, \
disable_mod_for_user
import seahub.settings as settings
from seahub.settings import FILE_PREVIEW_MAX_SIZE, INIT_PASSWD, USE_PDFJS, \
FILE_ENCODING_LIST, FILE_ENCODING_TRY_LIST, AVATAR_FILE_STORAGE, \
SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER, SEND_EMAIL_ON_RESETTING_USER_PASSWD, \
ENABLE_SUB_LIBRARY, ENABLE_FOLDER_PERM
from constance import config
# Get an instance of a logger
logger = logging.getLogger(__name__)
def validate_owner(request, repo_id):
"""
Check whether user in the request owns the repo.
"""
ret = is_repo_owner(request.user.username, repo_id)
return True if ret else False
def is_registered_user(email):
"""
    Check whether the user is registered.
"""
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
user = None
return True if user else False
_default_repo_id = None
def get_system_default_repo_id():
global _default_repo_id
if not _default_repo_id:
try:
_default_repo_id = seaserv.seafserv_threaded_rpc.get_system_default_repo_id()
except SearpcError as e:
logger.error(e)
return _default_repo_id
def check_folder_permission(request, repo_id, path):
"""Check repo/folder/file access permission of a user, always return 'rw'
when repo is system repo and user is admin.
Arguments:
- `request`:
- `repo_id`:
- `path`:
"""
username = request.user.username
if request.user.is_staff and get_system_default_repo_id() == repo_id:
return 'rw'
return seafile_api.check_permission_by_path(repo_id, path, username)
def check_file_lock(repo_id, file_path, username):
""" check if file is locked to current user
according to returned value of seafile_api.check_file_lock:
0: not locked
1: locked by other
2: locked by me
-1: error
return (is_locked, locked_by_me)
"""
try:
return_value = seafile_api.check_file_lock(repo_id,
file_path.lstrip('/'), username)
except SearpcError as e:
logger.error(e)
return (None, None)
if return_value == 0:
return (False, False)
elif return_value == 1:
return (True , False)
elif return_value == 2:
return (True, True)
else:
return (None, None)
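# Usage sketch (illustrative, not part of the original module):
#   is_locked, locked_by_me = check_file_lock(repo_id, path, username)
#   if is_locked and not locked_by_me:
#       ...  # refuse the edit: the file is locked by someone else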
def gen_path_link(path, repo_name):
"""
Generate navigate paths and links in repo page.
"""
if path and path[-1] != '/':
path += '/'
paths = []
links = []
if path and path != '/':
paths = path[1:-1].split('/')
i = 1
for name in paths:
link = '/' + '/'.join(paths[:i])
i = i + 1
links.append(link)
if repo_name:
paths.insert(0, repo_name)
links.insert(0, '/')
zipped = zip(paths, links)
return zipped
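# Example (illustrative): gen_path_link('/docs/2016/', 'My Library') yields the pairs
#   [('My Library', '/'), ('docs', '/docs'), ('2016', '/docs/2016')]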
def get_file_download_link(repo_id, obj_id, path):
"""Generate file download link.
Arguments:
- `repo_id`:
- `obj_id`:
- `filename`:
"""
return reverse('download_file', args=[repo_id, obj_id]) + '?p=' + \
urlquote(path)
def get_repo_dirents(request, repo, commit, path, offset=-1, limit=-1):
"""List repo dirents based on commit id and path. Use ``offset`` and
``limit`` to do paginating.
    Returns: A tuple of (file_list, dir_list, dirent_more)
TODO: Some unrelated parts(file sharing, stars, modified info, etc) need
to be pulled out to multiple functions.
"""
dir_list = []
file_list = []
dirent_more = False
if commit.root_id == EMPTY_SHA1:
        return ([], [], False)
else:
try:
dirs = seafile_api.list_dir_by_commit_and_path(commit.repo_id,
commit.id, path,
offset, limit)
if not dirs:
return ([], [], False)
except SearpcError as e:
logger.error(e)
return ([], [], False)
if limit != -1 and limit == len(dirs):
dirent_more = True
username = request.user.username
starred_files = get_dir_starred_files(username, repo.id, path)
fileshares = FileShare.objects.filter(repo_id=repo.id).filter(username=username)
uploadlinks = UploadLinkShare.objects.filter(repo_id=repo.id).filter(username=username)
view_dir_base = reverse("view_common_lib_dir", args=[repo.id, ''])
dl_dir_base = reverse('repo_download_dir', args=[repo.id])
file_history_base = reverse('file_revisions', args=[repo.id])
for dirent in dirs:
dirent.last_modified = dirent.mtime
dirent.sharelink = ''
dirent.uploadlink = ''
if stat.S_ISDIR(dirent.props.mode):
dpath = posixpath.join(path, dirent.obj_name)
if dpath[-1] != '/':
dpath += '/'
for share in fileshares:
if dpath == share.path:
dirent.sharelink = gen_dir_share_link(share.token)
dirent.sharetoken = share.token
break
for link in uploadlinks:
if dpath == link.path:
dirent.uploadlink = gen_shared_upload_link(link.token)
dirent.uploadtoken = link.token
break
p_dpath = posixpath.join(path, dirent.obj_name)
dirent.view_link = view_dir_base + '?p=' + urlquote(p_dpath)
dirent.dl_link = dl_dir_base + '?p=' + urlquote(p_dpath)
dir_list.append(dirent)
else:
file_list.append(dirent)
if repo.version == 0:
dirent.file_size = get_file_size(repo.store_id, repo.version, dirent.obj_id)
else:
dirent.file_size = dirent.size
dirent.starred = False
fpath = posixpath.join(path, dirent.obj_name)
p_fpath = posixpath.join(path, dirent.obj_name)
dirent.view_link = reverse('view_lib_file', args=[repo.id, p_fpath])
dirent.dl_link = get_file_download_link(repo.id, dirent.obj_id,
p_fpath)
dirent.history_link = file_history_base + '?p=' + urlquote(p_fpath)
if fpath in starred_files:
dirent.starred = True
for share in fileshares:
if fpath == share.path:
dirent.sharelink = gen_file_share_link(share.token)
dirent.sharetoken = share.token
break
return (file_list, dir_list, dirent_more)
def get_unencry_rw_repos_by_user(request):
"""Get all unencrypted repos the user can read and write.
"""
username = request.user.username
def has_repo(repos, repo):
for r in repos:
if repo.id == r.id:
return True
return False
org_id = request.user.org.org_id if is_org_context(request) else None
owned_repos, shared_repos, groups_repos, public_repos = get_user_repos(
username, org_id=org_id)
accessible_repos = []
for r in owned_repos:
if not has_repo(accessible_repos, r) and not r.encrypted:
accessible_repos.append(r)
for r in shared_repos + groups_repos + public_repos:
if not has_repo(accessible_repos, r) and not r.encrypted:
if check_folder_permission(request, r.id, '/') == 'rw':
accessible_repos.append(r)
return accessible_repos
def render_recycle_root(request, repo_id):
repo = get_repo(repo_id)
if not repo:
raise Http404
scan_stat = request.GET.get('scan_stat', None)
try:
deleted_entries = seafile_api.get_deleted(repo_id, 0, '/', scan_stat)
except SearpcError as e:
logger.error(e)
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
return HttpResponseRedirect(next)
if not deleted_entries:
new_scan_stat = None
else:
new_scan_stat = deleted_entries[-1].scan_stat
trash_more = True if new_scan_stat is not None else False
deleted_entries = deleted_entries[0:-1]
for dirent in deleted_entries:
if stat.S_ISDIR(dirent.mode):
dirent.is_dir = True
else:
dirent.is_dir = False
# Entries sort by deletion time in descending order.
    deleted_entries.sort(key=lambda x: x.delete_time, reverse=True)
username = request.user.username
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
is_repo_owner = True if repo_owner == username else False
enable_clean = False
if is_repo_owner:
enable_clean = True
return render_to_response('repo_dir_recycle_view.html', {
'show_recycle_root': True,
'repo': repo,
'repo_dir_name': repo.name,
'dir_entries': deleted_entries,
'scan_stat': new_scan_stat,
'trash_more': trash_more,
'enable_clean': enable_clean,
}, context_instance=RequestContext(request))
def render_recycle_dir(request, repo_id, commit_id):
basedir = request.GET.get('base', '')
path = request.GET.get('p', '')
if not basedir or not path:
return render_recycle_root(request, repo_id)
if basedir[0] != '/':
basedir = '/' + basedir
if path[-1] != '/':
path += '/'
repo = get_repo(repo_id)
if not repo:
raise Http404
try:
commit = seafserv_threaded_rpc.get_commit(repo.id, repo.version, commit_id)
except SearpcError as e:
logger.error(e)
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
return HttpResponseRedirect(next)
if not commit:
raise Http404
zipped = gen_path_link(path, '')
dir_entries = seafile_api.list_dir_by_commit_and_path(commit.repo_id,
commit.id, basedir+path,
-1, -1)
for dirent in dir_entries:
if stat.S_ISDIR(dirent.mode):
dirent.is_dir = True
else:
dirent.is_dir = False
return render_to_response('repo_dir_recycle_view.html', {
'show_recycle_root': False,
'repo': repo,
'repo_dir_name': repo.name,
'zipped': zipped,
'dir_entries': dir_entries,
'commit_id': commit_id,
'basedir': basedir,
'path': path,
}, context_instance=RequestContext(request))
def render_dir_recycle_root(request, repo_id, dir_path):
repo = get_repo(repo_id)
if not repo:
raise Http404
scan_stat = request.GET.get('scan_stat', None)
try:
deleted_entries = seafile_api.get_deleted(repo_id, 0, dir_path, scan_stat)
except SearpcError as e:
logger.error(e)
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
return HttpResponseRedirect(next)
if not deleted_entries:
new_scan_stat = None
else:
new_scan_stat = deleted_entries[-1].scan_stat
trash_more = True if new_scan_stat is not None else False
deleted_entries = deleted_entries[0:-1]
for dirent in deleted_entries:
if stat.S_ISDIR(dirent.mode):
dirent.is_dir = True
else:
dirent.is_dir = False
# Entries sort by deletion time in descending order.
    deleted_entries.sort(key=lambda x: x.delete_time, reverse=True)
return render_to_response('repo_dir_recycle_view.html', {
'show_recycle_root': True,
'repo': repo,
'repo_dir_name': os.path.basename(dir_path.rstrip('/')),
'dir_entries': deleted_entries,
'scan_stat': new_scan_stat,
'trash_more': trash_more,
'dir_path': dir_path,
}, context_instance=RequestContext(request))
def render_dir_recycle_dir(request, repo_id, commit_id, dir_path):
basedir = request.GET.get('base', '')
path = request.GET.get('p', '')
if not basedir or not path:
return render_dir_recycle_root(request, repo_id, dir_path)
if basedir[0] != '/':
basedir = '/' + basedir
if path[-1] != '/':
path += '/'
repo = get_repo(repo_id)
if not repo:
raise Http404
try :
commit = seafserv_threaded_rpc.get_commit(repo.id, repo.version, commit_id)
except SearpcError as e:
logger.error(e)
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
return HttpResponseRedirect(next)
if not commit:
raise Http404
zipped = gen_path_link(path, '')
dir_entries = seafile_api.list_dir_by_commit_and_path(commit.repo_id,
commit.id, basedir+path,
-1, -1)
for dirent in dir_entries:
if stat.S_ISDIR(dirent.mode):
dirent.is_dir = True
else:
dirent.is_dir = False
return render_to_response('repo_dir_recycle_view.html', {
'show_recycle_root': False,
'repo': repo,
'repo_dir_name': os.path.basename(dir_path.rstrip('/')),
'zipped': zipped,
'dir_entries': dir_entries,
'commit_id': commit_id,
'basedir': basedir,
'path': path,
'dir_path': dir_path,
}, context_instance=RequestContext(request))
@login_required
def repo_recycle_view(request, repo_id):
if not seafile_api.get_dir_id_by_path(repo_id, '/') or \
check_folder_permission(request, repo_id, '/') != 'rw':
return render_permission_error(request, _(u'Unable to view recycle page'))
commit_id = request.GET.get('commit_id', '')
if not commit_id:
return render_recycle_root(request, repo_id)
else:
return render_recycle_dir(request, repo_id, commit_id)
@login_required
def dir_recycle_view(request, repo_id):
dir_path = request.GET.get('dir_path', '')
if not seafile_api.get_dir_id_by_path(repo_id, dir_path) or \
check_folder_permission(request, repo_id, dir_path) != 'rw':
return render_permission_error(request, _(u'Unable to view recycle page'))
commit_id = request.GET.get('commit_id', '')
if not commit_id:
return render_dir_recycle_root(request, repo_id, dir_path)
else:
return render_dir_recycle_dir(request, repo_id, commit_id, dir_path)
@login_required
def repo_online_gc(request, repo_id):
if request.method != 'POST':
raise Http404
repo = get_repo(repo_id)
if not repo:
raise Http404
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
username = request.user.username
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo.id)
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
is_repo_owner = True if repo_owner == username else False
if not is_repo_owner:
messages.error(request, _('Permission denied'))
return HttpResponseRedirect(next)
day = int(request.POST.get('day'))
try:
seafile_api.clean_up_repo_history(repo.id, day)
except SearpcError as e:
logger.error(e)
messages.error(request, _('Internal server error'))
return HttpResponseRedirect(next)
return HttpResponseRedirect(next)
def can_access_repo_setting(request, repo_id, username):
repo = seafile_api.get_repo(repo_id)
if not repo:
return (False, None)
# no settings for virtual repo
if ENABLE_SUB_LIBRARY and repo.is_virtual:
return (False, None)
# check permission
if is_org_context(request):
repo_owner = seafile_api.get_org_repo_owner(repo_id)
else:
repo_owner = seafile_api.get_repo_owner(repo_id)
is_owner = True if username == repo_owner else False
if not is_owner:
return (False, None)
return (True, repo)
def upload_error_msg (code):
err_msg = _(u'Internal Server Error')
if (code == 0):
err_msg = _(u'Filename contains invalid character')
elif (code == 1):
err_msg = _(u'Duplicated filename')
elif (code == 2):
err_msg = _(u'File does not exist')
elif (code == 3):
err_msg = _(u'File size surpasses the limit')
elif (code == 4):
err_msg = _(u'The space of owner is used up, upload failed')
elif (code == 5):
err_msg = _(u'An error occurs during file transfer')
return err_msg
def upload_file_error(request, repo_id):
if request.method == 'GET':
repo = get_repo(repo_id)
if not repo:
raise Http404
parent_dir = request.GET.get('p')
filename = request.GET.get('fn', '')
err = request.GET.get('err')
if not parent_dir or not err:
return render_error(request, _(u'Invalid url'))
zipped = gen_path_link (parent_dir, repo.name)
code = int(err)
err_msg = upload_error_msg(code)
return render_to_response('upload_file_error.html', {
'repo': repo,
'zipped': zipped,
'filename': filename,
'err_msg': err_msg,
}, context_instance=RequestContext(request))
def update_file_error(request, repo_id):
if request.method == 'GET':
repo = get_repo(repo_id)
if not repo:
raise Http404
target_file = request.GET.get('p')
err = request.GET.get('err')
if not target_file or not err:
return render_error(request, _(u'Invalid url'))
zipped = gen_path_link (target_file, repo.name)
code = int(err)
err_msg = upload_error_msg(code)
return render_to_response('update_file_error.html', {
'repo': repo,
'zipped': zipped,
'err_msg': err_msg,
}, context_instance=RequestContext(request))
@login_required
def repo_history(request, repo_id):
"""
List library modification histories.
"""
user_perm = check_folder_permission(request, repo_id, '/')
if not user_perm:
return render_permission_error(request, _(u'Unable to view library modification'))
repo = get_repo(repo_id)
if not repo:
raise Http404
username = request.user.username
try:
server_crypto = UserOptions.objects.is_server_crypto(username)
except CryptoOptionNotSetError:
# Assume server_crypto is ``False`` if this option is not set.
server_crypto = False
password_set = False
if repo.props.encrypted and \
(repo.enc_version == 1 or (repo.enc_version == 2 and server_crypto)):
try:
ret = seafserv_rpc.is_passwd_set(repo_id, username)
if ret == 1:
password_set = True
        except SearpcError as e:
return render_error(request, e.msg)
if not password_set:
return HttpResponseRedirect(reverse("view_common_lib_dir", args=[repo_id, '']))
try:
current_page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
current_page = 1
per_page = 100
commits_all = get_commits(repo_id, per_page * (current_page -1),
per_page + 1)
commits = commits_all[:per_page]
for c in commits:
c.show = False if new_merge_with_no_conflict(c) else True
if len(commits_all) == per_page + 1:
page_next = True
else:
page_next = False
return render_to_response('repo_history.html', {
"repo": repo,
"commits": commits,
'current_page': current_page,
'prev_page': current_page-1,
'next_page': current_page+1,
'per_page': per_page,
'page_next': page_next,
'user_perm': user_perm,
}, context_instance=RequestContext(request))
@login_required
@require_POST
def repo_revert_history(request, repo_id):
next = request.META.get('HTTP_REFERER', None)
if not next:
next = settings.SITE_ROOT
repo = get_repo(repo_id)
if not repo:
messages.error(request, _("Library does not exist"))
return HttpResponseRedirect(next)
# perm check
perm = check_folder_permission(request, repo_id, '/')
username = request.user.username
repo_owner = seafile_api.get_repo_owner(repo.id)
if perm is None or repo_owner != username:
messages.error(request, _("Permission denied"))
return HttpResponseRedirect(next)
try:
server_crypto = UserOptions.objects.is_server_crypto(username)
except CryptoOptionNotSetError:
# Assume server_crypto is ``False`` if this option is not set.
server_crypto = False
password_set = False
if repo.props.encrypted and \
(repo.enc_version == 1 or (repo.enc_version == 2 and server_crypto)):
try:
ret = seafserv_rpc.is_passwd_set(repo_id, username)
if ret == 1:
password_set = True
        except SearpcError as e:
return render_error(request, e.msg)
if not password_set:
return HttpResponseRedirect(reverse("view_common_lib_dir", args=[repo_id, '']))
commit_id = request.GET.get('commit_id', '')
if not commit_id:
return render_error(request, _(u'Please specify history ID'))
try:
seafserv_threaded_rpc.revert_on_server(repo_id, commit_id, request.user.username)
    except SearpcError as e:
if e.msg == 'Bad arguments':
return render_error(request, _(u'Invalid arguments'))
elif e.msg == 'No such repo':
return render_error(request, _(u'Library does not exist'))
elif e.msg == "Commit doesn't exist":
return render_error(request, _(u'History you specified does not exist'))
else:
return render_error(request, _(u'Unknown error'))
return HttpResponseRedirect(next)
def fpath_to_link(repo_id, path, is_dir=False):
"""Translate file path of a repo to its view link"""
if is_dir:
href = reverse("view_common_lib_dir", args=[repo_id, path.encode('utf-8').strip('/')])
else:
if not path.startswith('/'):
p = '/' + path
href = reverse("view_lib_file", args=[repo_id, p.encode('utf-8')])
return '<a href="%s">%s</a>' % (href, escape(path))
def get_diff(repo_id, arg1, arg2):
lists = {'new': [], 'removed': [], 'renamed': [], 'modified': [],
'newdir': [], 'deldir': []}
diff_result = seafserv_threaded_rpc.get_diff(repo_id, arg1, arg2)
if not diff_result:
return lists
for d in diff_result:
if d.status == "add":
lists['new'].append(fpath_to_link(repo_id, d.name))
elif d.status == "del":
lists['removed'].append(escape(d.name))
elif d.status == "mov":
lists['renamed'].append(escape(d.name) + " ==> " + fpath_to_link(repo_id, d.new_name))
elif d.status == "mod":
lists['modified'].append(fpath_to_link(repo_id, d.name))
elif d.status == "newdir":
lists['newdir'].append(fpath_to_link(repo_id, d.name, is_dir=True))
elif d.status == "deldir":
lists['deldir'].append(escape(d.name))
return lists
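# Illustrative result (hypothetical commit): a diff that adds report.txt and renames a.png to
# b.png produces something like
#   {'new': ['<a href="...">report.txt</a>'],
#    'renamed': ['a.png ==> <a href="...">b.png</a>'],
#    'removed': [], 'modified': [], 'newdir': [], 'deldir': []}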
def create_default_library(request):
"""Create a default library for user.
Arguments:
- `username`:
"""
username = request.user.username
# Disable user guide no matter user permission error or creation error,
# so that the guide popup only show once.
UserOptions.objects.disable_user_guide(username)
if not request.user.permissions.can_add_repo():
return
if is_org_context(request):
org_id = request.user.org.org_id
default_repo = seafile_api.create_org_repo(name=_("My Library"),
desc=_("My Library"),
username=username,
passwd=None,
org_id=org_id)
else:
default_repo = seafile_api.create_repo(name=_("My Library"),
desc=_("My Library"),
username=username,
passwd=None)
sys_repo_id = get_system_default_repo_id()
if sys_repo_id is None:
return
try:
dirents = seafile_api.list_dir_by_path(sys_repo_id, '/')
for e in dirents:
obj_name = e.obj_name
seafile_api.copy_file(sys_repo_id, '/', obj_name,
default_repo, '/', obj_name, username, 0)
except SearpcError as e:
logger.error(e)
return
UserOptions.objects.set_default_repo(username, default_repo)
return default_repo
def get_owned_repo_list(request):
"""List owned repos.
"""
username = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
return seafile_api.get_org_owned_repo_list(org_id, username)
else:
return seafile_api.get_owned_repo_list(username)
def get_virtual_repos_by_owner(request):
"""List virtual repos.
Arguments:
- `request`:
"""
username = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
return seaserv.seafserv_threaded_rpc.get_org_virtual_repos_by_owner(
org_id, username)
else:
return seafile_api.get_virtual_repos_by_owner(username)
@login_required
@user_mods_check
def libraries(request):
"""
New URL to replace myhome
"""
username = request.user.username
# options
if request.cloud_mode and request.user.org is None:
allow_public_share = False
else:
allow_public_share = True
sub_lib_enabled = UserOptions.objects.is_sub_lib_enabled(username)
max_upload_file_size = get_max_upload_file_size()
guide_enabled = UserOptions.objects.is_user_guide_enabled(username)
if guide_enabled:
create_default_library(request)
folder_perm_enabled = True if is_pro_version() and ENABLE_FOLDER_PERM else False
can_add_pub_repo = True if is_org_repo_creation_allowed(request) else False
return render_to_response('libraries.html', {
"allow_public_share": allow_public_share,
"guide_enabled": guide_enabled,
"sub_lib_enabled": sub_lib_enabled,
'enable_upload_folder': settings.ENABLE_UPLOAD_FOLDER,
'enable_resumable_fileupload': settings.ENABLE_RESUMABLE_FILEUPLOAD,
'enable_thumbnail': settings.ENABLE_THUMBNAIL,
'thumbnail_default_size': settings.THUMBNAIL_DEFAULT_SIZE,
'thumbnail_size_for_grid': settings.THUMBNAIL_SIZE_FOR_GRID,
'enable_encrypted_library': config.ENABLE_ENCRYPTED_LIBRARY,
'enable_repo_history_setting': config.ENABLE_REPO_HISTORY_SETTING,
'max_upload_file_size': max_upload_file_size,
'folder_perm_enabled': folder_perm_enabled,
'is_pro': True if is_pro_version() else False,
'file_audit_enabled': FILE_AUDIT_ENABLED,
'can_add_pub_repo': can_add_pub_repo,
}, context_instance=RequestContext(request))
@login_required
@require_POST
def unsetinnerpub(request, repo_id):
"""Unshare repos in organization or in share admin page.
Only system admin, organization admin or repo owner can perform this op.
"""
repo = get_repo(repo_id)
perm = request.GET.get('permission', None)
if perm is None:
return render_error(request, _(u'Argument is not valid'))
if not repo:
messages.error(request, _('Failed to unshare the library, as it does not exist.'))
return HttpResponseRedirect(reverse('share_admin'))
# permission check
username = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
repo_owner = seafile_api.get_org_repo_owner(repo.id)
is_repo_owner = True if repo_owner == username else False
if not (request.user.org.is_staff or is_repo_owner):
raise Http404
else:
repo_owner = seafile_api.get_repo_owner(repo.id)
is_repo_owner = True if repo_owner == username else False
if not (request.user.is_staff or is_repo_owner):
raise Http404
try:
if is_org_context(request):
org_id = request.user.org.org_id
seaserv.seafserv_threaded_rpc.unset_org_inner_pub_repo(org_id,
repo.id)
else:
seaserv.unset_inner_pub_repo(repo.id)
origin_repo_id, origin_path = get_origin_repo_info(repo.id)
if origin_repo_id is not None:
perm_repo_id = origin_repo_id
perm_path = origin_path
else:
perm_repo_id = repo.id
perm_path = '/'
send_perm_audit_msg('delete-repo-perm', username, 'all',
perm_repo_id, perm_path, perm)
messages.success(request, _('Unshare "%s" successfully.') % repo.name)
except SearpcError:
messages.error(request, _('Failed to unshare "%s".') % repo.name)
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
return HttpResponseRedirect(next)
# @login_required
# def ownerhome(request, owner_name):
# owned_repos = []
# quota_usage = 0
# owned_repos = seafserv_threaded_rpc.list_owned_repos(owner_name)
# quota_usage = seafserv_threaded_rpc.get_user_quota_usage(owner_name)
# user_dict = user_info(request, owner_name)
# return render_to_response('ownerhome.html', {
# "owned_repos": owned_repos,
# "quota_usage": quota_usage,
# "owner": owner_name,
# "user_dict": user_dict,
# }, context_instance=RequestContext(request))
@login_required
def repo_set_access_property(request, repo_id):
ap = request.GET.get('ap', '')
seafserv_threaded_rpc.repo_set_access_property(repo_id, ap)
return HttpResponseRedirect(reverse("view_common_lib_dir", args=[repo_id, '']))
@login_required
def file_upload_progress_page(request):
'''
    Rendered as an iframe in repo_upload_file.html, to work around an upload progress issue in Chrome.
'''
uuid = request.GET.get('uuid', '')
fileserver_root = get_fileserver_root()
upload_progress_con_id = request.GET.get('upload_progress_con_id', '')
return render_to_response('file_upload_progress_page.html', {
'uuid': uuid,
'fileserver_root': fileserver_root,
'upload_progress_con_id': upload_progress_con_id,
}, context_instance=RequestContext(request))
@login_required
def validate_filename(request):
repo_id = request.GET.get('repo_id')
filename = request.GET.get('filename')
if not (repo_id and filename):
return render_error(request)
result = {'ret':'yes'}
try:
ret = is_valid_filename(filename)
except SearpcError:
result['ret'] = 'error'
else:
result['ret'] = 'yes' if ret == 1 else 'no'
content_type = 'application/json; charset=utf-8'
return HttpResponse(json.dumps(result), content_type=content_type)
def render_file_revisions (request, repo_id):
"""List all history versions of a file."""
days_str = request.GET.get('days', '')
try:
days = int(days_str)
except ValueError:
days = 7
path = request.GET.get('p', '/')
if path[-1] == '/':
path = path[:-1]
u_filename = os.path.basename(path)
if not path:
return render_error(request)
repo = get_repo(repo_id)
if not repo:
error_msg = _(u"Library does not exist")
return render_error(request, error_msg)
filetype = get_file_type_and_ext(u_filename)[0].lower()
if filetype == 'text' or filetype == 'markdown':
can_compare = True
else:
can_compare = False
try:
commits = seafile_api.get_file_revisions(repo_id, path, -1, -1, days)
    except SearpcError as e:
logger.error(e.msg)
return render_error(request, e.msg)
if not commits:
return render_error(request, _(u'No revisions found'))
# Check whether user is repo owner
if validate_owner(request, repo_id):
is_owner = True
else:
is_owner = False
cur_path = path
for commit in commits:
commit.path = cur_path
if commit.rev_renamed_old_path:
cur_path = '/' + commit.rev_renamed_old_path
zipped = gen_path_link(path, repo.name)
can_revert_file = True
username = request.user.username
is_locked, locked_by_me = check_file_lock(repo_id, path, username)
if seafile_api.check_permission_by_path(repo_id, path, username) != 'rw' or \
(is_locked and not locked_by_me):
can_revert_file = False
return render_to_response('file_revisions.html', {
'repo': repo,
'path': path,
'u_filename': u_filename,
'zipped': zipped,
'commits': commits,
'is_owner': is_owner,
'can_compare': can_compare,
'can_revert_file': can_revert_file,
'days': days,
}, context_instance=RequestContext(request))
@login_required
@require_POST
def repo_revert_file(request, repo_id):
repo = get_repo(repo_id)
if not repo:
raise Http404
commit_id = request.GET.get('commit')
path = request.GET.get('p')
if not (commit_id and path):
return render_error(request, _(u"Invalid arguments"))
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
username = request.user.username
# perm check
if check_folder_permission(request, repo.id, path) != 'rw':
messages.error(request, _("Permission denied"))
return HttpResponseRedirect(next)
is_locked, locked_by_me = check_file_lock(repo_id, path, username)
if (is_locked, locked_by_me) == (None, None):
messages.error(request, _("Check file lock error"))
return HttpResponseRedirect(next)
if is_locked and not locked_by_me:
messages.error(request, _("File is locked"))
return HttpResponseRedirect(next)
try:
ret = seafile_api.revert_file(repo_id, commit_id, path, username)
except Exception as e:
logger.error(e)
messages.error(request, _('Failed to restore, please try again later.'))
return HttpResponseRedirect(next)
if ret == 1:
root_url = reverse('view_common_lib_dir', args=[repo_id, ''])
msg = _(u'Successfully revert %(path)s to <a href="%(root)s">root directory.</a>') % {"path": escape(path.lstrip('/')), "root": root_url}
messages.success(request, msg, extra_tags='safe')
else:
file_view_url = reverse('view_lib_file', args=[repo_id, path.encode('utf-8')])
msg = _(u'Successfully revert <a href="%(url)s">%(path)s</a>') % {"url": file_view_url, "path": escape(path.lstrip('/'))}
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(next)
@login_required
@require_POST
def repo_revert_dir(request, repo_id):
repo = get_repo(repo_id)
if not repo:
raise Http404
commit_id = request.GET.get('commit')
path = request.GET.get('p')
if not (commit_id and path):
return render_error(request, _(u"Invalid arguments"))
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
# perm check
if check_folder_permission(request, repo.id, path) != 'rw':
messages.error(request, _("Permission denied"))
return HttpResponseRedirect(next)
try:
ret = seafile_api.revert_dir(repo_id, commit_id, path, request.user.username)
except Exception as e:
logger.error(e)
messages.error(request, _('Failed to restore, please try again later.'))
return HttpResponseRedirect(next)
if ret == 1:
root_url = reverse('view_common_lib_dir', args=[repo_id, ''])
msg = _(u'Successfully revert %(path)s to <a href="%(url)s">root directory.</a>') % {"path": escape(path.lstrip('/')), "url": root_url}
messages.success(request, msg, extra_tags='safe')
else:
dir_view_url = reverse('view_common_lib_dir', args=[repo_id, path.strip('/').encode('utf-8')])
msg = _(u'Successfully revert <a href="%(url)s">%(path)s</a>') % {"url": dir_view_url, "path": escape(path.lstrip('/'))}
messages.success(request, msg, extra_tags='safe')
return HttpResponseRedirect(next)
@login_required
def file_revisions(request, repo_id):
"""List file revisions in file version history page.
"""
repo = get_repo(repo_id)
if not repo:
raise Http404
# perm check
if check_folder_permission(request, repo_id, '/') is None:
raise Http404
return render_file_revisions(request, repo_id)
def demo(request):
"""
Login as demo account.
"""
user = User.objects.get(email=settings.CLOUD_DEMO_USER)
for backend in get_backends():
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
auth_login(request, user)
redirect_to = settings.SITE_ROOT
return HttpResponseRedirect(redirect_to)
def list_inner_pub_repos(request):
"""List inner pub repos.
"""
username = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
return seaserv.list_org_inner_pub_repos(org_id, username)
if not request.cloud_mode:
return seaserv.list_inner_pub_repos(username)
return []
def i18n(request):
"""
Set client language preference, lasts for one month
"""
from django.conf import settings
next = request.META.get('HTTP_REFERER', settings.SITE_ROOT)
lang = request.GET.get('lang', settings.LANGUAGE_CODE)
if lang not in [e[0] for e in settings.LANGUAGES]:
# language code is not supported, use default.
lang = settings.LANGUAGE_CODE
# set language code to user profile if user is logged in
if not request.user.is_anonymous():
p = Profile.objects.get_profile_by_user(request.user.username)
if p is not None:
# update exist record
p.set_lang_code(lang)
else:
# add new record
Profile.objects.add_or_update(request.user.username, '', '', lang)
# set language code to client
res = HttpResponseRedirect(next)
res.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang, max_age=30*24*60*60)
return res
@login_required
def repo_download_dir(request, repo_id):
repo = get_repo(repo_id)
if not repo:
return render_error(request, _(u'Library does not exist'))
path = request.GET.get('p', '/')
if path[-1] != '/': # Normalize dir path
path += '/'
if not seafile_api.get_dir_id_by_path(repo.id, path):
return render_error(request, _('"%s" does not exist.') % path)
if len(path) > 1:
dirname = os.path.basename(path.rstrip('/')) # Here use `rstrip` to cut out last '/' in path
else:
dirname = repo.name
allow_download = True if check_folder_permission(request, repo_id, '/') else False
if allow_download:
dir_id = seafile_api.get_dir_id_by_commit_and_path(repo.id,
repo.head_cmmt_id, path)
try:
total_size = seafile_api.get_dir_size(repo.store_id,
repo.version, dir_id)
        except Exception as e:
logger.error(str(e))
return render_error(request, _(u'Internal Error'))
if total_size > MAX_DOWNLOAD_DIR_SIZE:
return render_error(request, _(u'Unable to download directory "%s": size is too large.') % dirname)
token = seafile_api.get_fileserver_access_token(repo_id,
dir_id,
'download-dir',
request.user.username)
else:
return render_error(request, _(u'Unable to download "%s"') % dirname )
url = gen_file_get_url(token, dirname)
from seahub.views.file import send_file_access_msg
send_file_access_msg(request, repo, path, 'web')
return redirect(url)
def group_events_data(events):
"""
Group events according to the date.
"""
event_groups = []
for e in events:
e.time = utc_to_local(e.timestamp)
e.date = e.time.strftime("%Y-%m-%d")
if e.etype == 'repo-update':
e.author = e.commit.creator_name
elif e.etype == 'repo-create':
e.author = e.creator
else:
e.author = e.repo_owner
        if not event_groups or e.date != event_groups[-1]['date']:
event_group = {}
event_group['date'] = e.date
event_group['events'] = [e]
event_groups.append(event_group)
else:
event_groups[-1]['events'].append(e)
return event_groups
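# Illustrative output (hypothetical events): three events dated 2016-03-01, 2016-03-01 and
# 2016-03-02 are grouped as
#   [{'date': '2016-03-01', 'events': [e1, e2]}, {'date': '2016-03-02', 'events': [e3]}]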
@login_required
def convert_cmmt_desc_link(request):
"""Return user to file/directory page based on the changes in commit.
"""
repo_id = request.GET.get('repo_id')
cmmt_id = request.GET.get('cmmt_id')
name = request.GET.get('nm')
repo = get_repo(repo_id)
if not repo:
raise Http404
# perm check
if check_folder_permission(request, repo_id, '/') is None:
raise Http404
diff_result = seafserv_threaded_rpc.get_diff(repo_id, '', cmmt_id)
if not diff_result:
raise Http404
for d in diff_result:
if name not in d.name:
# skip to next diff_result if file/folder user clicked does not
# match the diff_result
continue
if d.status == 'add' or d.status == 'mod': # Add or modify file
return HttpResponseRedirect(
reverse('view_lib_file', args=[repo_id, '/' + d.name]))
elif d.status == 'mov': # Move or Rename file
return HttpResponseRedirect(
reverse('view_lib_file', args=[repo_id, '/' + d.new_name]))
elif d.status == 'newdir':
return HttpResponseRedirect(
reverse('view_common_lib_dir', args=[repo_id, d.name.strip('/')]))
else:
continue
    # Should never reach here.
logger.warn('OUT OF CONTROL!')
logger.warn('repo_id: %s, cmmt_id: %s, name: %s' % (repo_id, cmmt_id, name))
for d in diff_result:
logger.warn('diff_result: %s' % (d.__dict__))
raise Http404
@login_required
def toggle_modules(request):
"""Enable or disable modules.
"""
if request.method != 'POST':
raise Http404
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
username = request.user.username
personal_wiki = request.POST.get('personal_wiki', 'off')
if personal_wiki == 'on':
enable_mod_for_user(username, MOD_PERSONAL_WIKI)
messages.success(request, _('Successfully enable "Personal Wiki".'))
else:
disable_mod_for_user(username, MOD_PERSONAL_WIKI)
        if referer and referer.find('wiki') > 0:
next = settings.SITE_ROOT
messages.success(request, _('Successfully disable "Personal Wiki".'))
return HttpResponseRedirect(next)
storage = get_avatar_file_storage()
def latest_entry(request, filename):
try:
return storage.modified_time(filename)
except Exception as e:
logger.error(e)
return None
@condition(last_modified_func=latest_entry)
def image_view(request, filename):
if AVATAR_FILE_STORAGE is None:
raise Http404
# read file from cache, if hit
filename_md5 = hashlib.md5(filename).hexdigest()
cache_key = 'image_view__%s' % filename_md5
file_content = cache.get(cache_key)
if file_content is None:
# otherwise, read file from database and update cache
image_file = storage.open(filename, 'rb')
if not image_file:
raise Http404
file_content = image_file.read()
cache.set(cache_key, file_content, 365 * 24 * 60 * 60)
# Prepare response
content_type, content_encoding = mimetypes.guess_type(filename)
response = HttpResponse(content=file_content, content_type=content_type)
response['Content-Disposition'] = 'inline; filename=%s' % filename
if content_encoding:
response['Content-Encoding'] = content_encoding
return response
def shib_login(request):
return HttpResponseRedirect(request.GET.get("next", reverse('libraries')))
def underscore_template(request, template):
"""Serve underscore template through Django, mainly for I18n.
Arguments:
- `request`:
- `template`:
"""
if not template.startswith('js'): # light security check
raise Http404
return render_to_response(template, {},
context_instance=RequestContext(request))
def fake_view(request, **kwargs):
"""
Used for 'view_common_lib_dir' and some other urls
As the urls start with '#',
http request will not access this function
"""
pass
def client_token_login(request):
"""Login from desktop client with a generated token.
"""
tokenstr = request.GET.get('token', '')
user = None
if len(tokenstr) == 32:
try:
username = ClientLoginToken.objects.get_username(tokenstr)
except ClientLoginToken.DoesNotExist:
pass
else:
try:
user = User.objects.get(email=username)
for backend in get_backends():
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
except User.DoesNotExist:
pass
if user:
if request.user.is_authenticated() and request.user.username == user.username:
pass
else:
request.client_token_login = True
auth_login(request, user)
return HttpResponseRedirect(request.GET.get("next", reverse('libraries')))
|
|
#!/usr/bin/env python
import sys
import os
import pysam
import gzip
from argparse import ArgumentParser
from collections import defaultdict
class iteratorWrapper:
def __init__(self, inIterator, finalValue):
self.it = inIterator
self.finalValue = finalValue
self.endIter = False
def __iter__(self):
return self
def next(self):
try:
temp = self.it.next()
except StopIteration:
if self.endIter == False:
temp = self.finalValue
self.endIter = True
else:
raise StopIteration
return temp
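# Note (descriptive, not part of the original script): iteratorWrapper yields one extra sentinel
# record (FinalValue in main()) once the underlying iterator is exhausted, so the consensus loop
# below sees a final tag change and flushes the last tag family.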
def consensus_caller(input_reads, cutoff, tag, length_check):
nuc_identity_list = [0, 0, 0, 0, 0, 0] # In the order of T, C, G, A, N, Total
nuc_key_dict = {0: 'T', 1: 'C', 2: 'G', 3: 'A', 4: 'N'}
consensus_seq = ''
if length_check is True:
for read in input_reads[1:]:
if len(read) != len(input_reads[0]):
raise Exception("Read lengths for tag %s used for calculating the SSCS are not uniform!!!" % tag)
for i in xrange(len(input_reads[0])): # Count the types of nucleotides at a position in a read.
# i is the nucleotide index within a read in groupedReadsList
for j in xrange(len(input_reads)): # Do this for every read that comprises a tag family.
# j is the read index within groupedReadsList
try:
if input_reads[j][i] == 'T':
nuc_identity_list[0] += 1
elif input_reads[j][i] == 'C':
nuc_identity_list[1] += 1
elif input_reads[j][i] == 'G':
nuc_identity_list[2] += 1
elif input_reads[j][i] == 'A':
nuc_identity_list[3] += 1
elif input_reads[j][i] == 'N':
nuc_identity_list[4] += 1
else:
nuc_identity_list[4] += 1
nuc_identity_list[5] += 1
except:
break
try:
for j in [0, 1, 2, 3, 4]:
if float(nuc_identity_list[j])/float(nuc_identity_list[5]) >= cutoff:
consensus_seq += nuc_key_dict[j]
break
elif j == 4:
consensus_seq += 'N'
except:
consensus_seq += 'N'
nuc_identity_list = [0, 0, 0, 0, 0, 0] # Reset for the next nucleotide position
return consensus_seq
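# Worked example (illustrative): with cutoff=0.7 and reads ['ACT', 'ACT', 'AGT'], positions 0 and 2
# are unanimous ('A', 'T') while position 1 has only 2/3 agreement (< 0.7), so consensus_caller
# returns 'ANT'.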
def qual_calc(qual_list):
return [sum(qual_score) for qual_score in zip(*qual_list)]
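# Example (illustrative): qual_calc([[30, 32, 35], [28, 35, 20]]) returns [58, 67, 55],
# the per-position sums of the input quality lists.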
def main():
parser = ArgumentParser()
parser.add_argument('--input', dest='in_bam', required=True,
help='Path to unaligned, paired-end, bam file.')
parser.add_argument('--taglen', dest='tag_len', type=int, default=12,
help='Length in bases of the duplex tag sequence.[12]')
parser.add_argument('--spacerlen', dest='spcr_len', type=int, default=5,
help='Length in bases of the spacer sequence between duplex tag and the start of target DNA. [5]')
parser.add_argument("--tagstats", dest='tagstats', action="store_true",
help="output tagstats file")
parser.add_argument('--minmem', dest='minmem', type=int, default=3,
help="Minimum number of reads allowed to comprise a consensus. [3]")
parser.add_argument('--maxmem', dest='maxmem', type=int, default=200,
help="Maximum number of reads allowed to comprise a consensus. [200]")
parser.add_argument('--cutoff', dest='cutoff', type=float, default=.7,
help="Percentage of nucleotides at a given position in a read that must be identical in order "
"for a consensus to be called at that position. [0.7]")
parser.add_argument('--Ncutoff', dest='Ncutoff', type=float, default=1,
help="With --filt 'n', maximum fraction of Ns allowed in a consensus [1.0]")
parser.add_argument('--write-sscs', dest='write_sscs', action="store_true",
help="Print the SSCS reads to file in FASTQ format")
parser.add_argument('--without-dcs', dest='without_dcs', action="store_true",
help="Don't print final DCS reads")
parser.add_argument("--rep_filt", action="store", type=int, dest='rep_filt',
help="Remove tags with homomeric runs of nucleotides of length x. [9]", default=9)
parser.add_argument('--prefix', dest='prefix', type=str, required=True,
help="Sample name to uniquely identify samples")
o = parser.parse_args()
dummy_header = {'HD': {'VN': '1.0'}, 'SQ': [{'LN': 1575, 'SN': 'chr1'}, {'LN': 1584, 'SN': 'chr2'}]}
in_bam_file = pysam.AlignmentFile(o.in_bam, "rb", check_sq=False)
temp_bam = pysam.AlignmentFile(o.prefix + ".temp.bam", 'wb', header=dummy_header)
paired_end_count = 1
if o.write_sscs is True:
read1_sscs_fq_file = gzip.open(o.prefix + '_read1_sscs.fq.gz', 'wb')
read2_sscs_fq_file = gzip.open(o.prefix + '_read2_sscs.fq.gz', 'wb')
if o.without_dcs is False:
read1_dcs_fq_file = gzip.open(o.prefix + '_read1_dcs.fq.gz', 'wb')
read2_dcs_fq_file = gzip.open(o.prefix + '_read2_dcs.fq.gz', 'wb')
    '''This block of code takes an unaligned bam file, extracts the tag sequences from the reads, and converts them
    to "ab/ba" format, where 'a' and 'b' are the tag sequences from Read 1 and Read 2, respectively. Conversion occurs
    by putting the lexicographically greater tag in front of the lesser one, so both orientations of the same molecule
    share one combined tag. The original read orientation is denoted by appending #ab or #ba to the end of the tag.
    After conversion, the resulting temporary bam file is sorted by read name.'''
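    # Illustrative example (hypothetical 4-base tags for brevity): Read 1 tag "TTAC" and Read 2 tag
    # "AAGT" give the family name "TTACAAGT#ab"; the same molecule read in the opposite orientation
    # (Read 1 "AAGT", Read 2 "TTAC") gives "TTACAAGT#ba", so both strands of a duplex end up adjacent
    # once the temporary bam is name-sorted.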
print "Parsing tags..."
for line in in_bam_file.fetch(until_eof=True):
if paired_end_count % 2 == 1:
temp_read1_entry = pysam.AlignedSegment()
temp_read1_entry.query_name = line.query_name
temp_read1_entry.query_sequence = line.query_alignment_sequence
temp_read1_entry.query_qualities = line.query_alignment_qualities
if paired_end_count % 2 == 0:
temp_bam_entry = pysam.AlignedSegment()
if temp_read1_entry.query_sequence[:o.tag_len] > line.query_alignment_sequence[:o.tag_len]:
temp_bam_entry.query_name = temp_read1_entry.query_sequence[:o.tag_len] + \
line.query_alignment_sequence[:o.tag_len] + '#ab'
elif temp_read1_entry.query_sequence[:o.tag_len] < line.query_alignment_sequence[:o.tag_len]:
temp_bam_entry.query_name = line.query_alignment_sequence[:o.tag_len] + \
temp_read1_entry.query_sequence[:o.tag_len] + '#ba'
elif temp_read1_entry.query_sequence[:o.tag_len] == line.query_alignment_sequence[:o.tag_len]:
paired_end_count += 1
continue
# Write entries for Read 1
temp_bam_entry.query_name += ":1"
temp_bam_entry.query_sequence = temp_read1_entry.query_sequence[o.tag_len + o.spcr_len:]
temp_bam_entry.query_qualities = temp_read1_entry.query_qualities[o.tag_len + o.spcr_len:]
temp_bam_entry.set_tag('X?', temp_read1_entry.query_name, 'Z')
temp_bam.write(temp_bam_entry)
# Write entries for Read 2
temp_bam_entry.query_name = temp_bam_entry.query_name.replace('1', '2')
temp_bam_entry.query_sequence = line.query_sequence[o.tag_len + o.spcr_len:]
temp_bam_entry.query_qualities = line.query_qualities[o.tag_len + o.spcr_len:]
temp_bam_entry.set_tag('X?', line.query_name, 'Z')
temp_bam.write(temp_bam_entry)
paired_end_count += 1
in_bam_file.close()
temp_bam.close()
print "Sorting reads on tag sequence..."
pysam.sort("-n", o.prefix + ".temp.bam", "-o", o.prefix + ".temp.sort.bam") # Sort by read name, which will be the
# tag sequence in this case.
os.remove(o.prefix + ".temp.bam")
'''Extracting tags and sorting based on tag sequence is complete. This block of code now performs the consensus
calling on the tag families in the temporary name sorted bam file.'''
seq_dict = {'ab:1': [], 'ab:2': [], 'ba:1': [], 'ba:2': []}
qual_dict = {'ab:1': [], 'ab:2': [], 'ba:1': [], 'ba:2': []}
fam_size_x_axis = []
fam_size_y_axis = []
read1_dcs_len = 0
read2_dcs_len = 0
in_bam_file = pysam.AlignmentFile(o.prefix + '.temp.sort.bam', "rb", check_sq=False)
first_line = in_bam_file.next()
FinalValue = pysam.AlignedSegment()
FinalValue.query_name = "FinalValue#ab:1"
seq_dict[first_line.query_name.split('#')[1]].append(first_line.query_sequence)
qual_dict[first_line.query_name.split('#')[1]].append(list(first_line.query_qualities))
tag_count_dict = defaultdict(lambda: 0)
print "Creating consensus reads..."
for line in iteratorWrapper(in_bam_file.fetch(until_eof=True), FinalValue):
tag, subtag_order = first_line.query_name.split('#')[0], first_line.query_name.split('#')[1]
if line.query_name.split('#')[0] == tag:
seq_dict[line.query_name.split('#')[1]].append(line.query_sequence)
qual_dict[line.query_name.split('#')[1]].append(list(line.query_qualities))
else:
if len(seq_dict['ab:1']) != len(seq_dict['ab:2']) or len(seq_dict['ba:1']) != len(seq_dict['ba:2']):
                raise Exception('ERROR: Read counts for Read 1 and Read 2 do not match for tag %s' % tag)
for tag_subtype in seq_dict.keys():
if len(seq_dict[tag_subtype]) > 0:
tag_count_dict[len(seq_dict[tag_subtype])] += 1
if len(seq_dict[tag_subtype]) < o.minmem:
seq_dict[tag_subtype] = []
qual_dict[tag_subtype] = []
elif o.minmem <= len(seq_dict[tag_subtype]) <= o.maxmem: # Tag types w/o reads should not be submitted
# as long as minmem is > 0
seq_dict[tag_subtype] = [consensus_caller(seq_dict[tag_subtype], o.cutoff, tag, True),
str(len(seq_dict[tag_subtype]))]
qual_dict[tag_subtype] = qual_calc(qual_dict[tag_subtype])
elif len(seq_dict[tag_subtype]) > o.maxmem:
seq_dict[tag_subtype] = [consensus_caller(seq_dict[tag_subtype][:o.maxmem], o.cutoff, tag, True),
str(len(seq_dict[tag_subtype]))]
qual_dict[tag_subtype] = qual_calc(qual_dict[tag_subtype])
if o.write_sscs is True:
if len(seq_dict['ab:1']) != 0 and len(seq_dict['ab:2']) != 0:
corrected_qual_score = map(lambda x: x if x < 41 else 41, qual_dict['ab:1'])
read1_sscs_fq_file.write('@%s#ab/1\n%s\n+%s\n%s\n' %
(tag, seq_dict['ab:1'][0], seq_dict['ab:1'][1], "".join(chr(x + 33)
for x in corrected_qual_score)))
corrected_qual_score = map(lambda x: x if x < 41 else 41, qual_dict['ab:2'])
read2_sscs_fq_file.write('@%s#ab/2\n%s\n+%s\n%s\n' %
(tag, seq_dict['ab:2'][0], seq_dict['ab:2'][1], "".join(chr(x + 33)
for x in corrected_qual_score)))
if len(seq_dict['ba:1']) != 0 and len(seq_dict['ba:2']) != 0:
corrected_qual_score = map(lambda x: x if x < 41 else 41, qual_dict['ba:1'])
read1_sscs_fq_file.write('@%s#ba/1\n%s\n+%s\n%s\n' %
(tag, seq_dict['ba:1'][0], seq_dict['ba:1'][1], "".join(chr(x + 33)
for x in corrected_qual_score)))
                    corrected_qual_score = map(lambda x: x if x < 41 else 41, qual_dict['ba:2'])
read2_sscs_fq_file.write('@%s#ba/2\n%s\n+%s\n%s\n' %
(tag, seq_dict['ba:2'][0], seq_dict['ba:2'][1], "".join(chr(x + 33)
for x in corrected_qual_score)))
if o.without_dcs is False:
if len(seq_dict['ab:1']) != 0 and len(seq_dict['ba:2']) != 0:
dcs_read_1 = [consensus_caller([seq_dict['ab:1'][0], seq_dict['ba:2'][0]], 1, tag, False),
seq_dict['ab:1'][1], seq_dict['ba:2'][1]]
dcs_read_1_qual = map(lambda x: x if x < 41 else 41, qual_calc([qual_dict['ab:1'], qual_dict['ba:2']]))
                    read1_dcs_len = len(dcs_read_1[0])
fam_size_x_axis.append(int(seq_dict['ab:1'][1]))
fam_size_y_axis.append(int(seq_dict['ba:2'][1]))
                    # mask the whole duplex consensus if the fraction of N calls exceeds o.Ncutoff
                    if dcs_read_1[0].count('N')/float(read1_dcs_len) > o.Ncutoff:
                        dcs_read_1[0] = 'N' * read1_dcs_len
                        dcs_read_1_qual = [0] * read1_dcs_len
if len(seq_dict['ba:1']) != 0 and len(seq_dict['ab:2']) != 0:
dcs_read_2 = [consensus_caller([seq_dict['ba:1'][0], seq_dict['ab:2'][0]], 1, tag, False),
seq_dict['ba:1'][1], seq_dict['ab:2'][1]]
dcs_read_2_qual = map(lambda x: x if x < 41 else 41, qual_calc([qual_dict['ba:1'], qual_dict['ab:2']]))
                    read2_dcs_len = len(dcs_read_2[0])
                    if dcs_read_2[0].count('N')/float(read2_dcs_len) > o.Ncutoff:
                        dcs_read_2[0] = 'N' * read2_dcs_len
                        dcs_read_2_qual = [0] * read2_dcs_len
if read1_dcs_len != 0 and read2_dcs_len != 0 and tag.count('N') == 0 and \
'A' * o.rep_filt not in tag and 'C' * o.rep_filt not in tag and \
'G' * o.rep_filt not in tag and 'T' * o.rep_filt not in tag:
read1_dcs_fq_file.write('@%s/1\n%s\n+%s:%s\n%s\n' % (tag, dcs_read_1[0], dcs_read_1[1], dcs_read_1[2],
"".join(chr(x + 33) for x in dcs_read_1_qual)))
read2_dcs_fq_file.write('@%s/2\n%s\n+%s:%s\n%s\n' % (tag, dcs_read_2[0], dcs_read_2[1], dcs_read_2[2],
"".join(chr(x + 33) for x in dcs_read_2_qual)))
if line != FinalValue:
# reset conditions for next tag family
first_line = line
seq_dict = {'ab:1': [], 'ab:2': [], 'ba:1': [], 'ba:2': []}
qual_dict = {'ab:1': [], 'ab:2': [], 'ba:1': [], 'ba:2': []}
read1_dcs_len = 0
read2_dcs_len = 0
dcs_read_1 = ''
dcs_read_2 = ''
seq_dict[line.query_name.split('#')[1]].append(line.query_sequence) # Now add initializing data for new tag
                qual_dict[line.query_name.split('#')[1]].append(list(line.query_qualities))
if o.write_sscs is True:
read1_sscs_fq_file.close()
read2_sscs_fq_file.close()
if o.without_dcs is False:
read1_dcs_fq_file.close()
read2_dcs_fq_file.close()
# Try to plot the tag family sizes
if o.tagstats is True:
tag_stats_file = open(o.prefix + ".tagstats.txt", 'w')
x_value = []
y_value = []
total_reads = sum([tag_count_dict[tag_family_size] * tag_family_size for tag_family_size
in tag_count_dict.keys()])
for tag_family_size in sorted(tag_count_dict.keys()):
fraction = (tag_count_dict[tag_family_size] * tag_family_size) / float(total_reads)
tag_stats_file.write('%d\t%d\t%f\n' % (tag_family_size, tag_count_dict[tag_family_size], fraction))
x_value.append(tag_family_size)
y_value.append(fraction)
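        # Worked example for the fraction column: if 100 families of size 3 were observed and the
        # run contained 1000 reads in total, the tagstats line would read "3\t100\t0.300000",
        # because those families account for 300 of the 1000 reads.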
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.figure(1)
plt.bar(x_value, y_value)
plt.xlabel('Family Size')
plt.ylabel('Proportion of Total Reads')
plt.savefig(o.prefix + 'family_size.png', bbox_inches='tight')
if o.without_dcs is False:
plt.figure(2)
plt.scatter(fam_size_x_axis, fam_size_y_axis, alpha=.1)
plt.xlabel('Family size for AB:1')
plt.ylabel('Family size for BA:2')
plt.xlim(0, max(fam_size_x_axis))
plt.ylim(0, max(fam_size_y_axis))
plt.savefig(o.prefix + 'fam_size_relation.png', bbox_inches='tight')
except ImportError:
            sys.stderr.write('matplotlib not present. Only tagstats file will be generated.\n')
tag_stats_file.close()
if __name__ == "__main__":
main()
|
|
from __future__ import absolute_import, print_function, division
import operator
from petl.compat import next, string_types, callable, text_type
from petl.comparison import Comparable
from petl.errors import ArgumentError
from petl.util.base import asindices, expr, Table, values, Record
def select(table, *args, **kwargs):
"""
Select rows meeting a condition. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['a', 4, 9.3],
... ['a', 2, 88.2],
... ['b', 1, 23.3],
... ['c', 8, 42.0],
... ['d', 7, 100.9],
... ['c', 2]]
>>> # the second positional argument can be a function accepting
... # a row
... table2 = etl.select(table1,
... lambda rec: rec.foo == 'a' and rec.baz > 88.1)
>>> table2
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'a' | 2 | 88.2 |
+-----+-----+------+
>>> # the second positional argument can also be an expression
... # string, which will be converted to a function using petl.expr()
... table3 = etl.select(table1, "{foo} == 'a' and {baz} > 88.1")
>>> table3
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'a' | 2 | 88.2 |
+-----+-----+------+
>>> # the condition can also be applied to a single field
... table4 = etl.select(table1, 'foo', lambda v: v == 'a')
>>> table4
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'a' | 4 | 9.3 |
+-----+-----+------+
| 'a' | 2 | 88.2 |
+-----+-----+------+
The complement of the selection can be returned (i.e., the query can be
inverted) by providing `complement=True` as a keyword argument.
"""
missing = kwargs.get('missing', None)
complement = kwargs.get('complement', False)
if len(args) == 0:
raise ArgumentError('missing positional argument')
elif len(args) == 1:
where = args[0]
if isinstance(where, string_types):
where = expr(where)
else:
assert callable(where), 'second argument must be string or callable'
return RowSelectView(table, where, missing=missing,
complement=complement)
else:
field = args[0]
where = args[1]
assert callable(where), 'third argument must be callable'
return FieldSelectView(table, field, where, complement=complement,
missing=missing)
Table.select = select
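# As a quick illustration of the ``complement`` keyword mentioned in the docstring above: with the
# same ``table1`` data, ``etl.select(table1, "{foo} == 'a'", complement=True)`` would return the
# 'b', 'c' and 'd' rows instead of the 'a' rows.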
class RowSelectView(Table):
def __init__(self, source, where, missing=None, complement=False):
self.source = source
self.where = where
self.missing = missing
self.complement = complement
def __iter__(self):
return iterrowselect(self.source, self.where, self.missing,
self.complement)
class FieldSelectView(Table):
def __init__(self, source, field, where, complement=False, missing=None):
self.source = source
self.field = field
self.where = where
self.complement = complement
self.missing = missing
def __iter__(self):
return iterfieldselect(self.source, self.field, self.where,
self.complement, self.missing)
def iterfieldselect(source, field, where, complement, missing):
it = iter(source)
hdr = next(it)
yield tuple(hdr)
indices = asindices(hdr, field)
getv = operator.itemgetter(*indices)
for row in it:
try:
v = getv(row)
except IndexError:
v = missing
if bool(where(v)) != complement: # XOR
yield tuple(row)
def iterrowselect(source, where, missing, complement):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(hdr)
it = (Record(row, flds, missing=missing) for row in it)
for row in it:
if bool(where(row)) != complement: # XOR
yield tuple(row) # need to convert back to tuple?
def rowlenselect(table, n, complement=False):
"""Select rows of length `n`."""
where = lambda row: len(row) == n
return select(table, where, complement=complement)
Table.rowlenselect = rowlenselect
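# For example, with the ``table1`` data shown in the ``select`` docstring above,
# ``etl.rowlenselect(table1, 2)`` would keep only the short row ``('c', 2)``.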
def selectop(table, field, value, op, complement=False):
"""Select rows where the function `op` applied to the given field and
the given value returns `True`."""
return select(table, field, lambda v: op(v, value),
complement=complement)
Table.selectop = selectop
def selecteq(table, field, value, complement=False):
"""Select rows where the given field equals the given value."""
return selectop(table, field, value, operator.eq, complement=complement)
Table.selecteq = selecteq
Table.eq = selecteq
def selectne(table, field, value, complement=False):
"""Select rows where the given field does not equal the given value."""
return selectop(table, field, value, operator.ne, complement=complement)
Table.selectne = selectne
Table.ne = selectne
def selectlt(table, field, value, complement=False):
"""Select rows where the given field is less than the given value."""
value = Comparable(value)
return selectop(table, field, value, operator.lt, complement=complement)
Table.selectlt = selectlt
Table.lt = selectlt
def selectle(table, field, value, complement=False):
"""Select rows where the given field is less than or equal to the given
value."""
value = Comparable(value)
return selectop(table, field, value, operator.le, complement=complement)
Table.selectle = selectle
Table.le = selectle
def selectgt(table, field, value, complement=False):
"""Select rows where the given field is greater than the given value."""
value = Comparable(value)
return selectop(table, field, value, operator.gt, complement=complement)
Table.selectgt = selectgt
Table.gt = selectgt
def selectge(table, field, value, complement=False):
"""Select rows where the given field is greater than or equal to the given
value."""
value = Comparable(value)
return selectop(table, field, value, operator.ge, complement=complement)
Table.selectge = selectge
Table.ge = selectge
def selectcontains(table, field, value, complement=False):
"""Select rows where the given field contains the given value."""
return selectop(table, field, value, operator.contains,
complement=complement)
Table.selectcontains = selectcontains
def selectin(table, field, value, complement=False):
"""Select rows where the given field is a member of the given value."""
return select(table, field, lambda v: v in value,
complement=complement)
Table.selectin = selectin
def selectnotin(table, field, value, complement=False):
"""Select rows where the given field is not a member of the given value."""
return select(table, field, lambda v: v not in value,
complement=complement)
Table.selectnotin = selectnotin
def selectis(table, field, value, complement=False):
"""Select rows where the given field `is` the given value."""
return selectop(table, field, value, operator.is_, complement=complement)
Table.selectis = selectis
def selectisnot(table, field, value, complement=False):
"""Select rows where the given field `is not` the given value."""
return selectop(table, field, value, operator.is_not, complement=complement)
Table.selectisnot = selectisnot
def selectisinstance(table, field, value, complement=False):
"""Select rows where the given field is an instance of the given type."""
return selectop(table, field, value, isinstance, complement=complement)
Table.selectisinstance = selectisinstance
def selectrangeopenleft(table, field, minv, maxv, complement=False):
"""Select rows where the given field is greater than or equal to `minv` and
less than `maxv`."""
minv = Comparable(minv)
maxv = Comparable(maxv)
return select(table, field, lambda v: minv <= v < maxv,
complement=complement)
Table.selectrangeopenleft = selectrangeopenleft
def selectrangeopenright(table, field, minv, maxv, complement=False):
"""Select rows where the given field is greater than `minv` and
less than or equal to `maxv`."""
minv = Comparable(minv)
maxv = Comparable(maxv)
return select(table, field, lambda v: minv < v <= maxv,
complement=complement)
Table.selectrangeopenright = selectrangeopenright
def selectrangeopen(table, field, minv, maxv, complement=False):
"""Select rows where the given field is greater than or equal to `minv` and
less than or equal to `maxv`."""
minv = Comparable(minv)
maxv = Comparable(maxv)
return select(table, field, lambda v: minv <= v <= maxv,
complement=complement)
Table.selectrangeopen = selectrangeopen
def selectrangeclosed(table, field, minv, maxv, complement=False):
"""Select rows where the given field is greater than `minv` and
less than `maxv`."""
minv = Comparable(minv)
maxv = Comparable(maxv)
return select(table, field, lambda v: minv < Comparable(v) < maxv,
complement=complement)
Table.selectrangeclosed = selectrangeclosed
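# Quick reference for the four range selectors defined above (v denotes the field value):
#   selectrangeopenleft:  minv <= v <  maxv
#   selectrangeopenright: minv <  v <= maxv
#   selectrangeopen:      minv <= v <= maxv
#   selectrangeclosed:    minv <  v <  maxv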
def selecttrue(table, field, complement=False):
"""Select rows where the given field evaluates `True`."""
return select(table, field, lambda v: bool(v), complement=complement)
Table.selecttrue = selecttrue
Table.true = selecttrue
def selectfalse(table, field, complement=False):
"""Select rows where the given field evaluates `False`."""
return select(table, field, lambda v: not bool(v),
complement=complement)
Table.selectfalse = selectfalse
Table.false = selectfalse
def selectnone(table, field, complement=False):
"""Select rows where the given field is `None`."""
return select(table, field, lambda v: v is None, complement=complement)
Table.selectnone = selectnone
Table.none = selectnone
def selectnotnone(table, field, complement=False):
"""Select rows where the given field is not `None`."""
return select(table, field, lambda v: v is not None,
complement=complement)
Table.selectnotnone = selectnotnone
Table.notnone = selectnotnone
def selectusingcontext(table, query):
"""
Select rows based on data in the current row and/or previous and
next row. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['A', 1],
... ['B', 4],
... ['C', 5],
... ['D', 9]]
>>> def query(prv, cur, nxt):
... return ((prv is not None and (cur.bar - prv.bar) < 2)
... or (nxt is not None and (nxt.bar - cur.bar) < 2))
...
>>> table2 = etl.selectusingcontext(table1, query)
>>> table2
+-----+-----+
| foo | bar |
+=====+=====+
| 'B' | 4 |
+-----+-----+
| 'C' | 5 |
+-----+-----+
The `query` function should accept three rows and return a boolean value.
"""
return SelectUsingContextView(table, query)
Table.selectusingcontext = selectusingcontext
class SelectUsingContextView(Table):
def __init__(self, table, query):
self.table = table
self.query = query
def __iter__(self):
return iterselectusingcontext(self.table, self.query)
def iterselectusingcontext(table, query):
it = iter(table)
hdr = tuple(next(it))
flds = list(map(text_type, hdr))
yield hdr
it = (Record(row, flds) for row in it)
prv = None
cur = next(it)
for nxt in it:
if query(prv, cur, nxt):
yield cur
prv = cur
cur = nxt
# handle last row
if query(prv, cur, None):
yield cur
def facet(table, key):
"""
Return a dictionary mapping field values to tables. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['a', 4, 9.3],
... ['a', 2, 88.2],
... ['b', 1, 23.3],
... ['c', 8, 42.0],
... ['d', 7, 100.9],
... ['c', 2]]
>>> foo = etl.facet(table1, 'foo')
>>> sorted(foo.keys())
['a', 'b', 'c', 'd']
>>> foo['a']
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'a' | 4 | 9.3 |
+-----+-----+------+
| 'a' | 2 | 88.2 |
+-----+-----+------+
>>> foo['c']
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'c' | 8 | 42.0 |
+-----+-----+------+
| 'c' | 2 | |
+-----+-----+------+
See also :func:`petl.util.materialise.facetcolumns`.
"""
fct = dict()
for v in set(values(table, key)):
fct[v] = selecteq(table, key, v)
return fct
Table.facet = facet
def biselect(table, *args, **kwargs):
"""Return two tables, the first containing selected rows, the second
containing remaining rows. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['a', 4, 9.3],
... ['a', 2, 88.2],
... ['b', 1, 23.3],
... ['c', 8, 42.0],
... ['d', 7, 100.9],
... ['c', 2]]
>>> table2, table3 = etl.biselect(table1, lambda rec: rec.foo == 'a')
>>> table2
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'a' | 4 | 9.3 |
+-----+-----+------+
| 'a' | 2 | 88.2 |
+-----+-----+------+
>>> table3
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'b' | 1 | 23.3 |
+-----+-----+-------+
| 'c' | 8 | 42.0 |
+-----+-----+-------+
| 'd' | 7 | 100.9 |
+-----+-----+-------+
| 'c' | 2 | |
+-----+-----+-------+
.. versionadded:: 1.1.0
"""
# override complement kwarg
kwargs['complement'] = False
t1 = select(table, *args, **kwargs)
kwargs['complement'] = True
t2 = select(table, *args, **kwargs)
return t1, t2
Table.biselect = biselect
|
|
#!/usr/bin/python
# dictionaries: process type wordlists
# Author: Daniel McDonald
# make regular expressions and lists of inflected words from word lists
try:
from corpkit.lazyprop import lazyprop
except:
import corpkit
from lazyprop import lazyprop
def _verbs():
import corpkit
from corpkit.dictionaries.verblist import allverbs
verblist = [i for i in allverbs if '_' not in i]
return Wordlist(verblist)
def load_verb_data():
"""load the verb lexicon"""
def resource_path(relative):
"""seemingly not working"""
import os
return os.path.join(os.environ.get("_MEIPASS2", os.path.abspath(".")), relative)
import os
import corpkit
import pickle
from corpkit.process import get_gui_resource_dir
corpath = os.path.dirname(corpkit.__file__)
baspat = os.path.dirname(corpath)
dicpath = os.path.join(baspat, 'dictionaries')
lastpath = os.path.join(baspat, 'corpkit', 'dictionaries')
paths_to_check = [resource_path('eng_verb_lexicon.p'),
os.path.join(dicpath, 'eng_verb_lexicon.p'),
os.path.join(get_gui_resource_dir(), 'eng_verb_lexicon.p'),
os.path.join(lastpath, 'eng_verb_lexicon.p')]
for p in paths_to_check:
try:
return pickle.load(open(p, 'rb'))
except:
pass
return
def find_lexeme(verb):
""" For a regular verb (base form), returns the forms using a rule-based approach.
taken from pattern.en, because it wouldn't go into py2app properly
"""
vowels = ['a', 'e', 'i', 'o', 'u']
v = verb.lower()
if len(v) > 1 and v.endswith("e") and v[-2] not in vowels:
# Verbs ending in a consonant followed by "e": dance, save, devote, evolve.
return [v, v, v, v+"s", v, v[:-1]+"ing"] + [v+"d"]*6
if len(v) > 1 and v.endswith("y") and v[-2] not in vowels:
# Verbs ending in a consonant followed by "y": comply, copy, magnify.
return [v, v, v, v[:-1]+"ies", v, v+"ing"] + [v[:-1]+"ied"]*6
if v.endswith(("ss", "sh", "ch", "x")):
# Verbs ending in sibilants: kiss, bless, box, polish, preach.
return [v, v, v, v+"es", v, v+"ing"] + [v+"ed"]*6
if v.endswith("ic"):
# Verbs ending in -ic: panic, mimic.
return [v, v, v, v+"es", v, v+"king"] + [v+"ked"]*6
if len(v) > 1 and v[-1] not in vowels and v[-2] not in vowels:
# Verbs ending in a consonant cluster: delight, clamp.
return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
if (len(v) > 1 and v.endswith(("y", "w")) and v[-2] in vowels) \
or (len(v) > 2 and v[-1] not in vowels and v[-2] in vowels and v[-3] in vowels) \
or (len(v) > 3 and v[-1] not in vowels and v[-3] in vowels and v[-4] in vowels):
# Verbs ending in a long vowel or diphthong followed by a consonant: paint, devour, play.
return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
if len(v) > 2 and v[-1] not in vowels and v[-2] in vowels and v[-3] not in vowels:
# Verbs ending in a short vowel followed by a consonant: chat, chop, or compel.
return [v, v, v, v+"s", v, v+v[-1]+"ing"] + [v+v[-1]+"ed"]*6
return [v, v, v, v+"s", v, v+"ing"] + [v+"ed"]*6
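# Worked example of the rules above: find_lexeme("copy") matches the consonant-plus-"y" rule and
# returns ['copy', 'copy', 'copy', 'copies', 'copy', 'copying'] followed by six repetitions of
# 'copied' for the past-tense slots.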
def get_both_spellings(verb_list):
"""add alternative spellings to verb_list"""
from corpkit.dictionaries.word_transforms import usa_convert
uk_convert = {v: k for k, v in usa_convert.items()}
to_add_to_verb_list = []
for w in verb_list:
if w in usa_convert.keys():
to_add_to_verb_list.append(usa_convert[w])
for w in verb_list:
if w in uk_convert.keys():
to_add_to_verb_list.append(uk_convert[w])
verb_list = sorted(list(set(verb_list + to_add_to_verb_list)))
return verb_list
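# For example, if the usa_convert mapping contains the pair "emphasise"/"emphasize", a word list
# holding either spelling comes back with both spellings included; the direction of the mapping
# does not matter here because both the dict and its inverse are applied.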
def add_verb_inflections(verb_list):
"""add verb inflections to verb_list"""
from corpkit.dictionaries.word_transforms import usa_convert
uk_convert = {v: k for k, v in usa_convert.items()}
# get lexemes
lexemes = load_verb_data()
verbforms = []
# for each verb, get or guess the inflections
# make list of ALL VERBS IN ALL INFLECTIONS
all_lists = [lst for lst in lexemes.values()]
allverbs = []
for lst in all_lists:
for v in lst:
if v:
allverbs.append(v)
allverbs = list(set(allverbs))
# use dict first
for w in verb_list:
verbforms.append(w)
try:
wforms = lexemes[w]
except KeyError:
# if not in dict, if it's an inflection, forget it
if w in allverbs:
continue
if "'" in w:
continue
# if it's a coinage, guess
else:
wforms = find_lexeme(w)
# get list of unique forms
forms = list(set([form.replace("n't", "").replace(" not", "") for form in wforms if form]))
for f in forms:
verbforms.append(f)
# deal with contractions
if w == 'be':
be_conts = [r"'m", r"'re", r"'s"]
for cont in be_conts:
verbforms.append(cont)
if w == "have":
have_conts = [r"'d", r"'s", r"'ve"]
for cont in have_conts:
verbforms.append(cont)
# go over again, and add both possible spellings
to_add = []
for w in verbforms:
if w in usa_convert.keys():
to_add.append(usa_convert[w])
for w in verbforms:
if w in uk_convert.keys():
to_add.append(uk_convert[w])
verbforms = sorted(list(set(verbforms + to_add)))
# ensure unicode
t = []
for w in verbforms:
try:
t.append(unicode(w, errors='ignore'))
except:
t.append(w)
return t
# subclassing 'list' keeps backwards compatibility; change to a plain object with no super() call soon
class Wordlist(list):
"""A list of words, containing a `words` attribute and a `lemmata` attribute"""
def __init__(self, data, **kwargs):
self.data = data
self.kwargs = kwargs
super(Wordlist, self).__init__(self.data)
# make slice also return wordlist
@lazyprop
def words(self):
"""get inflections"""
if not self.kwargs.get('single'):
return Wordlist(add_verb_inflections(get_both_spellings(self.data)), single=True)
else:
return
@lazyprop
def lemmata(self):
"""show base forms of verbs"""
if not self.kwargs.get('single'):
return Wordlist(get_both_spellings(self.data), single=True)
else:
return
def as_regex(self, boundaries='w', case_sensitive=False, inverse=False, compile=False):
"""
Turn list into regular expression matching any item in list
"""
from corpkit.other import as_regex
return as_regex(get_both_spellings(self.data),
boundaries=boundaries,
case_sensitive=case_sensitive,
inverse=inverse,
compile=compile)
class Processes(object):
"""Process types: relational, verbal, mental, material"""
def __init__(self):
relational = ["become",
"feel",
"be",
"have",
"sound",
"look",
"seem",
"appear",
"smell"
]
verbal = ["forbid",
"forswear",
"prophesy",
"say",
"swear",
"tell",
"write",
"certify",
"deny",
"imply",
"move",
"notify",
"reply",
"specify",
"accede",
"add",
"admit",
"advise",
"advocate",
"allege",
"announce",
"answer",
"apprise",
"argue",
"ask",
"assert",
"assure",
"attest",
"aver",
"avow",
"bark",
"beg",
"bellow",
"blubber",
"boast",
"brag",
"cable",
"call",
"claim",
"comment",
"complain",
"confess",
"confide",
"confirm",
"contend",
"convey",
"counsel",
"declare",
"demand",
"disclaim",
"disclose",
"divulge",
"emphasise",
"emphasize",
"encourage",
"exclaim",
"explain",
"forecast",
"gesture",
"grizzle",
"guarantee",
"hint",
"holler",
"indicate",
"inform",
"insist",
"intimate",
"mention",
"moan",
"mumble",
"murmur",
"mutter",
"note",
"object",
"offer",
"phone",
"pledge",
"preach",
"predicate",
"preordain",
"prescribe",
"proclaim",
"profess",
"prohibit",
"promise",
"propose",
"protest",
"reaffirm",
"reassure",
"rejoin",
"remark",
"remind",
"repeat",
"report",
"request",
"require",
"respond",
"retort",
"reveal",
"riposte",
"roar",
"scream",
"shout",
"signal",
"state",
"stipulate",
"telegraph",
"telephone",
"testify",
"threaten",
"vow",
"warn",
"wire",
"reemphasise",
"reemphasize",
"rumor",
"rumour",
"yell",
# added manually:
'tell',
'say',
'call',
'vent',
'talk',
'ask',
'prescribe',
'diagnose',
'speak',
'suggest',
'mention',
'recommend',
'add',
'discuss',
'agree',
'contact',
'refer',
'explain',
'write',
'consult',
'advise',
'insist',
'perscribe',
'warn',
'offer',
'inform',
'question',
'describe',
'convince',
'order',
'report',
'lie',
'address',
'ring',
'state',
"pray",
'phone',
'share',
'beg',
'blame',
'instruct',
'chat',
'assure',
'dx',
'recomend',
'prescibe',
'promise',
'communicate',
'notify',
'claim',
'convince',
'page',
'wish',
'post',
'complain',
'swear']
behavioural = ['laugh', 'cry', 'listen', 'look', 'hear', 'wake', 'awaken', ]
mental = ["choose",
"feel",
"find",
"forget",
"hear",
"know",
"mean",
"overhear",
"prove",
"read",
"see",
"think",
"understand",
"abide",
"abominate",
"accept",
"acknowledge",
"acquiesce",
"adjudge",
"adore",
"affirm",
"agree",
"allow",
"allure",
"anticipate",
"appreciate",
"ascertain",
"aspire",
"assent",
"assume",
"begrudge",
"believe",
"calculate",
"care",
"conceal",
"concede",
"conceive",
"concern",
"conclude",
"concur",
"condone",
"conjecture",
"consent",
"consider",
"contemplate",
"convince",
"crave",
"decide",
"deduce",
"deem",
"delight",
"desire",
"determine",
"detest",
"discern",
"discover",
"dislike",
"doubt",
"dread",
"enjoy",
"envisage",
"estimate",
"excuse",
"expect",
"exult",
"fear",
"foreknow",
"foresee",
"gather",
"grant",
"grasp",
"hate",
"hope",
"hurt",
"hypothesise",
"hypothesize",
"imagine",
"infer",
"inspire",
"intend",
"intuit",
"judge",
"ken",
"lament",
"like",
"loathe",
"love",
"marvel",
"mind",
"miss",
"need",
"neglect",
"notice",
"observe",
"omit",
"opine",
"perceive",
"plan",
"please",
"posit",
"postulate",
"pray",
"preclude",
"prefer",
"presume",
"presuppose",
"pretend",
"provoke",
"realize",
"realise",
"reason",
"recall",
"reckon",
"recognise",
"recognize",
"recollect",
"reflect",
"regret",
"rejoice",
"relish",
"remember",
"resent",
"resolve",
"rue",
"scent",
"scorn",
"sense",
"settle",
"speculate",
"suffer",
"suppose",
"surmise",
"surprise",
"suspect",
"trust",
"visualise",
"visualize",
"want",
"wish",
"wonder",
"yearn",
"rediscover",
"dream",
"justify",
"figure",
"smell",
"worry",
'know',
'think',
'feel',
'want',
'hope',
'find',
'guess',
'love',
'wish',
'like',
'understand',
'wonder',
'believe',
'hate',
'remember',
'agree',
'notice',
'learn',
'realize',
'miss',
'appreciate',
'decide',
'suffer',
'deal',
'forget',
'care',
'imagine',
'relate',
'worry',
'figure',
'handle',
'struggle',
'pray',
'consider',
'enjoy',
'expect',
'plan',
'suppose',
'trust',
'bother',
'blame',
'accept',
'admit',
'assume',
'remind',
'seek',
'bet',
'refuse',
'cope',
'choose',
'freak',
'fear',
'question',
'recall',
'doubt',
'suspect',
'focus',
'calm'
]
can_be_material = ['bother', 'find']
self.relational = Wordlist(relational)
self.verbal = Wordlist(verbal)
self.mental = Wordlist(mental)
self.behavioural = Wordlist(behavioural)
from corpkit.dictionaries.verblist import allverbs
nonmat = set(self.relational + self.verbal + self.behavioural + self.mental)
vbs = [i for i in allverbs if i not in nonmat and '_' not in i]
self.material = Wordlist(vbs + can_be_material)
processes = Processes()
verbs = _verbs()
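# Hedged usage sketch (assumes the verb lexicon and corpkit.other.as_regex are available):
#   processes.mental.lemmata      # base forms plus US/UK spelling variants
#   processes.mental.words        # the same list expanded with verb inflections
#   processes.mental.as_regex(boundaries='w', case_sensitive=False)
#   'think' in processes.mental   # True -- 'think' is in the mental word list above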
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations(object):
"""InboundNatRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.InboundNatRuleListResult"]
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.InboundNatRule"
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
inbound_nat_rule_parameters, # type: "_models.InboundNatRule"
**kwargs # type: Any
):
# type: (...) -> "_models.InboundNatRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
inbound_nat_rule_name, # type: str
inbound_nat_rule_parameters, # type: "_models.InboundNatRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.InboundNatRule"]
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2019_06_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
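# Hedged usage sketch (not part of the generated code): these operations are normally reached
# through an azure.mgmt.network NetworkManagementClient rather than by instantiating this class
# directly, e.g.
#
#     client = NetworkManagementClient(credential, subscription_id)
#     poller = client.inbound_nat_rules.begin_delete(
#         "my-resource-group", "my-load-balancer", "my-nat-rule")  # placeholder resource names
#     poller.result()  # block until the long-running delete finishes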
|
|
import sys, random, os
from dmd import Layer, Frame
import sdl2
from sdl2_displaymanager import sdl2_DisplayManager
import time
class Particle(object):
def __init__(self, x, y, emitter):
self.x = x
self.y = y
self.parent = emitter
self.life = random.randint(int(self.parent.max_life*0.80),self.parent.max_life)
self.dx = random.randint(-5,5)
self.dy = random.randint(-5,5)
# self.r = int(random.randint(0,255))
# self.g = int(random.randint(0,255))
# self.b = int(random.randint(0,255))
self._r = 255
self._g = 255
self._b = 255
self._a = 255
self.tx_num = 0
self.color_changed = True
self.alpha_changed = True
@property
def r(self):
return self._r
@r.setter
def r(self, value):
self._r = value
self.color_changed = True
@property
def g(self):
return self._g
@g.setter
def g(self, value):
self._g = value
self.color_changed = True
@property
def b(self):
return self._b
@b.setter
def b(self, value):
self._b = value
self.color_changed = True
@property
def a(self):
return self._a
@a.setter
def a(self, value):
self._a = value
self.alpha_changed = True
def update(self):
self.life = self.life - 1
self.update_location()
self.update_appearance()
def update_location(self):
self.x = int(self.x + self.dx)
self.y = int(self.y + self.dy)
def update_appearance(self):
if(self.life < .8 * self.parent.max_life):
self.b = 0
self.g = int(self.life/float(self.parent.max_life) * 220) + 35
self.a = ((self.life/float(self.parent.max_life)) * 255)
#print("Life=%d, Alpha=%d" % (self.life, self.a))
class SnowParticle(Particle):
def __init__(self, x, y, emitter):
self.x = x + random.randint(-450,450)
super(SnowParticle, self).__init__(self.x,y,emitter)
self.r = 225
self.g = 225
self.b = 255
def update_location(self):
self.dx = random.randint(-20,20)
self.dy = random.randint(0,20)
super(SnowParticle, self).update_location()
def update_appearance(self):
pass
# if(self.life < .8 * self.parent.max_life):
# self.b = 0
# self.g = int(self.life/float(self.parent.max_life) * 255)
# self.a = ((self.life/float(self.parent.max_life)) * 255)
#print("Life=%d, Alpha=%d" % (self.life, self.a))
class FireParticle(Particle):
def __init__(self, x, y, emitter):
super(FireParticle, self).__init__(x,y,emitter)
self.dx = random.randint(-5,5)
self.dy = random.randint(-4,4)
def update_location(self):
self.dx = random.randint(-3,3)
self.dy = random.randint(-5,1)
super(FireParticle, self).update_location()
class FireworkParticle(Particle):
def __init__(self, x, y, emitter):
super(FireworkParticle, self).__init__(x,y,emitter)
self.dy = random.randint(-5,3)
self.dx = random.randint(-10,10)
self.a = 192
def update_location(self):
if(self.life < .75 * self.parent.max_life):
            self.dy = 3  # random.randint(3,10)
self.dx = 0
super(FireworkParticle, self).update_location()
def update_appearance(self):
if(self.life < .8 * self.parent.max_life):
self.g = 0
self.b = int(self.life/float(self.parent.max_life) * 220) + 35
self.r = self.b
class ParticleEmitter(object):
def __init__(self, x, y, max_life=60, max_particles=200, particles_per_update=5, total_creations=None, particle_class=Particle, random_next=False, dx=0, dy=0):
self.x = x
self.y = y
self.orig_x = x
self.orig_y = y
self.dx = dx
self.dy = dy
self.particle_class = particle_class
self.random_next = random_next
self.particles = list()
self.particles_per_update = particles_per_update
self.max_particles = max_particles
self.max_life = max_life
self.total_creations = total_creations
self.creations_remaining = total_creations
self.stopped = False
for i in range(0,particles_per_update):
p = self.particle_class(x,y, emitter=self)
p.update()
self.particles.append(p)
if(self.total_creations is not None):
self.creations_remaining = self.creations_remaining - particles_per_update
else:
self.creations_remaining = self.max_particles
cwd = os.path.dirname(__file__)
sprImg8 = sdl2_DisplayManager.inst().load_surface(os.path.join(cwd,"exp8.png"))
sprImg16 = sdl2_DisplayManager.inst().load_surface(os.path.join(cwd,"exp16.png"))
self.txImg8 = sdl2_DisplayManager.inst().texture_from_surface(sprImg8)
self.txImg16 = sdl2_DisplayManager.inst().texture_from_surface(sprImg16)
(self.p8_w,self.p8_h) = self.txImg8.size
(self.p16_w,self.p16_h) = self.txImg16.size
sdl2.SDL_SetTextureBlendMode(self.txImg8.texture, sdl2.SDL_BLENDMODE_BLEND)
sdl2.SDL_SetTextureBlendMode(self.txImg16.texture, sdl2.SDL_BLENDMODE_BLEND)
del sprImg8
del sprImg16
def reset(self, new_x=None, new_y=None):
self.stopped = False
if(new_x is not None):
self.x = new_x
else:
self.x = self.orig_x
if(new_y is not None):
self.y = new_y
else:
self.y = self.orig_y
        for x in xrange(len(self.particles)-1,-1,-1):
p = self.particles[x]
self.particles.remove(p)
del p
self.creations_remaining = self.total_creations
def update(self):
if(self.total_creations is None) and (not self.stopped):
self.creations_remaining = self.max_particles
for p in self.particles:
p.update()
for x in xrange(len(self.particles)-1,-1,-1):
p = self.particles[x]
if(p.life <= 0):
self.particles.remove(p)
del p
if(self.stopped):
return
if(self.creations_remaining <= 0):
if(self.random_next):
if(len(self.particles)==0):
self.reset(new_x = random.randint(0,200), new_y = random.randint(0,200))
return
for r in range(0,min(self.particles_per_update, self.max_particles-len(self.particles), self.creations_remaining)):
p = self.particle_class(self.x, self.y, emitter=self)
p.update()
self.particles.append(p)
self.creations_remaining = self.creations_remaining - 1
self.x = self.x + self.dx
self.y = self.y + self.dy
def stop(self, immediate_stop = False):
self.creations_remaining = 0
self.stopped = True
if(immediate_stop):
for x in xrange(len(self.particles)-1,-1,-1):
p = self.particles[x]
self.particles.remove(p)
del p
def draw(self, destination_texture = None):
# for p in self.particles:
for x in xrange(0,len(self.particles)): #xrange(len(self.particles)-1,0,-1):
p = self.particles[x]
tx = None
if(p.life > self.max_life * 0.55):
tx = self.txImg16
(self.p_w, self.p_h) = (self.p16_w,self.p16_h)
else:
tx = self.txImg8
(self.p_w, self.p_h) = (self.p8_w,self.p8_h)
if(p.color_changed):
sdl2.SDL_SetTextureColorMod(tx.texture, p.r,p.g,p.b)
p.color_changed = False
# sdl2.SDL_SetTextureAlphaMod(tx.texture, 192) #int(p.a))
if(p.alpha_changed):
sdl2.SDL_SetTextureAlphaMod(tx.texture, int(p.a))
p.alpha_changed = False
if(destination_texture is None):
sdl2_DisplayManager.inst().screen_blit(tx, x=p.x, y=p.y, expand_to_fill=False)
else:
sdl2_DisplayManager.inst().blit(source_tx = tx, dest_tx=destination_texture, dest=(p.x,p.y,self.p_w, self.p_h))
class ParticleSystem(object):
def __init__(self, emitters=None, destination_texture=None):
self.emitters = emitters
self.dest_tx = destination_texture
def update(self):
for e in self.emitters:
e.update()
def draw(self):
for e in self.emitters:
e.draw(self.dest_tx)
def reset(self):
for e in self.emitters:
e.reset()
class ParticleLayer(Layer):
"""
A ParticleSystem as a Layer...
"""
def __init__(self, width, height, emitters, duration=None, num_hold_frames=1):
super(ParticleLayer, self).__init__()
self.buffer = Frame(width, height)
self.start_time = None
self.duration = duration
self.width = width
self.height = height
self.num_hold_frames = num_hold_frames
self.stalls = self.num_hold_frames
# FIRE
# def emitter_dx(x): return random.randint(-10,10)
# def emitter_dy(x): return random.randint(-3,3)
# def dither_dx(x): return random.randint(-10,0)
# def dither_dy(x): return (x - 0.35)
# self.ps = ParticleEmitter(width/2, height/2, max_life=20, max_particles=200, particles_per_update=100, total_creations=400, particle_class=Particle)
self.ps = ParticleSystem(emitters=emitters, destination_texture=self.buffer.pySurface)
def next_frame(self):
# Assign the new script item:
if(self.start_time is None):
self.start_time = time.time()
        elif(self.duration is not None and (self.start_time + self.duration < time.time())):
            # the configured duration has elapsed, so stop producing frames
            return None
self.stalls = self.stalls - 1
if(self.stalls <= 0):
self.stalls = self.num_hold_frames
else:
return self.buffer
self.buffer.clear()
self.ps.update()
self.ps.draw()
return self.buffer
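    # Note on num_hold_frames: with num_hold_frames=3 the particle simulation advances only on
    # every third call to next_frame(); the two calls in between return the previously rendered
    # buffer unchanged, which throttles the effect relative to the display frame rate.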
def reset(self):
"""Resets the animation back to the first frame."""
self.start_time = None
self.stalls = self.num_hold_frames
self.buffer.clear()
self.ps.reset()
def main():
sdl2_DisplayManager.Init(450,225,2)
# # FIRE
# def emitter_dx(x): return random.randint(-10,10)
# def emitter_dy(x): return random.randint(-3,3)
# def dither_dx(x): return random.randint(-10,0)
# def dither_dy(x): return (x - 0.35)
#ps = ParticleEmitter(450,225, max_life=20, max_particles=400, particles_per_update=20, particle_class=Particle)
ps1 = ParticleEmitter(450, 225, max_life=20, max_particles=200, particles_per_update=40, total_creations=None, particle_class=FireParticle)
# ps2 = ParticleEmitter(250, 115, max_life=30, max_particles=500, particles_per_update=40, total_creations=2000, particle_class=SnowParticle)
# ps2 = ParticleEmitter(450, 0, max_life=35, max_particles=500, particles_per_update=16, total_creations=None, particle_class=SnowParticle)
ps2 = ParticleEmitter(20, 20, max_life=20, max_particles=300, particles_per_update=100, total_creations=300, particle_class=FireworkParticle, random_next=True)
ps3 = ParticleEmitter(300, 220, max_life=20, max_particles=300, particles_per_update=100, total_creations=300, particle_class=FireworkParticle, random_next=True)
# ps = ParticleSystem(emitters=[ps1,ps2, ps3])
ps = ParticleSystem(emitters=[ps1, ps2, ps3])
# def emitter_dx(x): return random.randint(-10,10)
# def emitter_dy(y): return random.randint(-6,6)
# def dither_dx(x): return x #random.randint(-6,6)
# def dither_dy(y): return y #(x - 0.35)
# ps = ParticleEmitter(450,200, max_life=20, max_particles=200, particles_per_update=5, emitter_dx_fn=emitter_dx, emitter_dy_fn=emitter_dy, dither_dx_fn=dither_dx, dither_dy_fn=dither_dy)
running = True
while running:
events = sdl2.ext.get_events()
for event in events:
if event.type == sdl2.SDL_QUIT:
running = False
break
if event.type == sdl2.SDL_KEYDOWN:
if event.key.keysym.sym == sdl2.SDLK_r:
ps.reset()
if event.key.keysym.sym == sdl2.SDLK_ESCAPE:
running = False
break
ps.update()
ps.draw()
sdl2_DisplayManager.inst().flip()
sdl2.SDL_Delay(33)
sdl2_DisplayManager.inst().clear()
return 0
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script will check out llvm and clang, and then package the results up
to a tgz file."""
import argparse
import fnmatch
import itertools
import os
import shutil
import subprocess
import sys
import tarfile
from lib.config import IS_ARM64_HOST, IS_ARMV7_HOST
# Path constants.
THIS_DIR = os.path.dirname(__file__)
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', 'src'))
THIRD_PARTY_DIR = os.path.join(CHROMIUM_DIR, 'third_party')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
'llvm-bootstrap-install')
LLVM_BUILD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-build')
LLVM_RELEASE_DIR = os.path.join(LLVM_BUILD_DIR, 'Release+Asserts')
LLVM_LTO_LLD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-lto-lld')
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
BOTO_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'boto')
def Tee(output, logfile):
logfile.write(output)
print output,
def TeeCmd(cmd, logfile, fail_hard=True):
"""Runs cmd and writes the output to both stdout and logfile."""
# Reading from PIPE can deadlock if one buffer is full but we wait on a
# different one. To work around this, pipe the subprocess's stderr to
# its stdout buffer and don't give it a stdin.
# shell=True is required in cmd.exe since depot_tools has an svn.bat, and
# bat files only work with shell=True set.
proc = subprocess.Popen(cmd, bufsize=1, shell=sys.platform == 'win32',
stdin=open(os.devnull), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline,''):
Tee(line, logfile)
if proc.poll() is not None:
break
exit_code = proc.wait()
if exit_code != 0 and fail_hard:
print 'Failed:', cmd
sys.exit(1)
def PrintTarProgress(tarinfo):
print 'Adding', tarinfo.name
return tarinfo
def GetExpectedStamp(args):
return "%s-%s" % (args.clang_revision, args.clang_sub_revision)
def GetGsutilPath():
if not 'find_depot_tools' in sys.modules:
sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'build'))
global find_depot_tools
import find_depot_tools
depot_path = find_depot_tools.add_depot_tools_to_path()
if depot_path is None:
print ('depot_tools are not found in PATH. '
'Follow the instructions in this document '
'http://dev.chromium.org/developers/how-tos/install-depot-tools'
' to install depot_tools and then try again.')
sys.exit(1)
gsutil_path = os.path.join(depot_path, 'gsutil.py')
return gsutil_path
def RunGsutil(args):
return subprocess.call([sys.executable, GetGsutilPath()] + args)
def GsutilArchiveExists(archive_name, platform):
gsutil_args = ['-q', 'stat',
'gs://chromium-browser-clang-staging/%s/%s.tgz' %
(platform, archive_name)]
return RunGsutil(gsutil_args) == 0
def MaybeUpload(args, archive_name, platform):
# We don't want to rewrite the file, if it already exists on the server,
# so -n option to gsutil is used. It will warn, if the upload was aborted.
gsutil_args = ['cp', '-n', '-a', 'public-read',
'%s.tgz' % archive_name,
'gs://chromium-browser-clang-staging/%s/%s.tgz' %
(platform, archive_name)]
if args.upload:
print 'Uploading %s to S3...' % archive_name
bucket = os.environ.get('ELECTRON_S3_BUCKET')
access_key = os.environ.get('ELECTRON_S3_ACCESS_KEY')
secret_key = os.environ.get('ELECTRON_S3_SECRET_KEY')
exit_code = s3put(bucket, access_key, secret_key,
'clang/{0}/'.format(platform),
['%s.tgz' % archive_name])
if exit_code != 0:
print "s3 upload failed, exit_code: %s" % exit_code
sys.exit(exit_code)
else:
print 'To upload, run:'
print ('gsutil %s' % ' '.join(gsutil_args))
def boto_path_dirs():
if IS_ARM64_HOST:
return [
os.path.join(BOTO_DIR, 'build', 'lib.linux-aarch64-2.7')
]
elif IS_ARMV7_HOST:
return [
os.path.join(BOTO_DIR, 'build', 'lib.linux-armv7l-2.7')
]
else:
return [
os.path.join(BOTO_DIR, 'build', 'lib'),
os.path.join(BOTO_DIR, 'build', 'lib.linux-x86_64-2.7')
]
def s3put(bucket, access_key, secret_key, path, file):
args = [
'--bucket', bucket,
'--multipart',
'--path', path,
'--grant', 'public-read'
] + file
env = os.environ.copy()
env['AWS_ACCESS_KEY_ID'] = access_key
env['AWS_SECRET_ACCESS_KEY'] = secret_key
env['PYTHONPATH'] = os.path.pathsep.join(
[env.get('PYTHONPATH', '')] + boto_path_dirs())
boto = os.path.join(BOTO_DIR, 'bin', 's3put')
try:
output = subprocess.check_call([sys.executable, boto] + list(args), env=env)
return output
except subprocess.CalledProcessError as e:
print 'Error calling boto: ' + str(e)
return e.returncode
def UploadPDBToSymbolServer():
assert sys.platform == 'win32'
# Upload PDB and binary to the symbol server on Windows. Put them into the
# chromium-browser-symsrv bucket, since chrome devs have that in their
# _NT_SYMBOL_PATH already. Executable and PDB must be at paths following a
# certain pattern for the Microsoft debuggers to be able to load them.
# Executable:
# chromium-browser-symsrv/clang-cl.exe/ABCDEFAB01234/clang-cl.ex_
# ABCDEFAB is the executable's timestamp in %08X format, 01234 is the
# executable's image size in %x format. tools/symsrc/img_fingerprint.py
# can compute this ABCDEFAB01234 string for us, so use that.
# The .ex_ instead of .exe at the end means that the file is compressed.
# PDB:
# gs://chromium-browser-symsrv/clang-cl.exe.pdb/AABBCCDD/clang-cl.dll.pd_
# AABBCCDD here is computed from the output of
# dumpbin /all mybinary.exe | find "Format: RSDS"
# but tools/symsrc/pdb_fingerprint_from_img.py can compute it already, so
# again just use that.
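  # Worked example (hypothetical fingerprint values): for clang-cl.exe with an
  # image timestamp of 0x5A1B2C3D and an image size of 0x40000, the fingerprint
  # would be '5A1B2C3D40000' and the upload destination
  # 'clang-cl.exe/5A1B2C3D40000/clang-cl.ex_'.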
sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'tools', 'symsrc'))
import img_fingerprint, pdb_fingerprint_from_img
binaries = [ 'bin/clang-cl.exe', 'bin/lld-link.exe' ]
for binary_path in binaries:
binary_path = os.path.join(LLVM_RELEASE_DIR, binary_path)
binary_id = img_fingerprint.GetImgFingerprint(binary_path)
(pdb_id, pdb_path) = pdb_fingerprint_from_img.GetPDBInfoFromImg(binary_path)
# The build process builds clang.exe and then copies it to clang-cl.exe
# (both are the same binary and they behave differently on what their
# filename is). Hence, the pdb is at clang.pdb, not at clang-cl.pdb.
# Likewise, lld-link.exe's PDB file is called lld.pdb.
# Compress and upload.
for f, f_id in ((binary_path, binary_id), (pdb_path, pdb_id)):
subprocess.check_call(
['makecab', '/D', 'CompressionType=LZX', '/D', 'CompressionMemory=21',
f, '/L', os.path.dirname(f)], stdout=open(os.devnull, 'w'))
f_cab = f[:-1] + '_'
dest = '%s/%s/%s' % (os.path.basename(f), f_id, os.path.basename(f_cab))
print 'Uploading %s to Google Cloud Storage...' % dest
gsutil_args = ['cp', '-n', '-a', 'public-read', f_cab,
'gs://chromium-browser-symsrv/' + dest]
exit_code = RunGsutil(gsutil_args)
if exit_code != 0:
print "gsutil failed, exit_code: %s" % exit_code
sys.exit(exit_code)
def main():
parser = argparse.ArgumentParser(description='build and package clang')
parser.add_argument('--upload', action='store_true',
help='Upload the target archive to Google Cloud Storage.')
parser.add_argument('--build-args',
help='Additional args to pass to build script.',
default='')
parser.add_argument('--clang-revision', help='Clang revision to build',
required=True)
parser.add_argument('--clang-sub-revision',
help='Clang sub revision to build', required=True)
parser.add_argument('--platform',
help='Platform to build')
args = parser.parse_args()
# Check that the script is not going to upload a toolchain built from HEAD.
use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
if args.upload and use_head_revision:
print ("--upload and LLVM_FORCE_HEAD_REVISION could not be used "
"at the same time.")
return 1
expected_stamp = GetExpectedStamp(args)
pdir = 'clang-' + expected_stamp
print pdir
if args.platform:
platform = args.platform
elif sys.platform == 'darwin':
platform = 'Mac'
elif sys.platform == 'win32':
platform = 'Win'
else:
platform = 'Linux_x64'
with open('buildlog.txt', 'w') as log:
Tee('Diff in llvm:\n', log)
TeeCmd(['svn', 'stat', LLVM_DIR], log, fail_hard=False)
TeeCmd(['svn', 'diff', LLVM_DIR], log, fail_hard=False)
Tee('Diff in llvm/tools/clang:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
# TODO(thakis): compiler-rt is in projects/compiler-rt on Windows but
# llvm/compiler-rt elsewhere. So this diff call is currently only right on
# Windows.
Tee('Diff in llvm/compiler-rt:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
Tee('Diff in llvm/projects/libcxx:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
Tee('Starting build\n', log)
# Do a clobber build.
shutil.rmtree(LLVM_BOOTSTRAP_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BOOTSTRAP_INSTALL_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BUILD_DIR, ignore_errors=True)
opt_flags = args.build_args.split(' ')
if sys.platform.startswith('linux'):
opt_flags += ['--lto-lld']
build_cmd = [sys.executable, os.path.join(THIS_DIR, 'build-clang.py'),
'--bootstrap', '--force-local-build', '--clang-revision',
args.clang_revision, '--clang-sub-revision',
args.clang_sub_revision] + opt_flags
TeeCmd(build_cmd, log)
stamp = open(STAMP_FILE).read().rstrip()
if stamp != expected_stamp:
print 'Actual stamp (%s) != expected stamp (%s).' % (stamp, expected_stamp)
return 1
shutil.rmtree(pdir, ignore_errors=True)
# Copy a whitelist of files to the directory we're going to tar up.
# This supports the same patterns that the fnmatch module understands.
exe_ext = '.exe' if sys.platform == 'win32' else ''
want = ['bin/llvm-symbolizer' + exe_ext,
'bin/sancov' + exe_ext,
'lib/clang/*/asan_blacklist.txt',
'lib/clang/*/cfi_blacklist.txt',
# Copy built-in headers (lib/clang/3.x.y/include).
'lib/clang/*/include/*',
]
if sys.platform == 'win32':
want.append('bin/clang-cl.exe')
want.append('bin/lld-link.exe')
else:
so_ext = 'dylib' if sys.platform == 'darwin' else 'so'
want.extend(['bin/clang',
'lib/libFindBadConstructs.' + so_ext,
'lib/libBlinkGCPlugin.' + so_ext,
])
if sys.platform == 'darwin':
want.extend([# Copy only the OSX and iossim (ASan and profile) runtime
# libraries:
'lib/clang/*/lib/darwin/*asan_osx*',
'lib/clang/*/lib/darwin/*asan_iossim*',
'lib/clang/*/lib/darwin/*profile_osx*',
'lib/clang/*/lib/darwin/*profile_iossim*',
# And the OSX and ios builtin libraries (iossim is lipo'd into
# ios) for the _IsOSVersionAtLeast runtime function.
'lib/clang/*/lib/darwin/*.ios.a',
'lib/clang/*/lib/darwin/*.osx.a',
])
elif sys.platform.startswith('linux'):
# Add llvm-ar and lld for LTO.
want.append('bin/llvm-ar')
want.append('bin/lld')
# Copy only
# lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,profile}-*.a ,
# but not dfsan.
want.extend(['lib/clang/*/lib/linux/*[atm]san*',
'lib/clang/*/lib/linux/*ubsan*',
'lib/clang/*/lib/linux/*libclang_rt.san*',
'lib/clang/*/lib/linux/*profile*',
'lib/clang/*/msan_blacklist.txt',
])
elif sys.platform == 'win32':
want.extend(['lib/clang/*/lib/windows/clang_rt.asan*.dll',
'lib/clang/*/lib/windows/clang_rt.asan*.lib',
])
for root, dirs, files in os.walk(LLVM_RELEASE_DIR):
# root: third_party/llvm-build/Release+Asserts/lib/..., rel_root: lib/...
rel_root = root[len(LLVM_RELEASE_DIR)+1:]
rel_files = [os.path.join(rel_root, f) for f in files]
wanted_files = list(set(itertools.chain.from_iterable(
fnmatch.filter(rel_files, p) for p in want)))
if wanted_files:
# Guaranteed to not yet exist at this point:
os.makedirs(os.path.join(pdir, rel_root))
for f in wanted_files:
src = os.path.join(LLVM_RELEASE_DIR, f)
dest = os.path.join(pdir, f)
shutil.copy(src, dest)
# Strip libraries.
if sys.platform == 'darwin' and f.endswith('.dylib'):
subprocess.call(['strip', '-x', dest])
elif (sys.platform.startswith('linux') and
os.path.splitext(f)[1] in ['.so', '.a']):
subprocess.call(['strip', '-g', dest])
# Set up symlinks.
if sys.platform != 'win32':
os.symlink('clang', os.path.join(pdir, 'bin', 'clang++'))
os.symlink('clang', os.path.join(pdir, 'bin', 'clang-cl'))
if sys.platform.startswith('linux'):
os.symlink('lld', os.path.join(pdir, 'bin', 'ld.lld'))
os.symlink('lld', os.path.join(pdir, 'bin', 'lld-link'))
# Copy libc++ headers.
if sys.platform == 'darwin':
shutil.copytree(os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'include', 'c++'),
os.path.join(pdir, 'include', 'c++'))
# Copy buildlog over.
shutil.copy('buildlog.txt', pdir)
# Create archive.
tar_entries = ['bin', 'lib', 'buildlog.txt']
if sys.platform == 'darwin':
tar_entries += ['include']
with tarfile.open(pdir + '.tgz', 'w:gz') as tar:
for entry in tar_entries:
tar.add(os.path.join(pdir, entry), arcname=entry, filter=PrintTarProgress)
MaybeUpload(args, pdir, platform)
# Zip up llvm-code-coverage for code coverage.
code_coverage_dir = 'llvm-code-coverage-' + stamp
shutil.rmtree(code_coverage_dir, ignore_errors=True)
os.makedirs(os.path.join(code_coverage_dir, 'bin'))
for filename in ['llvm-cov', 'llvm-profdata']:
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', filename + exe_ext),
os.path.join(code_coverage_dir, 'bin'))
with tarfile.open(code_coverage_dir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(code_coverage_dir, 'bin'), arcname='bin',
filter=PrintTarProgress)
MaybeUpload(args, code_coverage_dir, platform)
# Zip up llvm-objdump for sanitizer coverage.
objdumpdir = 'llvmobjdump-' + stamp
shutil.rmtree(objdumpdir, ignore_errors=True)
os.makedirs(os.path.join(objdumpdir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'llvm-objdump' + exe_ext),
os.path.join(objdumpdir, 'bin'))
with tarfile.open(objdumpdir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(objdumpdir, 'bin'), arcname='bin',
filter=PrintTarProgress)
MaybeUpload(args, objdumpdir, platform)
# Zip up the translation_unit tool.
translation_unit_dir = 'translation_unit-' + stamp
shutil.rmtree(translation_unit_dir, ignore_errors=True)
os.makedirs(os.path.join(translation_unit_dir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'translation_unit' +
exe_ext),
os.path.join(translation_unit_dir, 'bin'))
with tarfile.open(translation_unit_dir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(translation_unit_dir, 'bin'), arcname='bin',
filter=PrintTarProgress)
MaybeUpload(args, translation_unit_dir, platform)
if sys.platform == 'win32' and args.upload:
UploadPDBToSymbolServer()
# FIXME: Warn if the file already exists on the server.
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A class for sampling, encoding, and decoding from trained MusicVAE models."""
import copy
import os
import re
import tarfile
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
class NoExtractedExamplesError(Exception):
pass
class MultipleExtractedExamplesError(Exception):
pass
class TrainedModel(object):
"""An interface to a trained model for encoding, decoding, and sampling.
Attributes:
config: The Config to build the model graph with.
batch_size: The batch size to build the model graph with.
checkpoint_dir_or_path: The directory containing checkpoints for the model,
the most recent of which will be loaded, or a direct path to a specific
checkpoint.
var_name_substitutions: Optional list of string pairs containing regex
patterns and substitution values for renaming model variables to match
those in the checkpoint. Useful for backwards compatibility.
session_target: Optional execution engine to connect to. Defaults to
in-process.
sample_kwargs: Additional, non-tensor keyword arguments to pass to sample
call.
"""
def __init__(self, config, batch_size, checkpoint_dir_or_path=None,
var_name_substitutions=None, session_target='', **sample_kwargs):
if tf.gfile.IsDirectory(checkpoint_dir_or_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir_or_path)
else:
checkpoint_path = checkpoint_dir_or_path
self._config = copy.deepcopy(config)
self._config.data_converter.set_mode('infer')
self._config.hparams.batch_size = batch_size
with tf.Graph().as_default():
model = self._config.model
model.build(
self._config.hparams,
self._config.data_converter.output_depth,
is_training=False)
# Input placeholders
self._temperature = tf.placeholder(tf.float32, shape=())
if self._config.hparams.z_size:
self._z_input = tf.placeholder(
tf.float32, shape=[batch_size, self._config.hparams.z_size])
else:
self._z_input = None
if self._config.data_converter.control_depth > 0:
self._c_input = tf.placeholder(
tf.float32, shape=[None, self._config.data_converter.control_depth])
else:
self._c_input = None
self._inputs = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.input_depth])
self._controls = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.control_depth])
self._inputs_length = tf.placeholder(
tf.int32,
shape=[batch_size] + list(self._config.data_converter.length_shape))
self._max_length = tf.placeholder(tf.int32, shape=())
# Outputs
self._outputs, self._decoder_results = model.sample(
batch_size,
max_length=self._max_length,
z=self._z_input,
c_input=self._c_input,
temperature=self._temperature,
**sample_kwargs)
if self._config.hparams.z_size:
q_z = model.encode(self._inputs, self._inputs_length, self._controls)
self._mu = q_z.loc
self._sigma = q_z.scale.diag
self._z = q_z.sample()
var_map = None
if var_name_substitutions is not None:
var_map = {}
for v in tf.global_variables():
var_name = v.name[:-2] # Strip ':0' suffix.
for pattern, substitution in var_name_substitutions:
var_name = re.sub(pattern, substitution, var_name)
if var_name != v.name[:-2]:
tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
var_map[var_name] = v
# Restore graph
self._sess = tf.Session(target=session_target)
saver = tf.train.Saver(var_map)
if (os.path.exists(checkpoint_path) and
tarfile.is_tarfile(checkpoint_path)):
tf.logging.info('Unbundling checkpoint.')
with tempfile.TemporaryDirectory() as temp_dir:
tar = tarfile.open(checkpoint_path)
tar.extractall(temp_dir)
# Assume only a single checkpoint is in the directory.
for name in tar.getnames():
if name.endswith('.index'):
checkpoint_path = os.path.join(temp_dir, name[0:-6])
break
saver.restore(self._sess, checkpoint_path)
else:
saver.restore(self._sess, checkpoint_path)
def sample(self, n=None, length=None, temperature=1.0, same_z=False,
c_input=None):
"""Generates random samples from the model.
Args:
n: The number of samples to return. A full batch will be returned if not
specified.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
same_z: Whether to use the same latent vector for all samples in the
batch (if applicable).
c_input: A sequence of control inputs to use for all samples (if
applicable).
Returns:
A list of samples as NoteSequence objects.
Raises:
ValueError: If `length` is not specified and an end token is not being
used.
"""
batch_size = self._config.hparams.batch_size
n = n or batch_size
z_size = self._config.hparams.z_size
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
length = length or tf.int32.max
feed_dict = {
self._temperature: temperature,
self._max_length: length
}
if self._z_input is not None and same_z:
z = np.random.randn(z_size).astype(np.float32)
z = np.tile(z, (batch_size, 1))
feed_dict[self._z_input] = z
if self._c_input is not None:
feed_dict[self._c_input] = c_input
outputs = []
for _ in range(int(np.ceil(n / batch_size))):
if self._z_input is not None and not same_z:
feed_dict[self._z_input] = (
np.random.randn(batch_size, z_size).astype(np.float32))
outputs.append(self._sess.run(self._outputs, feed_dict))
samples = np.vstack(outputs)[:n]
if self._c_input is not None:
return self._config.data_converter.from_tensors(
samples, np.tile(np.expand_dims(c_input, 0), [batch_size, 1, 1]))
else:
return self._config.data_converter.from_tensors(samples)
def encode(self, note_sequences, assert_same_length=False):
"""Encodes a collection of NoteSequences into latent vectors.
Args:
note_sequences: A collection of NoteSequence objects to encode.
assert_same_length: Whether to raise an AssertionError if the extracted
sequences are not all the same length.
Returns:
The encoded `z`, `mu`, and `sigma` values.
Raises:
RuntimeError: If called for a non-conditional model.
NoExtractedExamplesError: If no examples were extracted.
MultipleExtractedExamplesError: If multiple examples were extracted.
AssertionError: If `assert_same_length` is True and any extracted
sequences differ in length.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
inputs = []
controls = []
lengths = []
for note_sequence in note_sequences:
extracted_tensors = self._config.data_converter.to_tensors(note_sequence)
if not extracted_tensors.inputs:
raise NoExtractedExamplesError(
'No examples extracted from NoteSequence: %s' % note_sequence)
if len(extracted_tensors.inputs) > 1:
raise MultipleExtractedExamplesError(
'Multiple (%d) examples extracted from NoteSequence: %s' %
(len(extracted_tensors.inputs), note_sequence))
inputs.append(extracted_tensors.inputs[0])
controls.append(extracted_tensors.controls[0])
lengths.append(extracted_tensors.lengths[0])
if assert_same_length and len(inputs[0]) != len(inputs[-1]):
raise AssertionError(
'Sequences 0 and %d have different lengths: %d vs %d' %
(len(inputs) - 1, len(inputs[0]), len(inputs[-1])))
return self.encode_tensors(inputs, lengths, controls)
def encode_tensors(self, input_tensors, lengths, control_tensors=None):
"""Encodes a collection of input tensors into latent vectors.
Args:
input_tensors: Collection of input tensors to encode.
lengths: Collection of lengths of input tensors.
control_tensors: Collection of control tensors to encode.
Returns:
The encoded `z`, `mu`, and `sigma` values.
Raises:
RuntimeError: If called for a non-conditional model.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
n = len(input_tensors)
input_depth = self._config.data_converter.input_depth
batch_size = self._config.hparams.batch_size
batch_pad_amt = -n % batch_size
if batch_pad_amt > 0:
input_tensors += [np.zeros([0, input_depth])] * batch_pad_amt
length_array = np.array(lengths, np.int32)
length_array = np.pad(
length_array,
[(0, batch_pad_amt)] + [(0, 0)] * (length_array.ndim - 1),
'constant')
max_length = max([len(t) for t in input_tensors])
inputs_array = np.zeros(
[len(input_tensors), max_length, input_depth])
for i, t in enumerate(input_tensors):
inputs_array[i, :len(t)] = t
control_depth = self._config.data_converter.control_depth
controls_array = np.zeros(
[len(input_tensors), max_length, control_depth])
if control_tensors is not None:
control_tensors += [np.zeros([0, control_depth])] * batch_pad_amt
for i, t in enumerate(control_tensors):
controls_array[i, :len(t)] = t
outputs = []
for i in range(len(inputs_array) // batch_size):
batch_begin = i * batch_size
batch_end = (i+1) * batch_size
feed_dict = {self._inputs: inputs_array[batch_begin:batch_end],
self._controls: controls_array[batch_begin:batch_end],
self._inputs_length: length_array[batch_begin:batch_end]}
outputs.append(
self._sess.run([self._z, self._mu, self._sigma], feed_dict))
assert outputs
return tuple(np.vstack(v)[:n] for v in zip(*outputs))
def decode(self, z, length=None, temperature=1.0, c_input=None):
"""Decodes a collection of latent vectors into NoteSequences.
Args:
z: A collection of latent vectors to decode.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
c_input: Control sequence (if applicable).
Returns:
A list of decodings as NoteSequence objects.
Raises:
RuntimeError: If called for a non-conditional model.
ValueError: If `length` is not specified and an end token is not being
used.
"""
tensors = self.decode_to_tensors(z, length, temperature, c_input)
if self._c_input is not None:
return self._config.data_converter.from_tensors(
tensors,
np.tile(
np.expand_dims(c_input, 0),
[self._config.hparams.batch_size, 1, 1]))
else:
return self._config.data_converter.from_tensors(tensors)
def decode_to_tensors(self, z, length=None, temperature=1.0, c_input=None,
return_full_results=False):
"""Decodes a collection of latent vectors into output tensors.
Args:
z: A collection of latent vectors to decode.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
c_input: Control sequence (if applicable).
return_full_results: If True, return the full decoder_results; otherwise
return only the decoder samples.
Returns:
If return_full_results is True, the full decoder_results list; otherwise the
decoder samples as a 2D numpy array.
Raises:
RuntimeError: If called for a non-conditional model.
ValueError: If `length` is not specified and an end token is not being
used.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot decode with a non-conditional model.')
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
batch_size = self._config.hparams.batch_size
n = len(z)
length = length or tf.int32.max
batch_pad_amt = -n % batch_size
z = np.pad(z, [(0, batch_pad_amt), (0, 0)], mode='constant')
outputs = []
for i in range(len(z) // batch_size):
feed_dict = {
self._temperature: temperature,
self._z_input: z[i*batch_size:(i+1)*batch_size],
self._max_length: length,
}
if self._c_input is not None:
feed_dict[self._c_input] = c_input
if return_full_results:
outputs.extend(self._sess.run(self._decoder_results, feed_dict))
else:
outputs.extend(self._sess.run(self._outputs, feed_dict))
return outputs[:n]
def interpolate(self, start_sequence, end_sequence, num_steps,
length=None, temperature=1.0, assert_same_length=True):
"""Interpolates between a start and an end NoteSequence.
Args:
start_sequence: The NoteSequence to interpolate from.
end_sequence: The NoteSequence to interpolate to.
num_steps: Number of NoteSequences to be generated, including the
reconstructions of the start and end sequences.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
assert_same_length: Whether to raise an AssertionError if the extracted
sequences are not all the same length.
Returns:
A list of interpolated NoteSequences.
Raises:
AssertionError: If `assert_same_length` is True and any extracted
sequences differ in length.
"""
def _slerp(p0, p1, t):
"""Spherical linear interpolation."""
omega = np.arccos(np.dot(np.squeeze(p0/np.linalg.norm(p0)),
np.squeeze(p1/np.linalg.norm(p1))))
so = np.sin(omega)
return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1
_, mu, _ = self.encode([start_sequence, end_sequence], assert_same_length)
z = np.array([_slerp(mu[0], mu[1], t)
for t in np.linspace(0, 1, num_steps)])
return self.decode(
length=length,
z=z,
temperature=temperature)
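# A minimal, hedged usage sketch (not part of the original module); the
# `config` object and `checkpoint_path` below are hypothetical placeholders,
# and this helper is never called by the library.
def _example_sample_and_interpolate(config, checkpoint_path):
  model = TrainedModel(config, batch_size=4,
                       checkpoint_dir_or_path=checkpoint_path)
  # Draw four random NoteSequences, then interpolate between the first two.
  sequences = model.sample(n=4, length=80, temperature=0.5)
  return model.interpolate(sequences[0], sequences[1], num_steps=5, length=80)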
|
|
__version__ = '0.2.0'
# json and datetime are used directly below; import them explicitly rather
# than relying on the wildcard import from books_util.
import datetime
import json
import pymysql
from flask import Flask, Response, request
from books_util import *
conf = get_configuration()
db = pymysql.connect(**conf)
# server configuration
app = Flask(__name__)
@app.route('/configuration')
def configuration():
rdata = json.dumps({
"version": __version__,
"configuration": conf,
"date": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M")})
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/tag_counts')
@app.route('/tag_counts/<tag>')
def tag_counts(tag=None):
search_str = "SELECT Tag, COUNT(Tag) as count FROM tags"
if tag is not None:
search_str += f" WHERE Tag LIKE \"{tag}%\""
search_str += " GROUP BY Tag ORDER BY count DESC"
header = ["tag", "count"]
app.logger.debug(search_str)
c = db.cursor()
try:
c.execute(search_str)
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
else:
s = c.fetchall()
rdata = serialize_rows(s, header)
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/tags/<id>')
def tags(id=None):
search_str = f"SELECT Tag FROM tags WHERE BookID = {id} ORDER BY Tag"
app.logger.debug(search_str)
c = db.cursor()
try:
c.execute(search_str)
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
else:
s = c.fetchall()
tag_list = [x[0] for x in s]
rdata = json.dumps({"BookID": id, "tag_list": tag_list})
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/books_read_by_year')
@app.route('/books_read_by_year/<target_year>')
def books_read_by_year(target_year=None):
search_str = ("SELECT YEAR(LastRead) as Year, SUM(Pages) as Pages, COUNT(Pages) as Books\n"
"FROM `book collection`\n"
"WHERE LastRead is not NULL and LastRead <> \"0000-00-00 00:00:00\" and year(LastRead) <> \"1966\" ")
if target_year is not None:
search_str += f" AND YEAR(LastRead) = {target_year}\n"
search_str += ("GROUP BY YEAR(LastRead)\n"
"ORDER BY LastRead ASC;")
header = ["year", "pages read", "books read"]
app.logger.debug(search_str)
c = db.cursor()
try:
c.execute(search_str)
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
else:
s = c.fetchall()
rdata = serialize_rows(s, header)
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/books_read')
@app.route('/books_read/<target_year>')
def books_read(target_year=None):
search_str = ("SELECT *\n"
"FROM `book collection`\n"
"WHERE LastRead is not NULL and LastRead <> \"0000-00-00 00:00:00\" ")
if target_year is not None:
search_str += f" and YEAR(LastRead) = {target_year}"
search_str += " ORDER BY LastRead ASC;"
header = table_header
app.logger.debug(search_str)
c = db.cursor()
try:
c.execute(search_str)
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
else:
s = c.fetchall()
rdata = serialize_rows(s, header)
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/books')
def books():
# process any query parameters
args = request.args
where = []
for key in args:
if key == "BookCollectionID":
where.append(f"{key} = \"{args.get(key)}\"")
else:
where.append(f"{key} LIKE \"%{args.get(key)}%\"")
where_str = "AND".join(where)
# run the query
search_str = ("SELECT *\n"
"FROM `book collection`\n")
if where_str != '':
search_str += "\nWHERE " + where_str
search_str += "\nORDER BY Author, Title ASC"
header = table_header
app.logger.debug(search_str)
c = db.cursor()
try:
c.execute(search_str)
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
else:
s = c.fetchall()
rdata = serialize_rows(s, header)
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
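# Example query (hypothetical host/port), matching the LIKE filters built above:
#   curl "http://localhost:5000/books?Author=Tolkien&Title=Ring"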
@app.route('/update_tag_value/<current>/<updated>')
def update_tag_value(current, updated):
c = db.cursor()
try:
records = c.execute("UPDATE `tags` SET Tag = '{}' WHERE Tag = '{}'".format(updated.lower().strip(), current))
db.commit()
rdata = json.dumps({"tag_update": f"{current} >> {updated}", "updated_tags": records})
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/add_tag_by_id/<id>/<tag>')
def add_tag(id, tag):
c = db.cursor()
try:
c.execute("insert into tags (BookID, Tag) values (%s, %s)", (id, tag))
rdata = json.dumps({"BookID": f"{id}", "Tag": f"{tag}"})
except pymysql.Error as e:
app.logger.error(e)
rdata = json.dumps({"error": str(e)})
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/add_books', methods=['POST'])
def add_books():
"""
Post Payload:
[{
"Title": "Delete Me Now",
"Author": "Tester, N A",
"CopyrightDate": "1999-01-01",
"ISBNNumber": "1234",
"ISBNNumber13": "1234",
"PublisherName": "Printerman",
"CoverType": "Hard",
"Pages": "7",
"LastRead": "0000-00-00",
"PreviouslyRead": "0000-00-00",
"Location": "Main Collection",
"Note": "",
"Recycled": 0
}]
E.g.
curl -X POST -H "Content-type: application/json" -d @./examples/test_add_book.json \
http://172.17.0.2:5000/add_books
:return:
"""
# records should be a list of dictionaries including all fields
records = request.get_json()
search_str = ("INSERT INTO `book collection` "
"(Title, Author, CopyrightDate, ISBNNumber, ISBNNumber13, PublisherName, CoverType, Pages, "
"LastRead, PreviouslyRead, Location, Note, Recycled) "
"VALUES "
"(\"{Title}\", \"{Author}\", \"{CopyrightDate}\", \"{ISBNNumber}\", \"{ISBNNumber13}\", "
"\"{PublisherName}\", \"{CoverType}\", \"{Pages}\", \"{LastRead}\", \"{PreviouslyRead}\", "
"\"{Location}\", \"{Note}\", \"{Recycled}\")")
c = db.cursor()
rdata = []
for record in records:
try:
c.execute(search_str.format(**record))
rdata.append(record)
except pymysql.Error as e:
app.logger.error(e)
rdata.append({"error": str(e)})
rdata = json.dumps({"add_books": rdata})
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
@app.route('/update_book', methods=['POST'])
def update_book():
"""
Post Payload:
{
"BookCollectionID": 1606,
"LastRead": "0000-00-00",
"PreviouslyRead": "0000-00-00",
"Note": "",
"Recycled": 0
}
E.g.
curl -X POST -H "Content-type: application/json" -d @./examples/test_update_book.json \
http://172.17.0.2:5000/update_book
:return:
"""
# records should be a list of dictionaries including all fields
record = request.get_json()
search_str = "UPDATE `book collection` SET "
continuation = False
for key in record:
if key == "BookCollectionID":
continue
if continuation:
search_str += ", "
else:
continuation = True
search_str += f" {key} = \"{record[key]}\""
search_str += " WHERE BookCollectionID = {BookCollectionID} "
app.logger.debug(search_str)
c = db.cursor()
rdata = []
try:
c.execute(search_str.format(**record))
rdata.append(record)
except pymysql.Error as e:
app.logger.error(e)
rdata.append({"error": str(e)})
rdata = json.dumps({"update_books": rdata})
response_headers = resp_header(rdata)
return Response(response=rdata, status=200, headers=response_headers)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
|
|
'''
A tool that generates a binary blob of packaged files and a json bag with info on each file's location in the blob.
You can split your files into "asset bundles" and create each bundle separately
with this tool. At run time, process the json for each bundle to locate and
load its data from the blob. This allows you to share assets and reduce
data downloads.
Usage:
file_packager.py TARGET [--preload A [B..]] [--js-output=OUTPUT.js]
--js-output=FILE Writes output in FILE, if not specified, standard output is used.
Notes:
* The file packager generates unix-style file paths. So if you are on windows and a file is accessed at
subdir\file, in JS it will be subdir/file. For simplicity we treat the web platform as a *NIX.
'''
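# A minimal, hedged consumer sketch (not part of this script): given the json
# bag and the binary blob this tool emits, slice one packaged file back out.
# The key names ('bundle_file', 'files', 'filename', 'start', 'end') match the
# json written at the bottom of this script; the json path below is a
# hypothetical example, and this helper is never called.
def _example_read_bundle(json_path='assets.data.json'):
  import json
  bag = json.load(open(json_path))
  blob = open(bag['bundle_file'], 'rb').read()
  entry = bag['files'][0]
  return entry['filename'], blob[entry['start']:entry['end']]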
import os, sys, shutil, random, uuid, ctypes
import posixpath
#import shared
import json
#from shared import Compression, execute, suffix, unsuffixed
from subprocess import Popen, PIPE, STDOUT
if len(sys.argv) == 1:
print '''Usage: file_packager.py TARGET [--preload A...] [--js-output=OUTPUT.js]
See the source for more details.'''
sys.exit(0)
DEBUG = os.environ.get('EMCC_DEBUG')
data_target = sys.argv[1]
IMAGE_SUFFIXES = ('.jpg', '.png', '.bmp')
AUDIO_SUFFIXES = ('.ogg', '.wav', '.mp3')
AUDIO_MIMETYPES = { 'ogg': 'audio/ogg', 'wav': 'audio/wav', 'mp3': 'audio/mpeg' }
CRUNCH_INPUT_SUFFIX = '.dds'
CRUNCH_OUTPUT_SUFFIX = '.crn'
DDS_HEADER_SIZE = 128
AV_WORKAROUND = 0 # Set to 1 to randomize file order and add some padding, to work around silly av false positives
data_files = []
in_preload = False
in_embed = False
has_preloaded = False
in_compress = 0
crunch = 0
plugins = []
jsoutput = None
force = True
use_preload_cache = False
as_json = True
for arg in sys.argv[1:]:
if arg == '--preload':
in_preload = True
in_embed = False
has_preloaded = True
in_compress = 0
elif arg == '--embed':
in_embed = True
in_preload = False
in_compress = 0
elif arg == '--as-json':
as_json = True
elif arg == '--no-force':
force = False
elif arg == '--use-preload-cache':
use_preload_cache = True
elif arg.startswith('--js-output'):
jsoutput = arg.split('=')[1] if '=' in arg else None
elif arg.startswith('--crunch'):
from shared import CRUNCH
crunch = arg.split('=')[1] if '=' in arg else '128'
in_preload = False
in_embed = False
in_compress = 0
elif arg.startswith('--plugin'):
plugin = open(arg.split('=')[1], 'r').read()
eval(plugin) # should append itself to plugins
in_preload = False
in_embed = False
in_compress = 0
elif in_preload or in_embed:
mode = 'preload'
if in_embed:
mode = 'embed'
if '@' in arg:
srcpath, dstpath = arg.split('@') # User is specifying destination filename explicitly.
else:
srcpath = dstpath = arg # Use source path as destination path.
if os.path.isfile(srcpath) or os.path.isdir(srcpath):
data_files.append({ 'srcpath': srcpath, 'dstpath': dstpath, 'mode': mode })
else:
print >> sys.stderr, 'Warning: ' + arg + ' does not exist, ignoring.'
elif in_compress:
if in_compress == 1:
Compression.encoder = arg
in_compress = 2
elif in_compress == 2:
Compression.decoder = arg
in_compress = 3
elif in_compress == 3:
Compression.js_name = arg
in_compress = 0
if (not force) and len(data_files) == 0:
has_preloaded = False
ret = '''
var Module;
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
(function() {
'''
code = '''
function assert(check, msg) {
if (!check) throw msg + new Error().stack;
}
'''
json_data = {}
# Win32 code to test whether the given file has the hidden property set.
def has_hidden_attribute(filepath):
if sys.platform != 'win32':
return False
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(unicode(filepath))
assert attrs != -1
result = bool(attrs & 2)
except (AttributeError, AssertionError):
result = False
return result
# The packager should never preload/embed files from directories whose path has a component starting with '.',
# nor files that are hidden (Win32). Note that this filter ONLY applies to files found by walking directories;
# explicitly specified single files are always preloaded/embedded, even if they start with a '.'.
def should_ignore(filename):
if has_hidden_attribute(filename):
return True
components = filename.replace('\\\\', '/').replace('\\', '/').split('/')
for c in components:
if c.startswith('.') and c != '.' and c != '..':
return True
return False
# Expand directories into individual files
def add(arg, dirname, names):
# rootpathsrc: The path name of the root directory on the local FS we are adding to emscripten virtual FS.
# rootpathdst: The name we want to make the source path available on the emscripten virtual FS.
mode, rootpathsrc, rootpathdst = arg
for name in names:
fullname = os.path.join(dirname, name)
if not os.path.isdir(fullname):
if should_ignore(fullname):
if DEBUG:
print >> sys.stderr, 'Skipping hidden file "' + fullname + '" from inclusion in the emscripten virtual file system.'
else:
dstpath = os.path.join(rootpathdst, os.path.relpath(fullname, rootpathsrc)) # Convert source filename relative to root directory of target FS.
data_files.append({ 'srcpath': fullname, 'dstpath': dstpath, 'mode': mode })
for file_ in data_files:
if os.path.isdir(file_['srcpath']):
os.path.walk(file_['srcpath'], add, [file_['mode'], file_['srcpath'], file_['dstpath']])
data_files = filter(lambda file_: not os.path.isdir(file_['srcpath']), data_files)
# Absolutize paths, and check that they make sense
curr_abspath = os.path.abspath(os.getcwd())
for file_ in data_files:
if file_['srcpath'] == file_['dstpath']:
# This file was not defined with src@dst, so we inferred the destination from the source. In that case,
# we require that the path be under the current directory, since the current directory becomes the virtual FS root.
path = file_['dstpath']
abspath = os.path.abspath(path)
if DEBUG: print >> sys.stderr, path, abspath, curr_abspath
if not abspath.startswith(curr_abspath):
print >> sys.stderr, 'Error: Embedding "%s" which is not contained within the current directory "%s". This is invalid since the current directory becomes the root that the generated code will see' % (path, curr_abspath)
sys.exit(1)
file_['dstpath'] = abspath[len(curr_abspath)+1:]
if os.path.isabs(path):
print >> sys.stderr, 'Warning: Embedding an absolute file/directory name "' + path + '" to the virtual filesystem. The file will be made available in the relative path "' + file_['dstpath'] + '". You can use the explicit syntax --preload-file srcpath@dstpath to explicitly specify the target location the absolute source path should be directed to.'
for file_ in data_files:
file_['dstpath'] = file_['dstpath'].replace(os.path.sep, '/') # name in the filesystem, native and emulated
if file_['dstpath'].endswith('/'): # If user has submitted a directory name as the destination but omitted the destination filename, use the filename from source file
file_['dstpath'] = file_['dstpath'] + os.path.basename(file_['srcpath'])
# make destination path always relative to the root
file_['dstpath'] = posixpath.normpath(os.path.join('/', file_['dstpath']))
if DEBUG:
print >> sys.stderr, 'Packaging file "' + file_['srcpath'] + '" to VFS in path "' + file_['dstpath'] + '".'
# Remove duplicates (can occur naively, for example preload dir/, preload dir/subdir/)
seen = {}
def was_seen(name):
if seen.get(name): return True
seen[name] = 1
return False
data_files = filter(lambda file_: not was_seen(file_['dstpath']), data_files)
if AV_WORKAROUND:
random.shuffle(data_files)
# Apply plugins
for file_ in data_files:
for plugin in plugins:
plugin(file_)
# Crunch files
if crunch:
shutil.copyfile(shared.path_from_root('tools', 'crunch-worker.js'), 'crunch-worker.js')
ret += '''
var decrunchWorker = new Worker('crunch-worker.js');
var decrunchCallbacks = [];
decrunchWorker.onmessage = function(msg) {
decrunchCallbacks[msg.data.callbackID](msg.data.data);
console.log('decrunched ' + msg.data.filename + ' in ' + msg.data.time + ' ms, ' + msg.data.data.length + ' bytes');
decrunchCallbacks[msg.data.callbackID] = null;
};
function requestDecrunch(filename, data, callback) {
decrunchWorker.postMessage({
filename: filename,
data: new Uint8Array(data),
callbackID: decrunchCallbacks.length
});
decrunchCallbacks.push(callback);
}
'''
for file_ in data_files:
if file_['dstpath'].endswith(CRUNCH_INPUT_SUFFIX):
src_dds_name = file_['srcpath']
src_crunch_name = unsuffixed(src_dds_name) + CRUNCH_OUTPUT_SUFFIX
# Preload/embed the .crn version instead of the .dds version, but use the .dds suffix for the target file in the virtual FS.
file_['srcpath'] = src_crunch_name
try:
# Do not crunch if crunched version exists and is more recent than dds source
crunch_time = os.stat(src_crunch_name).st_mtime
dds_time = os.stat(src_dds_name).st_mtime
if dds_time < crunch_time: continue
except:
pass # if one of them does not exist, continue on
# guess at format. this lets us tell crunch to not try to be clever and use odd formats like DXT5_AGBR
try:
format = Popen(['file', file_['srcpath']], stdout=PIPE).communicate()[0]
if 'DXT5' in format:
format = ['-dxt5']
elif 'DXT1' in format:
format = ['-dxt1']
else:
raise Exception('unknown format')
except:
format = []
Popen([CRUNCH, '-outsamedir', '-file', src_dds_name, '-quality', crunch] + format, stdout=sys.stderr).communicate()
#if not os.path.exists(os.path.basename(crunch_name)):
# print >> sys.stderr, 'Failed to crunch, perhaps a weird dxt format? Looking for a source PNG for the DDS'
# Popen([CRUNCH, '-file', unsuffixed(file_['srcpath']) + '.png', '-quality', crunch] + format, stdout=sys.stderr).communicate()
assert os.path.exists(src_crunch_name), 'crunch failed to generate output'
# prepend the dds header
crunched = open(src_crunch_name, 'rb').read()
c = open(src_crunch_name, 'wb')
c.write(open(src_dds_name, 'rb').read()[:DDS_HEADER_SIZE])
c.write(crunched)
c.close()
# Set up folders
partial_dirs = []
for file_ in data_files:
dirname = os.path.dirname(file_['dstpath'])
dirname = dirname.lstrip('/') # absolute paths start with '/', remove that
if dirname != '':
parts = dirname.split('/')
for i in range(len(parts)):
partial = '/'.join(parts[:i+1])
if partial not in partial_dirs:
code += '''Module['FS_createPath']('/%s', '%s', true, true);\n''' % ('/'.join(parts[:i]), parts[i])
partial_dirs.append(partial)
if has_preloaded:
# Bundle all datafiles into one archive. Avoids doing lots of simultaneous XHRs which has overhead.
data = open(data_target, 'wb')
start = 0
for file_ in data_files:
file_['data_start'] = start
curr = open(file_['srcpath'], 'rb').read()
file_['data_end'] = start + len(curr)
if AV_WORKAROUND: curr += '\x00'
#print >> sys.stderr, 'bundling', file_['srcpath'], file_['dstpath'], file_['data_start'], file_['data_end']
start += len(curr)
data.write(curr)
data.close()
# TODO: sha256sum on data_target
#if Compression.on:
# Compression.compress(data_target)
# Data requests - for getting a block of data out of the big archive - have a similar API to XHRs
code += '''
function DataRequest(start, end, crunched, audio) {
this.start = start;
this.end = end;
this.crunched = crunched;
this.audio = audio;
}
DataRequest.prototype = {
requests: {},
open: function(mode, name) {
this.name = name;
this.requests[name] = this;
Module['addRunDependency']('fp ' + this.name);
},
send: function() {},
onload: function() {
var byteArray = this.byteArray.subarray(this.start, this.end);
if (this.crunched) {
var ddsHeader = byteArray.subarray(0, 128);
var that = this;
requestDecrunch(this.name, byteArray.subarray(128), function(ddsData) {
byteArray = new Uint8Array(ddsHeader.length + ddsData.length);
byteArray.set(ddsHeader, 0);
byteArray.set(ddsData, 128);
that.finish(byteArray);
});
} else {
this.finish(byteArray);
}
},
finish: function(byteArray) {
var that = this;
Module['FS_createPreloadedFile'](this.name, null, byteArray, true, true, function() {
Module['removeRunDependency']('fp ' + that.name);
}, function() {
if (that.audio) {
Module['removeRunDependency']('fp ' + that.name); // workaround for chromium bug 124926 (still no audio with this, but at least we don't hang)
} else {
Module.printErr('Preloading file ' + that.name + ' failed');
}
}, false, true); // canOwn this data in the filesystem, it is a slide into the heap that will never change
this.requests[this.name] = null;
},
};
'''
preloaded_files=[]
counter = 0
for file_ in data_files:
filename = file_['dstpath']
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
if file_['mode'] == 'embed':
# Embed
data = map(ord, open(file_['srcpath'], 'rb').read())
if not data:
str_data = '[]'
else:
str_data = ''
chunk_size = 10240
while len(data) > 0:
chunk = data[:chunk_size]
data = data[chunk_size:]
if not str_data:
str_data = str(chunk)
else:
str_data += '.concat(' + str(chunk) + ')'
code += '''Module['FS_createDataFile']('%s', '%s', %s, true, true);\n''' % (dirname, basename, str_data)
elif file_['mode'] == 'preload':
# Preload
varname = 'filePreload%d' % counter
counter += 1
file_data = {
'filename': file_['dstpath'],
'start': file_['data_start'],
'end': file_['data_end'],
'crunched': '1' if crunch and filename.endswith(CRUNCH_INPUT_SUFFIX) else '0',
'audio': '1' if filename[-4:] in AUDIO_SUFFIXES else '0',
}
code += ''' new DataRequest(%(start)d, %(end)d, %(crunched)s, %(audio)s).open('GET', '%(filename)s');
''' % file_data
preloaded_files.append(file_data)
else:
assert 0
if has_preloaded:
# Get the big archive and split it up
use_data = '''
// copy the entire loaded file into a spot in the heap. Files will refer to slices in that. They cannot be freed though.
var ptr = Module['_malloc'](byteArray.length);
Module['HEAPU8'].set(byteArray, ptr);
DataRequest.prototype.byteArray = Module['HEAPU8'].subarray(ptr, ptr+byteArray.length);
'''
for file_ in data_files:
if file_['mode'] == 'preload':
use_data += ' DataRequest.prototype.requests["%s"].onload();\n' % (file_['dstpath'])
use_data += " Module['removeRunDependency']('datafile_%s');\n" % data_target
#if Compression.on:
# use_data = '''
# Module["decompress"](byteArray, function(decompressed) {
# byteArray = new Uint8Array(decompressed);
# %s
# });
# ''' % use_data
package_uuid = uuid.uuid4()
remote_package_name = os.path.basename(data_target)
code += r'''
if (!Module.expectedDataFileDownloads) {
Module.expectedDataFileDownloads = 0;
Module.finishedDataFileDownloads = 0;
}
Module.expectedDataFileDownloads++;
var PACKAGE_PATH = window['encodeURIComponent'](window.location.pathname.toString().substring(0, window.location.pathname.toString().lastIndexOf('/')) + '/');
var PACKAGE_NAME = '%s';
var REMOTE_PACKAGE_NAME = '%s';
var PACKAGE_UUID = '%s';
''' % (data_target, remote_package_name, package_uuid)
if use_preload_cache:
code += r'''
var indexedDB = window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB;
var IDB_RO = "readonly";
var IDB_RW = "readwrite";
var DB_NAME = 'EM_PRELOAD_CACHE';
var DB_VERSION = 1;
var METADATA_STORE_NAME = 'METADATA';
var PACKAGE_STORE_NAME = 'PACKAGES';
function openDatabase(callback, errback) {
try {
var openRequest = indexedDB.open(DB_NAME, DB_VERSION);
} catch (e) {
return errback(e);
}
openRequest.onupgradeneeded = function(event) {
var db = event.target.result;
if(db.objectStoreNames.contains(PACKAGE_STORE_NAME)) {
db.deleteObjectStore(PACKAGE_STORE_NAME);
}
var packages = db.createObjectStore(PACKAGE_STORE_NAME);
if(db.objectStoreNames.contains(METADATA_STORE_NAME)) {
db.deleteObjectStore(METADATA_STORE_NAME);
}
var metadata = db.createObjectStore(METADATA_STORE_NAME);
};
openRequest.onsuccess = function(event) {
var db = event.target.result;
callback(db);
};
openRequest.onerror = function(error) {
errback(error);
};
};
/* Check if there's a cached package, and if so whether it's the latest available */
function checkCachedPackage(db, packageName, callback, errback) {
var transaction = db.transaction([METADATA_STORE_NAME], IDB_RO);
var metadata = transaction.objectStore(METADATA_STORE_NAME);
var getRequest = metadata.get(packageName);
getRequest.onsuccess = function(event) {
var result = event.target.result;
if (!result) {
return callback(false);
} else {
return callback(PACKAGE_UUID === result.uuid);
}
};
getRequest.onerror = function(error) {
errback(error);
};
};
function fetchCachedPackage(db, packageName, callback, errback) {
var transaction = db.transaction([PACKAGE_STORE_NAME], IDB_RO);
var packages = transaction.objectStore(PACKAGE_STORE_NAME);
var getRequest = packages.get(packageName);
getRequest.onsuccess = function(event) {
var result = event.target.result;
callback(result);
};
getRequest.onerror = function(error) {
errback(error);
};
};
function cacheRemotePackage(db, packageName, packageData, packageMeta, callback, errback) {
var transaction = db.transaction([PACKAGE_STORE_NAME, METADATA_STORE_NAME], IDB_RW);
var packages = transaction.objectStore(PACKAGE_STORE_NAME);
var metadata = transaction.objectStore(METADATA_STORE_NAME);
var putPackageRequest = packages.put(packageData, packageName);
putPackageRequest.onsuccess = function(event) {
var putMetadataRequest = metadata.put(packageMeta, packageName);
putMetadataRequest.onsuccess = function(event) {
callback(packageData);
};
putMetadataRequest.onerror = function(error) {
errback(error);
};
};
putPackageRequest.onerror = function(error) {
errback(error);
};
};
'''
ret += r'''
function fetchRemotePackage(packageName, callback, errback) {
var xhr = new XMLHttpRequest();
xhr.open('GET', packageName, true);
xhr.responseType = 'arraybuffer';
xhr.onprogress = function(event) {
var url = packageName;
if (event.loaded && event.total) {
if (!xhr.addedTotal) {
xhr.addedTotal = true;
if (!Module.dataFileDownloads) Module.dataFileDownloads = {};
Module.dataFileDownloads[url] = {
loaded: event.loaded,
total: event.total
};
} else {
Module.dataFileDownloads[url].loaded = event.loaded;
}
var total = 0;
var loaded = 0;
var num = 0;
for (var download in Module.dataFileDownloads) {
var data = Module.dataFileDownloads[download];
total += data.total;
loaded += data.loaded;
num++;
}
total = Math.ceil(total * Module.expectedDataFileDownloads/num);
if (Module['setStatus']) Module['setStatus']('Downloading data... (' + loaded + '/' + total + ')');
} else if (!Module.dataFileDownloads) {
if (Module['setStatus']) Module['setStatus']('Downloading data...');
}
};
xhr.onload = function(event) {
var packageData = xhr.response;
callback(packageData);
};
xhr.send(null);
};
function handleError(error) {
console.error('package error:', error);
};
'''
code += r'''
function processPackageData(arrayBuffer) {
Module.finishedDataFileDownloads++;
assert(arrayBuffer, 'Loading data file failed.');
var byteArray = new Uint8Array(arrayBuffer);
var curr;
%s
};
Module['addRunDependency']('datafile_%s');
''' % (use_data, data_target) # use basename because from the browser's point of view, we need to find the datafile in the same dir as the html file
code += r'''
if (!Module.preloadResults) Module.preloadResults = {};
'''
if use_preload_cache:
code += r'''
function preloadFallback(error) {
console.error(error);
console.error('falling back to default preload behavior');
fetchRemotePackage(REMOTE_PACKAGE_NAME, processPackageData, handleError);
};
openDatabase(
function(db) {
checkCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME,
function(useCached) {
Module.preloadResults[PACKAGE_NAME] = {fromCache: useCached};
if (useCached) {
console.info('loading ' + PACKAGE_NAME + ' from cache');
fetchCachedPackage(db, PACKAGE_PATH + PACKAGE_NAME, processPackageData, preloadFallback);
} else {
console.info('loading ' + PACKAGE_NAME + ' from remote');
fetchRemotePackage(REMOTE_PACKAGE_NAME,
function(packageData) {
cacheRemotePackage(db, PACKAGE_PATH + PACKAGE_NAME, packageData, {uuid:PACKAGE_UUID}, processPackageData,
function(error) {
console.error(error);
processPackageData(packageData);
});
}
, preloadFallback);
}
}
, preloadFallback);
}
, preloadFallback);
if (Module['setStatus']) Module['setStatus']('Downloading...');
'''
else:
# Not using preload cache, so we might as well start the xhr ASAP, potentially before JS parsing of the main codebase if it's after us.
# The only tricky bit is that the fetch is async, and so is the point at which runWithFS is called, so we handle both orderings.
ret += r'''
var fetched = null, fetchedCallback = null;
fetchRemotePackage('%s', function(data) {
if (fetchedCallback) {
fetchedCallback(data);
fetchedCallback = null;
} else {
fetched = data;
}
}, handleError);
''' % os.path.basename(data_target)
code += r'''
Module.preloadResults[PACKAGE_NAME] = {fromCache: false};
if (fetched) {
processPackageData(fetched);
fetched = null;
} else {
fetchedCallback = processPackageData;
}
'''
ret += '''
function runWithFS() {
'''
ret += code
ret += '''
}
if (Module['calledRun']) {
runWithFS();
} else {
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(runWithFS); // FS is not initialized yet, wait for it
}
'''
if crunch:
ret += '''
if (!Module['postRun']) Module['postRun'] = [];
Module["postRun"].push(function() {
decrunchWorker.terminate();
});
'''
ret += '''
})();
'''
if force or len(data_files) > 0:
if jsoutput is None:
print ret
else:
f = open(jsoutput, 'w')
f.write(ret)
f.close()
if as_json:
json_data = { 'bundle_file': data_target, 'directories': partial_dirs, 'files': preloaded_files, 'package_uuid': '%s' % uuid.uuid4() }
f = open(data_target+'.json', 'w')
f.write(json.dumps(json_data))
f.close()
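# Illustrative sketch (not part of the generated loader): the JavaScript
# emitted above resolves the race between "package data finished downloading"
# and "runWithFS is ready to consume it" with a two-slot handshake. The same
# idea in plain Python, with hypothetical names, assuming either side may
# arrive first:
def _fetch_handshake_sketch():
    state = {'data': None, 'callback': None}
    def on_data(data):
        # Producer side: deliver immediately if the consumer already
        # registered, otherwise stash the data until it does.
        if state['callback'] is not None:
            state['callback'](data)
        else:
            state['data'] = data
    def on_ready(callback):
        # Consumer side: use stashed data if it already arrived, otherwise
        # remember the callback for the producer to invoke.
        if state['data'] is not None:
            callback(state['data'])
            state['data'] = None
        else:
            state['callback'] = callback
    return on_data, on_ready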
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class _ModelWithOptimizer(training.Model):
def __init__(self):
super(_ModelWithOptimizer, self).__init__()
self.dense = core.Dense(1)
self.optimizer = adam.AdamOptimizer(0.01)
@def_function.function(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)))
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {'loss': loss}
class DefFunctionTest(test.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertEqual(len(state), 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(sess.run(result), 6.0)
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(sess.run(result), 18.0)
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaises(ValueError):
fn(constant_op.constant(3.0))
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
model(x, y)
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
|
from __future__ import division, print_function, absolute_import
import pytest
from math import sqrt, exp, sin, cos
from functools import lru_cache
from numpy.testing import (assert_warns, assert_,
assert_allclose,
assert_equal,
assert_array_equal)
import numpy as np
from numpy import finfo, power, nan, isclose
from scipy.optimize import zeros, newton, root_scalar
from scipy._lib._util import getargspec_no_self as _getargspec
# Import testing parameters
from scipy.optimize._tstutils import get_tests, functions as tstutils_functions, fstrings as tstutils_fstrings
from scipy._lib._numpy_compat import suppress_warnings
TOL = 4*np.finfo(float).eps # tolerance
_FLOAT_EPS = finfo(float).eps
# A few test functions used frequently:
# A simple quadratic, (x-1)^2 - 2
def f1(x):
return x ** 2 - 2 * x - 1
def f1_1(x):
return 2 * x - 2
def f1_2(x):
return 2.0 + 0 * x
def f1_and_p_and_pp(x):
return f1(x), f1_1(x), f1_2(x)
# Simple transcendental function
def f2(x):
return exp(x) - cos(x)
def f2_1(x):
return exp(x) + sin(x)
def f2_2(x):
return exp(x) + cos(x)
# lru cached function
@lru_cache()
def f_lrucached(x):
return x
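# Illustrative sketch (not part of the test suite): zeros.newton selects the
# iteration from the derivatives it is given -- secant with only a starting
# point, Newton with fprime, Halley with fprime and fprime2 -- using the
# f1/f1_1/f1_2 helpers defined above. All three converge to the positive root
# of (x-1)**2 - 2, i.e. 1 + sqrt(2).
def _newton_variants_sketch():
    r_secant = zeros.newton(f1, 3.0, tol=1e-6)
    r_newton = zeros.newton(f1, 3.0, fprime=f1_1, tol=1e-6)
    r_halley = zeros.newton(f1, 3.0, fprime=f1_1, fprime2=f1_2, tol=1e-6)
    return r_secant, r_newton, r_halley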
class TestBasic(object):
def run_check_by_name(self, name, smoothness=0, **kwargs):
a = .5
b = sqrt(3)
xtol = 4*np.finfo(float).eps
rtol = 4*np.finfo(float).eps
for function, fname in zip(tstutils_functions, tstutils_fstrings):
if smoothness > 0 and fname in ['f4', 'f5', 'f6']:
continue
r = root_scalar(function, method=name, bracket=[a, b], x0=a,
xtol=xtol, rtol=rtol, **kwargs)
zero = r.root
assert_(r.converged)
assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
err_msg='method %s, function %s' % (name, fname))
def run_check(self, method, name):
a = .5
b = sqrt(3)
xtol = 4 * _FLOAT_EPS
rtol = 4 * _FLOAT_EPS
for function, fname in zip(tstutils_functions, tstutils_fstrings):
zero, r = method(function, a, b, xtol=xtol, rtol=rtol,
full_output=True)
assert_(r.converged)
assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
err_msg='method %s, function %s' % (name, fname))
def run_check_lru_cached(self, method, name):
# check that https://github.com/scipy/scipy/issues/10846 is fixed
a = -1
b = 1
zero, r = method(f_lrucached, a, b, full_output=True)
assert_(r.converged)
assert_allclose(zero, 0,
err_msg='method %s, function %s' % (name, 'f_lrucached'))
def _run_one_test(self, tc, method, sig_args_keys=None,
sig_kwargs_keys=None, **kwargs):
method_args = []
for k in sig_args_keys or []:
if k not in tc:
# If a,b not present use x0, x1. Similarly for f and func
k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
method_args.append(tc[k])
method_kwargs = dict(**kwargs)
method_kwargs.update({'full_output': True, 'disp': False})
for k in sig_kwargs_keys or []:
method_kwargs[k] = tc[k]
root = tc.get('root')
func_args = tc.get('args', ())
try:
r, rr = method(*method_args, args=func_args, **method_kwargs)
return root, rr, tc
except Exception:
return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR), tc
def run_tests(self, tests, method, name,
xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
known_fail=None, **kwargs):
r"""Run test-cases using the specified method and the supplied signature.
Extract the arguments for the method call from the test case
dictionary using the supplied keys for the method's signature."""
# The methods have one of two base signatures:
# (f, a, b, **kwargs) # newton
# (func, x0, **kwargs) # bisect/brentq/...
sig = _getargspec(method) # ArgSpec with args, varargs, varkw, defaults
nDefaults = len(sig[3])
nRequired = len(sig[0]) - nDefaults
sig_args_keys = sig[0][:nRequired]
sig_kwargs_keys = []
if name in ['secant', 'newton', 'halley']:
if name in ['newton', 'halley']:
sig_kwargs_keys.append('fprime')
if name in ['halley']:
sig_kwargs_keys.append('fprime2')
kwargs['tol'] = xtol
else:
kwargs['xtol'] = xtol
kwargs['rtol'] = rtol
results = [list(self._run_one_test(
tc, method, sig_args_keys=sig_args_keys,
sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
# results= [[true root, full output, tc], ...]
known_fail = known_fail or []
notcvgd = [elt for elt in results if not elt[1].converged]
notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])
# The usable xtol and rtol depend on the test
tols = {'xtol': 4 * _FLOAT_EPS, 'rtol': 4 * _FLOAT_EPS}
tols.update(**kwargs)
rtol = tols['rtol']
atol = tols.get('tol', tols['xtol'])
cvgd = [elt for elt in results if elt[1].converged]
approx = [elt[1].root for elt in cvgd]
correct = [elt[0] for elt in cvgd]
notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
not isclose(a, c, rtol=rtol, atol=atol)
and elt[-1]['ID'] not in known_fail]
# Evaluate the function and see if is 0 at the purported root
fvs = [tc['f'](aroot, *(tc['args'])) for aroot, c, fullout, tc in notclose]
notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
assert_equal([notclose, len(notclose)], [[], 0])
def run_collection(self, collection, method, name, smoothness=None,
known_fail=None,
xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
**kwargs):
r"""Run a collection of tests using the specified method.
The name is used to determine some optional arguments."""
tests = get_tests(collection, smoothness=smoothness)
self.run_tests(tests, method, name, xtol=xtol, rtol=rtol,
known_fail=known_fail, **kwargs)
def test_bisect(self):
self.run_check(zeros.bisect, 'bisect')
self.run_check_lru_cached(zeros.bisect, 'bisect')
self.run_check_by_name('bisect')
self.run_collection('aps', zeros.bisect, 'bisect', smoothness=1)
def test_ridder(self):
self.run_check(zeros.ridder, 'ridder')
self.run_check_lru_cached(zeros.ridder, 'ridder')
self.run_check_by_name('ridder')
self.run_collection('aps', zeros.ridder, 'ridder', smoothness=1)
def test_brentq(self):
self.run_check(zeros.brentq, 'brentq')
self.run_check_lru_cached(zeros.brentq, 'brentq')
self.run_check_by_name('brentq')
# Brentq/h needs a lower tolerance to be specified
self.run_collection('aps', zeros.brentq, 'brentq', smoothness=1,
xtol=1e-14, rtol=1e-14)
def test_brenth(self):
self.run_check(zeros.brenth, 'brenth')
self.run_check_lru_cached(zeros.brenth, 'brenth')
self.run_check_by_name('brenth')
self.run_collection('aps', zeros.brenth, 'brenth', smoothness=1,
xtol=1e-14, rtol=1e-14)
def test_toms748(self):
self.run_check(zeros.toms748, 'toms748')
self.run_check_lru_cached(zeros.toms748, 'toms748')
self.run_check_by_name('toms748')
self.run_collection('aps', zeros.toms748, 'toms748', smoothness=1)
def test_newton_collections(self):
known_fail = ['aps.13.00']
known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27
for collection in ['aps', 'complex']:
self.run_collection(collection, zeros.newton, 'newton',
smoothness=2, known_fail=known_fail)
def test_halley_collections(self):
known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09',
'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13',
'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17',
'aps.12.18', 'aps.13.00']
for collection in ['aps', 'complex']:
self.run_collection(collection, zeros.newton, 'halley',
smoothness=2, known_fail=known_fail)
@staticmethod
def f1(x):
return x**2 - 2*x - 1 # == (x-1)**2 - 2
@staticmethod
def f1_1(x):
return 2*x - 2
@staticmethod
def f1_2(x):
return 2.0 + 0*x
@staticmethod
def f2(x):
return exp(x) - cos(x)
@staticmethod
def f2_1(x):
return exp(x) + sin(x)
@staticmethod
def f2_2(x):
return exp(x) + cos(x)
def test_newton(self):
for f, f_1, f_2 in [(self.f1, self.f1_1, self.f1_2),
(self.f2, self.f2_1, self.f2_2)]:
x = zeros.newton(f, 3, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley
assert_allclose(f(x), 0, atol=1e-6)
def test_newton_by_name(self):
r"""Invoke newton through root_scalar()"""
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
def test_secant_by_name(self):
r"""Invoke secant through root_scalar()"""
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
def test_halley_by_name(self):
r"""Invoke halley through root_scalar()"""
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
r = root_scalar(f, method='halley', x0=3,
fprime=f_1, fprime2=f_2, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
def test_root_scalar_fail(self):
with pytest.raises(ValueError):
root_scalar(f1, method='secant', x0=3, xtol=1e-6) # no x1
with pytest.raises(ValueError):
root_scalar(f1, method='newton', x0=3, xtol=1e-6) # no fprime
with pytest.raises(ValueError):
root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2
with pytest.raises(ValueError):
root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime
def test_array_newton(self):
"""test newton with array"""
def f1(x, *a):
b = a[0] + x * a[3]
return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x
def f1_1(x, *a):
b = a[3] / a[5]
return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1
def f1_2(x, *a):
b = a[3] / a[5]
return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2
a0 = np.array([
5.32725221, 5.48673747, 5.49539973,
5.36387202, 4.80237316, 1.43764452,
5.23063958, 5.46094772, 5.50512718,
5.42046290
])
a1 = (np.sin(range(10)) + 1.0) * 7.0
args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
x0 = [7.0] * 10
x = zeros.newton(f1, x0, f1_1, args)
x_expected = (
6.17264965, 11.7702805, 12.2219954,
7.11017681, 1.18151293, 0.143707955,
4.31928228, 10.5419107, 12.7552490,
8.91225749
)
assert_allclose(x, x_expected)
# test halley's
x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2)
assert_allclose(x, x_expected)
# test secant
x = zeros.newton(f1, x0, args=args)
assert_allclose(x, x_expected)
def test_array_secant_active_zero_der(self):
"""test secant doesn't continue to iterate zero derivatives"""
x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5],
args=[np.array([17, 25])])
assert_allclose(x, (4.123105625617661, 5.0))
def test_array_newton_integers(self):
# test secant with float
x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2,
args=([15.0, 17.0],))
assert_allclose(x, (3.872983346207417, 4.123105625617661))
# test integer becomes float
x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],))
assert_allclose(x, (3.872983346207417, 4.123105625617661))
def test_array_newton_zero_der_failures(self):
# test derivative zero warning
assert_warns(RuntimeWarning, zeros.newton,
lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y)
# test failures and zero_der
with pytest.warns(RuntimeWarning):
results = zeros.newton(lambda y: y**2 - 2, [0., 0.],
lambda y: 2*y, full_output=True)
assert_allclose(results.root, 0)
assert results.zero_der.all()
assert not results.converged.any()
def test_newton_combined(self):
f1 = lambda x: x**2 - 2*x - 1
f1_1 = lambda x: 2*x - 2
f1_2 = lambda x: 2.0 + 0*x
def f1_and_p_and_pp(x):
return x**2 - 2*x-1, 2*x-2, 2.0
sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
assert_allclose(sol0.root, sol.root, atol=1e-8)
assert_equal(2*sol.function_calls, sol0.function_calls)
sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
assert_allclose(sol0.root, sol.root, atol=1e-8)
assert_equal(3*sol.function_calls, sol0.function_calls)
def test_newton_full_output(self):
# Test the full_output capability, both when converging and not.
# Use simple polynomials, to avoid hitting platform dependencies
# (e.g., exp & trig) in number of iterations
x0 = 3
expected_counts = [(6, 7), (5, 10), (3, 9)]
for derivs in range(3):
kwargs = {'tol': 1e-6, 'full_output': True, }
for k, v in [['fprime', self.f1_1], ['fprime2', self.f1_2]][:derivs]:
kwargs[k] = v
x, r = zeros.newton(self.f1, x0, disp=False, **kwargs)
assert_(r.converged)
assert_equal(x, r.root)
assert_equal((r.iterations, r.function_calls), expected_counts[derivs])
if derivs == 0:
assert(r.function_calls <= r.iterations + 1)
else:
assert_equal(r.function_calls, (derivs + 1) * r.iterations)
# Now repeat, allowing one fewer iteration to force convergence failure
iters = r.iterations - 1
x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=False, **kwargs)
assert_(not r.converged)
assert_equal(x, r.root)
assert_equal(r.iterations, iters)
if derivs == 1:
# Check that the correct Exception is raised and
# validate the start of the message.
with pytest.raises(
RuntimeError,
match='Failed to converge after %d iterations, value is .*' % (iters)):
x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=True, **kwargs)
def test_deriv_zero_warning(self):
func = lambda x: x**2 - 2.0
dfunc = lambda x: 2*x
assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False)
with pytest.raises(RuntimeError, match='Derivative was zero'):
result = zeros.newton(func, 0.0, dfunc)
def test_newton_does_not_modify_x0(self):
# https://github.com/scipy/scipy/issues/9964
x0 = np.array([0.1, 3])
x0_copy = x0.copy() # Copy to test for equality.
newton(np.sin, x0, np.cos)
assert_array_equal(x0, x0_copy)
def test_maxiter_int_check(self):
for method in [zeros.bisect, zeros.newton, zeros.ridder, zeros.brentq,
zeros.brenth, zeros.toms748]:
with pytest.raises(TypeError,
match="'float' object cannot be interpreted as an integer"):
method(f1, 0.0, 1.0, maxiter=72.45)
def test_gh_5555():
root = 0.1
def f(x):
return x - root
methods = [zeros.bisect, zeros.ridder]
xtol = rtol = TOL
for method in methods:
res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
assert_allclose(root, res, atol=xtol, rtol=rtol,
err_msg='method %s' % method.__name__)
def test_gh_5557():
# Show that without the changes in 5557 brentq and brenth might
# only achieve a tolerance of 2*(xtol + rtol*|res|).
# f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
# 0.4). The important parts are that |f(0)| < |f(1)| (so that
# brent takes 0 as the initial guess), |f(0)| < atol (so that
# brent accepts 0 as the root), and that the exact root of f lies
# more than atol away from 0 (so that brent doesn't achieve the
# desired tolerance).
def f(x):
if x < 0.5:
return -0.1
else:
return x - 0.6
atol = 0.51
rtol = 4 * _FLOAT_EPS
methods = [zeros.brentq, zeros.brenth]
for method in methods:
res = method(f, 0, 1, xtol=atol, rtol=rtol)
assert_allclose(0.6, res, atol=atol, rtol=rtol)
class TestRootResults:
def test_repr(self):
r = zeros.RootResults(root=1.0,
iterations=44,
function_calls=46,
flag=0)
expected_repr = (" converged: True\n flag: 'converged'"
"\n function_calls: 46\n iterations: 44\n"
" root: 1.0")
assert_equal(repr(r), expected_repr)
def test_complex_halley():
"""Test Halley's works with complex roots"""
def f(x, *a):
return a[0] * x**2 + a[1] * x + a[2]
def f_1(x, *a):
return 2 * a[0] * x + a[1]
def f_2(x, *a):
retval = 2 * a[0]
try:
size = len(x)
except TypeError:
return retval
else:
return [retval] * size
z = complex(1.0, 2.0)
coeffs = (2.0, 3.0, 4.0)
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
# (-0.75000000000000078+1.1989578808281789j)
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
z = [z] * 10
coeffs = (2.0, 3.0, 4.0)
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
def test_zero_der_nz_dp():
"""Test secant method with a non-zero dp, but an infinite newton step"""
# Pick a symmetric function and choose a starting point such that, with the
# offset dx used by the secant start-up, the first secant is a flat line with
# zero slope. E.g. f = (x - 100)**2, which has a root at x = 100 and is
# symmetric about the line x = 100; the root has to be a fairly large number
# for this to hold consistently in floating point.
# Now find a point on each side of the root so that the secant has zero slope
# (a standalone numerical check of this derivation follows this test).
dx = np.finfo(float).eps ** 0.33
# 100 - p0 = p1 - 100, with p1 = p0 * (1 + dx) + dx
# -> 200 = p0 * (2 + dx) + dx
p0 = (200.0 - dx) / (2.0 + dx)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "RMS of")
x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
assert_allclose(x, [100] * 10)
# test scalar cases too
p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Tolerance of")
x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False)
assert_allclose(x, 1)
with pytest.raises(RuntimeError, match='Tolerance of'):
x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True)
p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Tolerance of")
x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False)
assert_allclose(x, -1)
with pytest.raises(RuntimeError, match='Tolerance of'):
x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True)
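# Numerical check (illustrative only) of the symmetric-point derivation used
# in test_zero_der_nz_dp above: with p1 = p0 * (1 + dx) + dx and
# p0 = (200 - dx) / (2 + dx), the two starting points straddle the root
# x = 100 symmetrically, so the first secant through them has zero slope.
def _zero_slope_secant_sketch():
    dx = np.finfo(float).eps ** 0.33
    p0 = (200.0 - dx) / (2.0 + dx)
    p1 = p0 * (1.0 + dx) + dx
    f = lambda y: (y - 100.0) ** 2
    assert np.isclose(100.0 - p0, p1 - 100.0)  # symmetric about the root
    assert np.isclose(f(p0), f(p1))            # equal values -> flat secant
    return p0, p1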
def test_array_newton_failures():
"""Test that array newton fails as expected"""
# p = 0.68 # [MPa]
# dp = -0.068 * 1e6 # [Pa]
# T = 323 # [K]
diameter = 0.10 # [m]
# L = 100 # [m]
roughness = 0.00015 # [m]
rho = 988.1 # [kg/m**3]
mu = 5.4790e-04 # [Pa*s]
u = 2.488 # [m/s]
reynolds_number = rho * u * diameter / mu # Reynolds number
def colebrook_eqn(darcy_friction, re, dia):
return (1 / np.sqrt(darcy_friction) +
2 * np.log10(roughness / 3.7 / dia +
2.51 / re / np.sqrt(darcy_friction)))
# only some failures
with pytest.warns(RuntimeWarning):
result = zeros.newton(
colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2,
args=[reynolds_number, diameter], full_output=True
)
assert not result.converged.all()
# they all fail
with pytest.raises(RuntimeError):
result = zeros.newton(
colebrook_eqn, x0=[0.01] * 2, maxiter=2,
args=[reynolds_number, diameter], full_output=True
)
# this test should **not** raise a RuntimeWarning
def test_gh8904_zeroder_at_root_fails():
"""Test that Newton or Halley don't warn if zero derivative at root"""
# a function that has a zero derivative at its root
def f_zeroder_root(x):
return x**3 - x**2
# should work with secant
r = zeros.newton(f_zeroder_root, x0=0)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# test again with array
r = zeros.newton(f_zeroder_root, x0=[0]*10)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# 1st derivative
def fder(x):
return 3 * x**2 - 2 * x
# 2nd derivative
def fder2(x):
return 6*x - 2
# should work with newton and halley
r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
fprime2=fder2)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# test again with array
r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
fprime2=fder2)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# also test that if a root is found we do not raise a RuntimeWarning even if
# the derivative there is zero. E.g. at x = 0.5, fval = -0.125 and
# fder = -0.25, so the next guess is 0.5 - (-0.125 / -0.25) = 0, which is the
# root; if the solver continued from that guess it would compute a zero
# derivative, so it should instead return the root without a RuntimeWarning.
r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# test again with array
r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# doesn't apply to halley
def test_gh_8881():
r"""Test that Halley's method realizes that the 2nd order adjustment
is too big and drops off to the 1st order adjustment."""
n = 9
def f(x):
return power(x, 1.0/n) - power(n, 1.0/n)
def fp(x):
return power(x, (1.0-n)/n)/n
def fpp(x):
return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n
x0 = 0.1
# The root is at x=9.
# The function has positive slope, x0 < root.
# Newton succeeds in 8 iterations
rt, r = newton(f, x0, fprime=fp, full_output=True)
assert(r.converged)
# Before the fix for Issue 8881 / PR 8882, halley would send x in the wrong direction.
# Check that it now succeeds.
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
assert(r.converged)
def test_gh_9608_preserve_array_shape():
"""
Test that shape is preserved for array inputs even if fprime or fprime2 is
scalar
"""
def f(x):
return x**2
def fp(x):
return 2 * x
def fpp(x):
return 2
x0 = np.array([-2], dtype=np.float32)
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
assert(r.converged)
x0_array = np.array([-2, -3], dtype=np.float32)
# This next invocation should fail
with pytest.raises(IndexError):
result = zeros.newton(
f, x0_array, fprime=fp, fprime2=fpp, full_output=True
)
def fpp_array(x):
return np.full(np.shape(x), 2, dtype=np.float32)
result = zeros.newton(
f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True
)
assert result.converged.all()
@pytest.mark.parametrize(
"maximum_iterations,flag_expected",
[(10, zeros.CONVERR), (100, zeros.CONVERGED)])
def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected):
"""
Test that if the maximum iterations is exceeded that the flag is not
converged.
"""
result = zeros.brentq(
lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5,
-30, 30, (), 1e-6, 1e-6, maximum_iterations,
full_output=True, disp=False)
assert result[1].flag == flag_expected
if flag_expected == zeros.CONVERR:
# didn't converge because exceeded maximum iterations
assert result[1].iterations == maximum_iterations
elif flag_expected == zeros.CONVERGED:
# converged before maximum iterations
assert result[1].iterations < maximum_iterations
def test_gh9551_raise_error_if_disp_true():
"""Test that if disp is true then zero derivative raises RuntimeError"""
def f(x):
return x*x + 1
def f_p(x):
return 2*x
assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False)
with pytest.raises(
RuntimeError,
match=r'^Derivative was zero\. Failed to converge after \d+ iterations, value is [+-]?\d*\.\d+\.$'):
result = zeros.newton(f, 1.0, f_p)
root = zeros.newton(f, complex(10.0, 10.0), f_p)
assert_allclose(root, complex(0.0, 1.0))
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import os
import re
import sys
import urllib
from ConfigParser import ConfigParser
import pkg_resources
from paste.deploy.util.fixtypeerror import fix_call
__all__ = ['loadapp', 'loadserver', 'loadfilter', 'appconfig']
############################################################
## Utility functions
############################################################
def import_string(s):
return pkg_resources.EntryPoint.parse("x="+s).load(False)
def _aslist(obj):
"""
Turn object into a list; lists and tuples are left as-is, None
becomes [], and everything else turns into a one-element list.
"""
if obj is None:
return []
elif isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
def _flatten(lst):
"""
Flatten a nested list.
"""
if not isinstance(lst, (list, tuple)):
return [lst]
result = []
for item in lst:
result.extend(_flatten(item))
return result
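# Illustrative sketch: how the two normalization helpers above behave.
def _normalize_helpers_sketch():
    assert _aslist(None) == []
    assert _aslist('app') == ['app']
    assert _aslist(['app', 'filter']) == ['app', 'filter']
    assert _flatten([['a', 'b'], 'c', [['d']]]) == ['a', 'b', 'c', 'd']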
class NicerConfigParser(ConfigParser):
def __init__(self, filename, *args, **kw):
ConfigParser.__init__(self, *args, **kw)
self.filename = filename
def _interpolate(self, section, option, rawval, vars):
try:
return ConfigParser._interpolate(
self, section, option, rawval, vars)
except Exception, e:
args = list(e.args)
args[0] = 'Error in file %s, [%s] %s=%r: %s' % (
self.filename, section, option, rawval, e)
e.args = tuple(args)
raise
############################################################
## Object types
############################################################
class _ObjectType(object):
name = None
egg_protocols = None
config_prefixes = None
def __init__(self):
# Normalize these variables:
self.egg_protocols = map(_aslist, _aslist(self.egg_protocols))
self.config_prefixes = map(_aslist, _aslist(self.config_prefixes))
def __repr__(self):
return '<%s protocols=%r prefixes=%r>' % (
self.name, self.egg_protocols, self.config_prefixes)
def invoke(self, context):
assert context.protocol in _flatten(self.egg_protocols)
return fix_call(context.object,
context.global_conf, **context.local_conf)
class _App(_ObjectType):
name = 'application'
egg_protocols = ['paste.app_factory', 'paste.composite_factory',
'paste.composit_factory']
config_prefixes = [['app', 'application'], ['composite', 'composit'],
'pipeline', 'filter-app']
def invoke(self, context):
if context.protocol in ('paste.composit_factory',
'paste.composite_factory'):
return fix_call(context.object,
context.loader, context.global_conf,
**context.local_conf)
elif context.protocol == 'paste.app_factory':
return fix_call(context.object, context.global_conf, **context.local_conf)
else:
assert 0, "Protocol %r unknown" % context.protocol
APP = _App()
class _Filter(_ObjectType):
name = 'filter'
egg_protocols = [['paste.filter_factory', 'paste.filter_app_factory']]
config_prefixes = ['filter']
def invoke(self, context):
if context.protocol == 'paste.filter_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.filter_app_factory':
def filter_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return filter_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
FILTER = _Filter()
class _Server(_ObjectType):
name = 'server'
egg_protocols = [['paste.server_factory', 'paste.server_runner']]
config_prefixes = ['server']
def invoke(self, context):
if context.protocol == 'paste.server_factory':
return fix_call(context.object,
context.global_conf, **context.local_conf)
elif context.protocol == 'paste.server_runner':
def server_wrapper(wsgi_app):
# This should be an object, so it has a nicer __repr__
return fix_call(context.object,
wsgi_app, context.global_conf,
**context.local_conf)
return server_wrapper
else:
assert 0, "Protocol %r unknown" % context.protocol
SERVER = _Server()
# Virtual type: (@@: There's clearly something crufty here;
# this probably could be more elegant)
class _PipeLine(_ObjectType):
name = 'pipeline'
def invoke(self, context):
app = context.app_context.create()
filters = [c.create() for c in context.filter_contexts]
filters.reverse()
for filter in filters:
app = filter(app)
return app
PIPELINE = _PipeLine()
class _FilterApp(_ObjectType):
name = 'filter_app'
def invoke(self, context):
next_app = context.next_context.create()
filter = context.filter_context.create()
return filter(next_app)
FILTER_APP = _FilterApp()
class _FilterWith(_App):
name = 'filtered_with'
def invoke(self, context):
filter = context.filter_context.create()
filtered = context.next_context.create()
if context.next_context.object_type is APP:
return filter(filtered)
else:
# filtering a filter
def composed(app):
return filter(filtered(app))
return composed
FILTER_WITH = _FilterWith()
############################################################
## Loaders
############################################################
def loadapp(uri, name=None, **kw):
return loadobj(APP, uri, name=name, **kw)
def loadfilter(uri, name=None, **kw):
return loadobj(FILTER, uri, name=name, **kw)
def loadserver(uri, name=None, **kw):
return loadobj(SERVER, uri, name=name, **kw)
def appconfig(uri, name=None, relative_to=None, global_conf=None):
context = loadcontext(APP, uri, name=name,
relative_to=relative_to,
global_conf=global_conf)
return context.config()
_loaders = {}
def loadobj(object_type, uri, name=None, relative_to=None,
global_conf=None):
context = loadcontext(
object_type, uri, name=name, relative_to=relative_to,
global_conf=global_conf)
return context.create()
def loadcontext(object_type, uri, name=None, relative_to=None,
global_conf=None):
if '#' in uri:
if name is None:
uri, name = uri.split('#', 1)
else:
# @@: Ignore fragment or error?
uri = uri.split('#', 1)[0]
if name is None:
name = 'main'
if ':' not in uri:
raise LookupError("URI has no scheme: %r" % uri)
scheme, path = uri.split(':', 1)
scheme = scheme.lower()
if scheme not in _loaders:
raise LookupError(
"URI scheme not known: %r (from %s)"
% (scheme, ', '.join(_loaders.keys())))
return _loaders[scheme](
object_type,
uri, path, name=name, relative_to=relative_to,
global_conf=global_conf)
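# Illustrative sketch (hypothetical URIs): the forms loadcontext() accepts are
# 'scheme:path_or_spec' with an optional '#name' fragment selecting a section
# or entry point; without a fragment the name defaults to 'main'. The split
# below mirrors what loadcontext() does before dispatching on the scheme.
def _uri_forms_sketch():
    for uri in ('config:/etc/myapp/deploy.ini',
                'config:deploy.ini#admin',
                'egg:MyWSGIApp#main'):
        if '#' in uri:
            uri, name = uri.split('#', 1)
        else:
            name = 'main'
        scheme, path = uri.split(':', 1)
        assert scheme.lower() in _loaders  # 'config' and 'egg' are registered below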
def _loadconfig(object_type, uri, path, name, relative_to,
global_conf):
# De-Windowsify the paths:
path = path.replace('\\', '/')
absolute_path = True
if sys.platform == 'win32':
_absolute_re = re.compile(r'^[a-zA-Z]:')
if not _absolute_re.search(path):
absolute_path = False
else:
if not path.startswith('/'):
absolute_path = False
if not absolute_path:
if not relative_to:
raise ValueError(
"Cannot resolve relative uri %r; no context keyword "
"argument given" % uri)
relative_to = relative_to.replace('\\', '/')
if relative_to.endswith('/'):
path = relative_to + path
else:
path = relative_to + '/' + path
if path.startswith('///'):
path = path[2:]
path = urllib.unquote(path)
loader = ConfigLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
_loaders['config'] = _loadconfig
def _loadegg(object_type, uri, spec, name, relative_to,
global_conf):
loader = EggLoader(spec)
return loader.get_context(object_type, name, global_conf)
_loaders['egg'] = _loadegg
############################################################
## Loaders
############################################################
class _Loader(object):
def get_app(self, name=None, global_conf=None):
return self.app_context(
name=name, global_conf=global_conf).create()
def get_filter(self, name=None, global_conf=None):
return self.filter_context(
name=name, global_conf=global_conf).create()
def get_server(self, name=None, global_conf=None):
return self.server_context(
name=name, global_conf=global_conf).create()
def app_context(self, name=None, global_conf=None):
return self.get_context(
APP, name=name, global_conf=global_conf)
def filter_context(self, name=None, global_conf=None):
return self.get_context(
FILTER, name=name, global_conf=global_conf)
def server_context(self, name=None, global_conf=None):
return self.get_context(
SERVER, name=name, global_conf=global_conf)
_absolute_re = re.compile(r'^[a-zA-Z]+:')
def absolute_name(self, name):
"""
Returns true if the name includes a scheme
"""
if name is None:
return False
return self._absolute_re.search(name)
class ConfigLoader(_Loader):
def __init__(self, filename):
self.filename = filename = filename.strip()
self.parser = NicerConfigParser(self.filename)
# Don't lower-case keys:
self.parser.optionxform = str
# Stupid ConfigParser ignores files that aren't found, so
# we have to add an extra check:
if not os.path.exists(filename):
raise OSError(
"File %r not found" % filename)
self.parser.read(filename)
self.parser._defaults.setdefault(
'here', os.path.dirname(os.path.abspath(filename)))
self.parser._defaults.setdefault(
'__file__', os.path.abspath(filename))
def update_defaults(self, new_defaults, overwrite=True):
for key, value in new_defaults.items():
if not overwrite and key in self.parser._defaults:
continue
self.parser._defaults[key] = value
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
relative_to=os.path.dirname(self.filename),
global_conf=global_conf)
section = self.find_config_section(
object_type, name=name)
if global_conf is None:
global_conf = {}
else:
global_conf = global_conf.copy()
defaults = self.parser.defaults()
global_conf.update(defaults)
local_conf = {}
global_additions = {}
get_from_globals = {}
for option in self.parser.options(section):
if option.startswith('set '):
name = option[4:].strip()
global_additions[name] = global_conf[name] = (
self.parser.get(section, option))
elif option.startswith('get '):
name = option[4:].strip()
get_from_globals[name] = self.parser.get(section, option)
else:
if option in defaults:
# @@: It's a global option (?), so skip it
continue
local_conf[option] = self.parser.get(section, option)
for local_var, glob_var in get_from_globals.items():
local_conf[local_var] = global_conf[glob_var]
if object_type in (APP, FILTER) and 'filter-with' in local_conf:
filter_with = local_conf.pop('filter-with')
else:
filter_with = None
if 'require' in local_conf:
for spec in local_conf['require'].split():
pkg_resources.require(spec)
del local_conf['require']
if section.startswith('filter-app:'):
context = self._filter_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif section.startswith('pipeline:'):
context = self._pipeline_app_context(
object_type, section, name=name,
global_conf=global_conf, local_conf=local_conf,
global_additions=global_additions)
elif 'use' in local_conf:
context = self._context_from_use(
object_type, local_conf, global_conf, global_additions,
section)
else:
context = self._context_from_explicit(
object_type, local_conf, global_conf, global_additions,
section)
if filter_with is not None:
filter_with_context = LoaderContext(
obj=None,
object_type=FILTER_WITH,
protocol=None,
global_conf=global_conf, local_conf=local_conf,
loader=self)
filter_with_context.filter_context = self.filter_context(
name=filter_with, global_conf=global_conf)
filter_with_context.next_context = context
return filter_with_context
return context
def _context_from_use(self, object_type, local_conf, global_conf,
global_additions, section):
use = local_conf.pop('use')
context = self.get_context(
object_type, name=use, global_conf=global_conf)
context.global_conf.update(global_additions)
context.local_conf.update(local_conf)
# @@: Should loader be overwritten?
context.loader = self
return context
def _context_from_explicit(self, object_type, local_conf, global_conf,
global_addition, section):
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
if protocol in local_conf:
possible.append((protocol, local_conf[protocol]))
break
if len(possible) > 1:
raise LookupError(
"Multiple protocols given in section %r: %s"
% (section, possible))
if not possible:
raise LookupError(
"No loader given in section %r" % section)
found_protocol, found_expr = possible[0]
del local_conf[found_protocol]
value = import_string(found_expr)
context = LoaderContext(
value, object_type, found_protocol,
global_conf, local_conf, self)
return context
def _filter_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'next' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'next' setting"
% (section, self.filename))
next_name = local_conf.pop('next')
context = LoaderContext(None, FILTER_APP, None, global_conf,
local_conf, self)
context.next_context = self.get_context(
APP, next_name, global_conf)
if 'use' in local_conf:
context.filter_context = self._context_from_use(
FILTER, local_conf, global_conf, global_additions,
section)
else:
context.filter_context = self._context_from_explicit(
FILTER, local_conf, global_conf, global_additions,
section)
return context
def _pipeline_app_context(self, object_type, section, name,
global_conf, local_conf, global_additions):
if 'pipeline' not in local_conf:
raise LookupError(
"The [%s] section in %s is missing a 'pipeline' setting"
% (section, self.filename))
pipeline = local_conf.pop('pipeline').split()
if local_conf:
raise LookupError(
"The [%s] pipeline section in %s has extra "
"(disallowed) settings: %s"
% (section, self.filename, ', '.join(local_conf.keys())))
context = LoaderContext(None, PIPELINE, None, global_conf,
local_conf, self)
context.app_context = self.get_context(
APP, pipeline[-1], global_conf)
context.filter_contexts = [
self.get_context(FILTER, name, global_conf)
for name in pipeline[:-1]]
return context
def find_config_section(self, object_type, name=None):
"""
Return the section name with the given name prefix (following the
same pattern as ``protocol_desc`` in ``config``). It must have the
given name, or for ``'main'`` an empty name is allowed. The
prefix must be followed by a ``:``.
Case is *not* ignored.
"""
possible = []
for name_options in object_type.config_prefixes:
for name_prefix in name_options:
found = self._find_sections(
self.parser.sections(), name_prefix, name)
if found:
possible.extend(found)
break
if not possible:
raise LookupError(
"No section %r (prefixed by %s) found in config %s"
% (name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
if len(possible) > 1:
raise LookupError(
"Ambiguous section names %r for section %r (prefixed by %s) "
"found in config %s"
% (possible, name,
' or '.join(map(repr, _flatten(object_type.config_prefixes))),
self.filename))
return possible[0]
def _find_sections(self, sections, name_prefix, name):
found = []
if name is None:
if name_prefix in sections:
found.append(name_prefix)
name = 'main'
for section in sections:
if section.startswith(name_prefix+':'):
if section[len(name_prefix)+1:].strip() == name:
found.append(section)
return found
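# Illustrative sketch (not part of Paste Deploy): how _find_sections, used by
# find_config_section above, matches '[prefix:name]' section headers.
# ConfigLoader.__new__ is used only so the helper can be called without
# parsing a real config file.
def _section_matching_sketch():
    sections = ['app:main', 'composite:foo', 'server:main', 'filter:gzip']
    loader = ConfigLoader.__new__(ConfigLoader)
    assert loader._find_sections(sections, 'app', None) == ['app:main']
    assert loader._find_sections(sections, 'composite', 'foo') == ['composite:foo']
    assert loader._find_sections(sections, 'filter', 'gzip') == ['filter:gzip']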
class EggLoader(_Loader):
def __init__(self, spec):
self.spec = spec
def get_context(self, object_type, name=None, global_conf=None):
if self.absolute_name(name):
return loadcontext(object_type, name,
global_conf=global_conf)
entry_point, protocol, ep_name = self.find_egg_entry_point(
object_type, name=name)
return LoaderContext(
entry_point,
object_type,
protocol,
global_conf or {}, {},
self,
distribution=pkg_resources.get_distribution(self.spec),
entry_point_name=ep_name)
def find_egg_entry_point(self, object_type, name=None):
"""
Returns the (entry_point, protocol, entry_point_name) triple for the
entry point with the given ``name``.
"""
if name is None:
name = 'main'
possible = []
for protocol_options in object_type.egg_protocols:
for protocol in protocol_options:
pkg_resources.require(self.spec)
entry = pkg_resources.get_entry_info(
self.spec,
protocol,
name)
if entry is not None:
possible.append((entry.load(), protocol, entry.name))
break
if not possible:
# Better exception
dist = pkg_resources.get_distribution(self.spec)
raise LookupError(
"Entry point %r not found in egg %r (dir: %s; protocols: %s; "
"entry_points: %s)"
% (name, self.spec,
dist.location,
', '.join(_flatten(object_type.egg_protocols)),
', '.join(_flatten([
(pkg_resources.get_entry_info(self.spec, prot, name) or {}).keys()
for prot in protocol_options] or '(no entry points)'))))
if len(possible) > 1:
raise LookupError(
"Ambiguous entry points for %r in egg %r (protocols: %s)"
% (name, self.spec, ', '.join(_flatten(protocol_options))))
return possible[0]
class LoaderContext(object):
def __init__(self, obj, object_type, protocol,
global_conf, local_conf, loader,
distribution=None, entry_point_name=None):
self.object = obj
self.object_type = object_type
self.protocol = protocol
#assert protocol in _flatten(object_type.egg_protocols), (
# "Bad protocol %r; should be one of %s"
# % (protocol, ', '.join(map(repr, _flatten(object_type.egg_protocols)))))
self.global_conf = global_conf
self.local_conf = local_conf
self.loader = loader
self.distribution = distribution
self.entry_point_name = entry_point_name
def create(self):
return self.object_type.invoke(self)
def config(self):
conf = AttrDict(self.global_conf)
conf.update(self.local_conf)
conf.local_conf = self.local_conf
conf.global_conf = self.global_conf
conf.context = self
return conf
class AttrDict(dict):
"""
A dictionary that can be assigned to.
"""
pass
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Loss operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.losses import util
from tensorflow.python.platform import tf_logging as logging
def _scale_losses(losses, weights):
"""Computes the scaled loss.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: `Tensor` of shape `[]`, `[batch_size]` or
`[batch_size, d1, ... dN]`. The `losses` are reduced (`tf.reduce_sum`)
until its dimension matches that of `weights` at which point the reduced
`losses` are element-wise multiplied by `weights` and a final `reduce_sum`
is computed on the result. Conceptually, this operation is similar to
broadcasting (tiling) `weights` to be the same shape as `losses`,
performing an element-wise multiplication, and summing the result. Note,
however, that the dimension matching is left-to-right, not right-to-left;
i.e., the opposite of standard NumPy/Tensorflow broadcasting.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
start_index = max(0, weights.get_shape().ndims)
reduction_indices = list(range(start_index, losses.get_shape().ndims))
reduced_losses = math_ops.reduce_sum(losses,
reduction_indices=reduction_indices)
reduced_losses = math_ops.multiply(reduced_losses, weights)
return math_ops.reduce_sum(reduced_losses)
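# Illustrative NumPy sketch (not used by TensorFlow): the same
# reduce-then-multiply logic as _scale_losses, spelled out with concrete
# shapes. `losses` has shape [2, 3, 4] and `weights` has shape [2, 3]; the
# trailing axis of `losses` is summed away first, then the [2, 3] result is
# weighted and summed to a scalar.
def _scale_losses_numpy_sketch():
    import numpy as np
    losses = np.ones((2, 3, 4))
    weights = np.array([[1., 0., 2.], [0., 1., 0.]])
    reduced = losses.sum(axis=tuple(range(weights.ndim, losses.ndim)))  # shape [2, 3]
    return (reduced * weights).sum()  # == 4 * weights.sum() == 16.0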
def _safe_div(numerator, denominator, name="value"):
"""Computes a safe divide which returns 0 if the denominator is zero.
Note that the function contains an additional conditional check that is
necessary for avoiding situations where the loss is zero causing NaNs to
creep into the gradient computation.
Args:
numerator: An arbitrary `Tensor`.
denominator: `Tensor` whose shape matches `numerator` and whose values are
assumed to be non-negative.
name: An optional name for the returned op.
Returns:
The element-wise value of the numerator divided by the denominator.
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.div(numerator, array_ops.where(
math_ops.equal(denominator, 0),
array_ops.ones_like(denominator), denominator)),
array_ops.zeros_like(numerator),
name=name)
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return _safe_div(total_loss, num_present)
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
`[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
in effect, tiled to match the shape of `losses`. Following this effective
tile, the total number of present elements is the number of non-zero weights.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: `Tensor` of shape `[]`, `[batch_size]` or
`[batch_size, d1, ... dK]`, where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is `True`, the value is returned as a tensor of size
`[batch_size]`. Otherwise, a single scalar tensor is returned.
"""
# If weights is a scalar, it's easy to compute:
if weights.get_shape().ndims == 0:
if losses.get_shape().ndims == 0:
batch_size = 1
else:
batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
[0], [1]), [])
num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
math_ops.to_float(batch_size))
num_per_batch = array_ops.where(math_ops.equal(weights, 0),
0.0, num_per_batch)
num_per_batch = math_ops.multiply(array_ops.ones(
array_ops.reshape(batch_size, [1])), num_per_batch)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
# First, count the number of nonzero weights.
if weights.get_shape().ndims >= 1:
reduction_indices = list(range(1, weights.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.to_float(math_ops.not_equal(weights, 0)),
reduction_indices=reduction_indices)
# Next, determine the number of elements that weight would broadcast to:
broadcast_dims = array_ops.slice(array_ops.shape(losses),
[weights.get_shape().ndims], [-1])
num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
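# Illustrative NumPy sketch: how _num_present counts usable elements. With
# losses of shape [4, 5, 6, 3] and weights of shape [4, 5], each non-zero
# weight accounts for the 6 * 3 = 18 loss elements it broadcasts over.
def _num_present_numpy_sketch():
    import numpy as np
    losses_shape = (4, 5, 6, 3)
    weights = np.zeros((4, 5))
    weights[0, :3] = 1.0                                     # three non-zero weights
    num_nonzero = np.count_nonzero(weights)                  # 3
    num_to_broadcast = np.prod(losses_shape[weights.ndim:])  # 6 * 3 = 18
    return num_nonzero * num_to_broadcast                    # 54 "present" elements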
def compute_weighted_loss(
losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Computes the weighted loss.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised; you'll wind up with a weighted sum instead of a
weighted mean for any dimension but the last. This will be cleaned up soon,
so please do not rely on the current behavior for anything but the shapes
documented for `weights` below.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: `Tensor` of shape `[]`, `[batch_size]` or
`[batch_size, d1, ... dK]`, where K < N.
scope: the scope for the operations performed in computing the loss.
loss_collection: the loss will be added to these collections.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
"""
with ops.name_scope(scope, "weighted_loss", [losses, weights]):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.to_float(losses)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
losses_shape = losses.get_shape()
if losses_shape.ndims is None:
raise ValueError("losses.get_shape().ndims cannot be None")
weights_shape = weights.get_shape()
if weights_shape.ndims is None:
raise ValueError("weight.get_shape().ndims cannot be None")
# TODO(b/33556118): Remove `ndims > 1` check so shapes [] and [1] behave the
# same.
if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
weights = array_ops.squeeze(weights, [-1])
# TODO(b/33556118): Remove this when we require weights shape be either
# scalar or the same as losses.
weights_dims = weights_shape.as_list()
losses_dims = losses_shape.as_list()
if len(weights_dims) > len(losses_dims):
raise ValueError(
"Invalid weights shape %s can not be broadcast to losses %s." % (
weights_shape, losses_shape))
for i in range(len(weights_dims)):
if ((losses_dims[i] is not None) and (losses_dims[i] == 1) and
(weights_dims[i] is not None) and (weights_dims[i] != 1)):
raise ValueError(
"Invalid weights shape %s can not be broadcast to losses %s." % (
weights_shape, losses_shape))
for i in range(len(weights_dims)):
if ((losses_dims[i] is not None) and (losses_dims[i] != 1) and
(weights_dims[i] is not None) and (weights_dims[i] == 1)):
logging.warn(
"WARNING: Weights %s with dimension 1 will result in a sum"
", not average, across dimension %d.", weights_shape, i)
total_loss = _scale_losses(losses, weights)
num_present = _num_present(losses, weights)
mean_loss = _safe_mean(total_loss, num_present)
# Convert the result back to the input type.
mean_loss = math_ops.cast(mean_loss, input_dtype)
util.add_loss(mean_loss, loss_collection)
return mean_loss
def absolute_difference(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a `Tensor` of
shape `[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised; you'll wind up with a weighted sum instead of a
weighted mean for any dimension but the last. This will be cleaned up soon,
so please do not rely on the current behavior for anything but the shapes
documented for `weights` below.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Coefficients for the loss a scalar, a tensor of shape
`[batch_size]` or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "absolute_difference",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope, loss_collection)
def cosine_distance(
labels, predictions, dim=None, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
Args:
labels: `Tensor` whose shape matches 'predictions'
predictions: An arbitrary matrix.
dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape `[batch_size]`, or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
if dim is None:
raise ValueError("`dim` cannot be None.")
with ops.name_scope(scope, "cosine_distance_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
return compute_weighted_loss(losses, weights, scope, loss_collection)
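# Illustrative sketch (not part of the original module): both inputs must
# already be unit-normalized along `dim`; matching directions give a distance
# of 0 and orthogonal directions give 1.
def _example_cosine_distance():
  labels = ops.convert_to_tensor([[1.0, 0.0], [0.0, 1.0]])
  predictions = ops.convert_to_tensor([[1.0, 0.0], [1.0, 0.0]])
  # Row 1 matches (distance 0), row 2 is orthogonal (distance 1); mean is 0.5.
  return cosine_distance(labels, predictions, dim=1)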
def hinge_loss(labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a hinge loss to the training procedure.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
Args:
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0.
logits: The logits, a float tensor.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape `[batch_size]`, or a tensor whose shape matches `logits`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` of the loss value.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
logits = math_ops.to_float(logits)
labels = math_ops.to_float(labels)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
return compute_weighted_loss(losses, weights, scope, loss_collection)
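# Illustrative sketch (not part of the original module): {0, 1} labels are
# mapped to {-1, +1}, so a correct logit beyond the margin contributes 0 loss
# while a wrong-signed logit contributes 1 + |logit|.
def _example_hinge_loss():
  labels = ops.convert_to_tensor([1.0, 0.0])
  logits = ops.convert_to_tensor([2.0, 0.5])
  # Losses are relu(1 - (+1) * 2.0) = 0 and relu(1 - (-1) * 0.5) = 1.5.
  return hinge_loss(labels, logits)  # mean over the two samples is 0.75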
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape `[batch_size]`, or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "log_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope, loss_collection)
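# Illustrative sketch (not part of the original module): `epsilon` keeps the
# logs finite when a prediction hits exactly 0 or 1.
def _example_log_loss():
  labels = ops.convert_to_tensor([1.0, 0.0])
  predictions = ops.convert_to_tensor([0.9, 0.0])
  # Losses are -log(0.9 + eps) and -log(1.0 + eps); without epsilon, predicting
  # exactly 1.0 for a 0 label would give -log(0) = inf.
  return log_loss(labels, predictions)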
def mean_pairwise_squared_error(labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], then three
  pairs of differences are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
labels: The ground truth output tensor, whose shape must match the shape of
`predictions`.
predictions: The predicted outputs, a tensor of size
`[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in
`predictions`.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape `[batch_size]`, or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_pairwise_squared_error",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
weights = math_ops.to_float(ops.convert_to_tensor(weights))
diffs = math_ops.subtract(predictions, labels)
# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:
raise ValueError("diffs.get_shape().ndims cannot be None")
if weights.get_shape().ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
reduction_indices = list(range(1, diffs.get_shape().ndims))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs),
reduction_indices=reduction_indices)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch)
sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
math_ops.square(num_present_per_batch))
loss = _scale_losses(term1 - term2, weights)
mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
util.add_loss(mean_loss, loss_collection)
return mean_loss
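# Illustrative sketch (not part of the original module): with one sample whose
# labels are [a, b, c] = [0, 1, 3] and predictions [x, y, z] = [0, 2, 3], the
# three pairwise differences compared are (a-b)-(x-y) = 1, (a-c)-(x-z) = 0 and
# (b-c)-(y-z) = -1, so only errors in the relative ordering contribute.
def _example_mean_pairwise_squared_error():
  labels = ops.convert_to_tensor([[0.0, 1.0, 3.0]])
  predictions = ops.convert_to_tensor([[0.0, 2.0, 3.0]])
  return mean_pairwise_squared_error(labels, predictions)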
def mean_squared_error(labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
    weights: Coefficients for the loss. This must be a scalar, a tensor of
      shape `[batch_size]`, or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_squared_error",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.square(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope, loss_collection)
def sigmoid_cross_entropy(
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
    multi_class_labels: `[batch_size, num_classes]` target labels with entries
      in `{0, 1}`.
logits: `[batch_size, num_classes]` logits outputs of the network.
weights: Coefficients for the loss. This must be of shape `[]`,
`[batch_size]` or `[batch_size, num_classes]`.
label_smoothing: If greater than `0` then smooth the labels.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None.
"""
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
(logits, multi_class_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope, loss_collection)
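# Illustrative sketch (not part of the original module): with label_smoothing
# 0.1, the hard targets are pulled towards 1/2 (0 -> 0.05 and 1 -> 0.95) before
# the per-element sigmoid cross-entropy is computed and averaged.
def _example_sigmoid_cross_entropy():
  multi_class_labels = ops.convert_to_tensor([[1.0, 0.0], [0.0, 1.0]])
  logits = ops.convert_to_tensor([[2.0, -1.0], [-1.0, 3.0]])
  return sigmoid_cross_entropy(multi_class_labels, logits, label_smoothing=0.1)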
def softmax_cross_entropy(
onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
onehot_labels: `[batch_size, num_classes]` target one-hot-encoded labels.
    logits: [batch_size, num_classes] logits outputs of the network.
weights: Coefficients for the loss. This must be of shape `[]`,
`[batch_size]` or `[batch_size, num_classes]`.
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None.
"""
with ops.name_scope(scope, "softmax_cross_entropy_loss",
(logits, onehot_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(labels=onehot_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope, loss_collection)
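# Illustrative sketch (not part of the original module): with two classes and
# label_smoothing 0.2, a one-hot row [1, 0] becomes [0.9, 0.1], i.e.
# 1 * (1 - 0.2) + 0.2 / 2 and 0 * (1 - 0.2) + 0.2 / 2, before the softmax
# cross-entropy is taken against the logits.
def _example_softmax_cross_entropy():
  onehot_labels = ops.convert_to_tensor([[1.0, 0.0], [0.0, 1.0]])
  logits = ops.convert_to_tensor([[2.0, -1.0], [-1.0, 3.0]])
  return softmax_cross_entropy(onehot_labels, logits, label_smoothing=0.2)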
def sparse_softmax_cross_entropy(labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape [`batch_size`], then the loss weights apply to each
corresponding sample.
WARNING: `weights` also supports dimensions of 1, but the broadcasting does
not work as advertised, you'll wind up with weighted sum instead of weighted
mean for any but the last dimension. This will be cleaned up soon, so please
do not rely on the current behavior for anything but the shapes documented for
`weights` below.
Args:
labels: [batch_size, 1] or [batch_size] target labels of dtype `int32` or
`int64` in the range `[0, num_classes)`.
    logits: [batch_size, num_classes] logits outputs of the network.
weights: Coefficients for the loss. This must be of shape `[batch_size]` or
`[batch_size, 1]`.
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of logits, labels, and weight are incompatible, or
if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
# Reshape losses to [batch_size, 1] to be consistent with weights.
losses = array_ops.reshape(losses, shape=[array_ops.shape(losses)[0], 1])
return compute_weighted_loss(losses, weights, scope, loss_collection)
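# Illustrative sketch (not part of the original module): `labels` holds class
# indices rather than one-hot rows; both `[batch_size]` and `[batch_size, 1]`
# shapes work because the labels are flattened to `[batch_size]` first.
def _example_sparse_softmax_cross_entropy():
  labels = ops.convert_to_tensor([[0], [2]])  # equivalent to [0, 2]
  logits = ops.convert_to_tensor([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
  return sparse_softmax_cross_entropy(labels, logits)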
|
|
#!/usr/bin/env python
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Build image output_image_file from input_directory and properties_file.
Usage: build_image input_directory properties_file output_image_file
"""
import os
import os.path
import re
import subprocess
import sys
import commands
import common
import shutil
import sparse_img
import tempfile
OPTIONS = common.OPTIONS
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
BLOCK_SIZE = 4096
def RunCommand(cmd):
"""Echo and run the given command.
Args:
cmd: the command represented as a list of strings.
Returns:
A tuple of the output and the exit code.
"""
print "Running: ", " ".join(cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = p.communicate()
print "%s" % (output.rstrip(),)
return (output, p.returncode)
def GetVerityFECSize(partition_size):
cmd = "fec -s %d" % partition_size
status, output = commands.getstatusoutput(cmd)
if status:
print output
return False, 0
return True, int(output)
def GetVerityTreeSize(partition_size):
cmd = "build_verity_tree -s %d"
cmd %= partition_size
status, output = commands.getstatusoutput(cmd)
if status:
print output
return False, 0
return True, int(output)
def GetVerityMetadataSize(partition_size):
cmd = "system/extras/verity/build_verity_metadata.py -s %d"
cmd %= partition_size
status, output = commands.getstatusoutput(cmd)
if status:
print output
return False, 0
return True, int(output)
def GetVeritySize(partition_size, fec_supported):
success, verity_tree_size = GetVerityTreeSize(partition_size)
if not success:
return 0
success, verity_metadata_size = GetVerityMetadataSize(partition_size)
if not success:
return 0
verity_size = verity_tree_size + verity_metadata_size
if fec_supported:
success, fec_size = GetVerityFECSize(partition_size + verity_size)
if not success:
return 0
return verity_size + fec_size
return verity_size
def GetSimgSize(image_file):
simg = sparse_img.SparseImage(image_file, build_map=False)
return simg.blocksize * simg.total_blocks
def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE
print("Padding %d blocks (%d bytes)" % (blocks, pad_size))
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks)
def AdjustPartitionSizeForVerity(partition_size, fec_supported):
"""Modifies the provided partition size to account for the verity metadata.
This information is used to size the created image appropriately.
Args:
    partition_size: the size of the partition to be verified.
    fec_supported: True if forward error correction is enabled.
Returns:
The size of the partition adjusted for verity metadata.
"""
key = "%d %d" % (partition_size, fec_supported)
if key in AdjustPartitionSizeForVerity.results:
return AdjustPartitionSizeForVerity.results[key]
hi = partition_size
if hi % BLOCK_SIZE != 0:
hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
# verity tree and fec sizes depend on the partition size, which
# means this estimate is always going to be unnecessarily small
lo = partition_size - GetVeritySize(hi, fec_supported)
result = lo
# do a binary search for the optimal size
while lo < hi:
i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
size = i + GetVeritySize(i, fec_supported)
if size <= partition_size:
if result < i:
result = i
lo = i + BLOCK_SIZE
else:
hi = i
AdjustPartitionSizeForVerity.results[key] = result
return result
AdjustPartitionSizeForVerity.results = {}
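# Illustrative sketch (standalone, hypothetical numbers; not part of the build
# tools): the search above keeps the largest block-aligned data size i such
# that i plus its verity overhead still fits inside the partition. With a
# fake, constant overhead it behaves like this:
def _ExampleAdjustWithConstantOverhead(partition_size=1024 * BLOCK_SIZE,
                                       overhead=16 * BLOCK_SIZE):
  lo = partition_size - overhead
  hi = (partition_size // BLOCK_SIZE) * BLOCK_SIZE
  result = lo
  while lo < hi:
    i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
    if i + overhead <= partition_size:
      result = max(result, i)
      lo = i + BLOCK_SIZE
    else:
      hi = i
  return result  # 1008 * BLOCK_SIZE: the data area shrinks by the overhead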
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path):
cmd = "fec -e %s %s %s" % (sparse_image_path, verity_path, verity_fec_path)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
print "Could not build FEC data! Error: %s" % output
return False
return True
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
cmd = "build_verity_tree -A %s %s %s" % (
FIXED_SALT, sparse_image_path, verity_image_path)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
print "Could not build verity tree! Error: %s" % output
return False
root, salt = output.split()
prop_dict["verity_root_hash"] = root
prop_dict["verity_salt"] = salt
return True
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key):
cmd_template = (
"system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s")
cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
print "Could not build verity metadata! Error: %s" % output
return False
return True
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""Appends the unsparse image to the given sparse image.
Args:
sparse_image_path: the path to the (sparse) image
    unsparse_image_path: the path to the (unsparse) image
    error_message: message to print if the append fails
Returns:
True on success, False on failure.
"""
cmd = "append2simg %s %s"
cmd %= (sparse_image_path, unsparse_image_path)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
print "%s: %s" % (error_message, output)
return False
return True
def Append(target, file_to_append, error_message):
cmd = 'cat %s >> %s' % (file_to_append, target)
print cmd
status, output = commands.getstatusoutput(cmd)
if status:
print "%s: %s" % (error_message, output)
return False
return True
def BuildVerifiedImage(data_image_path, verity_image_path,
verity_metadata_path, verity_fec_path,
fec_supported):
if not Append(verity_image_path, verity_metadata_path,
"Could not append verity metadata!"):
return False
if fec_supported:
# build FEC for the entire partition, including metadata
if not BuildVerityFEC(data_image_path, verity_image_path,
verity_fec_path):
return False
if not Append(verity_image_path, verity_fec_path, "Could not append FEC!"):
return False
if not Append2Simg(data_image_path, verity_image_path,
"Could not append verity data!"):
return False
return True
def UnsparseImage(sparse_image_path, replace=True):
img_dir = os.path.dirname(sparse_image_path)
unsparse_image_path = "unsparse_" + os.path.basename(sparse_image_path)
unsparse_image_path = os.path.join(img_dir, unsparse_image_path)
if os.path.exists(unsparse_image_path):
if replace:
os.unlink(unsparse_image_path)
else:
return True, unsparse_image_path
inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
(_, exit_code) = RunCommand(inflate_command)
if exit_code != 0:
os.remove(unsparse_image_path)
return False, None
return True, unsparse_image_path
def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
"""Creates an image that is verifiable using dm-verity.
Args:
    out_file: the location to write the verifiable image at
    fec_supported: True if forward error correction is enabled
prop_dict: a dictionary of properties required for image creation and
verification
Returns:
True on success, False otherwise.
"""
# get properties
image_size = prop_dict["partition_size"]
block_dev = prop_dict["verity_block_device"]
signer_key = prop_dict["verity_key"] + ".pk8"
if OPTIONS.verity_signer_path is not None:
signer_path = OPTIONS.verity_signer_path + ' '
signer_path += ' '.join(OPTIONS.verity_signer_args)
else:
signer_path = prop_dict["verity_signer_cmd"]
# make a tempdir
tempdir_name = tempfile.mkdtemp(suffix="_verity_images")
# get partial image paths
verity_image_path = os.path.join(tempdir_name, "verity.img")
verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
# build the verity tree and get the root hash and salt
if not BuildVerityTree(out_file, verity_image_path, prop_dict):
shutil.rmtree(tempdir_name, ignore_errors=True)
return False
# build the metadata blocks
root_hash = prop_dict["verity_root_hash"]
salt = prop_dict["verity_salt"]
if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_dev, signer_path, signer_key):
shutil.rmtree(tempdir_name, ignore_errors=True)
return False
# build the full verified image
if not BuildVerifiedImage(out_file,
verity_image_path,
verity_metadata_path,
verity_fec_path,
fec_supported):
shutil.rmtree(tempdir_name, ignore_errors=True)
return False
shutil.rmtree(tempdir_name, ignore_errors=True)
return True
def ConvertBlockMapToBaseFs(block_map_file):
fd, base_fs_file = tempfile.mkstemp(prefix="script_gen_",
suffix=".base_fs")
os.close(fd)
convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
(_, exit_code) = RunCommand(convert_command)
if exit_code != 0:
os.remove(base_fs_file)
return None
return base_fs_file
def BuildImage(in_dir, prop_dict, out_file, target_out=None):
"""Build an image to out_file from in_dir with property prop_dict.
Args:
in_dir: path of input directory.
prop_dict: property dictionary.
out_file: path of the output image file.
target_out: path of the product out directory to read device specific FS config files.
Returns:
True iff the image is built successfully.
"""
# system_root_image=true: build a system.img that combines the contents of
# /system and the ramdisk, and can be mounted at the root of the file system.
origin_in = in_dir
fs_config = prop_dict.get("fs_config")
base_fs_file = None
if (prop_dict.get("system_root_image") == "true"
and prop_dict["mount_point"] == "system"):
in_dir = tempfile.mkdtemp()
# Change the mount point to "/"
prop_dict["mount_point"] = "/"
if fs_config:
# We need to merge the fs_config files of system and ramdisk.
fd, merged_fs_config = tempfile.mkstemp(prefix="root_fs_config",
suffix=".txt")
os.close(fd)
with open(merged_fs_config, "w") as fw:
if "ramdisk_fs_config" in prop_dict:
with open(prop_dict["ramdisk_fs_config"]) as fr:
fw.writelines(fr.readlines())
with open(fs_config) as fr:
fw.writelines(fr.readlines())
fs_config = merged_fs_config
build_command = []
fs_type = prop_dict.get("fs_type", "")
run_fsck = False
fs_spans_partition = True
if fs_type.startswith("squash"):
fs_spans_partition = False
is_verity_partition = "verity_block_device" in prop_dict
verity_supported = prop_dict.get("verity") == "true"
verity_fec_supported = prop_dict.get("verity_fec") == "true"
# Adjust the partition size to make room for the hashes if this is to be
# verified.
if verity_supported and is_verity_partition:
partition_size = int(prop_dict.get("partition_size"))
adjusted_size = AdjustPartitionSizeForVerity(partition_size,
verity_fec_supported)
if not adjusted_size:
return False
prop_dict["partition_size"] = str(adjusted_size)
prop_dict["original_partition_size"] = str(partition_size)
if fs_type.startswith("ext"):
build_command = ["mkuserimg.sh"]
if "extfs_sparse_flag" in prop_dict:
build_command.append(prop_dict["extfs_sparse_flag"])
#run_fsck = True
build_command.extend([in_dir, out_file, fs_type,
prop_dict["mount_point"]])
build_command.append(prop_dict["partition_size"])
if "journal_size" in prop_dict:
build_command.extend(["-j", prop_dict["journal_size"]])
if "timestamp" in prop_dict:
build_command.extend(["-T", str(prop_dict["timestamp"])])
if fs_config:
build_command.extend(["-C", fs_config])
if target_out:
build_command.extend(["-D", target_out])
if "block_list" in prop_dict:
build_command.extend(["-B", prop_dict["block_list"]])
if "base_fs_file" in prop_dict:
base_fs_file = ConvertBlockMapToBaseFs(prop_dict["base_fs_file"])
if base_fs_file is None:
return False
build_command.extend(["-d", base_fs_file])
build_command.extend(["-L", prop_dict["mount_point"]])
if "selinux_fc" in prop_dict:
build_command.append(prop_dict["selinux_fc"])
elif fs_type.startswith("squash"):
build_command = ["mksquashfsimage.sh"]
build_command.extend([in_dir, out_file])
if "squashfs_sparse_flag" in prop_dict:
build_command.extend([prop_dict["squashfs_sparse_flag"]])
build_command.extend(["-m", prop_dict["mount_point"]])
if target_out:
build_command.extend(["-d", target_out])
if fs_config:
build_command.extend(["-C", fs_config])
if "selinux_fc" in prop_dict:
build_command.extend(["-c", prop_dict["selinux_fc"]])
if "block_list" in prop_dict:
build_command.extend(["-B", prop_dict["block_list"]])
if "squashfs_compressor" in prop_dict:
build_command.extend(["-z", prop_dict["squashfs_compressor"]])
if "squashfs_compressor_opt" in prop_dict:
build_command.extend(["-zo", prop_dict["squashfs_compressor_opt"]])
if "squashfs_disable_4k_align" in prop_dict and prop_dict.get("squashfs_disable_4k_align") == "true":
build_command.extend(["-a"])
elif fs_type.startswith("f2fs"):
build_command = ["mkf2fsuserimg.sh"]
build_command.extend([out_file, prop_dict["partition_size"]])
else:
build_command = ["mkyaffs2image", "-f"]
if prop_dict.get("mkyaffs2_extra_flags", None):
build_command.extend(prop_dict["mkyaffs2_extra_flags"].split())
build_command.append(in_dir)
build_command.append(out_file)
if "selinux_fc" in prop_dict:
build_command.append(prop_dict["selinux_fc"])
build_command.append(prop_dict["mount_point"])
if in_dir != origin_in:
# Construct a staging directory of the root file system.
ramdisk_dir = prop_dict.get("ramdisk_dir")
if ramdisk_dir:
shutil.rmtree(in_dir)
shutil.copytree(ramdisk_dir, in_dir, symlinks=True)
staging_system = os.path.join(in_dir, "system")
shutil.rmtree(staging_system, ignore_errors=True)
shutil.copytree(origin_in, staging_system, symlinks=True)
reserved_blocks = prop_dict.get("has_ext4_reserved_blocks") == "true"
ext4fs_output = None
try:
if reserved_blocks and fs_type.startswith("ext4"):
(ext4fs_output, exit_code) = RunCommand(build_command)
else:
(_, exit_code) = RunCommand(build_command)
finally:
if in_dir != origin_in:
# Clean up temporary directories and files.
shutil.rmtree(in_dir, ignore_errors=True)
if fs_config:
os.remove(fs_config)
if base_fs_file is not None:
os.remove(base_fs_file)
if exit_code != 0:
return False
# Bug: 21522719, 22023465
# There are some reserved blocks on ext4 FS (lesser of 4096 blocks and 2%).
# We need to deduct those blocks from the available space, since they are
# not writable even with root privilege. It only affects devices using
# file-based OTA and a kernel version of 3.10 or greater (currently just
# sprout).
if reserved_blocks and fs_type.startswith("ext4"):
assert ext4fs_output is not None
ext4fs_stats = re.compile(
r'Created filesystem with .* (?P<used_blocks>[0-9]+)/'
r'(?P<total_blocks>[0-9]+) blocks')
m = ext4fs_stats.match(ext4fs_output.strip().split('\n')[-1])
used_blocks = int(m.groupdict().get('used_blocks'))
total_blocks = int(m.groupdict().get('total_blocks'))
reserved_blocks = min(4096, int(total_blocks * 0.02))
adjusted_blocks = total_blocks - reserved_blocks
if used_blocks > adjusted_blocks:
mount_point = prop_dict.get("mount_point")
print("Error: Not enough room on %s (total: %d blocks, used: %d blocks, "
"reserved: %d blocks, available: %d blocks)" % (
mount_point, total_blocks, used_blocks, reserved_blocks,
adjusted_blocks))
return False
if not fs_spans_partition:
mount_point = prop_dict.get("mount_point")
partition_size = int(prop_dict.get("partition_size"))
image_size = GetSimgSize(out_file)
if image_size > partition_size:
print("Error: %s image size of %d is larger than partition size of "
"%d" % (mount_point, image_size, partition_size))
return False
if verity_supported and is_verity_partition:
ZeroPadSimg(out_file, partition_size - image_size)
# create the verified image if this is to be verified
if verity_supported and is_verity_partition:
if not MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict):
return False
if run_fsck and prop_dict.get("skip_fsck") != "true":
success, unsparse_image = UnsparseImage(out_file, replace=False)
if not success:
return False
# Run e2fsck on the inflated image file
e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
(_, exit_code) = RunCommand(e2fsck_command)
os.remove(unsparse_image)
return exit_code == 0
def ImagePropFromGlobalDict(glob_dict, mount_point):
"""Build an image property dictionary from the global dictionary.
Args:
glob_dict: the global dictionary from the build system.
    mount_point: such as "system", "data" etc.
  Returns:
    A dict of image properties suitable for passing to BuildImage().
  """
d = {}
if "build.prop" in glob_dict:
bp = glob_dict["build.prop"]
if "ro.build.date.utc" in bp:
d["timestamp"] = bp["ro.build.date.utc"]
def copy_prop(src_p, dest_p):
if src_p in glob_dict:
d[dest_p] = str(glob_dict[src_p])
common_props = (
"extfs_sparse_flag",
"squashfs_sparse_flag",
"mkyaffs2_extra_flags",
"selinux_fc",
"skip_fsck",
"verity",
"verity_key",
"verity_signer_cmd",
"verity_fec"
)
for p in common_props:
copy_prop(p, p)
d["mount_point"] = mount_point
if mount_point == "system":
copy_prop("fs_type", "fs_type")
    # Copy the generic system fs type first, override with specific one if
# available.
copy_prop("system_fs_type", "fs_type")
copy_prop("system_size", "partition_size")
copy_prop("system_journal_size", "journal_size")
copy_prop("system_verity_block_device", "verity_block_device")
copy_prop("system_root_image", "system_root_image")
copy_prop("ramdisk_dir", "ramdisk_dir")
copy_prop("ramdisk_fs_config", "ramdisk_fs_config")
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
copy_prop("system_squashfs_compressor", "squashfs_compressor")
copy_prop("system_squashfs_compressor_opt", "squashfs_compressor_opt")
copy_prop("system_squashfs_disable_4k_align", "squashfs_disable_4k_align")
copy_prop("system_base_fs_file", "base_fs_file")
elif mount_point == "data":
# Copy the generic fs type first, override with specific one if available.
copy_prop("fs_type", "fs_type")
copy_prop("userdata_fs_type", "fs_type")
copy_prop("userdata_size", "partition_size")
elif mount_point == "cache":
copy_prop("cache_fs_type", "fs_type")
copy_prop("cache_size", "partition_size")
elif mount_point == "vendor":
copy_prop("vendor_fs_type", "fs_type")
copy_prop("vendor_size", "partition_size")
copy_prop("vendor_journal_size", "journal_size")
copy_prop("vendor_verity_block_device", "verity_block_device")
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
copy_prop("vendor_squashfs_compressor", "squashfs_compressor")
copy_prop("vendor_squashfs_compressor_opt", "squashfs_compressor_opt")
copy_prop("vendor_squashfs_disable_4k_align", "squashfs_disable_4k_align")
copy_prop("vendor_base_fs_file", "base_fs_file")
elif mount_point == "oem":
copy_prop("fs_type", "fs_type")
copy_prop("oem_size", "partition_size")
copy_prop("oem_journal_size", "journal_size")
copy_prop("has_ext4_reserved_blocks", "has_ext4_reserved_blocks")
return d
def LoadGlobalDict(filename):
"""Load "name=value" pairs from filename"""
d = {}
f = open(filename)
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
k, v = line.split("=", 1)
d[k] = v
f.close()
return d
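# Illustrative sketch (hypothetical property names, not part of the build
# system): a properties file is a set of "name=value" lines such as
#   fs_type=ext4
#   system_size=1073741824
#   skip_fsck=true
# which LoadGlobalDict turns into {"fs_type": "ext4", ...}. Values stay
# strings, so numeric sizes are converted with int() where they are used.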
def main(argv):
if len(argv) != 4:
print __doc__
sys.exit(1)
in_dir = argv[0]
glob_dict_file = argv[1]
out_file = argv[2]
target_out = argv[3]
glob_dict = LoadGlobalDict(glob_dict_file)
if "mount_point" in glob_dict:
    # The caller knows the mount point and provides a dictionary needed by
# BuildImage().
image_properties = glob_dict
else:
image_filename = os.path.basename(out_file)
mount_point = ""
if image_filename == "system.img":
mount_point = "system"
elif image_filename == "userdata.img":
mount_point = "data"
elif image_filename == "cache.img":
mount_point = "cache"
elif image_filename == "vendor.img":
mount_point = "vendor"
elif image_filename == "oem.img":
mount_point = "oem"
else:
print >> sys.stderr, "error: unknown image file name ", image_filename
exit(1)
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
if not BuildImage(in_dir, image_properties, out_file, target_out):
print >> sys.stderr, "error: failed to build %s from %s" % (out_file,
in_dir)
exit(1)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgetbase.py
__version__='3.3.0'
__doc__='''Base class for user-defined graphical widgets'''
from reportlab.graphics import shapes
from reportlab import rl_config
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
from weakref import ref as weakref_ref
class PropHolder:
'''Base for property holders'''
_attrMap = None
def verify(self):
"""If the _attrMap attribute is not None, this
checks all expected attributes are present; no
unwanted attributes are present; and (if a
checking function is found) checks each
attribute has a valid value. Either succeeds
or raises an informative exception.
"""
if self._attrMap is not None:
for key in self.__dict__.keys():
if key[0] != '_':
msg = "Unexpected attribute %s found in %s" % (key, self)
assert key in self._attrMap, msg
for attr, metavalue in self._attrMap.items():
msg = "Missing attribute %s from %s" % (attr, self)
assert hasattr(self, attr), msg
value = getattr(self, attr)
args = (value, attr, self.__class__.__name__)
assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % args
if rl_config.shapeChecking:
"""This adds the ability to check every attribute assignment
as it is made. It slows down shapes but is a big help when
developing. It does not get defined if rl_config.shapeChecking = 0.
"""
def __setattr__(self, name, value):
"""By default we verify. This could be off
in some parallel base classes."""
validateSetattr(self,name,value)
def getProperties(self,recur=1):
"""Returns a list of all properties which can be edited and
which are not marked as private. This may include 'child
widgets' or 'primitive shapes'. You are free to override
this and provide alternative implementations; the default
one simply returns everything without a leading underscore.
"""
from reportlab.lib.validators import isValidChild
# TODO when we need it, but not before -
# expose sequence contents?
props = {}
for name in self.__dict__.keys():
if name[0:1] != '_':
component = getattr(self, name)
if recur and isValidChild(component):
# child object, get its properties too
childProps = component.getProperties(recur=recur)
for childKey, childValue in childProps.items():
#key might be something indexed like '[2].fillColor'
#or simple like 'fillColor'; in the former case we
#don't need a '.' between me and my child.
if childKey[0] == '[':
props['%s%s' % (name, childKey)] = childValue
else:
props['%s.%s' % (name, childKey)] = childValue
else:
props[name] = component
return props
def setProperties(self, propDict):
"""Permits bulk setting of properties. These may include
child objects e.g. "chart.legend.width = 200".
All assignments will be validated by the object as if they
were set individually in python code.
All properties of a top-level object are guaranteed to be
set before any of the children, which may be helpful to
widget designers.
"""
childPropDicts = {}
for name, value in propDict.items():
parts = name.split('.', 1)
if len(parts) == 1:
#simple attribute, set it now
setattr(self, name, value)
else:
(childName, remains) = parts
try:
childPropDicts[childName][remains] = value
except KeyError:
childPropDicts[childName] = {remains: value}
# now assign to children
for childName, childPropDict in childPropDicts.items():
child = getattr(self, childName)
child.setProperties(childPropDict)
def dumpProperties(self, prefix=""):
"""Convenience. Lists them on standard output. You
may provide a prefix - mostly helps to generate code
samples for documentation.
"""
propList = list(self.getProperties().items())
propList.sort()
if prefix:
prefix = prefix + '.'
for (name, value) in propList:
print('%s%s = %s' % (prefix, name, value))
class Widget(PropHolder, shapes.UserNode):
"""Base for all user-defined widgets. Keep as simple as possible. Does
not inherit from Shape so that we can rewrite shapes without breaking
widgets and vice versa."""
def _setKeywords(self,**kw):
for k,v in kw.items():
if k not in self.__dict__:
setattr(self,k,v)
def draw(self):
msg = "draw() must be implemented for each Widget!"
raise shapes.NotImplementedError(msg)
def demo(self):
msg = "demo() must be implemented for each Widget!"
raise shapes.NotImplementedError(msg)
def provideNode(self):
return self.draw()
def getBounds(self):
"Return outer boundary as x1,y1,x2,y2. Can be overridden for efficiency"
return self.draw().getBounds()
class ScaleWidget(Widget):
'''Contents with a scale and offset'''
_attrMap = AttrMap(
x = AttrMapValue(isNumber,desc="x offset"),
y = AttrMapValue(isNumber,desc="y offset"),
scale = AttrMapValue(isNumber,desc="scale"),
contents = AttrMapValue(None,desc="Contained drawable elements"),
)
def __init__(self,x=0,y=0,scale=1.0,contents=None):
self.x = x
self.y = y
if not contents: contents=[]
elif not isinstance(contents,(tuple,list)):
contents = (contents,)
self.contents = list(contents)
self.scale = scale
def draw(self):
return shapes.Group(transform=(self.scale,0,0,self.scale,self.x,self.y),*self.contents)
_ItemWrapper={}
class CloneMixin:
def clone(self,**kwds):
n = self.__class__()
n.__dict__.clear()
n.__dict__.update(self.__dict__)
if kwds: n.__dict__.update(kwds)
return n
class TypedPropertyCollection(PropHolder):
"""A container with properties for objects of the same kind.
This makes it easy to create lists of objects. You initialize
it with a class of what it is to contain, and that is all you
can add to it. You can assign properties to the collection
as a whole, or to a numeric index within it; if so it creates
a new child object to hold that data.
So:
wedges = TypedPropertyCollection(WedgeProperties)
wedges.strokeWidth = 2 # applies to all
wedges.strokeColor = colors.red # applies to all
wedges[3].strokeColor = colors.blue # only to one
The last line should be taken as a prescription of how to
create wedge no. 3 if one is needed; no error is raised if
there are only two data points.
    We try to make sensible use of tuple indices:
    line[(3, x)] falls back to line[(3,)] (the same as line[3]) and then to
    the collection-wide defaults.
"""
def __init__(self, exampleClass):
#give it same validation rules as what it holds
self.__dict__['_value'] = exampleClass()
self.__dict__['_children'] = {}
def wKlassFactory(self,Klass):
class WKlass(Klass,CloneMixin):
def __getattr__(self,name):
try:
return self.__class__.__bases__[0].__getattr__(self,name)
except:
parent = self.__propholder_parent__()
c = parent._children
x = self.__propholder_index__
while x:
if x in c:
return getattr(c[x],name)
x = x[:-1]
return getattr(parent,name)
return WKlass
def __getitem__(self, x):
if isinstance(x,(tuple,list)):
x = tuple(x)
else:
x = (x,)
try:
return self._children[x]
except KeyError:
Klass = self._value.__class__
if Klass in _ItemWrapper:
WKlass = _ItemWrapper[Klass]
else:
_ItemWrapper[Klass] = WKlass = self.wKlassFactory(Klass)
child = WKlass()
for i in filter(lambda x,K=list(child.__dict__.keys()): x in K,list(child._attrMap.keys())):
del child.__dict__[i]
child.__dict__.update(dict(
__propholder_parent__ = weakref_ref(self),
__propholder_index__ = x[:-1])
)
self._children[x] = child
return child
def __contains__(self,key):
if isinstance(key,(tuple,list)):
key = tuple(key)
else:
key = key,
return key in self._children
def __setitem__(self, key, value):
msg = "This collection can only hold objects of type %s" % self._value.__class__.__name__
assert isinstance(value, self._value.__class__), msg
def __len__(self):
return len(list(self._children.keys()))
def getProperties(self,recur=1):
# return any children which are defined and whatever
# differs from the parent
props = {}
for key, value in self._value.getProperties(recur=recur).items():
props['%s' % key] = value
for idx in self._children.keys():
childProps = self._children[idx].getProperties(recur=recur)
for key, value in childProps.items():
if not hasattr(self,key) or getattr(self, key)!=value:
newKey = '[%s].%s' % (idx if len(idx)>1 else idx[0], key)
props[newKey] = value
return props
def setVector(self,**kw):
for name, value in kw.items():
for i in range(len(value)):
setattr(self[i],name,value[i])
def __getattr__(self,name):
return getattr(self._value,name)
def __setattr__(self,name,value):
return setattr(self._value,name,value)
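def _typedPropertyCollectionExample():
    '''Illustrative sketch only, not part of the original module: mirrors the
    class docstring above using WedgeProperties, as the test() function at the
    bottom of this file also does.'''
    from reportlab.graphics.charts.piecharts import WedgeProperties
    wedges = TypedPropertyCollection(WedgeProperties)
    wedges.strokeWidth = 2                  # applies to every wedge
    wedges.strokeColor = colors.red         # applies to every wedge
    wedges[3].strokeColor = colors.blue     # wedge 3 alone overrides the default
    return wedges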
## No longer needed!
class StyleProperties(PropHolder):
"""A container class for attributes used in charts and legends.
Attributes contained can be those for any graphical element
(shape?) in the ReportLab graphics package. The idea for this
container class is to be useful in combination with legends
and/or the individual appearance of data series in charts.
A legend could be as simple as a wrapper around a list of style
properties, where the 'desc' attribute contains a descriptive
string and the rest could be used by the legend e.g. to draw
something like a color swatch. The graphical presentation of
the legend would be its own business, though.
A chart could be inspecting a legend or, more directly, a list
of style properties to pick individual attributes that it knows
about in order to render a particular row of the data. A bar
chart e.g. could simply use 'strokeColor' and 'fillColor' for
drawing the bars while a line chart could also use additional
ones like strokeWidth.
"""
_attrMap = AttrMap(
strokeWidth = AttrMapValue(isNumber,desc='width of the stroke line'),
strokeLineCap = AttrMapValue(isNumber,desc='Line cap 0=butt, 1=round & 2=square',advancedUsage=1),
strokeLineJoin = AttrMapValue(isNumber,desc='Line join 0=miter, 1=round & 2=bevel',advancedUsage=1),
strokeMiterLimit = AttrMapValue(None,desc='miter limit control miter line joins',advancedUsage=1),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone,desc='dashing patterns e.g. (1,3)'),
strokeOpacity = AttrMapValue(isNumber,desc='level of transparency (alpha) accepts values between 0..1',advancedUsage=1),
strokeColor = AttrMapValue(isColorOrNone,desc='the color of the stroke'),
fillColor = AttrMapValue(isColorOrNone,desc='the filling color'),
desc = AttrMapValue(isString),
)
def __init__(self, **kwargs):
"Initialize with attributes if any."
for k, v in kwargs.items():
setattr(self, k, v)
def __setattr__(self, name, value):
"Verify attribute name and value, before setting it."
validateSetattr(self,name,value)
class TwoCircles(Widget):
def __init__(self):
self.leftCircle = shapes.Circle(100,100,20, fillColor=colors.red)
self.rightCircle = shapes.Circle(300,100,20, fillColor=colors.red)
def draw(self):
return shapes.Group(self.leftCircle, self.rightCircle)
class Face(Widget):
"""This draws a face with two eyes.
It exposes a couple of properties
to configure itself and hides all other details.
"""
_attrMap = AttrMap(
x = AttrMapValue(isNumber),
y = AttrMapValue(isNumber),
size = AttrMapValue(isNumber),
skinColor = AttrMapValue(isColorOrNone),
eyeColor = AttrMapValue(isColorOrNone),
mood = AttrMapValue(OneOf('happy','sad','ok')),
)
def __init__(self):
self.x = 10
self.y = 10
self.size = 80
self.skinColor = None
self.eyeColor = colors.blue
self.mood = 'happy'
def demo(self):
pass
def draw(self):
s = self.size # abbreviate as we will use this a lot
g = shapes.Group()
g.transform = [1,0,0,1,self.x, self.y]
# background
g.add(shapes.Circle(s * 0.5, s * 0.5, s * 0.5, fillColor=self.skinColor))
# left eye
g.add(shapes.Circle(s * 0.35, s * 0.65, s * 0.1, fillColor=colors.white))
g.add(shapes.Circle(s * 0.35, s * 0.65, s * 0.05, fillColor=self.eyeColor))
# right eye
g.add(shapes.Circle(s * 0.65, s * 0.65, s * 0.1, fillColor=colors.white))
g.add(shapes.Circle(s * 0.65, s * 0.65, s * 0.05, fillColor=self.eyeColor))
# nose
g.add(shapes.Polygon(
points=[s * 0.5, s * 0.6, s * 0.4, s * 0.3, s * 0.6, s * 0.3],
fillColor=None))
# mouth
if self.mood == 'happy':
offset = -0.05
elif self.mood == 'sad':
offset = +0.05
else:
offset = 0
g.add(shapes.Polygon(
points = [
s * 0.3, s * 0.2, #left of mouth
s * 0.7, s * 0.2, #right of mouth
s * 0.6, s * (0.2 + offset), # the bit going up or down
s * 0.4, s * (0.2 + offset) # the bit going up or down
],
fillColor = colors.pink,
strokeColor = colors.red,
strokeWidth = s * 0.03
))
return g
class TwoFaces(Widget):
def __init__(self):
self.faceOne = Face()
self.faceOne.mood = "happy"
self.faceTwo = Face()
self.faceTwo.x = 100
self.faceTwo.mood = "sad"
def draw(self):
"""Just return a group"""
return shapes.Group(self.faceOne, self.faceTwo)
def demo(self):
"""The default case already looks good enough,
no implementation needed here"""
pass
class Sizer(Widget):
"Container to show size of all enclosed objects"
_attrMap = AttrMap(BASE=shapes.SolidShape,
contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
)
def __init__(self, *elements):
self.contents = []
self.fillColor = colors.cyan
self.strokeColor = colors.magenta
for elem in elements:
self.add(elem)
def _addNamedNode(self,name,node):
'if name is not None add an attribute pointing to node and add to the attrMap'
if name:
if name not in list(self._attrMap.keys()):
self._attrMap[name] = AttrMapValue(isValidChild)
setattr(self, name, node)
def add(self, node, name=None):
"""Appends non-None child node to the 'contents' attribute. In addition,
if a name is provided, it is subsequently accessible by name
"""
# propagates properties down
if node is not None:
assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
self.contents.append(node)
self._addNamedNode(name,node)
def getBounds(self):
# get bounds of each object
if self.contents:
b = []
for elem in self.contents:
b.append(elem.getBounds())
return shapes.getRectsBounds(b)
else:
return (0,0,0,0)
def draw(self):
g = shapes.Group()
(x1, y1, x2, y2) = self.getBounds()
r = shapes.Rect(
x = x1,
y = y1,
width = x2-x1,
height = y2-y1,
fillColor = self.fillColor,
strokeColor = self.strokeColor
)
g.add(r)
for elem in self.contents:
g.add(elem)
return g
def test():
from reportlab.graphics.charts.piecharts import WedgeProperties
wedges = TypedPropertyCollection(WedgeProperties)
wedges.fillColor = colors.red
wedges.setVector(fillColor=(colors.blue,colors.green,colors.white))
print(len(_ItemWrapper))
d = shapes.Drawing(400, 200)
tc = TwoCircles()
d.add(tc)
from reportlab.graphics import renderPDF
renderPDF.drawToFile(d, 'sample_widget.pdf', 'A Sample Widget')
print('saved sample_widget.pdf')
d = shapes.Drawing(400, 200)
f = Face()
f.skinColor = colors.yellow
f.mood = "sad"
d.add(f, name='theFace')
print('drawing 1 properties:')
d.dumpProperties()
renderPDF.drawToFile(d, 'face.pdf', 'A Sample Widget')
print('saved face.pdf')
d2 = d.expandUserNodes()
renderPDF.drawToFile(d2, 'face_copy.pdf', 'An expanded drawing')
print('saved face_copy.pdf')
print('drawing 2 properties:')
d2.dumpProperties()
if __name__=='__main__':
test()
|
|
#!/usr/bin/env python
import sys
import os
import json
from pprint import pprint
import random
from uuid import getnode
try:
# see if we're running in a plex plug-in
HTTP
except:
import requests
HTTP = None
class Logger:
def Debug(self, *args):
print args
Log = Logger()
BASE_HEADERS = {
'X-Plex-Platform': "PMS",
'X-Plex-Platform-Version': "1",
'X-Plex-Provides': 'controller',
'X-Plex-Product': "shufflebyalbum",
'X-Plex-Version': "1",
'X-Plex-Device': "PMS-Plugin",
'X-Plex-Device-Name': "pms",
'X-Plex-Client-Identifier': str(hex(getnode()))
}
def http_comm(url, method, headers):
if HTTP:
r = HTTP.Request(url, headers=headers, cacheTime=0, method=method)
else:
if method == "GET":
request_func = requests.get
if method == "POST":
request_func = requests.post
if method == "DELETE":
request_func = requests.delete
r = request_func(url, headers=headers, allow_redirects=True)
return r
class PlexServer(object):
def __init__(self, host='localhost',port=32400, token = ""):
self.base_url = "http://{}:{}".format(host,port)
self.token = token
def query(self, path, method):
url = self.base_url + path
headers = dict(BASE_HEADERS)
headers['Accept'] = 'application/json'
if self.token:
headers['X-Plex-Token'] = self.token
r = http_comm(url, method, headers)
try:
response = json.loads( r.content )
return response
except:
return None
def get(self, path):
return self.query(path, "GET")
def post(self, path):
return self.query(path, "POST")
def delete(self, path):
return self.query(path, "DELETE")
def getClients(self):
path = "/clients"
response = self.get(path)
try:
return response['MediaContainer']['Server']
except:
return []
def getSections(self):
path = "/library/sections"
response = self.get(path)
try:
return response['MediaContainer']['Directory']
except:
return []
def getAlbums(self, section):
path = "/library/sections/{}/albums".format(section)
response = self.get(path)
try:
albums = response['MediaContainer']['Metadata']
return albums
except:
return []
def getServerInfo(self):
path = ""
response = self.get(path)
try:
return response['MediaContainer']
except:
return {}
def getPlaylists(self):
path = "/playlists"
response = self.get(path)
try:
return response['MediaContainer']['Metadata']
except:
return []
# takes a dict item as returned from getPlaylists
def deletePlaylist(self, playlist):
playlist_key = playlist['key']
path = playlist_key.replace("/items", "")
return self.delete(path)
# takes a list of album dict items as returned from getAlbums
def createPlaylistOfAlbums(self, title, album_list, guid):
key_list = []
for a in album_list:
key_num = a['key'].replace("/children","").replace("/library/metadata/", "")
key_list.append(key_num)
path = "/playlists"
path += "?type=audio"
path += "&title={}".format(title)
path += "&smart=0"
path += "&uri=library://{}/directory//library/metadata/".format(guid)
path += ",".join(key_list)
response = self.post(path)
try:
return response['MediaContainer']['Metadata'][0]
except:
return []
def createPlayQueueForPlaylist(self, playlist_id):
path = "/playQueues"
path += "?playlistID={}".format(playlist_id)
path += "&shuffle=0&type=audio&includeChapters=1&includeRelated=1"
return self.post(path)['MediaContainer']
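# Illustrative sketch (hypothetical title and ids, not part of the original
# script): createPlaylistOfAlbums strips "/children" and "/library/metadata/"
# from each album key and posts a request shaped like
#   /playlists?type=audio&title=MyList&smart=0
#     &uri=library://<section uuid>/directory//library/metadata/123,456
# so the Plex server builds the playlist from the listed album ids.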
def get_music_sections(server_ip, server_port, token):
server = PlexServer(server_ip, server_port, token)
music_sections = []
# Look for music sections
sections = server.getSections()
for s in sections:
if s['type'] == 'artist':
music_sections.append(s)
return music_sections
def generate_playlist(server_ip, server_port, token, section, playlist_name, list_size):
server = PlexServer(server_ip, server_port, token)
max_num_of_random_albums = list_size
section_key = section['key']
section_uuid = section['uuid']
# list all albums for section
print "Getting full album list from music section..."
all_albums = server.getAlbums(section_key)
# TODO: filter out unwanted genres here...
num_of_random_albums = min(max_num_of_random_albums, len(all_albums))
# choose random set of albums
print "Creating random list of {} albums".format(num_of_random_albums)
random_album_list = []
while len(random_album_list) < num_of_random_albums:
idx = random.randrange(len(all_albums))
a = all_albums[idx]
if a not in random_album_list:
print u" {} - {}".format(a['title'], a['parentTitle'])
random_album_list.append(a)
if not random_album_list:
print "No albums in random list. Done."
return
# Delete old playlist with the same name, if it exists
print "Getting list of existing playlists..."
playlists = server.getPlaylists()
for p in playlists:
if p['title'] == playlist_name:
print u"Deleting playlist: [{}]...".format(playlist_name)
server.deletePlaylist(p)
break
# create new playlist with the selected albums
print u"Creating playlist: [{}]".format(playlist_name)
playlist = server.createPlaylistOfAlbums(playlist_name, random_album_list, section_uuid)
return playlist
def get_clients(server_ip, server_port, token):
server = PlexServer(server_ip, server_port, token)
return server.getClients()
def play_on_client(server_ip, server_port, token, client, playlist):
server = PlexServer(server_ip, server_port, token)
CLIENT_IP = client['host']
CLIENT_PORT = client['port']
MEDIA_ID = playlist['ratingKey']
CLIENT_ID = client['machineIdentifier']
SERVER_ID = server.getServerInfo()['machineIdentifier']
# Make a playqueue for the playlist
playqueue = server.createPlayQueueForPlaylist(MEDIA_ID)
playqueue_selected_metadata_item_id = playqueue[u'playQueueSelectedMetadataItemID']
playqueue_id = playqueue[u'playQueueID']
# Tell the client to play the playlist
url = "http://{}:{}/player/playback/playMedia".format(CLIENT_IP,CLIENT_PORT)
url += "?key=%2Flibrary%2Fmetadata%2F{}".format(playqueue_selected_metadata_item_id)
url += "&offset=0"
#url += "&X-Plex-Client-Identifier={}".format(CLIENT_ID)
url += "&machineIdentifier={}".format(SERVER_ID)
url += "&address={}".format(server_ip)
url += "&port={}".format(server_port)
url += "&protocol=http"
url += "&containerKey=%2FplayQueues%2F{}%3Fown%3D1%26window%3D200".format(playqueue_id)
url += "&commandID=2"
headers = dict()
headers['X-Plex-Target-Client-Identifier'] = CLIENT_ID
r = http_comm(url, "GET", headers=headers)
print r.content
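# Note (hedged; based on behaviour observed from Plex clients rather than an
# official API reference): in the playMedia request above, "key" URL-decodes to
# /library/metadata/<selected item id> and "containerKey" URL-decodes to
# /playQueues/<playqueue id>?own=1&window=200.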
def test():
name = "ShuffleByAlbum"
list_size = 15
server_ip = "localhost"
server_port = 32400
token = "9494tdZFWpKRXsWV6Fjp"
music_sections = get_music_sections(server_ip, server_port, token)
if not music_sections:
print "No music sections"
return
# choose the first section
section = music_sections[0]
playlist = generate_playlist(server_ip, server_port, token, section, name, list_size)
clients = get_clients(server_ip, server_port, token)
new_list = []
for c in clients:
if c['product'] != "Plex Web":
new_list.append(c)
clients = new_list
if not clients:
print "No clients"
return
# choose the first client
client = clients[0]
try:
play_on_client(server_ip, server_port, token, client, playlist)
except:
print "Error talking to client"
#------------------------------------
if __name__ == "__main__":
test()
|
|
"""Growatt Sensor definitions for the Mix type."""
from __future__ import annotations
from homeassistant.components.sensor import SensorDeviceClass, SensorStateClass
from homeassistant.const import (
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_KILO_WATT,
POWER_WATT,
)
from .sensor_entity_description import GrowattSensorEntityDescription
MIX_SENSOR_TYPES: tuple[GrowattSensorEntityDescription, ...] = (
# Values from 'mix_info' API call
GrowattSensorEntityDescription(
key="mix_statement_of_charge",
name="Statement of charge",
api_key="capacity",
native_unit_of_measurement=PERCENTAGE,
device_class=SensorDeviceClass.BATTERY,
),
GrowattSensorEntityDescription(
key="mix_battery_charge_today",
name="Battery charged today",
api_key="eBatChargeToday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_battery_charge_lifetime",
name="Lifetime battery charged",
api_key="eBatChargeTotal",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL,
),
GrowattSensorEntityDescription(
key="mix_battery_discharge_today",
name="Battery discharged today",
api_key="eBatDisChargeToday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_battery_discharge_lifetime",
name="Lifetime battery discharged",
api_key="eBatDisChargeTotal",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL,
),
GrowattSensorEntityDescription(
key="mix_solar_generation_today",
name="Solar energy today",
api_key="epvToday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_solar_generation_lifetime",
name="Lifetime solar energy",
api_key="epvTotal",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL,
),
GrowattSensorEntityDescription(
key="mix_battery_discharge_w",
name="Battery discharging W",
api_key="pDischarge1",
native_unit_of_measurement=POWER_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_battery_voltage",
name="Battery voltage",
api_key="vbat",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
),
GrowattSensorEntityDescription(
key="mix_pv1_voltage",
name="PV1 voltage",
api_key="vpv1",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
),
GrowattSensorEntityDescription(
key="mix_pv2_voltage",
name="PV2 voltage",
api_key="vpv2",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
),
# Values from 'mix_totals' API call
GrowattSensorEntityDescription(
key="mix_load_consumption_today",
name="Load consumption today",
api_key="elocalLoadToday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_load_consumption_lifetime",
name="Lifetime load consumption",
api_key="elocalLoadTotal",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL,
),
GrowattSensorEntityDescription(
key="mix_export_to_grid_today",
name="Export to grid today",
api_key="etoGridToday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_export_to_grid_lifetime",
name="Lifetime export to grid",
api_key="etogridTotal",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL,
),
# Values from 'mix_system_status' API call
GrowattSensorEntityDescription(
key="mix_battery_charge",
name="Battery charging",
api_key="chargePower",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_load_consumption",
name="Load consumption",
api_key="pLocalLoad",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_wattage_pv_1",
name="PV1 Wattage",
api_key="pPv1",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_wattage_pv_2",
name="PV2 Wattage",
api_key="pPv2",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_wattage_pv_all",
name="All PV Wattage",
api_key="ppv",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_export_to_grid",
name="Export to grid",
api_key="pactogrid",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_import_from_grid",
name="Import from grid",
api_key="pactouser",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_battery_discharge_kw",
name="Battery discharging kW",
api_key="pdisCharge1",
native_unit_of_measurement=POWER_KILO_WATT,
device_class=SensorDeviceClass.POWER,
),
GrowattSensorEntityDescription(
key="mix_grid_voltage",
name="Grid voltage",
api_key="vAc1",
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
device_class=SensorDeviceClass.VOLTAGE,
),
# Values from 'mix_detail' API call
GrowattSensorEntityDescription(
key="mix_system_production_today",
name="System production today (self-consumption + export)",
api_key="eCharge",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_load_consumption_solar_today",
name="Load consumption today (solar)",
api_key="eChargeToday",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_self_consumption_today",
name="Self consumption today (solar + battery)",
api_key="eChargeToday1",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_load_consumption_battery_today",
name="Load consumption today (battery)",
api_key="echarge1",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
GrowattSensorEntityDescription(
key="mix_import_from_grid_today",
name="Import from grid today (load)",
api_key="etouser",
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
),
# This sensor is manually created using the most recent X-Axis value from the chartData
GrowattSensorEntityDescription(
key="mix_last_update",
name="Last Data Update",
api_key="lastdataupdate",
native_unit_of_measurement=None,
device_class=SensorDeviceClass.TIMESTAMP,
),
# Values from 'dashboard_data' API call
GrowattSensorEntityDescription(
key="mix_import_from_grid_today_combined",
name="Import from grid today (load + charging)",
api_key="etouser_combined", # This id is not present in the raw API data, it is added by the sensor
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
),
)
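# Illustrative sketch (not the integration's actual update code) of how these
# descriptions are typically consumed: the merged API response is kept as a
# dict and each sensor entity looks up its raw value by api_key, e.g.
#   value = api_data.get(description.api_key)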
|
|
"""Support for Wireless Sensor Tags."""
import logging
from requests.exceptions import HTTPError, ConnectTimeout
import voluptuous as vol
from homeassistant.const import (
ATTR_BATTERY_LEVEL, ATTR_VOLTAGE, CONF_USERNAME, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
from homeassistant import util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.dispatcher import (
dispatcher_send)
_LOGGER = logging.getLogger(__name__)
# Strength of signal in dBm
ATTR_TAG_SIGNAL_STRENGTH = 'signal_strength'
# Indicates if tag is out of range or not
ATTR_TAG_OUT_OF_RANGE = 'out_of_range'
# Power consumption as a percentage of the tag receiver's maximum power
ATTR_TAG_POWER_CONSUMPTION = 'power_consumption'
NOTIFICATION_ID = 'wirelesstag_notification'
NOTIFICATION_TITLE = "Wireless Sensor Tag Setup"
DOMAIN = 'wirelesstag'
DEFAULT_ENTITY_NAMESPACE = 'wirelesstag'
# Template for signal - first parameter is tag_id,
# second, tag manager mac address
SIGNAL_TAG_UPDATE = 'wirelesstag.tag_info_updated_{}_{}'
# Template for signal - tag_id, sensor type and
# tag manager mac address
SIGNAL_BINARY_EVENT_UPDATE = 'wirelesstag.binary_event_updated_{}_{}_{}'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
class WirelessTagPlatform:
"""Principal object to manage all registered in HA tags."""
def __init__(self, hass, api):
"""Designated initializer for wirelesstags platform."""
self.hass = hass
self.api = api
self.tags = {}
self._local_base_url = None
@property
def tag_manager_macs(self):
"""Return list of tag managers mac addresses in user account."""
return self.api.mac_addresses
def load_tags(self):
"""Load tags from remote server."""
self.tags = self.api.load_tags()
return self.tags
def arm(self, switch):
"""Arm entity sensor monitoring."""
func_name = 'arm_{}'.format(switch.sensor_type)
arm_func = getattr(self.api, func_name)
if arm_func is not None:
arm_func(switch.tag_id, switch.tag_manager_mac)
def disarm(self, switch):
"""Disarm entity sensor monitoring."""
func_name = 'disarm_{}'.format(switch.sensor_type)
disarm_func = getattr(self.api, func_name)
if disarm_func is not None:
disarm_func(switch.tag_id, switch.tag_manager_mac)
def make_notifications(self, binary_sensors, mac):
"""Create configurations for push notifications."""
_LOGGER.info("Creating configurations for push notifications.")
configs = []
bi_url = self.binary_event_callback_url
for bi_sensor in binary_sensors:
configs.extend(bi_sensor.event.build_notifications(bi_url, mac))
update_url = self.update_callback_url
from wirelesstagpy import NotificationConfig as NC
update_config = NC.make_config_for_update_event(update_url, mac)
configs.append(update_config)
return configs
def install_push_notifications(self, binary_sensors):
"""Register local push notification from tag manager."""
_LOGGER.info("Registering local push notifications.")
for mac in self.tag_manager_macs:
configs = self.make_notifications(binary_sensors, mac)
# install notifications for all tags in tag manager
# specified by mac
result = self.api.install_push_notification(0, configs, True, mac)
if not result:
self.hass.components.persistent_notification.create(
"Error: failed to install local push notifications <br />",
title="Wireless Sensor Tag Setup Local Push Notifications",
notification_id="wirelesstag_failed_push_notification")
else:
_LOGGER.info("Installed push notifications for all\
tags in %s.", mac)
@property
def local_base_url(self):
"""Define base url of hass in local network."""
if self._local_base_url is None:
self._local_base_url = "http://{}".format(util.get_local_ip())
port = self.hass.config.api.port
if port is not None:
self._local_base_url += ':{}'.format(port)
return self._local_base_url
@property
def update_callback_url(self):
"""Return url for local push notifications(update event)."""
return '{}/api/events/wirelesstag_update_tags'.format(
self.local_base_url)
@property
def binary_event_callback_url(self):
"""Return url for local push notifications(binary event)."""
return '{}/api/events/wirelesstag_binary_event'.format(
self.local_base_url)
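    # Example (illustrative): with a local IP of 192.168.1.10 and Home
    # Assistant's default port 8123, update_callback_url resolves to
    # "http://192.168.1.10:8123/api/events/wirelesstag_update_tags".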
def handle_update_tags_event(self, event):
"""Handle push event from wireless tag manager."""
_LOGGER.info("push notification for update arrived: %s", event)
try:
tag_id = event.data.get('id')
mac = event.data.get('mac')
dispatcher_send(
self.hass,
SIGNAL_TAG_UPDATE.format(tag_id, mac),
event)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Unable to handle tag update event:\
%s error: %s", str(event), str(ex))
def handle_binary_event(self, event):
"""Handle push notifications for binary (on/off) events."""
_LOGGER.info("Push notification for binary event arrived: %s", event)
try:
tag_id = event.data.get('id')
event_type = event.data.get('type')
mac = event.data.get('mac')
dispatcher_send(
self.hass,
SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac),
event)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Unable to handle tag binary event:\
%s error: %s", str(event), str(ex))
def setup(hass, config):
"""Set up the Wireless Sensor Tag component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
try:
from wirelesstagpy import (WirelessTags, WirelessTagsException)
wirelesstags = WirelessTags(username=username, password=password)
platform = WirelessTagPlatform(hass, wirelesstags)
platform.load_tags()
hass.data[DOMAIN] = platform
except (ConnectTimeout, HTTPError, WirelessTagsException) as ex:
_LOGGER.error("Unable to connect to wirelesstag.net service: %s",
str(ex))
hass.components.persistent_notification.create(
"Error: {}<br />"
"Please restart hass after fixing this."
"".format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
# listen to custom events
hass.bus.listen('wirelesstag_update_tags',
hass.data[DOMAIN].handle_update_tags_event)
hass.bus.listen('wirelesstag_binary_event',
hass.data[DOMAIN].handle_binary_event)
return True
class WirelessTagBaseSensor(Entity):
"""Base class for HA implementation for Wireless Sensor Tag."""
def __init__(self, api, tag):
"""Initialize a base sensor for Wireless Sensor Tag platform."""
self._api = api
self._tag = tag
self._uuid = self._tag.uuid
self.tag_id = self._tag.tag_id
self.tag_manager_mac = self._tag.tag_manager_mac
self._name = self._tag.name
self._state = None
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def principal_value(self):
"""Return base value.
Subclasses need override based on type of sensor.
"""
return 0
def updated_state_value(self):
"""Return formatted value.
The default implementation formats principal value.
"""
return self.decorate_value(self.principal_value)
# pylint: disable=no-self-use
def decorate_value(self, value):
"""Decorate input value to be well presented for end user."""
return '{:.1f}'.format(value)
@property
def available(self):
"""Return True if entity is available."""
return self._tag.is_alive
def update(self):
"""Update state."""
if not self.should_poll:
return
updated_tags = self._api.load_tags()
updated_tag = updated_tags[self._uuid]
if updated_tag is None:
_LOGGER.error('Unable to update tag: "%s"', self.name)
return
self._tag = updated_tag
self._state = self.updated_state_value()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_BATTERY_LEVEL: int(self._tag.battery_remaining*100),
ATTR_VOLTAGE: '{:.2f}V'.format(self._tag.battery_volts),
ATTR_TAG_SIGNAL_STRENGTH: '{}dBm'.format(
self._tag.signal_strength),
ATTR_TAG_OUT_OF_RANGE: not self._tag.is_in_range,
ATTR_TAG_POWER_CONSUMPTION: '{:.2f}%'.format(
self._tag.power_consumption)
}
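# Example configuration.yaml entry (illustrative; the keys follow CONFIG_SCHEMA
# defined above):
#
# wirelesstag:
#   username: YOUR_USERNAME
#   password: YOUR_PASSWORD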
|
|
from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
import os
import fnmatch
import textwrap
import io
import distutils.errors
import itertools
from setuptools.extern.six.moves import map, filter, filterfalse
try:
from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
class Mixin2to3:
def run_2to3(self, files, doctests=True):
"do nothing"
class build_py(orig.build_py, Mixin2to3):
"""Enhanced 'build_py' command that includes data files with packages
The data files are specified via a 'package_data' argument to 'setup()'.
See 'setuptools.dist.Distribution' for more details.
Also, this version of the 'build_py' command allows you to specify both
'py_modules' and 'packages' in the same setup operation.
"""
def finalize_options(self):
orig.build_py.finalize_options(self)
self.package_data = self.distribution.package_data
self.exclude_package_data = (self.distribution.exclude_package_data or
{})
if 'data_files' in self.__dict__:
del self.__dict__['data_files']
self.__updated_files = []
self.__doctests_2to3 = []
def run(self):
"""Build modules, packages, and copy data files to build directory"""
if not self.py_modules and not self.packages:
return
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.run_2to3(self.__updated_files, False)
self.run_2to3(self.__updated_files, True)
self.run_2to3(self.__doctests_2to3, True)
# Only compile actual .py files, using our base class' idea of what our
# output files are.
self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
def __getattr__(self, attr):
"lazily compute data files"
if attr == 'data_files':
self.data_files = self._get_data_files()
return self.data_files
return orig.build_py.__getattr__(self, attr)
def build_module(self, module, module_file, package):
outfile, copied = orig.build_py.build_module(self, module, module_file,
package)
if copied:
self.__updated_files.append(outfile)
return outfile, copied
def _get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
self.analyze_manifest()
return list(map(self._get_pkg_data_files, self.packages or ()))
def _get_pkg_data_files(self, package):
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Strip directory from globbed filenames
filenames = [
os.path.relpath(file, src_dir)
for file in self.find_data_files(package, src_dir)
]
return package, src_dir, build_dir, filenames
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
patterns = self._get_platform_patterns(
self.package_data,
package,
src_dir,
)
globs_expanded = map(glob, patterns)
# flatten the expanded globs into an iterable of matches
globs_matches = itertools.chain.from_iterable(globs_expanded)
glob_files = filter(os.path.isfile, globs_matches)
files = itertools.chain(
self.manifest_files.get(package, []),
glob_files,
)
return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
outf, copied = self.copy_file(srcfile, target)
srcfile = os.path.abspath(srcfile)
if (copied and
srcfile in self.distribution.convert_2to3_doctests):
self.__doctests_2to3.append(outf)
def analyze_manifest(self):
self.manifest_files = mf = {}
if not self.distribution.include_package_data:
return
src_dirs = {}
for package in self.packages or ():
# Locate package source directory
src_dirs[assert_relative(self.get_package_dir(package))] = package
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
for path in ei_cmd.filelist.files:
d, f = os.path.split(assert_relative(path))
prev = None
oldf = f
while d and d != prev and d not in src_dirs:
prev = d
d, df = os.path.split(d)
f = os.path.join(df, f)
if d in src_dirs:
if path.endswith('.py') and f == oldf:
continue # it's a module, not data
mf.setdefault(src_dirs[d], []).append(path)
def get_data_files(self):
pass # Lazily compute data files in _get_data_files() function.
def check_package(self, package, package_dir):
"""Check namespace packages' __init__ for declare_namespace"""
try:
return self.packages_checked[package]
except KeyError:
pass
init_py = orig.build_py.check_package(self, package, package_dir)
self.packages_checked[package] = init_py
if not init_py or not self.distribution.namespace_packages:
return init_py
for pkg in self.distribution.namespace_packages:
if pkg == package or pkg.startswith(package + '.'):
break
else:
return init_py
with io.open(init_py, 'rb') as f:
contents = f.read()
if b'declare_namespace' not in contents:
raise distutils.errors.DistutilsError(
"Namespace package problem: %s is a namespace package, but "
"its\n__init__.py does not call declare_namespace()! Please "
'fix it.\n(See the setuptools manual under '
'"Namespace Packages" for details.)\n"' % (package,)
)
return init_py
def initialize_options(self):
self.packages_checked = {}
orig.build_py.initialize_options(self)
def get_package_dir(self, package):
res = orig.build_py.get_package_dir(self, package)
if self.distribution.src_root is not None:
return os.path.join(self.distribution.src_root, res)
return res
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
files = list(files)
patterns = self._get_platform_patterns(
self.exclude_package_data,
package,
src_dir,
)
match_groups = (
fnmatch.filter(files, pattern)
for pattern in patterns
)
# flatten the groups of matches into an iterable of matches
matches = itertools.chain.from_iterable(match_groups)
bad = set(matches)
keepers = (
fn
for fn in files
if fn not in bad
)
# ditch dupes
return list(_unique_everseen(keepers))
@staticmethod
def _get_platform_patterns(spec, package, src_dir):
"""
        yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
"""
raw_patterns = itertools.chain(
spec.get('', []),
spec.get(package, []),
)
return (
# Each pattern has to be converted to a platform-specific path
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
)
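        # Illustrative example (assumed spec values, not from the setuptools
        # docs): given spec = {'': ['*.txt'], 'mypkg': ['data/*.dat']}, package
        # 'mypkg' and src_dir 'src/mypkg', this yields the patterns
        # 'src/mypkg/*.txt' and 'src/mypkg/data/*.dat'.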
# from Python docs
def _unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def assert_relative(path):
if not os.path.isabs(path):
return path
from distutils.errors import DistutilsSetupError
msg = textwrap.dedent("""
Error: setup script specifies an absolute path:
%s
setup() arguments must *always* be /-separated paths relative to the
setup.py directory, *never* absolute paths.
""").lstrip() % path
raise DistutilsSetupError(msg)
|
|
# -*- coding: utf-8 -*-
import base64
import datetime
import hashlib
import io
import itertools
import json
import os
import random
import shutil
import subprocess
import tarfile
import tempfile
from configparser import ConfigParser
from contextlib import contextmanager
import factory
import factory.fuzzy
import factory.alchemy
import faker
from factory.alchemy import SQLAlchemyModelFactory
from flask import url_for, current_app
from flask_testing import TestCase
from spkrepo import create_app
from spkrepo.ext import db
from spkrepo.models import (Package, Version, Description, Language, Architecture, DisplayName, Icon, Build, Firmware,
User, Role, Service, Screenshot, Download)
fake = faker.Faker()
class QueryFactory(factory.DictFactory):
timezone = fake.timezone().split('/')[1]
language = factory.LazyAttribute(lambda x: random.choice([l.code for l in Language.query.all()]))
arch = factory.LazyAttribute(lambda x: random.choice([Architecture.to_syno.get(a.code, a.code) for a in
Architecture.query.filter(Architecture.code != 'noarch').
all()]))
build = factory.LazyAttribute(lambda x: random.choice([f.build for f in Firmware.query.all()]))
major = factory.LazyAttribute(lambda x: int(Firmware.find(x.build).version.split('.')[0]))
minor = factory.LazyAttribute(lambda x: int(Firmware.find(x.build).version.split('.')[1]))
unique = factory.LazyAttribute(lambda x: 'synology_%s_%s' % (x.arch, str(random.choice([1, 2, 4, 15, 18, 24])) +
str(random.choice([12, 13, 14, 15])) +
random.choice(['', 'j', '+'])))
package_update_channel = factory.fuzzy.FuzzyChoice(['stable', 'beta'])
class UserFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = User
id = factory.Sequence(lambda n: n)
username = factory.LazyAttribute(lambda x: fake.user_name())
email = factory.LazyAttribute(lambda x: fake.email())
password = factory.LazyAttribute(lambda x: fake.password())
api_key = factory.LazyAttribute(lambda x: fake.md5())
github_access_token = None
active = True
confirmed_at = datetime.datetime.now()
class IconFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Icon
id = factory.Sequence(lambda n: n)
size = factory.fuzzy.FuzzyChoice(['72', '120'])
class ScreenshotFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Screenshot
id = factory.Sequence(lambda n: n)
class DisplayNameFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = DisplayName
language = factory.LazyAttribute(lambda x: Language.find('enu'))
displayname = factory.LazyAttribute(lambda x: ' '.join(fake.words(nb=2)).title())
class DescriptionFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Description
language = factory.LazyAttribute(lambda x: Language.find('enu'))
description = factory.LazyAttribute(lambda x: ' '.join(fake.sentences(nb=5)))
class PackageFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Package
id = factory.Sequence(lambda n: n)
name = factory.Sequence(lambda n: 'test_%d' % n)
@factory.post_generation
def add_screenshot(self, create, extracted, **kwargs):
if extracted is None or extracted:
if not self.screenshots:
screenshot_path = os.path.join(self.name, 'screenshot_0.png')
self.screenshots.append(ScreenshotFactory.simple_generate(create, path=screenshot_path))
@classmethod
def _after_postgeneration(cls, obj, create, results=None):
if not create:
return
os.mkdir(os.path.join(current_app.config['DATA_PATH'], obj.name))
for screenshot in obj.screenshots:
screenshot_path = os.path.join(current_app.config['DATA_PATH'], screenshot.path)
if not os.path.exists(screenshot_path):
screenshot.save(create_image('Screenshot %s' % obj.name))
class VersionFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Version
id = factory.Sequence(lambda n: n)
package = factory.SubFactory(PackageFactory)
version = factory.Sequence(lambda n: n)
upstream_version = factory.LazyAttribute(lambda x: '%d.%d.%d' % (fake.random_int(0, 5), fake.random_int(0, 10),
fake.random_int(0, 15)))
changelog = factory.LazyAttribute(lambda x: fake.sentence())
report_url = factory.LazyAttribute(lambda x: fake.url())
distributor = factory.LazyAttribute(lambda x: fake.name())
distributor_url = factory.LazyAttribute(lambda x: fake.url())
maintainer = factory.LazyAttribute(lambda x: fake.name())
maintainer_url = factory.LazyAttribute(lambda x: fake.url())
dependencies = factory.LazyAttribute(lambda x: fake.word())
conf_dependencies = factory.LazyAttribute(lambda x: json.dumps({fake.word(): {'dsm_min_ver': '5.0-4300'}}))
conflicts = factory.LazyAttribute(lambda x: fake.word())
conf_conflicts = factory.LazyAttribute(lambda x: json.dumps({fake.word(): {'dsm_min_ver': '5.0-4300'}}))
install_wizard = False
upgrade_wizard = False
startable = None
license = factory.LazyAttribute(lambda x: fake.text())
service_dependencies = factory.LazyAttribute(lambda x: [random.choice(Service.query.all())])
@factory.post_generation
def add_displayname(self, create, extracted, **kwargs):
if extracted is None or extracted:
if 'enu' not in self.displaynames:
displayname = self.package.name.replace('_', ' ').title()
self.displaynames['enu'] = DisplayNameFactory.simple_generate(create, language=Language.find('enu'),
displayname=displayname)
@factory.post_generation
def add_description(self, create, extracted, **kwargs):
if extracted is None or extracted:
if 'enu' not in self.descriptions:
self.descriptions['enu'] = DescriptionFactory.simple_generate(create, language=Language.find('enu'))
@factory.post_generation
def add_icon(self, create, extracted, **kwargs):
if extracted is None or extracted:
if '72' not in self.icons:
icon_path = os.path.join(self.package.name, str(self.version), 'icon_72.png')
self.icons['72'] = IconFactory.simple_generate(create, path=icon_path, size='72')
@classmethod
def _after_postgeneration(cls, obj, create, results=None):
if not create:
return
os.mkdir(os.path.join(current_app.config['DATA_PATH'], obj.package.name, str(obj.version)))
for size, icon in obj.icons.items():
icon_path = os.path.join(current_app.config['DATA_PATH'], icon.path)
if not os.path.exists(icon_path):
icon.save(create_icon(obj.displaynames['enu'].displayname, int(size)))
class BuildFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Build
version = factory.SubFactory(VersionFactory)
firmware = factory.LazyAttribute(lambda x: random.choice(Firmware.query.all()))
architectures = factory.LazyAttribute(lambda x: [random.choice(Architecture.query.
filter(Architecture.code != 'noarch').
all())])
@factory.post_generation
def create_spk(self, create, extracted, **kwargs):
if not create:
return
build_filename = Build.generate_filename(self.version.package, self.version, self.firmware, self.architectures)
self.path = os.path.join(self.version.package.name, str(self.version.version), build_filename)
with create_spk(self) as spk_stream:
self.save(spk_stream)
if self.md5 is None:
spk_stream.seek(0)
self.md5 = hashlib.md5(spk_stream.read()).hexdigest()
spk_stream.close()
@classmethod
def create_batch(cls, size, **kwargs):
if 'version' in kwargs and 'firmware' not in kwargs and 'architectures' not in kwargs:
combinations = itertools.product(Firmware.query.all(),
Architecture.query.filter(Architecture.code != 'noarch').all())
batch = []
for _ in range(size):
firmware, architecture = next(combinations)
batch.append(cls.create(architectures=[architecture], firmware=firmware, **kwargs))
return batch
return super(BuildFactory, cls).create_batch(size, **kwargs)
class DownloadFactory(SQLAlchemyModelFactory):
class Meta:
sqlalchemy_session = db.session
model = Download
id = factory.Sequence(lambda n: n)
build = factory.SubFactory(BuildFactory)
architecture = factory.LazyAttribute(lambda x: x.build.architectures[0])
firmware_build = factory.LazyAttribute(lambda x: random.choice([f.build for f in Firmware.query.all()]))
ip_address = factory.LazyAttribute(lambda x: fake.ipv4())
user_agent = factory.LazyAttribute(lambda x: fake.user_agent())
date = factory.LazyAttribute(lambda x: fake.date_time_this_month())
def populate_db():
"""Populate the database"""
db.session.execute(Architecture.__table__.insert().values([
{'code': 'noarch'}, {'code': 'cedarview'}, {'code': '88f628x'}, {'code': 'qoriq'}
]))
db.session.execute(Firmware.__table__.insert().values([
{'version': '3.1', 'build': 1594}, {'version': '5.0', 'build': 4458}
]))
db.session.execute(Language.__table__.insert().values([
{'code': 'enu', 'name': 'English'}, {'code': 'fre', 'name': 'French'}
]))
db.session.execute(Role.__table__.insert().values([
{'name': 'admin', 'description': 'Administrator'},
{'name': 'package_admin', 'description': 'Package Administrator'},
{'name': 'developer', 'description': 'Developer'}
]))
db.session.execute(Service.__table__.insert().values([
{'code': 'apache-web'},
{'code': 'mysql'}
]))
# Base test case
class BaseTestCase(TestCase):
DEBUG = False
TESTING = True
LOGIN_DISABLED = False
WTF_CSRF_ENABLED = False
DATA_PATH = tempfile.mkdtemp('spkrepo')
SQLALCHEMY_ECHO = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/test.db' % DATA_PATH
CACHE_NO_NULL_WARNING = True
def create_app(self):
return create_app(config=self)
def setUp(self):
if not os.path.exists(self.DATA_PATH):
os.mkdir(self.DATA_PATH)
db.drop_all()
db.create_all()
populate_db()
db.session.commit()
db.session.autoflush = False
def tearDown(self):
db.session.remove()
db.drop_all()
db.session.autoflush = True
shutil.rmtree(self.DATA_PATH)
def login(self, email, password):
"""
Perform a login action
:param email: email of the user
:param password: password of the user
:return: the response
"""
return self.client.post(url_for('security.login'), data=dict(email=email, password=password),
follow_redirects=True)
def logout(self):
"""
Perform a logout action
:return: the response
"""
return self.client.get(url_for('security.logout'), follow_redirects=True)
def create_user(self, *args, **kwargs):
"""
Create a user with the given roles
:param args: role names for the created user
:param kwargs: attributes to pass to the :class:`UserFactory`
:return: the created user
"""
user = UserFactory(roles=[Role.query.filter_by(name=role).one() for role in args], **kwargs)
db.session.commit()
return user
@contextmanager
def logged_user(self, *args, **kwargs):
"""
Create a user with the given roles and perform login action
:param args: role names for the created user
:param kwargs: attributes to pass to the :class:`UserFactory`
:return: the logged user
"""
user = self.create_user(*args, **kwargs)
self.login(user.email, user.password)
yield user
self.logout()
def assert201(self, response, message=None):
"""
Check if response status code is 201
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 201, message)
def assert302(self, response, message=None):
"""
Check if response status code is 302
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 302, message)
def assertRedirectsTo(self, response, location, message=None):
"""
Check if response is a redirect
:param response: Flask response
:param location: the redirect location
:param message: Message to display on test failure
"""
self.assertRedirects(response, location, message)
def assert409(self, response, message=None):
"""
Check if response status code is 409
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 409, message)
def assert422(self, response, message=None):
"""
Check if response status code is 422
:param response: Flask response
:param message: Message to display on test failure
"""
self.assertStatus(response, 422, message)
def assertHeader(self, response, header, value, message=None):
"""
Check a response header value
:param response: Flask response
:param header: Header name
:param value: Expected value of the header
:param message: Message to display on test failure
"""
self.assertIn(header, response.headers, message)
self.assertEqual(response.headers[header], value, message)
def create_info(build):
"""
Create a dict to emulate the INFO file of a SPK
:param build: build to use to construct the info dict
:type build: :class:`~spkrepo.models.Build`
:return: the info dict
"""
info = {'package': build.version.package.name, 'version': build.version.version_string,
'arch': ' '.join(Architecture.to_syno.get(a.code, a.code) for a in build.architectures),
'displayname': build.version.displaynames['enu'].displayname,
'description': build.version.descriptions['enu'].description,
'firmware': build.firmware.firmware_string}
if build.version.changelog:
info['changelog'] = build.version.changelog
if build.version.report_url:
info['report_url'] = build.version.report_url
if build.version.distributor:
info['distributor'] = build.version.distributor
if build.version.distributor_url:
info['distributor_url'] = build.version.distributor_url
if build.version.maintainer:
info['maintainer'] = build.version.maintainer
if build.version.maintainer_url:
info['maintainer_url'] = build.version.maintainer_url
if build.version.dependencies:
info['install_dep_packages'] = build.version.dependencies
if build.version.conflicts:
info['install_conflict_packages'] = build.version.conflicts
if build.version.service_dependencies:
info['install_dep_services'] = ':'.join([s.code for s in build.version.service_dependencies])
if build.version.startable is not None:
info['startable'] = 'yes' if build.version.startable else 'no'
for l, d in build.version.displaynames.items():
info['displayname_%s' % l] = d.displayname
for l, d in build.version.descriptions.items():
info['description_%s' % l] = d.description
if build.version.conf_dependencies is not None or build.version.conf_conflicts is not None:
info['support_conf_folder'] = 'yes'
return info
def create_icon(text, size=72):
"""
Create a square icon with some `text` and the given `size`
:param text: text to display in the icon
:param int size: size of the icon
:return: the icon stream
"""
return create_image(text, size, size)
def create_image(text, width=640, height=480):
"""
    Create an image with some `text` and the given `width` and `height`
:param text: text to display in the image
:param int width: width of the image
:param int height: height of the image
:return: the image stream
"""
command = ['convert', '-size', '%dx%d' % (width, height), 'canvas:none', '-gravity', 'Center',
'-fill', 'grey', '-draw', 'roundRectangle 0,0 %d,%d 15,15' % (width, height),
'-fill', 'black', '-pointsize', '12', '-draw', 'text 0,0 \'%s\'' % text,
'png:-']
screenshot_stream = io.BytesIO()
process = subprocess.Popen(command, stdout=subprocess.PIPE)
screenshot_stream.write(process.communicate()[0])
screenshot_stream.seek(0)
return screenshot_stream
def create_spk(build, info=None, signature=None, with_checksum=False, with_package_icons=True, with_info_icons=False,
with_info=True, with_package=True, with_scripts=True, with_conf=False, info_encoding='utf-8',
license_encoding='utf-8', signature_encoding='ascii', conf_dependencies_encoding='utf-8',
conf_conflicts_encoding='utf-8', conf_privilege_encoding='utf-8', conf_resource_encoding='utf-8'):
"""
Create a valid SPK file
:param build: base build on which the SPK will be built
:type build: :class:`~spkrepo.models.Build`
:param info: INFO dict or `None` to use the result of :func:`create_info`
:type info: dict or io.BytesIO
:param signature: content of the syno_signature.asc file, if any
:param bool with_checksum: whether to compute the checksum and include it in the INFO
:param bool with_package_icons: whether to include the icons in the SPK
:param bool with_info_icons: whether to include the icons in the INFO
:param bool with_info: whether to include the INFO file
:param bool with_package: whether to include the package.tgz file
:param bool with_scripts: whether to include the scripts folder
:param bool with_conf: whether to include the conf folder
:param info_encoding: encoding for the INFO file
:param license_encoding: encoding for the LICENSE file
:param signature_encoding: encoding for the syno_signature.asc file
:param conf_dependencies_encoding: encoding for the conf/PKG_DEPS file
:param conf_conflicts_encoding: encoding for the conf/PKG_CONX file
:param conf_privilege_encoding: encoding for the conf/privilege file
:param conf_resource_encoding: encoding for the conf/resource file
:return: the created SPK stream
"""
# generate an info if none is given
info = info or create_info(build)
# open structure
spk_stream = io.BytesIO()
spk = tarfile.TarFile(fileobj=spk_stream, mode='w')
# license
if build.version.license:
license_stream = io.BytesIO(build.version.license.encode(license_encoding))
license_tarinfo = tarfile.TarInfo('LICENSE')
license_stream.seek(0, io.SEEK_END)
license_tarinfo.size = license_stream.tell()
license_stream.seek(0)
spk.addfile(license_tarinfo, fileobj=license_stream)
# signature
if signature is not None:
signature_stream = io.BytesIO(signature.encode(signature_encoding))
signature_tarinfo = tarfile.TarInfo('syno_signature.asc')
signature_stream.seek(0, io.SEEK_END)
signature_tarinfo.size = signature_stream.tell()
signature_stream.seek(0)
spk.addfile(signature_tarinfo, fileobj=signature_stream)
# conf
    if (with_conf or build.version.conf_dependencies is not None
            or build.version.conf_conflicts is not None
            or build.version.conf_privilege is not None
            or build.version.conf_resource is not None):
conf_folder_tarinfo = tarfile.TarInfo('conf')
conf_folder_tarinfo.type = tarfile.DIRTYPE
conf_folder_tarinfo.mode = 0o755
spk.addfile(conf_folder_tarinfo)
if build.version.conf_dependencies is not None:
conf_tarinfo = tarfile.TarInfo('conf/PKG_DEPS')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_dependencies))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_dependencies_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
if build.version.conf_conflicts is not None:
conf_tarinfo = tarfile.TarInfo('conf/PKG_CONX')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_conflicts))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_conflicts_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
if build.version.conf_privilege is not None:
conf_tarinfo = tarfile.TarInfo('conf/privilege')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_privilege))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_privilege_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
if build.version.conf_resource is not None:
conf_tarinfo = tarfile.TarInfo('conf/resource')
config = ConfigParser()
config.read_dict(json.loads(build.version.conf_resource))
conf_stream = io.StringIO()
config.write(conf_stream)
conf_stream_bytes = io.BytesIO(conf_stream.getvalue().encode(conf_resource_encoding))
conf_stream_bytes.seek(0, io.SEEK_END)
conf_tarinfo.size = conf_stream_bytes.tell()
conf_stream_bytes.seek(0)
spk.addfile(conf_tarinfo, fileobj=conf_stream_bytes)
# wizards
wizards = []
if build.version.install_wizard:
wizards.append('install')
if build.version.upgrade_wizard:
wizards.append('upgrade')
if wizards:
wizard_folder_tarinfo = tarfile.TarInfo('WIZARD_UIFILES')
wizard_folder_tarinfo.type = tarfile.DIRTYPE
wizard_folder_tarinfo.mode = 0o755
spk.addfile(wizard_folder_tarinfo)
for wizard in wizards:
wizard_tarinfo = tarfile.TarInfo('WIZARD_UIFILES/%s_uifile' % wizard)
wizard_stream = io.BytesIO(wizard.encode('utf-8'))
wizard_stream.seek(0, io.SEEK_END)
wizard_tarinfo.size = wizard_stream.tell()
wizard_stream.seek(0)
spk.addfile(wizard_tarinfo, fileobj=wizard_stream)
# scripts
if with_scripts:
scripts_folder_tarinfo = tarfile.TarInfo('scripts')
scripts_folder_tarinfo.type = tarfile.DIRTYPE
scripts_folder_tarinfo.mode = 0o755
spk.addfile(scripts_folder_tarinfo)
for script in ('preinst', 'postinst', 'preuninst', 'postuninst', 'preupgrade', 'postupgrade',
'start-stop-status'):
script_tarinfo = tarfile.TarInfo('scripts/%s' % script)
script_stream = io.BytesIO(script.encode('utf-8'))
script_stream.seek(0, io.SEEK_END)
script_tarinfo.size = script_stream.tell()
script_stream.seek(0)
spk.addfile(script_tarinfo, fileobj=script_stream)
# package
if with_package:
package_stream = io.BytesIO()
package = tarfile.TarFile(fileobj=package_stream, mode='w')
unique = '%s-%d-%d-[%s]' % (build.version.package.name, build.version.version, build.firmware.build,
'-'.join(a.code for a in build.architectures))
unique_stream = io.BytesIO(unique.encode('utf-8'))
unique_tarinfo = tarfile.TarInfo('unique')
unique_stream.seek(0, io.SEEK_END)
unique_tarinfo.size = unique_stream.tell()
unique_stream.seek(0)
package.addfile(unique_tarinfo, fileobj=unique_stream)
unique_stream.close()
package.close()
package_tarinfo = tarfile.TarInfo('package.tgz')
package_stream.seek(0, io.SEEK_END)
package_tarinfo.size = package_stream.tell()
package_stream.seek(0)
spk.addfile(package_tarinfo, fileobj=package_stream)
if 'checksum' not in info and with_checksum:
checksum = hashlib.md5()
package_stream.seek(0)
for chunk in iter(lambda: package_stream.read(io.DEFAULT_BUFFER_SIZE), b''):
checksum.update(chunk)
            info['checksum'] = checksum.hexdigest()
package_stream.close()
# icons
if with_package_icons or with_info_icons:
for size, icon in build.version.icons.items():
with create_icon(build.version.package.name, int(size)) as f:
suffix = '' if size == '72' else '_%s' % size
if with_package_icons:
icon_tarinfo = tarfile.TarInfo('PACKAGE_ICON%s.PNG' % suffix)
f.seek(0, io.SEEK_END)
icon_tarinfo.size = f.tell()
f.seek(0)
spk.addfile(icon_tarinfo, fileobj=f)
if with_info_icons:
f.seek(0)
info['package_icon%s' % suffix] = base64.b64encode(f.read()).decode('utf-8')
# info
if with_info:
if isinstance(info, io.BytesIO):
info_stream = info
else:
b = '\n'.join(['%s="%s"' % (k, v) for k, v in info.items()]).encode(info_encoding)
info_stream = io.BytesIO(b)
info_tarinfo = tarfile.TarInfo('INFO')
info_stream.seek(0, io.SEEK_END)
info_tarinfo.size = info_stream.tell()
info_stream.seek(0)
spk.addfile(info_tarinfo, fileobj=info_stream)
# close structure
spk.close()
spk_stream.seek(0)
return spk_stream
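# Illustrative usage in a test (sketch; assumes an application context and a
# database populated by BaseTestCase.setUp):
#   build = BuildFactory()
#   with create_spk(build, with_checksum=True) as spk_stream:
#       raw_bytes = spk_stream.read()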
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from django import shortcuts
from horizon import views
from horizon.templatetags.horizon import has_permissions # noqa
class MultiTableMixin(object):
"""A generic mixin which provides methods for handling DataTables."""
data_method_pattern = "get_%s_data"
def __init__(self, *args, **kwargs):
super(MultiTableMixin, self).__init__(*args, **kwargs)
self.table_classes = getattr(self, "table_classes", [])
self._data = {}
self._tables = {}
self._data_methods = defaultdict(list)
self.get_data_methods(self.table_classes, self._data_methods)
def _get_data_dict(self):
if not self._data:
for table in self.table_classes:
data = []
name = table._meta.name
func_list = self._data_methods.get(name, [])
for func in func_list:
data.extend(func())
self._data[name] = data
return self._data
def get_data_methods(self, table_classes, methods):
for table in table_classes:
name = table._meta.name
if table._meta.mixed_data_type:
for data_type in table._meta.data_types:
func = self.check_method_exist(self.data_method_pattern,
data_type)
if func:
type_name = table._meta.data_type_name
methods[name].append(self.wrap_func(func,
type_name,
data_type))
else:
func = self.check_method_exist(self.data_method_pattern,
name)
if func:
methods[name].append(func)
def wrap_func(self, data_func, type_name, data_type):
def final_data():
data = data_func()
self.assign_type_string(data, type_name, data_type)
return data
return final_data
def check_method_exist(self, func_pattern="%s", *names):
func_name = func_pattern % names
func = getattr(self, func_name, None)
if not func or not callable(func):
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"in %s." % (func_name, cls_name))
else:
return func
def assign_type_string(self, data, type_name, data_type):
for datum in data:
setattr(datum, type_name, data_type)
def get_tables(self):
if not self.table_classes:
raise AttributeError('You must specify one or more DataTable '
'classes for the "table_classes" attribute '
'on %s.' % self.__class__.__name__)
if not self._tables:
for table in self.table_classes:
if not has_permissions(self.request.user,
table._meta):
continue
func_name = "get_%s_table" % table._meta.name
table_func = getattr(self, func_name, None)
if table_func is None:
tbl = table(self.request, **self.kwargs)
else:
tbl = table_func(self, self.request, **self.kwargs)
self._tables[table._meta.name] = tbl
return self._tables
def get_context_data(self, **kwargs):
context = super(MultiTableMixin, self).get_context_data(**kwargs)
tables = self.get_tables()
for name, table in tables.items():
context["%s_table" % name] = table
return context
def has_prev_data(self, table):
return False
def has_more_data(self, table):
return False
def handle_table(self, table):
name = table.name
data = self._get_data_dict()
self._tables[name].data = data[table._meta.name]
self._tables[name]._meta.has_more_data = self.has_more_data(table)
self._tables[name]._meta.has_prev_data = self.has_prev_data(table)
handled = self._tables[name].maybe_handle()
return handled
def get_server_filter_info(self, request, table=None):
if not table:
table = self.get_table()
filter_action = table._meta._filter_action
if filter_action is None or filter_action.filter_type != 'server':
return None
param_name = filter_action.get_param_name()
filter_string = request.POST.get(param_name)
filter_string_session = request.session.get(param_name, "")
changed = (filter_string is not None
and filter_string != filter_string_session)
if filter_string is None:
filter_string = filter_string_session
filter_field_param = param_name + '_field'
filter_field = request.POST.get(filter_field_param)
filter_field_session = request.session.get(filter_field_param)
if filter_field is None and filter_field_session is not None:
filter_field = filter_field_session
filter_info = {
'action': filter_action,
'value_param': param_name,
'value': filter_string,
'field_param': filter_field_param,
'field': filter_field,
'changed': changed
}
return filter_info
def handle_server_filter(self, request, table=None):
"""Update the table server filter information in the session and
determine if the filter has been changed.
"""
if not table:
table = self.get_table()
filter_info = self.get_server_filter_info(request, table)
if filter_info is None:
return False
request.session[filter_info['value_param']] = filter_info['value']
if filter_info['field_param']:
request.session[filter_info['field_param']] = filter_info['field']
return filter_info['changed']
def update_server_filter_action(self, request, table=None):
"""Update the table server side filter action based on the current
filter. The filter info may be stored in the session and this will
restore it.
"""
if not table:
table = self.get_table()
filter_info = self.get_server_filter_info(request, table)
if filter_info is not None:
action = filter_info['action']
setattr(action, 'filter_string', filter_info['value'])
if filter_info['field_param']:
setattr(action, 'filter_field', filter_info['field'])
class MultiTableView(MultiTableMixin, views.HorizonTemplateView):
"""A class-based generic view to handle the display and processing of
multiple :class:`~horizon.tables.DataTable` classes in a single view.
Three steps are required to use this view: set the ``table_classes``
attribute with a tuple of the desired
:class:`~horizon.tables.DataTable` classes;
define a ``get_{{ table_name }}_data`` method for each table class
which returns a set of data for that table; and specify a template for
the ``template_name`` attribute.
"""
def construct_tables(self):
tables = self.get_tables().values()
# Early out before data is loaded
for table in tables:
preempted = table.maybe_preempt()
if preempted:
return preempted
# Load data into each table and check for action handlers
for table in tables:
handled = self.handle_table(table)
if handled:
return handled
# If we didn't already return a response, returning None continues
# with the view as normal.
return None
def get(self, request, *args, **kwargs):
handled = self.construct_tables()
if handled:
return handled
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
# GET and POST handling are the same
return self.get(request, *args, **kwargs)
class DataTableView(MultiTableView):
"""A class-based generic view to handle basic DataTable processing.
Three steps are required to use this view: set the ``table_class``
attribute with the desired :class:`~horizon.tables.DataTable` class;
define a ``get_data`` method which returns a set of data for the
table; and specify a template for the ``template_name`` attribute.
Optionally, you can override the ``has_more_data`` method to trigger
pagination handling for APIs that support it.
"""
table_class = None
context_object_name = 'table'
def _get_data_dict(self):
if not self._data:
self.update_server_filter_action(self.request)
self._data = {self.table_class._meta.name: self.get_data()}
return self._data
def get_data(self):
return []
def get_tables(self):
if not self._tables:
self._tables = {}
if has_permissions(self.request.user,
self.table_class._meta):
self._tables[self.table_class._meta.name] = self.get_table()
return self._tables
def get_table(self):
# Note: this method cannot be easily memoized, because get_context_data
# uses its cached value directly.
if not self.table_class:
raise AttributeError('You must specify a DataTable class for the '
'"table_class" attribute on %s.'
% self.__class__.__name__)
if not hasattr(self, "table"):
self.table = self.table_class(self.request, **self.kwargs)
return self.table
def get_context_data(self, **kwargs):
context = super(DataTableView, self).get_context_data(**kwargs)
if hasattr(self, "table"):
context[self.context_object_name] = self.table
return context
def post(self, request, *args, **kwargs):
# If the server side table filter changed then go back to the first
# page of data. Otherwise GET and POST handling are the same.
if self.handle_server_filter(request):
return shortcuts.redirect(self.get_table().get_absolute_url())
return self.get(request, *args, **kwargs)
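# Illustrative usage (sketch; InstancesTable, my_api and the template path are
# assumed names, not part of this module):
#
# class InstancesView(DataTableView):
#     table_class = InstancesTable
#     template_name = 'project/instances/index.html'
#
#     def get_data(self):
#         # any iterable of row objects for the table works here
#         return my_api.list_instances(self.request)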
class MixedDataTableView(DataTableView):
"""A class-based generic view to handle DataTable with mixed data
types.
Basic usage is the same as DataTableView.
Three steps are required to use this view:
#. Set the ``table_class`` attribute with desired
:class:`~horizon.tables.DataTable` class. In the class the
``data_types`` list should have at least two elements.
#. Define a ``get_{{ data_type }}_data`` method for each data type
which returns a set of data for the table.
#. Specify a template for the ``template_name`` attribute.
"""
table_class = None
context_object_name = 'table'
def _get_data_dict(self):
if not self._data:
table = self.table_class
self._data = {table._meta.name: []}
for data_type in table.data_types:
func_name = "get_%s_data" % data_type
data_func = getattr(self, func_name, None)
if data_func is None:
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"for %s data type in %s." %
(func_name, data_type, cls_name))
data = data_func()
self.assign_type_string(data, data_type)
self._data[table._meta.name].extend(data)
return self._data
def assign_type_string(self, data, type_string):
for datum in data:
setattr(datum, self.table_class.data_type_name,
type_string)
def get_table(self):
self.table = super(MixedDataTableView, self).get_table()
if not self.table._meta.mixed_data_type:
raise AttributeError('You must have at least two elements in '
'the data_types attribute '
'in table %s to use MixedDataTableView.'
% self.table._meta.name)
return self.table
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from __future__ import unicode_literals
import json
import logging
from datetime import datetime
from google.appengine.api import urlfetch
from google.appengine.api.apiproxy_stub_map import UserRPC
from google.appengine.ext import db, ndb
from typing import List, Tuple, Dict
from mcfw.consts import MISSING
from mcfw.exceptions import HttpBadRequestException
from mcfw.utils import Enum
from rogerthat.bizz.opening_hours import OPENING_HOURS_RED_COLOR, OPENING_HOURS_ORANGE_COLOR
from rogerthat.consts import DEBUG
from rogerthat.models import OpeningHours, OpeningPeriod, OpeningHour, OpeningHourException, ServiceIdentity
from rogerthat.models.settings import SyncedNameValue, SyncedField, ServiceInfo
from rogerthat.rpc import users
from solutions.common.dal import get_solution_settings
from solutions.common.models import SolutionSettings
from solutions.common.models.cityapp import PaddleOrganizationalUnits, PaddleSettings
from solutions.common.to.paddle import PaddleOrganizationalUnitsTO, PaddleOrganizationUnitDetails, PaddleAddress, \
PaddleRegularOpeningHours, PaddlePeriod, PaddleExceptionalOpeningHours
class ServiceInfoSyncProvider(Enum):
PADDLE = 'paddle'
def get_organizational_units(base_url):
# type: (str) -> UserRPC
url = '%s/organizational_units' % base_url
rpc_item = urlfetch.create_rpc(20)
return urlfetch.make_fetch_call(rpc_item, url)
def _get_result(rpc_item):
result = rpc_item.get_result() # type: urlfetch._URLFetchResult
if result.status_code != 200:
logging.debug('%d: %s', result.status_code, result.content)
raise Exception('Error while fetching data from paddle')
if DEBUG:
logging.debug(result.content)
return json.loads(result.content)
def _rpc_to_organization_units(rpc_item):
# type: (UserRPC) -> PaddleOrganizationalUnitsTO
return PaddleOrganizationalUnitsTO.from_dict(_get_result(rpc_item))
def _rpc_to_organization_unit_details(rpc_item):
# type: (UserRPC) -> PaddleOrganizationUnitDetails
return PaddleOrganizationUnitDetails.from_dict(_get_result(rpc_item))
def get_organizational_unit_details(base_url, organizational_unit_id):
# type: (str, str) -> UserRPC
url = '%s/organizational_units/%s' % (base_url, organizational_unit_id)
rpc_item = urlfetch.create_rpc(15)
return urlfetch.make_fetch_call(rpc_item, url)
def get_paddle_info(paddle_settings):
# type: (PaddleSettings) -> PaddleOrganizationalUnits
try:
organizational_units = _rpc_to_organization_units(get_organizational_units(paddle_settings.base_url))
except urlfetch.InvalidURLError:
raise HttpBadRequestException('invalid_url')
rpcs = [get_organizational_unit_details(paddle_settings.base_url, organizational_unit.nid)
for organizational_unit in organizational_units.list]
model = PaddleOrganizationalUnits(key=PaddleOrganizationalUnits.create_key(paddle_settings.service_user))
for organizational_unit, rpc in zip(organizational_units.list, rpcs):
organizational_unit_details = _rpc_to_organization_unit_details(rpc)
model.units.append(organizational_unit_details)
return model
def _get_address_str(address):
# type: (PaddleAddress) -> str
lines = [
address.thoroughfare,
'%s %s' % (address.postal_code, address.locality)
]
return '\n'.join(lines)
def populate_info_from_paddle(paddle_settings, paddle_data):
# type: (PaddleSettings, PaddleOrganizationalUnits) -> List[SolutionSettings]
keys = [ServiceInfo.create_key(users.User(m.service_email), ServiceIdentity.DEFAULT)
for m in paddle_settings.mapping if m.service_email]
    models = {s.key: s for s in ndb.get_multi(keys)} # type: Dict[ndb.Key, ndb.Model]
paddle_mapping = {u.node.nid: u for u in paddle_data.units} # type: Dict[str, PaddleOrganizationUnitDetails]
to_put = []
ndb_puts = []
if not paddle_settings.synced_fields:
# TODO make user editable
paddle_settings.synced_fields = ['addresses', 'email_addresses', 'phone_numbers', 'websites']
ndb_puts.append(paddle_settings)
for mapping in paddle_settings.mapping:
if not mapping.service_email:
continue
provider = ServiceInfoSyncProvider.PADDLE
service_user = users.User(mapping.service_email)
hours_key = OpeningHours.create_key(service_user, ServiceIdentity.DEFAULT)
opening_hours = models.get(hours_key) or OpeningHours(key=hours_key, type=OpeningHours.TYPE_TEXTUAL)
service_info = models.get(ServiceInfo.create_key(service_user, ServiceIdentity.DEFAULT)) # type: ServiceInfo
original_service_info = service_info.to_dict()
service_info.synced_fields = [SyncedField(key=field, provider=provider)
for field in paddle_settings.synced_fields]
paddle_info = paddle_mapping[mapping.paddle_id]
changed, _ = _update_opening_hours_from_paddle(opening_hours, paddle_info)
if 'phone_numbers' in paddle_settings.synced_fields:
service_info.phone_numbers = _sync_paddle_values(service_info.phone_numbers, paddle_info.node.telephone)
if 'email_addresses' in paddle_settings.synced_fields:
service_info.email_addresses = _sync_paddle_values(service_info.email_addresses, paddle_info.node.email)
if 'websites' in paddle_settings.synced_fields:
service_info.websites = _sync_paddle_values(service_info.websites, paddle_info.node.website)
if 'name' in paddle_settings.synced_fields:
service_info.name = paddle_info.node.title
if 'description' in paddle_settings.synced_fields and paddle_info.node.body:
service_info.description = paddle_info.node.body
changed = changed or original_service_info != service_info.to_dict()
if changed:
sln_settings = get_solution_settings(service_user)
sln_settings.updates_pending = True
to_put.append(sln_settings)
ndb_puts.append(service_info)
db.put(to_put)
ndb.put_multi(ndb_puts)
return to_put
def _sync_paddle_values(values, paddle_value):
    # type: (List[SyncedNameValue], str) -> List[SyncedNameValue]
value_found = False
# Take copy so we can remove elements from the original list
for val in list(values):
if val.provider == ServiceInfoSyncProvider.PADDLE:
value_found = True
if val.value != paddle_value:
if not paddle_value:
values.remove(val)
else:
val.value = paddle_value
if not value_found and paddle_value:
value = SyncedNameValue()
value.value = paddle_value
value.provider = ServiceInfoSyncProvider.PADDLE
values.append(value)
return values
def _update_opening_hours_from_paddle(opening_hours, paddle_data):
# type: (OpeningHours, PaddleOrganizationUnitDetails) -> Tuple[bool, OpeningHours]
opening_hours.type = OpeningHours.TYPE_STRUCTURED
new_periods = _paddle_periods_to_periods(paddle_data.opening_hours.regular, None)
closing_days = [_paddle_closing_period_to_exception(closing_day)
for closing_day in paddle_data.opening_hours.closing_days]
new_exceptional_opening_hours = [_convert_paddle_exceptional_hour(exception)
for exception in paddle_data.opening_hours.exceptional_opening_hours]
existing_opening_hours = opening_hours.to_dict()
opening_hours.periods = new_periods
opening_hours.exceptional_opening_hours = closing_days + new_exceptional_opening_hours
opening_hours.sort_dates()
new_opening_hours = opening_hours.to_dict()
    # Comparing dicts with != is a deep comparison in Python
changed = existing_opening_hours != new_opening_hours
if changed:
opening_hours.put()
return changed, opening_hours
def _paddle_periods_to_periods(hours, color):
# type: (PaddleRegularOpeningHours, str) -> List[OpeningPeriod]
periods = []
days = [hours.sunday, hours.monday, hours.tuesday, hours.wednesday, hours.thursday, hours.friday, hours.saturday]
for day_number, paddle_periods in enumerate(days):
if paddle_periods is MISSING:
continue
for paddle_period in paddle_periods:
period = OpeningPeriod()
period.open = OpeningHour()
period.open.day = day_number
period.open.time = _paddle_time_to_time(paddle_period.start)
period.close = OpeningHour()
period.close.day = day_number
period.close.time = _paddle_time_to_time(paddle_period.end)
period.description = paddle_period.description
period.description_color = color
periods.append(period)
return periods
def _paddle_time_to_time(paddle_time):
return paddle_time.replace(':', '')
def _convert_paddle_exceptional_hour(exceptional_hours):
# type: (PaddleExceptionalOpeningHours) -> OpeningHourException
orange_color = OPENING_HOURS_ORANGE_COLOR
exception = OpeningHourException()
exception.start_date = _parse_paddle_date(exceptional_hours.start)
exception.end_date = _parse_paddle_date(exceptional_hours.end) or exception.start_date
exception.description = exceptional_hours.description
exception.description_color = orange_color
exception.periods = _paddle_periods_to_periods(exceptional_hours.opening_hours, orange_color)
return exception
def _paddle_closing_period_to_exception(paddle_period):
# type: (PaddlePeriod) -> OpeningHourException
exception = OpeningHourException()
exception.start_date = _parse_paddle_date(paddle_period.start)
exception.end_date = _parse_paddle_date(paddle_period.end) or exception.start_date
exception.description = paddle_period.description
exception.description_color = OPENING_HOURS_RED_COLOR
exception.periods = []
return exception
def _parse_paddle_date(date):
    # Not actually an ISO date; the format is dd-mm-yyyy
return datetime.strptime(date, '%d-%m-%Y').date() if date else None
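# Illustrative examples (not part of the original module) of the two helpers
# above, assuming Paddle returns times as 'HH:MM' strings and dates as
# 'dd-mm-yyyy' strings (the values below are made up):
#   _paddle_time_to_time('08:30')    -> '0830'
#   _parse_paddle_date('24-12-2020') -> datetime.date(2020, 12, 24)
#   _parse_paddle_date(None)         -> None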
|
|
"""
Autopsy Forensic Browser
Copyright 2019-2020 Basis Technology Corp.
Contact: carrier <at> sleuthkit <dot> org
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from java.io import File
from java.lang import Class
from java.lang import ClassNotFoundException
from java.lang import Long
from java.lang import String
from java.sql import ResultSet
from java.sql import SQLException
from java.sql import Statement
from java.util.logging import Level
from java.util import ArrayList
from org.apache.commons.codec.binary import Base64
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.coreutils import MessageNotifyUtil
from org.sleuthkit.autopsy.coreutils import AppSQLiteDB
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestJobContext
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.datamodel import Content
from org.sleuthkit.datamodel import TskCoreException
from org.sleuthkit.datamodel.Blackboard import BlackboardException
from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
from org.sleuthkit.datamodel import Account
from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
from org.sleuthkit.datamodel.blackboardutils.attributes import MessageAttachments
from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import FileAttachment
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import MessageReadStatus
from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import CommunicationDirection
from TskContactsParser import TskContactsParser
from TskMessagesParser import TskMessagesParser
from TskCallLogsParser import TskCallLogsParser
import traceback
import general
class LineAnalyzer(general.AndroidComponentAnalyzer):
"""
Parses the Line App databases for contacts,
message and call log artifacts.
About Line parser for v9.15.1:
- Line Database Design Details:
Line has unique ids associated with their users and with their groups. These ids
are referred to as mid in the database.
Databases:
- naver_line: contains contact and msg artifacts
- call_history: contains call artifacts
Tables:
- naver_line/groups: This table contains group ids paired with metadata
about the group (such as creator, group name, etc).
- naver_line/membership This table maps user mids to group ids. Each record
contains 1 group id and 1 user mid.
- naver_line/chat_history This table contains all chat history for private
(1 to 1) and group conversations. It maps a user mid
or group id to the message details. The user mid and
group id are stored into the same column "chat_id".
If the message direction is incoming, the sender mid
is stored in the from_mid column.
- naver_line/contacts This table contains all Line contacts known to the
device.
- call_history/call_history This table contains all call history for private
and group calls. It maps a user mid or a group id
to the call details. The user mid and group id are
stored in the "caller_mid" column.
- Implementation Details:
1) Both group calls and single calls are extracted in one query. The general approach
is to build one result table with both contact mids and group ids.
               This result is consistently labeled contact_book_w_groups in the queries below.
This table is then joined once onto the messages table to produce all communication
data.
2) Both group chats and single chats are extracted in one query.
"""
def __init__(self):
self._logger = Logger.getLogger(self.__class__.__name__)
self._LINE_PACKAGE_NAME = "jp.naver.line.android"
self._PARSER_NAME = "Line Parser"
self._VERSION = "9.15.1"
def analyze(self, dataSource, fileManager, context):
try:
contact_and_message_dbs = AppSQLiteDB.findAppDatabases(dataSource,
"naver_line", True, self._LINE_PACKAGE_NAME)
calllog_dbs = AppSQLiteDB.findAppDatabases(dataSource,
"call_history", True, self._LINE_PACKAGE_NAME)
for contact_and_message_db in contact_and_message_dbs:
current_case = Case.getCurrentCaseThrows()
helper = CommunicationArtifactsHelper(
current_case.getSleuthkitCase(), self._PARSER_NAME,
contact_and_message_db.getDBFile(), Account.Type.LINE)
self.parse_contacts(contact_and_message_db, helper)
self.parse_messages(contact_and_message_db, helper, current_case)
for calllog_db in calllog_dbs:
current_case = Case.getCurrentCaseThrows()
helper = CommunicationArtifactsHelper(
current_case.getSleuthkitCase(), self._PARSER_NAME,
calllog_db.getDBFile(), Account.Type.LINE)
self.parse_calllogs(dataSource, calllog_db, helper)
except NoCurrentCaseException as ex:
# Error parsing Line databases.
self._logger.log(Level.WARNING, "Error parsing the Line App Databases", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
for contact_and_message_db in contact_and_message_dbs:
contact_and_message_db.close()
for calllog_db in calllog_dbs:
calllog_db.close()
def parse_contacts(self, contacts_db, helper):
try:
contacts_parser = LineContactsParser(contacts_db, self._PARSER_NAME)
while contacts_parser.next():
helper.addContact(
contacts_parser.get_contact_name(),
contacts_parser.get_phone(),
contacts_parser.get_home_phone(),
contacts_parser.get_mobile_phone(),
contacts_parser.get_email(),
contacts_parser.get_other_attributes()
)
contacts_parser.close()
except SQLException as ex:
self._logger.log(Level.WARNING, "Error parsing the Line App Database for contacts", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Error adding artifact to case database... case is not complete.
self._logger.log(Level.SEVERE,
"Error adding Line contact artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Error posting notification to blackboard
self._logger.log(Level.WARNING,
"Error posting Line contact artifacts to blackboard.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
def parse_calllogs(self, dataSource, calllogs_db, helper):
try:
calllogs_db.attachDatabase(
dataSource, "naver_line",
calllogs_db.getDBFile().getParentPath(), "naver")
calllog_parser = LineCallLogsParser(calllogs_db)
while calllog_parser.next():
helper.addCalllog(
calllog_parser.get_call_direction(),
calllog_parser.get_phone_number_from(),
calllog_parser.get_phone_number_to(),
calllog_parser.get_call_start_date_time(),
calllog_parser.get_call_end_date_time(),
calllog_parser.get_call_type()
)
calllog_parser.close()
except SQLException as ex:
self._logger.log(Level.WARNING, "Error parsing the Line App Database for calllogs", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Error adding artifact to case database... case is not complete.
self._logger.log(Level.SEVERE,
"Error adding Line calllog artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Error posting notification to blackboard
self._logger.log(Level.WARNING,
"Error posting Line calllog artifacts to blackboard.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
def parse_messages(self, messages_db, helper, current_case):
try:
messages_parser = LineMessagesParser(messages_db)
while messages_parser.next():
message_artifact = helper.addMessage(
messages_parser.get_message_type(),
messages_parser.get_message_direction(),
messages_parser.get_phone_number_from(),
messages_parser.get_phone_number_to(),
messages_parser.get_message_date_time(),
messages_parser.get_message_read_status(),
messages_parser.get_message_subject(),
messages_parser.get_message_text(),
messages_parser.get_thread_id()
)
if (messages_parser.get_file_attachment() is not None):
file_attachments = ArrayList()
file_attachments.add(FileAttachment(current_case.getSleuthkitCase(), messages_db.getDBFile().getDataSource(), messages_parser.get_file_attachment()))
message_attachments = MessageAttachments(file_attachments, [])
helper.addAttachments(message_artifact, message_attachments)
messages_parser.close()
except SQLException as ex:
self._logger.log(Level.WARNING, "Error parsing the Line App Database for messages.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
except TskCoreException as ex:
#Error adding artifact to case database... case is not complete.
self._logger.log(Level.SEVERE,
"Error adding Line message artifacts to the case database.", ex)
self._logger.log(Level.SEVERE, traceback.format_exc())
except BlackboardException as ex:
#Error posting notification to blackboard
self._logger.log(Level.WARNING,
"Error posting Line message artifacts to blackboard.", ex)
self._logger.log(Level.WARNING, traceback.format_exc())
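# --- Illustrative sketch (not part of the original Autopsy module) ---
# Demonstrates, with plain sqlite3 and made-up rows, the query pattern described
# in the LineAnalyzer docstring: a UNION of group memberships and individual
# contacts ("contact_book_w_groups") joined onto chat_history via chat_id.
# Table and column names mirror the naver_line schema used by the parsers below;
# the data is invented, and the sketch targets a standard CPython interpreter
# rather than the Jython environment this module normally runs in.
def _demo_contact_book_w_groups_join():
    import sqlite3
    con = sqlite3.connect(':memory:')
    con.executescript("""
        CREATE TABLE contacts (m_id TEXT, server_name TEXT);
        CREATE TABLE membership (id TEXT, m_id TEXT);
        CREATE TABLE chat_history (chat_id TEXT, from_mid TEXT, content TEXT);
        INSERT INTO contacts VALUES ('u1', 'Alice'), ('u2', 'Bob');
        INSERT INTO membership VALUES ('g1', 'u1'), ('g1', 'u2');
        INSERT INTO chat_history VALUES ('g1', 'u1', 'hello group'),
                                        ('u2', NULL, 'hello bob');
    """)
    rows = con.execute("""
        SELECT contact_book_w_groups.id, contact_book_w_groups.members,
               messages.from_mid, messages.content
          FROM (SELECT id, GROUP_CONCAT(M.m_id) AS members
                  FROM membership AS M GROUP BY id
                 UNION
                SELECT m_id, NULL FROM contacts) AS contact_book_w_groups
          JOIN chat_history AS messages
            ON messages.chat_id = contact_book_w_groups.id
    """).fetchall()
    # The group chat row carries 'u1,u2' as members; the 1-to-1 row carries NULL.
    return rows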
class LineCallLogsParser(TskCallLogsParser):
"""
Parses out TSK_CALLLOG information from the Line database.
TSK_CALLLOG fields that are not in the line database are given
a default value inherited from the super class.
"""
def __init__(self, calllog_db):
super(LineCallLogsParser, self).__init__(calllog_db.runQuery(
"""
SELECT Substr(calls.call_type, -1) AS direction,
calls.start_time AS start_time,
calls.end_time AS end_time,
contact_book_w_groups.members AS group_members,
calls.caller_mid,
calls.voip_type AS call_type,
calls.voip_gc_media_type AS group_call_type
FROM (SELECT id,
Group_concat(M.m_id) AS members
FROM membership AS M
GROUP BY id
UNION
SELECT m_id,
NULL
FROM naver.contacts) AS contact_book_w_groups
JOIN call_history AS calls
ON calls.caller_mid = contact_book_w_groups.id
"""
)
)
self._OUTGOING_CALL_TYPE = "O"
self._INCOMING_CALL_TYPE = "I"
self._VIDEO_CALL_TYPE = "V"
self._AUDIO_CALL_TYPE = "A"
self._GROUP_CALL_TYPE = "G"
self._GROUP_VIDEO_CALL_TYPE = "VIDEO"
self._GROUP_AUDIO_CALL_TYPE = "AUDIO"
def get_call_direction(self):
direction = self.result_set.getString("direction")
if direction == self._OUTGOING_CALL_TYPE:
return self.OUTGOING_CALL
return self.INCOMING_CALL
def get_call_start_date_time(self):
try:
return long(self.result_set.getString("start_time")) / 1000
except ValueError as ve:
return super(LineCallLogsParser, self).get_call_start_date_time()
def get_call_end_date_time(self):
try:
return long(self.result_set.getString("end_time")) / 1000
except ValueError as ve:
return super(LineCallLogsParser, self).get_call_end_date_time()
def get_phone_number_to(self):
if self.get_call_direction() == self.OUTGOING_CALL:
group_members = self.result_set.getString("group_members")
if group_members is not None:
group_members = group_members.split(",")
return group_members
return self.result_set.getString("caller_mid")
return super(LineCallLogsParser, self).get_phone_number_to()
def get_phone_number_from(self):
if self.get_call_direction() == self.INCOMING_CALL:
return self.result_set.getString("caller_mid")
return super(LineCallLogsParser, self).get_phone_number_from()
def get_call_type(self):
call_type = self.result_set.getString("call_type")
if call_type == self._VIDEO_CALL_TYPE:
return self.VIDEO_CALL
if call_type == self._AUDIO_CALL_TYPE:
return self.AUDIO_CALL
if call_type == self._GROUP_CALL_TYPE:
g_type = self.result_set.getString("group_call_type")
if g_type == self._GROUP_VIDEO_CALL_TYPE:
return self.VIDEO_CALL
if g_type == self._GROUP_AUDIO_CALL_TYPE:
return self.AUDIO_CALL
return super(LineCallLogsParser, self).get_call_type()
class LineContactsParser(TskContactsParser):
"""
Parses out TSK_CONTACT information from the Line database.
TSK_CONTACT fields that are not in the line database are given
a default value inherited from the super class.
"""
def __init__(self, contact_db, analyzer):
super(LineContactsParser, self).__init__(contact_db.runQuery(
"""
SELECT m_id,
server_name
FROM contacts
"""
)
)
self._PARENT_ANALYZER = analyzer
def get_contact_name(self):
return self.result_set.getString("server_name")
def get_other_attributes(self):
return [BlackboardAttribute(
BlackboardAttribute.ATTRIBUTE_TYPE.TSK_ID,
self._PARENT_ANALYZER,
self.result_set.getString("m_id"))]
class LineMessagesParser(TskMessagesParser):
"""
Parse out TSK_MESSAGE information from the Line database.
TSK_MESSAGE fields that are not in the line database are given
a default value inherited from the super class.
"""
def __init__(self, message_db):
super(LineMessagesParser, self).__init__(message_db.runQuery(
"""
SELECT contact_book_w_groups.id,
contact_book_w_groups.members,
messages.from_mid,
messages.content,
messages.created_time,
messages.attachement_type,
messages.attachement_local_uri,
messages.status
FROM (SELECT id,
Group_concat(M.m_id) AS members
FROM membership AS M
GROUP BY id
UNION
SELECT m_id,
NULL
FROM contacts) AS contact_book_w_groups
JOIN chat_history AS messages
ON messages.chat_id = contact_book_w_groups.id
WHERE attachement_type != 6
"""
)
)
self._LINE_MESSAGE_TYPE = "Line Message"
        #From the limited test data, it appeared that incoming messages
        #were only associated with status 1, while statuses 3 and 7
        #were only associated with outgoing messages.
self._INCOMING_MESSAGE_TYPE = 1
def get_message_type(self):
return self._LINE_MESSAGE_TYPE
def get_message_date_time(self):
created_time = self.result_set.getString("created_time")
try:
#Get time in seconds (created_time is stored in ms from epoch)
return long(created_time) / 1000
except ValueError as ve:
return super(LineMessagesParser, self).get_message_date_time()
def get_message_text(self):
content = self.result_set.getString("content")
return content
def get_message_direction(self):
if self.result_set.getInt("status") == self._INCOMING_MESSAGE_TYPE:
return self.INCOMING
return self.OUTGOING
def get_phone_number_from(self):
if self.get_message_direction() == self.INCOMING:
from_mid = self.result_set.getString("from_mid")
if from_mid is not None:
return from_mid
return super(LineMessagesParser, self).get_phone_number_from()
def get_phone_number_to(self):
if self.get_message_direction() == self.OUTGOING:
group = self.result_set.getString("members")
if group is not None:
group = group.split(",")
return group
return self.result_set.getString("id")
return super(LineMessagesParser, self).get_phone_number_to()
def get_thread_id(self):
members = self.result_set.getString("members")
if members is not None:
return self.result_set.getString("id")
return super(LineMessagesParser, self).get_thread_id()
def get_file_attachment(self):
if (self.result_set.getString("attachement_local_uri") is None):
return None
# If "content:" in the beginning of the string we cannot determine at this point where a file resides. Ignoring for
# now unless data can be obtained to determine where the file may reside.
elif ("content:" in self.result_set.getString("attachement_local_uri")):
return None
else:
return self.result_set.getString("attachement_local_uri")
|
|
from rest_framework import serializers as ser
from api.base.serializers import (
JSONAPISerializer,
RelationshipField,
RestrictedDictSerializer,
LinksField,
is_anonymized,
DateByVersion,
HideIfNotNodePointerLog,
HideIfNotRegistrationPointerLog,
)
from osf.models import OSFUser, AbstractNode, PreprintService
from website.util import permissions as osf_permissions
class NodeLogIdentifiersSerializer(RestrictedDictSerializer):
doi = ser.CharField(read_only=True)
ark = ser.CharField(read_only=True)
class NodeLogInstitutionSerializer(RestrictedDictSerializer):
id = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
class NodeLogFileParamsSerializer(RestrictedDictSerializer):
materialized = ser.CharField(read_only=True)
url = ser.URLField(read_only=True)
addon = ser.CharField(read_only=True)
node_url = ser.URLField(read_only=True, source='node.url')
node_title = ser.SerializerMethodField()
def get_node_title(self, obj):
user = self.context['request'].user
node_title = obj['node']['title']
node = AbstractNode.load(obj['node']['_id'])
if not user.is_authenticated:
if node.is_public:
return node_title
elif node.has_permission(user, osf_permissions.READ):
return node_title
return 'Private Component'
class NodeLogParamsSerializer(RestrictedDictSerializer):
addon = ser.CharField(read_only=True)
bucket = ser.CharField(read_only=True)
citation_name = ser.CharField(read_only=True, source='citation.name')
contributors = ser.SerializerMethodField(read_only=True)
data_set = ser.CharField(read_only=True, source='dataset')
destination = NodeLogFileParamsSerializer(read_only=True)
figshare_title = ser.CharField(read_only=True, source='figshare.title')
forward_url = ser.CharField(read_only=True)
github_user = ser.CharField(read_only=True, source='github.user')
github_repo = ser.CharField(read_only=True, source='github.repo')
bitbucket_user = ser.CharField(read_only=True, source='bitbucket.user')
bitbucket_repo = ser.CharField(read_only=True, source='bitbucket.repo')
gitlab_user = ser.CharField(read_only=True, source='gitlab.user')
gitlab_repo = ser.CharField(read_only=True, source='gitlab.repo')
file = ser.DictField(read_only=True)
filename = ser.CharField(read_only=True)
kind = ser.CharField(read_only=True)
folder = ser.CharField(read_only=True)
folder_name = ser.CharField(read_only=True)
license = ser.CharField(read_only=True, source='new_license')
identifiers = NodeLogIdentifiersSerializer(read_only=True)
institution = NodeLogInstitutionSerializer(read_only=True)
old_page = ser.CharField(read_only=True)
page = ser.CharField(read_only=True)
page_id = ser.CharField(read_only=True)
params_node = ser.SerializerMethodField(read_only=True)
params_project = ser.SerializerMethodField(read_only=True)
path = ser.CharField(read_only=True)
pointer = ser.SerializerMethodField(read_only=True)
preprint = ser.CharField(read_only=True)
preprint_provider = ser.SerializerMethodField(read_only=True)
previous_institution = NodeLogInstitutionSerializer(read_only=True)
source = NodeLogFileParamsSerializer(read_only=True)
study = ser.CharField(read_only=True)
tag = ser.CharField(read_only=True)
tags = ser.CharField(read_only=True)
target = NodeLogFileParamsSerializer(read_only=True)
template_node = ser.DictField(read_only=True)
title_new = ser.CharField(read_only=True)
title_original = ser.CharField(read_only=True)
updated_fields = ser.DictField(read_only=True)
urls = ser.DictField(read_only=True)
version = ser.CharField(read_only=True)
wiki = ser.DictField(read_only=True)
anonymous_link = ser.BooleanField(read_only=True)
def get_view_url(self, obj):
urls = obj.get('urls', None)
if urls:
view = urls.get('view', None)
if view:
return view
return None
def get_params_node(self, obj):
node_id = obj.get('node', None)
if node_id:
node = AbstractNode.objects.filter(guids___id=node_id).values('title').get()
return {'id': node_id, 'title': node['title']}
return None
def get_params_project(self, obj):
project_id = obj.get('project', None)
if project_id:
node = AbstractNode.objects.filter(guids___id=project_id).values('title').get()
return {'id': project_id, 'title': node['title']}
return None
def get_pointer(self, obj):
user = self.context['request'].user
pointer = obj.get('pointer', None)
if pointer:
pointer_node = AbstractNode.objects.get(guids___id=pointer['id'])
if not pointer_node.is_deleted:
if pointer_node.is_public or (user.is_authenticated and pointer_node.has_permission(user, osf_permissions.READ)):
pointer['title'] = pointer_node.title
return pointer
return None
def get_contributors(self, obj):
contributor_info = []
if is_anonymized(self.context['request']):
return contributor_info
contributor_ids = obj.get('contributors', None)
params_node = obj.get('node', None)
if contributor_ids:
users = (
OSFUser.objects.filter(guids___id__in=contributor_ids)
.only('fullname', 'given_name',
'middle_names', 'family_name',
'unclaimed_records', 'is_active')
.order_by('fullname')
)
for user in users:
unregistered_name = None
if user.unclaimed_records.get(params_node):
unregistered_name = user.unclaimed_records[params_node].get('name', None)
contributor_info.append({
'id': user._id,
'full_name': user.fullname,
'given_name': user.given_name,
'middle_names': user.middle_names,
'family_name': user.family_name,
'unregistered_name': unregistered_name,
'active': user.is_active
})
return contributor_info
def get_preprint_provider(self, obj):
preprint_id = obj.get('preprint', None)
if preprint_id:
preprint = PreprintService.load(preprint_id)
if preprint:
provider = preprint.provider
return {'url': provider.external_url, 'name': provider.name}
return None
class NodeLogSerializer(JSONAPISerializer):
filterable_fields = frozenset(['action', 'date'])
non_anonymized_fields = [
'id',
'date',
'action',
]
id = ser.CharField(read_only=True, source='_id')
date = DateByVersion(read_only=True)
action = ser.CharField(read_only=True)
params = ser.SerializerMethodField(read_only=True)
links = LinksField({'self': 'get_absolute_url'})
class Meta:
type_ = 'logs'
node = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<node._id>'},
)
original_node = RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<original_node._id>'},
)
user = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'},
)
# This would be a node_link, except that data isn't stored in the node log params
linked_node = HideIfNotNodePointerLog(
RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.pointer.id>'}
)
)
linked_registration = HideIfNotRegistrationPointerLog(
RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<params.pointer.id>'}
)
)
template_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.template_node.id>'}
)
def get_absolute_url(self, obj):
return obj.absolute_url
def get_params(self, obj):
if obj.action == 'osf_storage_folder_created' and obj.params.get('urls'):
obj.params.pop('urls')
return NodeLogParamsSerializer(obj.params, context=self.context, read_only=True).data
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desidatamodel.check functions
"""
import os
import sys
import unittest
from unittest.mock import patch
from pkg_resources import resource_filename
from .datamodeltestcase import DataModelTestCase, DM
from .. import DataModelError
from ..check import (DataModel, collect_files, files_to_regexp, scan_model,
validate_prototypes, log, _options)
class TestCheck(DataModelTestCase):
def test_scan_model(self):
"""Test identification of data model files.
"""
root = os.path.join(os.environ[DM], 'doc', 'examples')
files = scan_model(root)
expected = set([os.path.join(root, f) for f in ('badModel.rst',
'sdR.rst',
'spPlate.rst')])
found = set([p.filename for p in files])
self.assertEqual(expected, found)
def test_files_to_regexp(self):
"""Test compilation of regular expressions.
"""
root = os.path.join(os.environ[DM], 'doc', 'DESI_SPECTRO_DATA')
files = scan_model(root)
files_to_regexp('/desi/spectro/data', files)
regexps = ['/desi/spectro/data/20160703/12345678/coordinates-12345678.fits',
'/desi/spectro/data/20160703/12345678/desi-12345678.fits.fz',
'/desi/spectro/data/20160703/12345678/fiberassign-12345678.fits.gz',
'/desi/spectro/data/20160703/12345678/fibermap-12345678.fits',
'/desi/spectro/data/20160703/00000123/focus-00000123.fits.fz',
'/desi/spectro/data/20160703/00000123/fvc-00000123.fits.fz',
'/desi/spectro/data/20160703/00000123/guide-00000123.fits.fz',
'/desi/spectro/data/20160703/00000123/guide-rois-00000123.fits.fz',
'/desi/spectro/data/20160703/00000123/pm-00000123.fits',
'/desi/spectro/data/20160703/00000123/sky-00000123.fits.fz',]
expected = [os.path.join(root, 'NIGHT', 'EXPID', f) for f in (
'coordinates-EXPID.rst',
'desi-EXPID.rst',
'fiberassign-EXPID.rst',
'fibermap-EXPID.rst',
'focus-EXPID.rst',
'fvc-EXPID.rst',
'guide-EXPID.rst',
'guide-rois-EXPID.rst',
'pm-EXPID.rst',
'sky-EXPID.rst',)]
expected_f2r = dict(zip(expected, regexps))
for f in files:
self.assertRegex(expected_f2r[f.filename], f.regexp,
("{0} does not " +
"match {1}").format(f.regexp.pattern,
expected_f2r[f.filename]))
def test_collect_files(self):
"""Test finding files that correspond to data model files.
"""
test_files = (os.path.join(self.data_dir, 'sdR-12345678.fits'),
os.path.join(self.data_dir, 'sdR-01234567.fits'),
os.path.join(self.data_dir, 'spPlate-1234-54321.fits'),
os.path.join(self.data_dir, 'extraneous.fits'))
for f in test_files:
open(f, 'a').close()
root = os.path.join(os.environ[DM], 'doc', 'examples')
files = scan_model(root)
files_to_regexp(self.data_dir, files)
self.assertInLog(log, ("{0}/doc/examples/badModel.rst has no file " +
"regexp!").format(os.environ[DM]))
collect_files(self.data_dir, files)
self.assertInLog(log, 'Extraneous file detected: {0}'.format(test_files[3]))
for f in files:
if os.path.basename(f.filename) == 'badModel.rst':
self.assertIsNone(f.regexp)
self.assertIsNone(f.prototype)
else:
self.assertIsNotNone(f.regexp)
self.assertIsNotNone(f.prototype)
for f in test_files:
os.remove(f)
def test_collect_files_missing(self):
"""Test finding files when some are missing.
"""
test_files = (os.path.join(self.data_dir, 'sdR-12345678.fits'),
os.path.join(self.data_dir, 'sdR-01234567.fits'))
for f in test_files:
open(f, 'a').close()
root = os.path.join(os.environ[DM], 'doc', 'examples')
files = scan_model(root)
files_to_regexp(self.data_dir, files)
self.assertInLog(log, ("{0}/doc/examples/badModel.rst has no file " +
"regexp!").format(os.environ[DM]))
collect_files(self.data_dir, files)
self.assertInLog(log, ('No files found matching {0}/doc/examples/' +
'spPlate.rst!').format(os.environ[DM]))
for f in test_files:
os.remove(f)
def test_extract_metadata(self):
"""Test reading metadata from data model files.
"""
ex_meta = {'PRIMARY':
{'title': 'HDU0',
'number': 0,
'extension': 'IMAGE',
'extname': 'PRIMARY',
'format': 'Data: FITS image [int16, 100x100]',
'keywords': [('NAXIS1', '100', 'int', ''),
('NAXIS2', '100', 'int', ''),
('BSCALE', '1', 'int', ''),
('BZERO', '32768', 'int',
'Data are really unsigned 16-bit int.'),
('EXTNAME', 'PRIMARY', 'str', '')]},
'Galaxies':
{'title': 'HDU1',
'number': 1,
'extension': 'BINTABLE',
'extname': 'Galaxies',
'format': [('target', 'char[20]', '', ''),
('V_mag', 'float32', 'mag', ''),
('vdisp', 'float64', 'km/s', '')],
'keywords': [('NAXIS1', '32', 'int',
'length of dimension 1'),
('NAXIS2', '3', 'int',
'length of dimension 2')]}}
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
model = DataModel(modelfile, os.path.dirname(modelfile))
meta = model.extract_metadata()
self.assertEqual(len(meta.keys()), len(ex_meta.keys()))
for key, m in meta.items():
self.assertEqual(m['title'], ex_meta[key]['title'])
self.assertEqual(m['number'], ex_meta[key]['number'])
self.assertEqual(m['extension'], ex_meta[key]['extension'])
self.assertEqual(m['extname'], ex_meta[key]['extname'])
for k in range(len(m['keywords'])):
self.assertEqual(m['keywords'][k], ex_meta[key]['keywords'][k])
if m['extension'] == "IMAGE":
self.assertEqual(m['format'], ex_meta[key]['format'])
else:
for k in range(len(m['format'])):
self.assertEqual(m['format'][k], ex_meta[key]['format'][k])
def test_extract_metadata_missing_extname(self):
"""Test reading metadata with missing EXTNAME.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
model = DataModel(modelfile, os.path.dirname(modelfile))
meta = model.extract_metadata()
lines = model._metafile_data.split('\n')
lines[53] = ''
model._metafile_data = '\n'.join(lines) + '\n'
with self.assertRaises(DataModelError) as e:
meta = model.extract_metadata(error=True)
self.assertEqual(str(e.exception), "HDU 1 in {0} has no EXTNAME!".format(modelfile))
meta = model.extract_metadata(error=False)
self.assertLog(log, -1, "HDU 1 in {0} has no EXTNAME!".format(modelfile))
def test_extract_metadata_bad_keyword_unit(self):
"""Test reading metadata with bad FITS BUNIT values.
"""
erg_msg = self.badUnitMessage('ergs')
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
model = DataModel(modelfile, os.path.dirname(modelfile))
meta = model.extract_metadata()
lines = model._metafile_data.split('\n')
lines.insert(46, "BUNIT ergs str This is a bad unit.")
model._metafile_data = '\n'.join(lines) + '\n'
with self.assertRaises(ValueError) as e:
meta = model.extract_metadata(error=True)
self.assertEqual(str(e.exception), erg_msg)
meta = model.extract_metadata(error=False)
self.assertLog(log, -1, "HDU 0 in {0} should have a more meaningful EXTNAME than 'PRIMARY'.".format(modelfile))
self.assertLog(log, -2, erg_msg)
def test_extract_metadata_missing_keyword_unit(self):
"""Test reading metadata with missing units for header keywords.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
model = DataModel(modelfile, os.path.dirname(modelfile))
meta = model.extract_metadata()
lines = model._metafile_data.split('\n')
lines.insert(46, "BUNIT erg This is a bad unit.")
model._metafile_data = '\n'.join(lines) + '\n'
with self.assertRaises(DataModelError) as e:
meta = model.extract_metadata(error=True)
self.assertEqual(str(e.exception), "Missing type for keyword BUNIT in HDU 0 of {0}!".format(modelfile))
meta = model.extract_metadata(error=False)
self.assertLog(log, -1, "HDU 0 in {0} should have a more meaningful EXTNAME than 'PRIMARY'.".format(modelfile))
self.assertLog(log, -2, "Missing type for keyword BUNIT in HDU 0 of {0}!".format(modelfile))
def test_extract_metadata_bad_column_unit(self):
"""Test reading metadata with bad FITS column units.
"""
erg_msg = self.badUnitMessage('ergs')
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
model = DataModel(modelfile, os.path.dirname(modelfile))
meta = model.extract_metadata()
lines = model._metafile_data.split('\n')
lines[75] = 'vdisp float64 ergs'
model._metafile_data = '\n'.join(lines) + '\n'
with self.assertRaises(ValueError) as e:
meta = model.extract_metadata(error=True)
self.assertEqual(str(e.exception), erg_msg)
meta = model.extract_metadata(error=False)
self.assertLog(log, -1, erg_msg)
def test_extract_metadata_missing_column_type(self):
"""Test reading metadata with missing FITS column types.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
model = DataModel(modelfile, os.path.dirname(modelfile))
meta = model.extract_metadata()
lines = model._metafile_data.split('\n')
lines[75] = 'vdisp'
model._metafile_data = '\n'.join(lines) + '\n'
with self.assertRaises(DataModelError) as e:
meta = model.extract_metadata(error=True)
self.assertEqual(str(e.exception), "Missing type for column vdisp in HDU 1 of {0}!".format(modelfile))
meta = model.extract_metadata(error=False)
self.assertLog(log, -1, "Missing type for column vdisp in HDU 1 of {0}!".format(modelfile))
def test_validate_prototypes(self):
"""Test the data model validation function.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
validate_prototypes([f])
def test_validate_prototype_no_prototype(self):
"""Test the data model validation method with no prototype.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
f.prototype = None
f.validate_prototype(error=True)
def test_validate_prototype_hdu_mismatch(self):
"""Test the data model validation method with wrong number of HDUs.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
f.validate_prototype()
f._stub.nhdr = 3
f.validate_prototype(error=True)
self.assertLog(log, -1, "Prototype file {0} has the wrong number of sections (HDUs) according to {1}.".format(modelfile.replace('.rst', '.fits'), modelfile))
def test_validate_prototype_hdu_keyword_mismatch(self):
"""Test the data model validation method with wrong number of HDU keywords.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
f.validate_prototype()
f._stub_meta[0]['keywords'].append(('BUNIT', 'erg', 'str', 'This is a test.'))
f.validate_prototype(error=True)
self.assertLog(log, -1, "File {0} HDU0 extra keywords according to {1}: {{'BUNIT'}}".format(modelfile.replace('.rst', '.fits'), modelfile))
def test_validate_prototype_hdu_wrong_keyword(self):
"""Test the data model validation method with wrong HDU keyword names.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
f.validate_prototype()
f._stub_meta[0]['keywords'][-1] = ('BUNIT', 'erg', 'str', 'This is a test.')
f.validate_prototype(error=True)
self.assertLog(log, -2, "File {0} HDU0 missing keywords according to {1}: {{'BZERO'}}".format(modelfile.replace('.rst', '.fits'), modelfile))
self.assertLog(log, -1, "File {0} HDU0 extra keywords according to {1}: {{'BUNIT'}}".format(modelfile.replace('.rst', '.fits'), modelfile))
def test_validate_prototype_hdu_extension_type(self):
"""Test the data model validation method with wrong HDU extension type.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
f.validate_prototype()
f._stub_meta[1]['extension'] = 'IMAGE'
f.validate_prototype(error=True)
self.assertLog(log, -1, "Prototype file {0} has an extension type mismatch in HDU1 (IMAGE != BINTABLE) according to {1}.".format(modelfile.replace('.rst', '.fits'), modelfile))
# f._stub_meta[1]['extname'] = ''
# f.validate_prototype(error=True)
# self.assertLog(log, -1, "Prototype file {0} has no EXTNAME in HDU1.".format(modelfile.replace('.rst', '.fits')))
def test_validate_prototype_hdu_extension_name(self):
"""Test the data model validation method with wrong HDU extension name.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
f.get_regexp(os.path.dirname(modelfile))
collect_files(os.path.dirname(modelfile), [f])
f.validate_prototype()
f._stub_meta[1]['extname'] = 'GALAXY'
f.validate_prototype(error=True)
self.assertLog(log, -1, "Prototype file {0} has an EXTNAME mismatch in HDU1 (GALAXY != Galaxies) according to {1}.".format(modelfile.replace('.rst', '.fits'), modelfile))
f._stub_meta[1]['extname'] = ''
f.validate_prototype(error=True)
self.assertLog(log, -2, "Prototype file {0} has no EXTNAME in HDU1.".format(modelfile.replace('.rst', '.fits')))
self.assertLog(log, -1, "Could not find EXTNAME = '' in {0}; trying by HDU number.".format(modelfile))
def test_extract_columns(self):
"""Test extraction of columns from a row of data.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
foo = '======= ============= ==== ====================='
columns = list(map(len, foo.split()))
row = 'NAXIS1 32 int length of dimension 1'
exc = ('NAXIS1', '32', 'int', 'length of dimension 1')
c = f._extract_columns(row, columns)
self.assertEqual(c, exc)
def test_cross_reference(self):
"""Test parsing of cross-references.
"""
modelfile = resource_filename('desidatamodel.test', 't/fits_file.rst')
f = DataModel(modelfile, os.path.dirname(modelfile))
line = "See :doc:`Other file <fits_file>`"
ref = f._cross_reference(line)
self.assertEqual(ref, resource_filename('desidatamodel.test',
't/fits_file.rst'))
@patch('sys.argv', ['check_model', '--verbose', '--compare-files', 'DESI_SPECTRO_DATA', '/desi/spectro/data/desi-00000000.fits.fz'])
def test_options(self):
"""Test parse of command-line options.
"""
options = _options()
self.assertTrue(options.verbose)
self.assertTrue(options.files)
self.assertEqual(options.section, 'DESI_SPECTRO_DATA')
self.assertEqual(options.directory, '/desi/spectro/data/desi-00000000.fits.fz')
def test_suite():
"""Allows testing of only this module with the command::
python setup.py test -m <modulename>
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
|
import numpy as np
import random
import json
import math
neuron_decay=0.9
maxneuronsperunit=64
maxaxonsperunit=128
maxunits=10
binary_io=True
bitsize=8
runtime=20
testsize=20
class neuron:
    id=-1
    myunit=-1
    active=False
    can_mutate=True
    threshold=999
    amount=0
    decay=neuron_decay
    def __init__(self,id,threshold):
        self.id=id
        self.threshold=threshold
        # per-instance connection lists; class-level lists would be shared
        # by every neuron instance
        self.downstream_axons=[]
        self.upstream_axons=[]
def check(self):
global units
if self.amount>=self.threshold and self.active:
for x in self.downstream_axons:
if x>-1:
units[self.myunit].axons[x].fire()
self.amount=self.amount*self.decay
class axon:
id=-1
myunit=-1
active=False
fireamount=0
upstream_neuron=-1
downstream_neuron=-1
def __init__(self,id):
self.id=id
def fire(self):
global units
units[self.myunit].neurons[self.downstream_neuron].amount=units[self.myunit].neurons[self.downstream_neuron].amount+self.fireamount
#print "AXON "+str(self.id)+" IS FIRING WITH "+str(self.fireamount)
return True
class unit:
    id=-1
    active=False
    active_neurons=0
    active_axons=0
    def __init__(self,id):
        self.id=id
        # per-instance containers; class-level lists would be shared by every unit
        self.input_neurons=[]
        self.output_neurons=[]
        self.neurons=[neuron(i,999) for i in range(maxneuronsperunit)]
        self.axons=[axon(i) for i in range(maxaxonsperunit)]
def add_neuron(self,threshold):
a=0
b=-1
while a<maxneuronsperunit:
if self.neurons[a].active==False:
b=a
self.active_neurons=self.active_neurons+1
a=maxneuronsperunit
a=a+1
self.neurons[b].active=True
self.neurons[b].myunit=self.id
self.neurons[b].threshold=threshold
return b
def add_n_neurons(self,n,threshold):
a=0
while a<n:
self.add_neuron(threshold)
a=a+1
def remove_neuron(self,n):
if self.neurons[n].active==True:
self.neurons[n].active=False
self.neurons[n].amount=0
self.neurons[n].threshold=999
self.active_neurons=self.active_neurons-1
def connect(self,a,b,amount):
if self.neurons[a].active and self.neurons[b].active:
c=0
d=0
while c<maxaxonsperunit:
if self.axons[c].active==False:
d=c
c=maxaxonsperunit
c=c+1
self.neurons[a].downstream_axons.append(d)
self.neurons[b].upstream_axons.append(d)
self.axons[d].active=True
self.axons[d].fireamount=amount
self.axons[d].myunit=self.id
self.axons[d].downstream_neuron=b
self.axons[d].upstream_neuron=a
return True
else:
return False
def cycle(self,inputs):
a=0
outputs=[]
#RESET ALL NEURONS BETWEEN CYCLES
for x in self.neurons:
x.amount=0
while a<runtime:
b=0
c=0
while c<len(self.input_neurons) and c<len(inputs):
self.neurons[self.input_neurons[c]].amount=inputs[c]
c=c+1
while b<maxneuronsperunit:
if self.neurons[b].active:
self.neurons[b].check()
b=b+1
#print "RUN CYCLE "+str(a)
a=a+1
    def print_neurons(self):
        a=0
        while a<maxneuronsperunit:
            if self.neurons[a].active:
                print("NEURON "+str(a)+" AMT: "+str(self.neurons[a].amount)+" / "+str(self.neurons[a].threshold))
            a=a+1
        print("INPUTS")
        for x in self.input_neurons:
            print(str(x))
        print("")
        print("OUTPUTS")
        for y in self.output_neurons:
            print(str(y))
def designate_io(self,ins,outs):
a=0
b=0
while b<ins and a<maxneuronsperunit:
if self.neurons[a].active:
self.neurons[a].can_mutate=False
self.neurons[a].decay=1
if binary_io:
self.neurons[a].threshold=1 #IO are BINARY
self.input_neurons.append(a)
b=b+1
a=a+1
c=0
d=a
while c<outs and d<maxneuronsperunit:
if self.neurons[d].active:
self.neurons[d].can_mutate=False
self.neurons[d].decay=1
if binary_io:
self.neurons[d].threshold=1
self.output_neurons.append(d)
c=c+1
d=d+1
        if b==ins and c==outs:
return True
else:
return False
def remove_axon(self,n):
if self.axons[n].active:
self.axons[n].active=False
self.axons[n].id=-1
self.axons[n].fireamount=0
u=self.axons[n].upstream_neuron
d=self.axons[n].downstream_neuron
self.axons[n].upstream_neuron=-1
self.axons[n].downstream_neuron=-1
if self.neurons[u].active:
a=0
while a<len(self.neurons[u].downstream_axons):
if self.neurons[u].downstream_axons[a]==n:
self.neurons[u].downstream_axons[a]=-1
a=a+1
if self.neurons[d].active:
b=0
while b<len(self.neurons[d].upstream_axons):
if self.neurons[d].upstream_axons[b]==n:
self.neurons[d].upstream_axons[b]=-1
b=b+1
def change_axon_destination(self,a,d):
if self.axons[a].active:
b=self.axons[a].downstream_neuron
h=0
while h<len(self.neurons[b].upstream_axons):
if self.neurons[b].upstream_axons[h]==a:
self.neurons[b].upstream_axons[h]=-1
h=h+1
            self.neurons[d].upstream_axons.append(a)
self.axons[a].downstream_neuron=d
def change_axon_source(self,a,s):
if self.axons[a].active:
b=self.axons[a].upstream_neuron
h=0
while h<len(self.neurons[b].downstream_axons):
if self.neurons[b].downstream_axons[h]==a:
self.neurons[b].downstream_axons[h]=-1
h=h+1
self.axons[a].upstream_neuron=s
            self.neurons[s].downstream_axons.append(a)
def change_threshold(self,n,r):
if self.neurons[n].active:
self.neurons[n].threshold=r
return True
else:
return False
def change_fireamount(self,a,r):
if self.axons[a].active:
self.axons[a].fireamount=r
return True
else:
return False
def change_decay(self,n,r):
if self.neurons[n].active:
self.neurons[n].decay=r
return True
else:
return False
def mutate(self):
choice=random.randint(0,100)
#print choice
if choice<10: #add neuron
self.add_neuron(1)
elif choice<20: # remove neuron
ok=True
found=False
a=0
while ok:
if self.neurons[a].active:
ok=False
found=True
elif a==maxneuronsperunit:
ok=False
a=a+1
if found:
self.remove_neuron(a)
#print "removed "+str(a)
elif choice<30: #add connection
ok=True
fireamount=random.randint(0,4)
fro=-1
to=-1
a=0
while ok and a<maxneuronsperunit:
f=random.randint(0,maxneuronsperunit-1)
if self.neurons[f].active:
fro=f
ok=False
a=a+1
ok=True
b=0
while ok and b<maxneuronsperunit:
t=random.randint(0,maxneuronsperunit-1)
if self.neurons[t].active:
to=t
ok=False
b=b+1
if to>-1 and fro > -1:
self.connect(fro,to,fireamount)
#print "connected "+str(fro)+" to "+str(to)+" for "+str(fireamount)
elif choice<40: #remove connection
ok=True
a=0
while ok:
h=random.randint(0,maxaxonsperunit-1)
if self.axons[h].active:
ok=False
#self.remove_axon(h)
# print "removed "+str(a)
a=a+1
if a>1000:
ok=False
elif choice<50: #change threshold WORKS
ok=True
changeamt=(random.random()-0.5)*2
while ok:
a=random.randint(0,maxneuronsperunit-1)
if self.neurons[a].active:
self.neurons[a].threshold=self.neurons[a].threshold+changeamt
# print "changed threshold for "+str(a)+ " by "+str(changeamt)
ok=False
a=a+1
elif choice<60: #change fireamount
ok=True
a=0
while ok and a<len(self.axons):
changeamt=(random.randint(-5,5))/10
if self.axons[a].active:
ok=False
self.axons[a].fireamount=self.axons[a].fireamount+changeamt
# print "changed fireamount "+str(a)+" by "+str(changeamt)
a=a+1
elif choice<70: # change axon source
a=0
b=0
kk=True
while kk:
towhere=random.randint(0,maxneuronsperunit-1)
if self.neurons[towhere].active:
kk=False
b=b+1
if b>100:
kk=False
ok=True
if b>100:
ok=False
while ok and a<len(self.axons):
if self.axons[a].active:
self.change_axon_source(a,towhere)
# print "changed axon source to "+str(towhere)+" for "+str(a)
ok=False
a=a+1
elif choice<80: # change axon destination
a=0
b=0
kk=True
while kk:
towhere=random.randint(0,maxneuronsperunit-1)
if self.neurons[towhere].active:
kk=False
b=b+1
if b>100:
kk=False
ok=True
if b>100:
ok=False
while ok and a<len(self.axons):
if self.axons[a].active:
self.change_axon_destination(a,towhere)
# print "changed axon destination to "+str(towhere)+" for "+str(a)
ok=False
a=a+1
elif choice<90: # change decay
ok=True
a=0
changeamt=(random.random()-0.5)
while ok and a<maxneuronsperunit:
if self.neurons[a].active:
self.neurons[a].decay=self.neurons[a].decay+changeamt
# print "changed decay for "+str(a)+ " by "+str(changeamt)
ok=False
a=a+1
def mutate_n(self,n):
a=0
while a<n:
self.mutate()
a=a+1
def read_outputs(self):
#OUTPUTS IN BINARY
outputs=[]
a=0
while a<len(self.output_neurons):
n=self.output_neurons[a]
if self.neurons[n].active and self.neurons[n].amount>=self.neurons[n].threshold:
outputs.append(1)
else:
outputs.append(0)
a=a+1
return outputs
def read_inputs(self):
inputs=[]
a=0
while a<len(self.input_neurons):
n=self.input_neurons[a]
if self.neurons[n].active:
inputs.append(self.neurons[n].amount)
else:
inputs.append(0)
a=a+1
return inputs
class system:
    units=[unit(i) for i in range(maxunits)]
    def init(self, n_units):
        global units
        # neuron.check() and axon.fire() resolve a module-level ``units`` list
        units=self.units
        for i in range(0,n_units):
self.units[i].add_n_neurons(maxneuronsperunit,1)
self.units[i].designate_io(bitsize*2,bitsize)
self.units[i].active=True
def save(self):
global data
a=0
data=[] #each element is a unit
while a<maxunits:
if self.units[a].active:
r={'active_neurons':self.units[a].active_neurons,'active_axons':self.units[a].active_axons,'input_neurons':self.units[a].input_neurons,'output_neurons':self.units[a].output_neurons}
r['neurons']=[]
r['unitid']=a
#save neuron data in each active unit
b=0
while b<maxneuronsperunit:
if self.units[a].neurons[b].active:
d={'can_mutate':self.units[a].neurons[b].can_mutate,'threshold':self.units[a].neurons[b].threshold,'currentamount':self.units[a].neurons[b].amount,'decay':self.units[a].neurons[b].decay}
d['downstream_axons']=self.units[a].neurons[b].downstream_axons
d['upstream_axons']=self.units[a].neurons[b].upstream_axons
d['neuronid']=b
r['neurons'].append(d)
b=b+1
b=0
r['axons']=[]
while b<maxaxonsperunit:
if self.units[a].axons[b].active:
g={'fire_amount':self.units[a].axons[b].fireamount,'axonid':b,'upstream_neuron':self.units[a].axons[b].upstream_neuron,'downstream_neuron':self.units[a].axons[b].downstream_neuron}
r['axons'].append(g)
b=b+1
data.append(r)
a=a+1
v=json.dumps(data)
        with open('config.txt','w') as f:
            f.write(v)
def load(self):
global data,units
file=open('config.txt')
f=file.read()
data=json.loads(f)
a=0
while a<len(data):
r=data[a]['unitid']
self.units[r].active_axons=data[a]['active_axons']
self.units[r].active_neurons=data[a]['active_neurons']
self.units[r].input_neurons=data[a]['input_neurons']
self.units[r].output_neurons=data[a]['output_neurons']
#load neuron data
n=0
while n<len(data[a]['neurons']):
neuronid=data[a]['neurons'][n]['neuronid']
self.units[r].neurons[neuronid].threshold=data[a]['neurons'][n]['threshold']
self.units[r].neurons[neuronid].can_mutate=data[a]['neurons'][n]['can_mutate']
self.units[r].neurons[neuronid].amount=data[a]['neurons'][n]['currentamount']
self.units[r].neurons[neuronid].decay=data[a]['neurons'][n]['decay']
self.units[r].neurons[neuronid].downstream_axons=data[a]['neurons'][n]['downstream_axons']
self.units[r].neurons[neuronid].upstream_axons=data[a]['neurons'][n]['upstream_axons']
self.units[r].neurons[neuronid].active=True
n=n+1
#load axon data
g=0
while g<len(data[a]['axons']):
axon=data[a]['axons'][g]
axonid=axon['axonid']
self.units[r].axons[axonid].fireamount=axon['fire_amount'] # attribute is named 'fireamount' elsewhere in this class
self.units[r].axons[axonid].upstream_neuron=axon['upstream_neuron']
self.units[r].axons[axonid].downstream_neuron=axon['downstream_neuron']
self.units[r].axons[axonid].active=True
g=g+1
a=a+1
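# --- Usage sketch (added illustration, not part of the original module) ---
# It assumes the globals maxunits, maxneuronsperunit and bitsize defined earlier
# in this file, and simply exercises init(), mutate_n() and the save()/load()
# round trip, guarded behind __main__ so importing the module stays side-effect free.
if __name__ == '__main__':
    net = system()
    net.init(1)                          # activate one unit and designate its I/O neurons
    net.units[0].mutate_n(25)            # apply a batch of random mutations
    print(net.units[0].read_outputs())   # binary output vector of the first unit
    net.save()                           # serialise active units/neurons/axons to config.txt
    net.load()                           # restore the same topology from config.txt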
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
import dataset
import logging
import re
from datetime import datetime
from copy import deepcopy
import os
log = logging.getLogger('usertrack')
if not os.path.exists('databases'):
os.makedirs('databases')
db = dataset.connect('sqlite:///databases/usertrack.db')
def get_table(bot, channel):
''' Returns table-instance from database.
Table names are in the format "networkalias_channel".
Network alias is the name assigned to the network in config.
The leading &#!+ characters are stripped from the channel name. '''
return db['%s_%s' % (bot.network.alias, re.sub(r'&|#|!|\+', '', channel))]
def upsert_row(bot, channel, data, keys=['nick', 'ident', 'host']):
''' Updates row to database.
Default keys are nick, ident and host,
which are normally present for data received by get_base_data -function. '''
table = get_table(bot, channel)
table.upsert(data, keys)
def get_base_data(user):
''' Fetches "base" data according to user.
Normally this is nick, ident, host and current time to be set to action_time in database.
Ident and host might be missing when the full mask isn't provided,
for example in handle_userKicked, where kickee doesn't get anything but name. '''
data = {
'nick': getNick(user),
'action_time': datetime.now()
}
# For example kickee doesn't get full hostmask -> needs some work...
try:
data['ident'] = getIdent(user)
except IndexError:
pass
try:
data['host'] = getHost(user)
except IndexError:
pass
return data
def handle_privmsg(bot, user, channel, message):
''' Handles all messages bot sees.
If message is private message to bot, doesn't update.
Otherwise updates the DB.
- last_action = 'message'
- last_message = message
- message_time = current time '''
# if user == channel -> this is a query -> don't update
if user == channel:
return
data = get_base_data(user)
data['last_action'] = 'message'
data['last_message'] = message
data['message_time'] = datetime.now()
upsert_row(bot, channel, data)
def handle_userJoined(bot, user, channel):
''' Handles user joining the channel and auto-ops if op == True in database.
- last_action = 'join' '''
data = get_base_data(user)
data['last_action'] = 'join'
upsert_row(bot, channel, data)
table = get_table(bot, channel)
if table.find_one(nick=getNick(user), ident=getIdent(user), host=getHost(user), op=True):
log.info('auto-opping %s' % user)
bot.mode(channel, True, 'o', user=getNick(user))
def handle_userLeft(bot, user, channel, message):
''' Handles user leaving the channel (or quitting).
For leaving, only updates the channel left.
For quitting, updates all channels in network, which the user was on (as bot knows...)
- last_action = 'left' '''
data = get_base_data(user)
data['last_message'] = message
if channel is not None:
data['last_action'] = 'left'
upsert_row(bot, channel, data)
# QUIT returns the channel as None, loop through all tables, check if user exists and update it
else:
data['last_action'] = 'quit'
for t in db.tables:
if not t.startswith('%s_' % bot.network.alias):
continue
table = db.load_table(t)
res = table.find_one(nick=getNick(user), ident=getIdent(user), host=getHost(user))
if res:
data['id'] = res['id']
table.update(data, ['id'])
def handle_userKicked(bot, kickee, channel, kicker, message):
''' Handles user being kicked.
As 'kickee' doesn't get full mask, it's only determined by nick.
For kickee:
- last_action = kicked by kicker [message]
For kicker:
- last_action = kicked kickee [message] '''
data = get_base_data(kickee)
data['last_action'] = 'kicked by %s [%s]' % (getNick(kicker), message)
# We don't get full info from kickee, need to update by nick only
upsert_row(bot, channel, data, ['nick'])
# Update the kickers action also...
data = get_base_data(kicker)
data['last_action'] = 'kicked %s [%s]' % (getNick(kickee), message)
upsert_row(bot, channel, data)
def handle_userRenamed(bot, user, newnick):
''' Handles nick change.
Updates both data, related to old and new nick, doesn't remove anything from db.
- last_action = nick change from oldnick to newnick '''
nick = getNick(user)
ident = getIdent(user)
host = getHost(user)
data = get_base_data(user)
data['last_action'] = 'nick change from %s to %s' % (nick, newnick)
# loop through all the tables, if user exists, update nick to match
for t in db.tables:
if not t.startswith('%s_' % bot.network.alias):
continue
table = db.load_table(t)
# if row is found with new or old nick -> user is on the channel -> update
if table.find_one(nick=nick, ident=ident, host=host) or \
table.find_one(nick=newnick, ident=ident, host=host):
# need to create a deep copy of data, as dataset seems to put changed fields back to data...
# haven't found any documentation on this, so might be a bug?
tmp_data = deepcopy(data)
# update the old user
table.upsert(tmp_data, ['nick', 'ident', 'host'])
# update new user
tmp_data = deepcopy(data)
tmp_data['nick'] = newnick
table.upsert(tmp_data, ['nick', 'ident', 'host'])
def handle_action(bot, user, channel, message):
''' Handles action (/me etc). Ignores stuff directed to bot (/describe botnick etc).
- last_action = action
- last_message = message
- message_time = current time '''
# if action is directed to bot instead of channel -> don't log
if channel == bot.nickname:
return
data = get_base_data(user)
data['last_action'] = 'action'
data['last_message'] = message
data['message_time'] = datetime.now()
upsert_row(bot, channel, data)
def command_add_op(bot, user, channel, args):
''' Adds op-status according to nickname or full hostmask. Only for admins.
If user is found from database, set op = True and return info with full hostmask.
Else returns user not found. '''
if not isAdmin(user) or user == channel or not args:
return
table = get_table(bot, channel)
# Get basedata for user to be opped
u = get_base_data(args)
# If we got full mask, use it..
if 'nick' in u and 'ident' in u and 'host' in u:
res = table.find_one(nick=u['nick'], ident=u['ident'], host=u['host'])
# else use just nickname
else:
res = table.find_one(nick=u['nick'])
if not res:
return bot.say(channel, 'user not found')
data = {'id': res['id'], 'op': True}
table.upsert(data, ['id'])
return bot.say(channel, 'auto-opping %s!%s@%s' % (res['nick'], res['ident'], res['host']))
def command_remove_op(bot, user, channel, args):
''' Removes op-status from nick. Logic same as command_add_op. Only for admins. '''
if not isAdmin(user) or user == channel or not args:
return
table = get_table(bot, channel)
# Get basedata for user to be opped
u = get_base_data(args)
# If we got full mask, use it..
if 'nick' in u and 'ident' in u and 'host' in u:
res = table.find_one(nick=u['nick'], ident=u['ident'], host=u['host'])
# else use just nickname
else:
res = table.find_one(nick=u['nick'])
if not res:
return bot.say(channel, 'user not found')
data = {'id': res['id'], 'op': False}
table.upsert(data, ['id'])
return bot.say(channel, 'removed auto-op from %s!%s@%s' % (res['nick'], res['ident'], res['host']))
def command_op(bot, user, channel, args):
''' Ops user if op = True for user or isAdmin. '''
table = get_table(bot, channel)
if table.find_one(nick=getNick(user), ident=getIdent(user), host=getHost(user), op=True) or isAdmin(user):
log.info('opping %s on %s by request' % (user, channel))
bot.mode(channel, True, 'o', user=getNick(user))
def command_list_ops(bot, user, channel, args):
''' Lists ops in current channel. Only for admins.
By default lists nicks, if args == 'full', lists full hostmask. '''
if not isAdmin(user) or user == channel:
return
table = get_table(bot, channel)
if args.strip() == 'full':
ops = ', '.join(['%s!%s@%s' % (r['nick'], r['ident'], r['host']) for r in table.find(op=True)])
else:
ops = ', '.join(['%s' % r['nick'] for r in table.find(op=True)])
return bot.say(channel, 'ops: %s' % ops)
def __get_length_str(secs):
days, hours, minutes, seconds = secs // 86400, secs // 3600, secs // 60 % 60, secs % 60
if days > 0:
return '%dd' % days
if hours > 0:
return '%dh' % hours
if minutes > 0:
return '%dm' % minutes
if seconds > 0:
return '%ds' % seconds
return '0s'
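# Added worked examples for __get_length_str (only the largest nonzero unit is reported):
#   __get_length_str(90061) -> '1d'   (90061 // 86400 == 1)
#   __get_length_str(4000)  -> '1h'
#   __get_length_str(125)   -> '2m'
#   __get_length_str(42)    -> '42s'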
def command_seen(bot, user, channel, args):
'''Displays the last action by the given user'''
if not args:
return bot.say(channel, 'Please provide a nick to search...')
table = get_table(bot, channel)
# Return the first match, there shouldn't be multiples anyway
user = table.find_one(nick=args)
if not user:
return bot.say(channel, "I haven't seen %s on %s" % (args, channel))
# Calculate last seen in seconds
last_seen = datetime.now() - user['action_time']
# Get string for last seen
last_seen = __get_length_str(last_seen.days * 86400 + last_seen.seconds)
# If the last action was part or quit, show also the message
if user['last_action'] in ['left', 'quit']:
return bot.say(channel, "%s was last seen at %s (%s ago) [%s, %s]" %
(user['nick'],
'{0:%Y-%m-%d %H:%M:%S}'.format(user['action_time']),
last_seen,
user['last_action'],
user['last_message']
))
# Otherwise just show the time and action
return bot.say(channel, "%s was last seen at %s (%s ago) [%s]" %
(user['nick'],
'{0:%Y-%m-%d %H:%M:%S}'.format(user['action_time']),
last_seen,
user['last_action']
))
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
Support for ROS Names
See: U{http://www.ros.org/wiki/Names}
"""
import sys
import os
from rosgraph.names import namespace, get_ros_namespace, ns_join, make_global_ns, load_mappings, \
SEP, GLOBALNS, REMAP, ANYTYPE, \
is_global, is_private
import rosgraph.names
from rospy.exceptions import ROSException
from rospy.impl.validators import ParameterInvalid
TOPIC_ANYTYPE = ANYTYPE #indicates that a subscriber will connect any datatype given to it
SERVICE_ANYTYPE = ANYTYPE #indicates that a service client does not have a fixed type
import struct
if sys.hexversion > 0x03000000: #Python3
def isstring(s):
return isinstance(s, str) #Python 3.x
else:
def isstring(s):
return isinstance(s, basestring) #Python 2.x
def canonicalize_name(name):
"""
Put name in canonical form. Double slashes '//' are removed and
name is returned without any trailing slash, e.g. /foo/bar
@param name: ROS name
@type name: str
"""
if not name or name == SEP:
return name
elif name[0] == SEP:
return '/' + '/'.join([x for x in name.split(SEP) if x])
else:
return '/'.join([x for x in name.split(SEP) if x])
##if len(name) > 1 and name[-1] == SEP:
## return name[:-1]
##return name
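# Added illustration of the canonical form (double slashes collapse, trailing slash is dropped):
#   canonicalize_name('/foo//bar/') -> '/foo/bar'
#   canonicalize_name('foo//bar/')  -> 'foo/bar'
#   canonicalize_name('/')          -> '/'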
# Mappings override name resolution by substituting fully-qualified
# names in for local name references. They override any name
# reference, with exception of '.local' names. We load remapping args
# as soon as client API is referenced so that they are initialized
# before Topic constructors are invoked.
_mappings = load_mappings(sys.argv)
_resolved_mappings = {}
def reload_mappings(argv):
"""
Re-initialize the name remapping table.
@param argv: Command line arguments to this program. ROS reads
these arguments to find renaming params.
@type argv: [str]
"""
global _mappings
_mappings = load_mappings(argv)
# #1810
def initialize_mappings(node_name):
"""
Initialize the remapping table based on the provided node name.
@param node_name: name of node (caller ID)
@type node_name: str
"""
global _resolved_mappings
_resolved_mappings = {}
for m,v in _mappings.items():
# resolve both parts of the mappings. use the rosgraph.names
# version of resolve_name to avoid circular mapping.
if m.startswith('__'): # __name, __log, etc...
_resolved_mappings[m] = v
else:
_resolved_mappings[rosgraph.names.resolve_name(m, node_name)] = rosgraph.names.resolve_name(v, node_name)
def resolve_name_without_node_name(name):
"""
The need for this function is complicated -- Topics and Services can be created before init_node is called.
In general, this is okay, unless the name is a ~name, in which
case we have to raise a ValueError
@param name: ROS name to resolve
@type name: str
@raise ValueError: if name is a ~name
@raise ROSInitException: if name is remapped to a ~name
"""
if is_private(name):
raise ValueError("~name topics cannot be created before init_node() has been called")
# we use the underlying rosgraph.names.resolve_name to avoid dependencies on nodename/remappings
fake_caller_id = ns_join(get_namespace(), 'node')
fake_resolved = rosgraph.names.resolve_name(name, fake_caller_id)
for m, v in _mappings.items():
if rosgraph.names.resolve_name(m, fake_caller_id) == fake_resolved:
if is_private(name):
raise ROSInitException("due to the way this node is written, %s cannot be remapped to a ~name. \nThe declaration of topics/services must be moved after the call to init_node()"%name)
else:
return rosgraph.names.resolve_name(v, fake_caller_id)
return fake_resolved
def get_mappings():
"""
Get mapping table with unresolved names
@return: command-line remappings {name: name}
@rtype: {str: str}
"""
return _mappings
def get_resolved_mappings():
"""
Get mapping table with resolved names
@return: command-line remappings {name: name}
@rtype: {str: str}
"""
return _resolved_mappings
#TODO: port to a wrapped call to rosgraph.names.resolve_name
def resolve_name(name, caller_id=None):
"""
Resolve a ROS name to its global, canonical form. Private ~names
are resolved relative to the node name.
@param name: name to resolve.
@type name: str
@param caller_id: node name to resolve relative to. To
resolve to local namespace, omit this parameter (or use None)
@type caller_id: str
@return: Resolved name. If name is empty/None, resolve_name
returns parent namespace. If namespace is empty/None,
@rtype: str
"""
if not caller_id:
caller_id = get_name()
if not name: #empty string resolves to namespace
return namespace(caller_id)
name = str(name) # enforce string conversion else struct.pack might raise UnicodeDecodeError (see #3998)
name = canonicalize_name(name)
if name[0] == SEP: #global name
resolved_name = name
elif is_private(name): #~name
resolved_name = ns_join(caller_id, name[1:])
else: #relative
resolved_name = namespace(caller_id) + name
#Mappings override general namespace-based resolution
# - do this before canonicalization as remappings are meant to
# match the name as specified in the code
if resolved_name in _resolved_mappings:
return _resolved_mappings[resolved_name]
else:
return resolved_name
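# Added illustration of the resolution rules above, assuming caller_id '/wg/node1'
# and an empty remapping table:
#   resolve_name('bar', '/wg/node1')   -> '/wg/bar'        (relative name, joined to the caller's namespace)
#   resolve_name('/bar', '/wg/node1')  -> '/bar'           (global name, returned as-is after canonicalization)
#   resolve_name('~bar', '/wg/node1')  -> '/wg/node1/bar'  (private name, joined to the node name)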
def remap_name(name, caller_id=None, resolved=True):
"""
Remap a ROS name. This API should be used instead of
resolve_name for APIs in which you don't wish to resolve the name
unless it is remapped.
@param name: name to remap
@type name: str
@param resolved: if True (default), use resolved names in remappings, which is the standard for ROS.
@type resolved: bool
@return: Remapped name
@rtype: str
"""
if not caller_id:
caller_id = get_caller_id()
if name in _mappings:
return rosgraph.names.resolve_name(_mappings[name], caller_id)
return name
def scoped_name(caller_id, name):
"""
Convert the global caller_id to a relative name within the namespace. For example, for
namespace '/foo' and name '/foo/bar/name', the return value will
be 'bar/name'
WARNING: scoped_name does not validate that name is actually within
the supplied namespace.
@param caller_id: caller ID, in canonical form
@type caller_id: str
@param name: name to scope
@type name: str
@return: name scoped to the caller_id's namespace.
@rtype: str
"""
if not is_global(caller_id):
raise ROSException("caller_id must be global")
return canonicalize_name(name)[len(namespace(caller_id)):]
###################################################
# Name validators ############################
#Technically XMLRPC will never send a None, but I don't want to code masterslave.py to be
#XML-RPC specific in this way.
def valid_name_validator_resolved(param_name, param_value, caller_id):
if not param_value or not isstring(param_value):
raise ParameterInvalid("ERROR: parameter [%s] must be a non-empty string"%param_name)
#TODO: actual validation of chars
# I added the colon check as the common error will be to send an URI instead of name
if ':' in param_value or ' ' in param_value:
raise ParameterInvalid("ERROR: parameter [%s] contains illegal chars"%param_name)
#don't use our own resolve_name because we do not want to remap
return rosgraph.names.resolve_name(param_value, caller_id, remappings=None)
def valid_name_validator_unresolved(param_name, param_value, caller_id):
if not param_value or not isstring(param_value):
raise ParameterInvalid("ERROR: parameter [%s] must be a non-empty string"%param_name)
#TODO: actual validation of chars
# I added the colon check as the common error will be to send an URI instead of name
if ':' in param_value or ' ' in param_value:
raise ParameterInvalid("ERROR: parameter [%s] contains illegal chars"%param_name)
return param_value
def valid_name(param_name, resolve=True):
"""
Validator that resolves names and also ensures that they are not empty
@param param_name: name
@type param_name: str
@param resolve: if True/omitted, the name will be resolved to
a global form. Otherwise, no resolution occurs.
@type resolve: bool
@return: resolved parameter value
@rtype: str
"""
def validator(param_value, caller_id):
if resolve:
return valid_name_validator_resolved(param_name, param_value, caller_id)
return valid_name_validator_unresolved(param_name, param_value, caller_id)
return validator
def global_name(param_name):
"""
Validator that checks for valid, global graph resource name.
@return: parameter value
@rtype: str
"""
def validator(param_value, caller_id):
if not param_value or not isstring(param_value):
raise ParameterInvalid("ERROR: parameter [%s] must be a non-empty string"%param_name)
#TODO: actual validation of chars
if not is_global(param_value):
raise ParameterInvalid("ERROR: parameter [%s] must be a globally referenced name"%param_name)
return param_value
return validator
#########################################################
#Global Namespace Routines
# - Global state, e.g. singletons and namespace
_caller_namespace = get_ros_namespace()
_caller_id = _caller_namespace+'unnamed' #default for non-node.
def get_namespace():
"""
Get namespace of local node.
@return: fully-qualified name of local node or '' if not applicable
@rtype: str
"""
return _caller_namespace
def get_name():
"""
Get fully resolved name of local node. If this is not a node,
use empty string
@return: fully-qualified name of local node or '' if not applicable
@rtype: str
"""
return _caller_id
# backwards compatibility
get_caller_id = get_name
def _set_caller_id(caller_id):
"""
Internal API.
Set the global name (i.e. caller_id) and namespace. Methods can
check what the name of the current node is by calling get_caller_id.
The caller_id is important as it is the first parameter to any API
call on a remote node. Invoked by ROSNode constructor
@param caller_id: new caller ID
@type caller_id: str
"""
global _caller_id, _caller_namespace
_caller_id = caller_id
_caller_namespace = namespace(caller_id)
|
|
"""
Manage RabbitMQ Users
=====================
Example:
.. code-block:: yaml
rabbit_user:
rabbitmq_user.present:
- password: password
- force: True
- tags:
- monitoring
- user
- perms:
- '/':
- '.*'
- '.*'
- '.*'
- runas: rabbitmq
"""
import logging
import salt.utils.path
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if RabbitMQ is installed.
"""
if salt.utils.path.which("rabbitmqctl"):
return True
return (False, "Command not found: rabbitmqctl")
def _check_perms_changes(name, newperms, runas=None, existing=None):
"""
Check whether Rabbitmq user's permissions need to be changed.
"""
if not newperms:
return False
if existing is None:
try:
existing = __salt__["rabbitmq.list_user_permissions"](name, runas=runas)
except CommandExecutionError as err:
log.error("Error: %s", err)
return False
empty_perms = {"configure": "", "write": "", "read": ""}
perm_need_change = False
for vhost_perms in newperms:
for vhost, perms in vhost_perms.items():
if vhost in existing:
new_perms = {"configure": perms[0], "write": perms[1], "read": perms[2]}
existing_vhost = existing[vhost]
if new_perms != existing_vhost:
# This checks for setting permissions to nothing in the state,
# when previous state runs have already set permissions to
# nothing. We don't want to report a change in this case.
if existing_vhost == empty_perms and perms == empty_perms:
continue
perm_need_change = True
else:
perm_need_change = True
return perm_need_change
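# Added note: the 'newperms' argument mirrors the state's ``perms`` option, i.e. a list of
# single-key dicts mapping a vhost to its [configure, write, read] regexes, for example
# (values are illustrative only):
#   [{'/': ['.*', '.*', '.*']}, {'other_vhost': ['^amq\\.', '.*', '.*']}]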
def _get_current_tags(name, runas=None):
"""
Return the RabbitMQ user's current tags as a list.
"""
try:
return list(__salt__["rabbitmq.list_users"](runas=runas)[name])
except CommandExecutionError as err:
log.error("Error: %s", err)
return []
def present(name, password=None, force=False, tags=None, perms=(), runas=None):
"""
Ensure the RabbitMQ user exists.
name
User name
password
The user's password
force
If force is ``True``, the password will be automatically updated without extra password change check.
tags
Optional list of tags for the user
perms
A list of dicts with vhost keys and 3-tuple values
runas
Name of the user to run the command
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
try:
user = __salt__["rabbitmq.user_exists"](name, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
passwd_reqs_update = False
if user and password is not None:
try:
if not __salt__["rabbitmq.check_password"](name, password, runas=runas):
passwd_reqs_update = True
log.debug("RabbitMQ user %s password update required", name)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
if user and not any((force, perms, tags, passwd_reqs_update)):
log.debug(
"RabbitMQ user '%s' exists, password is up to date and force is not set.",
name,
)
ret["comment"] = "User '{}' is already present.".format(name)
ret["result"] = True
return ret
if not user:
ret["changes"].update({"user": {"old": "", "new": name}})
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "User '{}' is set to be created.".format(name)
return ret
log.debug("RabbitMQ user '%s' doesn't exist - Creating.", name)
try:
__salt__["rabbitmq.add_user"](name, password, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
else:
log.debug("RabbitMQ user '%s' exists", name)
if force or passwd_reqs_update:
if password is not None:
if not __opts__["test"]:
try:
__salt__["rabbitmq.change_password"](
name, password, runas=runas
)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
ret["changes"].update({"password": {"old": "", "new": "Set password."}})
else:
if not __opts__["test"]:
log.debug("Password for %s is not set - Clearing password.", name)
try:
__salt__["rabbitmq.clear_password"](name, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
ret["changes"].update(
{"password": {"old": "Removed password.", "new": ""}}
)
if tags is not None:
current_tags = _get_current_tags(name, runas=runas)
if isinstance(tags, str):
tags = tags.split()
# Diff the tags sets. Symmetric difference operator ^ will give us
# any element in one set, but not both
if set(tags) ^ set(current_tags):
if not __opts__["test"]:
try:
__salt__["rabbitmq.set_user_tags"](name, tags, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
ret["changes"].update({"tags": {"old": current_tags, "new": tags}})
try:
existing_perms = __salt__["rabbitmq.list_user_permissions"](name, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
if _check_perms_changes(name, perms, runas=runas, existing=existing_perms):
for vhost_perm in perms:
for vhost, perm in vhost_perm.items():
if not __opts__["test"]:
try:
__salt__["rabbitmq.set_permissions"](
vhost, name, perm[0], perm[1], perm[2], runas=runas
)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
new_perms = {
vhost: {"configure": perm[0], "write": perm[1], "read": perm[2]}
}
if vhost in existing_perms:
if existing_perms[vhost] != new_perms[vhost]:
if ret["changes"].get("perms") is None:
ret["changes"].update({"perms": {"old": {}, "new": {}}})
ret["changes"]["perms"]["old"].update(existing_perms[vhost])
ret["changes"]["perms"]["new"].update(new_perms)
else:
ret["changes"].update({"perms": {"new": {}}})
ret["changes"]["perms"]["new"].update(new_perms)
ret["result"] = True
if ret["changes"] == {}:
ret["comment"] = "'{}' is already in the desired state.".format(name)
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Configuration for '{}' will change.".format(name)
return ret
ret["comment"] = "'{}' was configured.".format(name)
return ret
def absent(name, runas=None):
"""
Ensure the named user is absent
name
The name of the user to remove
runas
User to run the command
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
try:
user_exists = __salt__["rabbitmq.user_exists"](name, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
if user_exists:
if not __opts__["test"]:
try:
__salt__["rabbitmq.delete_user"](name, runas=runas)
except CommandExecutionError as err:
ret["comment"] = "Error: {}".format(err)
return ret
ret["changes"].update({"name": {"old": name, "new": ""}})
else:
ret["result"] = True
ret["comment"] = "The user '{}' is not present.".format(name)
return ret
if __opts__["test"] and ret["changes"]:
ret["result"] = None
ret["comment"] = "The user '{}' will be removed.".format(name)
return ret
ret["result"] = True
ret["comment"] = "The user '{}' was removed.".format(name)
return ret
|
|
from __future__ import unicode_literals
from django.test import TestCase
import factory
from factory.django import DjangoModelFactory
from django.contrib.auth.models import User
from faker import Faker as fake_factory
from models import Photo, Album
from django.test import Client
fake = fake_factory()
class UserFactory(DjangoModelFactory):
class Meta:
model = User
first_name = fake.first_name()
last_name = fake.last_name()
email = factory.LazyAttribute(lambda x:
'{0}@example.com'.format(x.first_name))
username = factory.Sequence(lambda n: 'user{}'.format(n))
class PhotoFactory(DjangoModelFactory):
class Meta:
model = Photo
title = fake.sentences(nb=1)[0]
description = fake.text()
file = fake.mime_type()
class AlbumFactory(factory.Factory):
class Meta:
model = Album
title = fake.sentences(nb=1)[0]
description = fake.text()
class PhotoTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='user1')
user.save()
for i in range(100):
photo = PhotoFactory.create(user=user)
photo.save()
def test_photos_are_created(self):
self.assertTrue(Photo.objects.count() == 100)
def test_photos_belong_to_user(self):
user = User.objects.get(username='user1')
self.assertEqual(100, len(user.photos.all()))
def test_photos_do_not_belong_to_other_user(self):
new_user = UserFactory.create(username='user2')
new_user.save()
self.assertEqual(len(new_user.photos.all()), 0)
class AlbumTestCase(TestCase):
def setUp(self):
user = UserFactory.create()
user.save()
cover = PhotoFactory.create(user=user)
cover.save()
for i in range(5):
album = AlbumFactory.create(cover=cover, user=user)
album.save()
def test_albums_are_created(self):
self.assertTrue(Album.objects.count() == 5)
def test_add_photos_to_albums(self):
album = Album.objects.all()[0]
user = User.objects.all()[0]
for i in range(5):
photo = PhotoFactory.create(user=user)
photo.save()
photos = list(Photo.objects.all())
album.photos.add(*photos)
self.assertTrue(len(album.photos.all()) == 6)
self.assertTrue(album.user == user)
class AlbumViewTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
user2 = UserFactory.create(username='usereve')
user2.set_password('secret')
user2.save()
cover = PhotoFactory.create(user=user)
cover.save()
album = AlbumFactory.create(cover=cover, user=user)
album.save()
album.photos.add(cover)
def test_album_detail_view(self):
album = Album.objects.all()[0]
photo = Photo.objects.all()[0]
c = Client()
c.login(username='userbob', password='secret')
response = c.get('/images/album/{}/'.format(album.id))
self.assertIn(album.title, response.content)
self.assertIn(album.description, response.content)
self.assertIn(photo.title, response.content)
def test_album_not_owner(self):
album = Album.objects.all()[0]
c = Client()
c.login(username='usereve', password='secret')
response = c.get('/images/album/{}/'.format(album.id))
assert response.status_code == 404
def test_album_public(self):
user = User.objects.get(username='userbob')
album = AlbumFactory.create(user=user, published='Public')
album.save()
c = Client()
c.login(username='usereve', password='secret')
response = c.get('/images/album/{}/'.format(album.id))
self.assertEqual(response.status_code, 200)
self.assertIn(album.title, response.content)
def test_album_unauthenticated(self):
album = Album.objects.all()[0]
c = Client()
response = c.get('/images/album/{}/'.format(album.id), follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('form method="post" action="/login/"',
response.content)
class PhotoViewTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
user2 = UserFactory.create(username='usereve')
user2.set_password('secret')
user2.save()
photo = PhotoFactory.create(user=user)
photo.save()
def test_photo_detail_view(self):
photo = Photo.objects.all()[0]
c = Client()
c.login(username='userbob', password='secret')
response = c.get('/images/photos/{}/'.format(photo.id))
self.assertIn(photo.title, response.content)
self.assertIn(photo.description, response.content)
def test_photo_not_owner(self):
photo = Photo.objects.all()[0]
c = Client()
c.login(username='usereve', password='secret')
response = c.get('/images/photos/{}/'.format(photo.id))
assert response.status_code == 404
def test_photo_public(self):
user = User.objects.get(username='userbob')
photo = PhotoFactory.create(user=user, published='Public')
photo.save()
c = Client()
c.login(username='usereve', password='secret')
response = c.get('/images/photos/{}/'.format(photo.id))
self.assertEqual(response.status_code, 200)
self.assertIn(photo.title, response.content)
def test_photo_unauthenticated(self):
photo = Photo.objects.all()[0]
c = Client()
response = c.get('/images/photos/{}/'.format(photo.id), follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('form method="post" action="/login/"',
response.content)
class LibraryViewTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
cover = PhotoFactory.create(user=user)
cover.save()
album = AlbumFactory.create(cover=cover, user=user)
album.save()
album.photos.add(cover)
def test_library_view(self):
photo = Photo.objects.all()[0]
album = Album.objects.all()[0]
c = Client()
c.login(username='userbob', password='secret')
response = c.get('/images/library/')
self.assertIn(photo.title, response.content)
self.assertIn(album.title, response.content)
def test_library_view_different_user(self):
photo = Photo.objects.all()[0]
album = Album.objects.all()[0]
c = Client()
c.login(username='usereve', password='secret')
response = c.get('/images/library/')
self.assertNotIn(photo.title, response.content)
self.assertNotIn(album.title, response.content)
def test_library_unauthenticated(self):
c = Client()
response = c.get('/images/library/', follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('form method="post" action="/login/"',
response.content)
class PhotoAddTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
cover = PhotoFactory.create(user=user)
cover.save()
album = AlbumFactory.create(cover=cover, user=user)
album.save()
album.photos.add(cover)
def test_add_photo(self):
c = Client()
c.login(username='userbob', password='secret')
with open('imager_images/thumbnail.jpg', 'rb') as fh:
response = c.post(
'/images/photos/add/',
{'file': fh, 'title': 'test title', 'published': 'Private'},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertIn('<img src=\'/media/cache/', response.content)
self.assertIn('test title', response.content)
def test_photo_add_unauthenticated(self):
c = Client()
response = c.get('/images/photos/add', follow=True)
self.assertEqual(len(response.redirect_chain), 2)
self.assertIn('form method="post" action="/login/"',
response.content)
class PhotoEditTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
user2 = UserFactory.create(username='usereve')
user2.set_password('secret')
user2.save()
photo = PhotoFactory.create(user=user)
photo.save()
def test_edit_photo(self):
c = Client()
c.login(username='userbob', password='secret')
photo = Photo.objects.all()[0]
response = c.get('/images/photos/edit/{}/'.format(photo.id))
self.assertIn(photo.title, response.content)
with open('imager_images/thumbnail.jpg', 'rb') as fh:
response = c.post(
'/images/photos/edit/{}/'.format(photo.id),
{
'file': fh,
'title': 'new test title',
'published': 'Private'
},
follow=True
)
self.assertIn('new test title', response.content)
response = c.get('/images/photos/{}/'.format(photo.id))
# make sure we have the same photo id
self.assertIn('new test title', response.content)
def test_edit_other_user(self):
# what you end up with is a create form for yourself.
c = Client()
c.login(username='usereve', password='secret')
photo = Photo.objects.all()[0]
with open('imager_images/thumbnail.jpg', 'rb') as fh:
response = c.post(
'/images/photos/edit/{}/'.format(photo.id),
{
'file': fh,
'title': 'other user',
'published': 'Private'
},
follow=True
)
self.assertEqual(response.status_code, 404)
userbob = User.objects.get(username='userbob')
self.assertEqual(photo.user, userbob)
self.assertNotIn(photo.title, 'other user')
def test_photo_edit_unauthenticated(self):
c = Client()
photo = Photo.objects.all()[0]
response = c.get('/images/photos/edit/{}/'.format(photo.id),
follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('form method="post" action="/login/"',
response.content)
class AlbumAddTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
user2 = UserFactory.create(username='usereve')
user2.set_password('secret')
user2.save()
photo = PhotoFactory.create(user=user)
photo.save()
def test_add_album(self):
c = Client()
c.login(username='userbob', password='secret')
photo = Photo.objects.all()[0]
response = c.post(
'/images/album/add/',
{
'title': 'new album title',
'photos': photo.id,
'published': 'Private',
},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertIn('new album title', response.content)
album = Album.objects.all()[0]
self.assertIn(photo, album.photos.all())
def test_album_add_unauthenticated(self):
c = Client()
response = c.get('/images/album/add/', follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('form method="post" action="/login/"',
response.content)
class AlbumEditTestCase(TestCase):
def setUp(self):
user = UserFactory.create(username='userbob')
user.set_password('secret')
user.save()
user2 = UserFactory.create(username='usereve')
user2.set_password('secret')
user2.save()
cover = PhotoFactory.create(user=user)
cover.save()
album = AlbumFactory.create(cover=cover, user=user)
album.save()
album.photos.add(cover)
def test_edit_album(self):
c = Client()
c.login(username='userbob', password='secret')
album = Album.objects.all()[0]
response = c.get('/images/album/edit/{}/'.format(album.id))
self.assertIn(album.title, response.content)
response = c.post(
'/images/album/edit/{}/'.format(album.id),
{
'title': 'new test title',
'published': 'Private'
},
follow=True
)
self.assertIn('new test title', response.content)
response = c.get('/images/album/{}/'.format(album.id))
self.assertIn('new test title', response.content)
def test_edit_other_user(self):
c = Client()
c.login(username='usereve', password='secret')
album = Album.objects.all()[0]
response = c.post(
'/images/album/edit/{}/'.format(album.id),
{
'title': 'other user',
'published': 'Private'
},
follow=True
)
self.assertEqual(response.status_code, 404)
userbob = User.objects.get(username='userbob')
album = Album.objects.all()[0]
self.assertEqual(album.user, userbob)
self.assertNotIn(album.title, 'other user')
def test_album_edit_unauthenticated(self):
c = Client()
album = Album.objects.all()[0]
response = c.get('/images/album/edit/{}/'.format(album.id),
follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('form method="post" action="/login/"',
response.content)
|
|
import warnings
import cx_Oracle
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type objects to Django Field types.
data_types_reverse = {
cx_Oracle.BLOB: 'BinaryField',
cx_Oracle.CLOB: 'TextField',
cx_Oracle.DATETIME: 'DateField',
cx_Oracle.FIXED_CHAR: 'CharField',
cx_Oracle.FIXED_NCHAR: 'CharField',
cx_Oracle.NATIVE_FLOAT: 'FloatField',
cx_Oracle.NCHAR: 'CharField',
cx_Oracle.NCLOB: 'TextField',
cx_Oracle.NUMBER: 'DecimalField',
cx_Oracle.STRING: 'CharField',
cx_Oracle.TIMESTAMP: 'DateTimeField',
}
cache_bust_counter = 1
def get_field_type(self, data_type, description):
# If it's a NUMBER with scale == 0, consider it an IntegerField
if data_type == cx_Oracle.NUMBER:
precision, scale = description[4:6]
if scale == 0:
if precision > 11:
return 'BigIntegerField'
elif precision == 1:
return 'BooleanField'
else:
return 'IntegerField'
elif scale == -127:
return 'FloatField'
return super().get_field_type(data_type, description)
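# Added worked examples of the NUMBER mapping above, where description[4:6]
# holds (precision, scale) as reported by cx_Oracle:
#   NUMBER(19, 0)           -> 'BigIntegerField'
#   NUMBER(1, 0)            -> 'BooleanField'
#   NUMBER(10, 0)           -> 'IntegerField'
#   NUMBER with scale -127  -> 'FloatField'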
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
"SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# user_tab_columns gives data default for columns
cursor.execute("""
SELECT
column_name,
data_default,
CASE
WHEN char_used IS NULL THEN data_length
ELSE char_length
END as internal_size
FROM user_tab_cols
WHERE table_name = UPPER(%s)""", [table_name])
field_map = {
column: (internal_size, default if default != 'NULL' else None)
for column, default, internal_size in cursor.fetchall()
}
self.cache_bust_counter += 1
cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
self.connection.ops.quote_name(table_name),
self.cache_bust_counter))
description = []
for desc in cursor.description:
name = force_text(desc[0]) # cx_Oracle always returns a 'str'
internal_size, default = field_map[name]
name = name % {} # cx_Oracle, for some reason, doubles percent signs.
description.append(FieldInfo(*(name.lower(),) + desc[1:3] + (internal_size,) + desc[4:] + (default,)))
return description
def table_name_converter(self, name):
"""Table name comparison is case insensitive under Oracle."""
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Return a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return {d[0]: i for i, d in enumerate(self.get_table_description(cursor, table_name))}
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
table_name = table_name.upper()
cursor.execute("""
SELECT ta.column_name, tb.table_name, tb.column_name
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
user_tab_cols ta, user_tab_cols tb
WHERE user_constraints.table_name = %s AND
ta.table_name = user_constraints.table_name AND
ta.column_name = ca.column_name AND
ca.table_name = ta.table_name AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
cb.table_name = tb.table_name AND
cb.column_name = tb.column_name AND
ca.position = cb.position""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[0].lower()] = (row[2].lower(), row[1].lower())
return relations
def get_key_columns(self, cursor, table_name):
cursor.execute("""
SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
FROM user_constraints c
JOIN user_cons_columns ccol
ON ccol.constraint_name = c.constraint_name
JOIN user_cons_columns rcol
ON rcol.constraint_name = c.r_constraint_name
WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
return [tuple(cell.lower() for cell in row)
for row in cursor.fetchall()]
def get_indexes(self, cursor, table_name):
warnings.warn(
"get_indexes() is deprecated in favor of get_constraints().",
RemovedInDjango21Warning, stacklevel=2
)
sql = """
SELECT LOWER(uic1.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1 ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1 ELSE 0
END AS is_unique
FROM user_constraints, user_indexes, user_ind_columns uic1
WHERE user_constraints.constraint_type (+) = 'P'
AND user_constraints.index_name (+) = uic1.index_name
AND user_indexes.uniqueness (+) = 'UNIQUE'
AND user_indexes.index_name (+) = uic1.index_name
AND uic1.table_name = UPPER(%s)
AND uic1.column_position = 1
AND NOT EXISTS (
SELECT 1
FROM user_ind_columns uic2
WHERE uic2.index_name = uic1.index_name
AND uic2.column_position = 2
)
"""
cursor.execute(sql, [table_name])
indexes = {}
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': bool(row[1]),
'unique': bool(row[2])}
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs, uniques, and checks
cursor.execute("""
SELECT
user_constraints.constraint_name,
LOWER(cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE
WHEN EXISTS (
SELECT 1
FROM user_indexes
WHERE user_indexes.index_name = user_constraints.index_name
AND user_indexes.uniqueness = 'UNIQUE'
)
THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint,
CASE
WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1
ELSE 0
END AS has_index
FROM
user_constraints
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
user_constraints.constraint_type = ANY('P', 'U', 'C')
AND user_constraints.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, pk, unique, check, index in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": pk,
"unique": unique,
"foreign_key": None,
"check": check,
"index": index, # All P and U come with index
}
# Record the details
constraints[constraint]['columns'].append(column)
# Foreign key constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name,
LOWER(rcols.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = cons.r_constraint_name
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, other_table, other_column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": (other_table, other_column),
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
cols.index_name, LOWER(cols.column_name), cols.descend,
LOWER(ind.index_type)
FROM
user_ind_columns cols, user_indexes ind
WHERE
cols.table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE cols.index_name = cons.index_name
) AND cols.index_name = ind.index_name
ORDER BY cols.column_position
""", [table_name])
for constraint, column, order, type_ in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"orders": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": False,
"index": True,
"type": 'idx' if type_ == 'normal' else type_,
}
# Record the details
constraints[constraint]['columns'].append(column)
constraints[constraint]['orders'].append(order)
return constraints
|
|
import time
import random
#game function
def game():
print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print ("Welcome to the cavern of secrets!")
print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
time.sleep(3)
print ("You enter a dark cavern out of curiosity. It is dark and you can only make out a small stick on the floor.")
ch1 = str(input("Do you take it? [y/n]: "))
#STICK TAKEN
if ch1 in ['y', 'Y', 'Yes', 'YES', 'yes']:
print("You have taken the stick!")
time.sleep(2)
stick = 1
#STICK NOT TAKEN
else:
print("You did not take the stick")
stick = 0
print ("As you proceed further into the cave, you see a small glowing object")
ch2 = str(input("Do you approach the object? [y/n]"))
#APPROACH SPIDER
if ch2 in ['y', 'Y', 'Yes', 'YES', 'yes']:
print ("You approach the object...")
time.sleep(2)
print ("As you draw closer, you begin to make out the object as an eye!")
time.sleep(1)
print ("The eye belongs to a giant spider!")
ch3 = str(input("Do you try to fight it? [Y/N]"))
#APPROACH SPIDER
elif ch2 in ['n', 'N', 'No', 'NO', 'no']:
print ("You don't approach the object...")
time.sleep(2)
print ("As you walk away, the object begins to come closer to you!")
time.sleep(1)
print ("The object is an eye that belongs to a giant spider!")
ch3 = str(input("Do you try to fight it? [Y/N]"))
# FIGHT SPIDER
if ch3 in ['y', 'Y', 'Yes', 'YES', 'yes']:
# WITH STICK
if stick == 1:
print("You only have a stick to fight with!")
print("You quickly jab the spider in it's eye and gain an advantage")
time.sleep(2)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(" Fighting... ")
print(" YOU MUST HIT ABOVE A 5 TO KILL THE SPIDER ")
print("IF THE SPIDER HITS HIGHER THAN YOU, YOU WILL DIE")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
time.sleep(2)
fdmg1 = int(random.randint(3, 10))
edmg1 = int(random.randint(1, 5))
print("you hit a", fdmg1)
print("the spider hits a", edmg1)
time.sleep(2)
if edmg1 > fdmg1:
print ("The spider has dealt more damage than you!")
complete = 0
return complete
elif fdmg1 < 5:
print ("You didn't do enough damage to kill the spider, but you manage to escape")
complete = 1
return complete
else:
print ("You killed the spider!")
print ("As you want to walk away you heard a girl screaming!")
explore = input ('Do you want to find out who screamed? [y/n] ')
if explore in ['y', 'Y', 'yes', 'YES', 'Yes', ]:
print ("As you where going further into the cave, you see a princess!")
fight = input("Do you want to save her? [y/n]")
if fight in ['y', 'Y', 'yes', 'YES', 'Yes', ]:
print ("As you walk closer to her a skeleton with a sword and a shield reveals himself from the darkness of the cave!")
fight = str(input("Do you try to fight it? [Y/N]"))
if fight in ['y', 'Y', 'yes', 'YES', 'Yes', ]:
print ("You choose to fight it!")
time.sleep(2)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(" Fighting... ")
print(" YOU MUST HIT ABOVE A 20 TO KILL THE Skeleton ")
print("IF THE Skeleton HITS HIGHER THAN YOU, YOU WILL DIE")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
time.sleep(2)
fdmg1 = int(random.randint(20, 30))
edmg1 = int(random.randint(10, 15))
print("you hit a", fdmg1)
print("the skeleton hits a", edmg1)
time.sleep(2)
print("You saved the princess and she thanks you for saving her!")
print("Getting out of the cave .......")
print("Getting the princess to her kingdom......")
time.sleep(2)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(" You Won the Game! Congrats! ")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
complete = 1
return complete
else:
if fight in ['n', 'N', 'no', ]:
print("You choose not to fight the Skeleton")
time.sleep(1)
print("As yo turn away it ambushes you with its sword and kills you!!!")
elif explore in ['n', 'N', 'no', 'NO', 'No', ]:
print("When you wanted to get out of the cave and go home a giant spider jumped in front of you from the darkness and killed you!")
# WITHOUT STICK
else:
print("You don't have anything to fight with!")
time.sleep(2)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(" Fighting... ")
print(" YOU MUST HIT ABOVE A 10 TO KILL THE SPIDER ")
print("IF THE SPIDER HITS HIGHER THAN YOU, YOU WILL DIE")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
time.sleep(2)
fdmg1 = int(random.randint(10, 12))
edmg1 = int(random.randint(1, 5))
print("you hit a", fdmg1)
print("the spider hits a", edmg1)
time.sleep(2)
if edmg1 > fdmg1:
print ("The spider has dealt more damage than you!")
complete = 0
return complete
elif fdmg1 < 10: # without the stick you must hit above a 10
print ("You didn't do enough damage to kill the spider, but you manage to escape")
complete = 1
return complete
else:
print ("You killed the spider!")
print ("As you want to walk away you heard a girl screaming!")
explore = input ('Do you want to find out who screamed? [y/n]')
fight = input("Do you want to save her? [y/n]")
if explore in ['y', 'Y', 'yes', 'YES', 'Yes']:
print ("As you where going further into the cave ,you saw a princess! Do you want to save her? [y/n]")
if fight in ['y', 'Y', 'yes', 'YES', 'Yes', ]:
print ("As you walk closer to her a skeleton with a sword and a shield reveals himself from the darkness of the cave!")
fight = str(input("Do you try to fight it? [Y/N]"))
if fight in ['y', 'Y', 'yes', 'YES', 'Yes', ]:
print ("You choose to fight it!")
time.sleep(2)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(" Fighting... ")
print(" YOU MUST HIT ABOVE A 20 TO KILL THE Skeleton ")
print("IF THE Skeleton HITS HIGHER THAN YOU, YOU WILL DIE")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
time.sleep(2)
fdmg1 = int(random.randint(1, 20))
edmg1 = int(random.randint(1, 15))
print("you hit a", fdmg1)
print("the skeleton hits a", edmg1)
time.sleep(2)
print("You saved the princess and she thanks you for saving her!")
print("Getting out of the cave .......")
print("Getting the princess to her kingdom......")
time.sleep(2)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print(" You Won the Game! Congrats! ")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
complete = 1
return complete
else:
if fight in ['n', 'N', 'no' , ]:
print("You choose not to fight the Skeleton")
time.sleep(1)
print("As yo turn away it ambushes you with its sword and kills you!!!")
elif explore in ['n', 'N', 'no', 'NO', 'No', ]:
print("When you wanted to get out of the cave and go home a giant spider jumped in front of you from the darkness and killed you!")
#DON'T FIGHT SPIDER
elif ch3 in ['n', 'N', 'No', 'NO', 'no']:
print ("You choose not to fight the spider.")
time.sleep(1)
print ("As you turn away, it ambushes you and impales you with it's fangs!!!")
complete = 0
return complete
# game loop
alive = True
while alive:
complete = game()
if complete == 1:
alive = input('You managed to escape the cavern alive! Would you like to play again? [y/n]: ')
if alive in ['y', 'Y', 'YES', 'yes', 'Yes',]:
continue
else:
break
else:
alive = input('You have died! Would you like to play again? [y/n]: ')
if alive in ['y', 'Y', 'YES', 'yes', 'Yes',]:
continue
else:
break
|