commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
f276c840f2981ec2951e07c7b847f82811db0745
|
Remove unnecessary None handling
|
dudymas/python-openstacksdk,stackforge/python-openstacksdk,dtroyer/python-openstacksdk,stackforge/python-openstacksdk,briancurtin/python-openstacksdk,dudymas/python-openstacksdk,openstack/python-openstacksdk,dtroyer/python-openstacksdk,mtougeron/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk
|
openstack/database/v1/user.py
|
openstack/database/v1/user.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.database import database_service
from openstack import resource


class User(resource.Resource):
    id_attribute = 'name'
    resource_key = 'user'
    resources_key = 'users'
    base_path = '/instances/%(instance_id)s/users'
    service = database_service.DatabaseService()

    # capabilities
    allow_create = True
    allow_delete = True
    allow_list = True

    # path args
    instance_id = resource.prop('instance_id')

    # Properties
    databases = resource.prop('databases')
    name = resource.prop('name')
    password = resource.prop('password')

    @classmethod
    def create_by_id(cls, session, attrs, r_id=None, path_args=None):
        url = cls._get_url(path_args)
        # Create expects an array of users
        body = {'users': [attrs]}
        resp = session.post(url, service=cls.service, json=body).body
        return resp
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.database import database_service
from openstack import resource


class User(resource.Resource):
    id_attribute = 'name'
    resource_key = 'user'
    resources_key = 'users'
    base_path = '/instances/%(instance_id)s/users'
    service = database_service.DatabaseService()

    # capabilities
    allow_create = True
    allow_delete = True
    allow_list = True

    # path args
    instance_id = resource.prop('instance_id')

    # Properties
    databases = resource.prop('databases')
    name = resource.prop('name')
    _password = resource.prop('password')

    @property
    def password(self):
        try:
            val = self._password
        except AttributeError:
            val = None
        return val

    @password.setter
    def password(self, val):
        self._password = val

    @classmethod
    def create_by_id(cls, session, attrs, r_id=None, path_args=None):
        url = cls._get_url(path_args)
        # Create expects an array of users
        body = {'users': [attrs]}
        resp = session.post(url, service=cls.service, json=body).body
        return resp
|
apache-2.0
|
Python
|
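The change above drops a property/setter pair in favor of a bare `resource.prop` descriptor. A minimal sketch of why the wrapper was redundant, using a toy `prop` descriptor (hypothetical, not the actual `openstack.resource` implementation) that returns None for unset attributes:

```python
# Toy stand-in for resource.prop; the real openstack.resource API may differ.
class prop:
    def __init__(self, name):
        self.name = name

    def __get__(self, obj, owner=None):
        if obj is None:
            return self
        # Returning None for unset attributes makes a try/except
        # AttributeError wrapper around the access unnecessary.
        return obj.__dict__.get(self.name)

    def __set__(self, obj, value):
        obj.__dict__[self.name] = value


class User:
    password = prop('password')


u = User()
assert u.password is None   # no AttributeError, no wrapper needed
u.password = 'secret'
assert u.password == 'secret'
```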
d1596872f11f95e406a6a3a97222e499abf4f222
|
update plot_ts
|
johannfaouzi/pyts
|
examples/plot_ts.py
|
examples/plot_ts.py
|
"""
======================
Plotting a time series
======================
Plotting a time series.
"""
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
# Parameters
n_samples = 100
n_features = 48
rng = np.random.RandomState(41)
delta = 0.5
dt = 1
# Generate a toy dataset
X = (norm.rvs(scale=delta**2 * dt,
size=n_samples * n_features,
random_state=rng).reshape((n_samples, n_features)))
X[:, 0] = 0
X = np.cumsum(X, axis=1)
# Plot the first sample
plt.plot(X[0])
plt.show()
|
"""
======================
Plotting a time series
======================
An example plot of `pyts.visualization.plot_ts`
"""
import numpy as np
from scipy.stats import norm
from pyts.visualization import plot_ts
# Parameters
n_samples = 100
n_features = 48
rng = np.random.RandomState(41)
delta = 0.5
dt = 1
# Generate a toy dataset
X = (norm.rvs(scale=delta**2 * dt, size=n_samples*n_features, random_state=rng)
.reshape((n_samples, n_features)))
X[:, 0] = 0
X = np.cumsum(X, axis=1)
# Plot the first sample
plot_ts(X[0])
|
bsd-3-clause
|
Python
|
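The toy dataset in this record is a batch of random walks. A numpy-only sketch of the same construction; note that scipy's `scale` parameter is a standard deviation, so the i.i.d. increments here have std `delta**2 * dt`:

```python
import numpy as np

# A (100, 48) batch of random walks: cumulative sums of normal increments.
rng = np.random.RandomState(41)
delta, dt = 0.5, 1
steps = rng.normal(loc=0.0, scale=delta**2 * dt, size=(100, 48))
steps[:, 0] = 0            # every walk starts at zero
walks = np.cumsum(steps, axis=1)
print(walks.shape)         # (100, 48)
```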
28e64a576a25b7fb41997da8ecfb4472d9adee38
|
simplify main greenlet caching
|
teepark/greenhouse
|
greenhouse/compat.py
|
greenhouse/compat.py
|
import os
import sys

try:
    from greenlet import greenlet, GreenletExit
except ImportError, error:
    try:
        from py.magic import greenlet
        GreenletExit = greenlet.GreenletExit
    except ImportError:
        # suggest standalone greenlet, not the old py.magic.greenlet
        raise error

__all__ = ["main_greenlet", "GreenletExit"]

# it's conceivable that we might not be in the main greenlet at import time,
# so chase the parent tree until we get to it
main_greenlet = greenlet.getcurrent()
while main_greenlet.parent:
    main_greenlet = main_greenlet.parent
|
import os
import sys

try:
    from greenlet import greenlet, GreenletExit
except ImportError, error:
    try:
        from py.magic import greenlet
        GreenletExit = greenlet.GreenletExit
    except ImportError:
        # suggest standalone greenlet, not the old py.magic.greenlet
        raise error

__all__ = ["main_greenlet", "GreenletExit"]

# it's conceivable that we might not be in the main greenlet at import time,
# so chase the parent tree until we get to it
def _find_main():
    glet = greenlet.getcurrent()
    while glet.parent:
        glet = glet.parent
    return glet

main_greenlet = _find_main()
|
bsd-3-clause
|
Python
|
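A self-contained Python 3 sketch of the same parent-chasing idea, assuming the standalone `greenlet` package is installed (`pip install greenlet`):

```python
import greenlet


def find_main_greenlet():
    glet = greenlet.getcurrent()
    # The main greenlet is the only one with no parent.
    while glet.parent is not None:
        glet = glet.parent
    return glet


def child():
    # Even from inside a spawned greenlet, chasing .parent reaches main.
    assert find_main_greenlet() is main


main = greenlet.getcurrent()
greenlet.greenlet(child).switch()
print("main greenlet located from a child greenlet")
```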
38989bf6e449bf2ada1ac4729564d9feacbc7b90
|
use parseargs for cli processing
|
awynne/scripts
|
activity/activitylog.py
|
activity/activitylog.py
|
from peewee import *
from pprint import pprint
from copy import copy
import sys, os, inspect
import optparse
import argparse

db = SqliteDatabase('activitylog.db')

################
# Model classes
################

class BaseModel(Model):
    is_abstract = BooleanField(default=False)

    class Meta:
        database = db

class NamedModel(BaseModel):
    name = CharField(primary_key=True)

class Person(NamedModel):
    first = CharField()
    last = CharField()
    born = DateField()

class ActivityType(NamedModel):
    parent = ForeignKeyField('self', null=True, related_name='children')

class MeasurementType(NamedModel):
    parent = ForeignKeyField('self', null=True, related_name='children')

class Location(NamedModel):
    address = CharField()

class Entry(BaseModel):
    person = ForeignKeyField(Person)
    location = ForeignKeyField(Location)
    props = CharField(null=True)

class Activity(Entry):
    start = DateTimeField()
    end = DateTimeField()
    activityType = ForeignKeyField(ActivityType)
    distance = IntegerField(default=0)

class Measurement(Entry):
    time = DateTimeField()
    measurementType = ForeignKeyField(MeasurementType)
    value = DecimalField()

############
# Functions
############

def main(argv):
    args = parse_args()
    if args.list:
        lsModel(args.list)
    elif (args.list_all):
        for table in db.get_tables():
            print table.title()
    else:
        script = os.path.basename(__file__)
        print "%s: you must specify an option" % script
        exit(2)

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--list", metavar='<model-class>', dest='list',
                        help='List model objects for the specified class')
    parser.add_argument('--list-all', dest='list_all', action='store_true',
                        help='List all model classes')
    return parser.parse_args()

def lsModel(clazzStr):
    clazz = globals()[clazzStr]
    for item in clazz.select():
        if item.is_abstract == False:
            attrs = copy(vars(item)['_data'])
            del(attrs['is_abstract'])
            pprint(attrs)

if __name__ == '__main__':
    main(sys.argv[1:])
|
from peewee import *
from pprint import pprint
from copy import copy
import sys, getopt, os, inspect

db = SqliteDatabase('activitylog.db')

################
# Model classes
################

class BaseModel(Model):
    is_abstract = BooleanField(default=False)

    class Meta:
        database = db

class NamedModel(BaseModel):
    name = CharField(primary_key=True)

class Person(NamedModel):
    first = CharField()
    last = CharField()
    born = DateField()

class ActivityType(NamedModel):
    parent = ForeignKeyField('self', null=True, related_name='children')

class MeasurementType(NamedModel):
    parent = ForeignKeyField('self', null=True, related_name='children')

class Location(NamedModel):
    address = CharField()

class Entry(BaseModel):
    person = ForeignKeyField(Person)
    location = ForeignKeyField(Location)
    props = CharField(null=True)

class Activity(Entry):
    start = DateTimeField()
    end = DateTimeField()
    activityType = ForeignKeyField(ActivityType)
    distance = IntegerField(default=0)

class Measurement(Entry):
    time = DateTimeField()
    measurementType = ForeignKeyField(MeasurementType)
    value = DecimalField()

############
# Functions
############

def main(argv):
    try:
        opts, args = getopt.getopt(argv, "", ["list=", "list-all"])
        if not opts:
            usage()
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt == '--list':
            lsModel(arg)
        elif opt == '--list-all':
            for table in db.get_tables():
                print table.title()
        else:
            usage()

def usage():
    script = os.path.basename(__file__)
    print "%s --ls <modelClass>" % script
    sys.exit(2)

def lsModel(clazzStr):
    clazz = globals()[clazzStr]
    for item in clazz.select():
        if item.is_abstract == False:
            attrs = copy(vars(item)['_data'])
            del(attrs['is_abstract'])
            pprint(attrs)

if __name__ == '__main__':
    main(sys.argv[1:])
|
mit
|
Python
|
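The commit above swaps hand-rolled getopt parsing for argparse, which generates help text and validation automatically. A minimal Python 3 sketch of the same two options (the option names come from the record; the surrounding program is hypothetical):

```python
import argparse


def parse_args(argv=None):
    parser = argparse.ArgumentParser(description='Query the activity log.')
    parser.add_argument('--list', metavar='<model-class>',
                        help='List model objects for the specified class')
    parser.add_argument('--list-all', action='store_true',
                        help='List all model classes')
    return parser.parse_args(argv)


args = parse_args(['--list', 'Person'])
print(args.list)        # 'Person'
print(args.list_all)    # False
# argparse also provides --help and error messages for free, which the
# getopt version had to implement by hand in usage().
```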
4565c961eca6f0d904d010cbdbf3d42fe2a6080b
|
Add persistence test
|
seppo0010/rlite-py,seppo0010/rlite-py,pombredanne/rlite-py,pombredanne/rlite-py
|
test/rlite.py
|
test/rlite.py
|
# coding=utf-8
from unittest import *
import os.path
import sys

import hirlite

class RliteTest(TestCase):
    def setUp(self):
        self.rlite = hirlite.Rlite()

    def test_none(self):
        self.assertEquals(None, self.rlite.command('get', 'hello'))

    def test_ok(self):
        self.assertEquals(True, self.rlite.command('set', 'hello', 'world'))

    def test_string(self):
        self.rlite.command('set', 'hello', 'world')
        self.assertEquals('world', self.rlite.command('get', 'hello'))

    def test_integer(self):
        self.assertEquals(2, self.rlite.command('lpush', 'list', 'value', 'other value'))

    def test_error(self):
        self.assertIsInstance(self.rlite.command('set', 'key'), hirlite.HirliteError)

    def test_array(self):
        self.rlite.command('rpush', 'mylist', '1', '2', '3')
        self.assertEquals(self.rlite.command('lrange', 'mylist', 0, -1), ['1', '2', '3'])

class PersistentTest(TestCase):
    PATH = 'rlite.rld'

    def setUp(self):
        if os.path.exists(PersistentTest.PATH):
            os.unlink(PersistentTest.PATH)
        self.rlite = hirlite.Rlite(PersistentTest.PATH)

    def tearDown(self):
        if os.path.exists(PersistentTest.PATH):
            os.unlink(PersistentTest.PATH)

    def test_write_close_open(self):
        self.rlite.command('set', 'key', 'value')
        self.rlite = hirlite.Rlite(PersistentTest.PATH)  # close db, open a new one
        self.assertEquals('value', self.rlite.command('get', 'key'))
|
# coding=utf-8
from unittest import *
import hirlite
import sys

class RliteTest(TestCase):
    def setUp(self):
        self.rlite = hirlite.Rlite()

    def test_none(self):
        self.assertEquals(None, self.rlite.command('get', 'hello'))

    def test_ok(self):
        self.assertEquals(True, self.rlite.command('set', 'hello', 'world'))

    def test_string(self):
        self.rlite.command('set', 'hello', 'world')
        self.assertEquals('world', self.rlite.command('get', 'hello'))

    def test_integer(self):
        self.assertEquals(2, self.rlite.command('lpush', 'list', 'value', 'other value'))

    def test_error(self):
        self.assertIsInstance(self.rlite.command('set', 'key'), hirlite.HirliteError)

    def test_array(self):
        self.rlite.command('rpush', 'mylist', '1', '2', '3')
        self.assertEquals(self.rlite.command('lrange', 'mylist', 0, -1), ['1', '2', '3'])
|
bsd-2-clause
|
Python
|
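The added test follows the classic write/close/reopen pattern for verifying persistence. The same pattern, sketched with the standard library's sqlite3 instead of hirlite so it runs anywhere (a deliberate stand-in, not the record's API):

```python
import os
import sqlite3
import tempfile
import unittest


class PersistenceTest(unittest.TestCase):
    def setUp(self):
        fd, self.path = tempfile.mkstemp(suffix='.db')
        os.close(fd)

    def tearDown(self):
        os.unlink(self.path)

    def test_write_close_open(self):
        con = sqlite3.connect(self.path)
        con.execute('CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT)')
        con.execute("INSERT INTO kv VALUES ('key', 'value')")
        con.commit()
        con.close()
        # Reopen a fresh handle: the data must have survived on disk.
        con = sqlite3.connect(self.path)
        self.assertEqual(
            con.execute("SELECT v FROM kv WHERE k = 'key'").fetchone()[0],
            'value')
        con.close()


if __name__ == '__main__':
    unittest.main()
```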
18428461f230d7d57056bd1a6a0fc2a66cedb5f1
|
increment version
|
albertfxwang/grizli
|
grizli/version.py
|
grizli/version.py
|
# git describe --tags
__version__ = "0.8.0-14-g548f692"
|
# git describe --tags
__version__ = "0.8.0-10-g15486e6"
|
mit
|
Python
|
68eaa885e15b98bc05376d9ddca6926258be2c46
|
make header fields with dates (e.g. last-modified) comparable
|
spaceone/httoop,spaceone/httoop,spaceone/httoop
|
httoop/header/conditional.py
|
httoop/header/conditional.py
|
# -*- coding: utf-8 -*-

from httoop.header.element import HeaderElement

class _DateComparable(object):
    from httoop.date import Date

    def sanitize(self):
        self.value = self.Date.parse(self.value)

class ETag(HeaderElement):
    pass

class LastModified(_DateComparable, HeaderElement):
    __name__ = 'Last-Modified'

class IfMatch(HeaderElement):
    __name__ = 'If-Match'

class IfModifiedSince(_DateComparable, HeaderElement):
    __name__ = 'If-Modified-Since'

class IfNoneMatch(HeaderElement):
    __name__ = 'If-None-Match'

class IfUnmodifiedSince(_DateComparable, HeaderElement):
    __name__ = 'If-Unmodified-Since'
|
# -*- coding: utf-8 -*-

from httoop.header.element import HeaderElement

class ETag(HeaderElement):
    pass

class LastModified(HeaderElement):
    __name__ = 'Last-Modified'

class IfMatch(HeaderElement):
    __name__ = 'If-Match'

class IfModifiedSince(HeaderElement):
    __name__ = 'If-Modified-Since'

class IfNoneMatch(HeaderElement):
    __name__ = 'If-None-Match'

class IfUnmodifiedSince(HeaderElement):
    __name__ = 'If-Unmodified-Since'
|
mit
|
Python
|
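The mixin's job is to parse an HTTP date once in `sanitize()` so header values compare as datetimes, not as strings. A self-contained sketch of the idea; `HeaderElement` and the date parser here are toy stand-ins, not the httoop classes:

```python
from email.utils import parsedate_to_datetime


class HeaderElement(object):
    def __init__(self, value):
        self.value = value
        self.sanitize()

    def sanitize(self):
        pass

    def __lt__(self, other):
        return self.value < other.value


class _DateComparable(object):
    def sanitize(self):
        # Parse the RFC 1123 date string into a timezone-aware datetime.
        self.value = parsedate_to_datetime(self.value)


class LastModified(_DateComparable, HeaderElement):
    pass


a = LastModified('Sun, 06 Nov 1994 08:49:37 GMT')
b = LastModified('Mon, 07 Nov 1994 08:49:37 GMT')
print(a < b)  # True: compared as datetimes thanks to the mixin's MRO
```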
82bc502cf7bb64236feba6e140d98bb9e555f4ca
|
Fix assert_raises for catching parents of exceptions.
|
rocky4570/moto,ZuluPro/moto,Affirm/moto,Affirm/moto,kefo/moto,botify-labs/moto,okomestudio/moto,spulec/moto,Affirm/moto,dbfr3qs/moto,okomestudio/moto,kefo/moto,heddle317/moto,whummer/moto,2rs2ts/moto,Brett55/moto,dbfr3qs/moto,ZuluPro/moto,kefo/moto,william-richard/moto,spulec/moto,spulec/moto,ZuluPro/moto,gjtempleton/moto,2rs2ts/moto,spulec/moto,william-richard/moto,Brett55/moto,okomestudio/moto,Affirm/moto,gjtempleton/moto,okomestudio/moto,Brett55/moto,dbfr3qs/moto,rocky4570/moto,Affirm/moto,heddle317/moto,spulec/moto,rocky4570/moto,2rs2ts/moto,spulec/moto,william-richard/moto,botify-labs/moto,kefo/moto,Affirm/moto,Brett55/moto,gjtempleton/moto,rocky4570/moto,ZuluPro/moto,rocky4570/moto,whummer/moto,okomestudio/moto,whummer/moto,heddle317/moto,2rs2ts/moto,whummer/moto,botify-labs/moto,2rs2ts/moto,heddle317/moto,ZuluPro/moto,Brett55/moto,gjtempleton/moto,whummer/moto,william-richard/moto,rocky4570/moto,gjtempleton/moto,dbfr3qs/moto,kefo/moto,botify-labs/moto,Brett55/moto,william-richard/moto,whummer/moto,okomestudio/moto,ZuluPro/moto,heddle317/moto,dbfr3qs/moto,william-richard/moto,botify-labs/moto,silveregg/moto,dbfr3qs/moto,botify-labs/moto
|
tests/backport_assert_raises.py
|
tests/backport_assert_raises.py
|
from __future__ import unicode_literals
"""
Patch courtesy of:
https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/
"""

# code for monkey-patching
import nose.tools

# let's fix nose.tools.assert_raises (which is really unittest.assertRaises)
# so that it always supports context management

# in order for these changes to be available to other modules, you'll need
# to guarantee this module is imported by your fixture before either nose or
# unittest are imported

try:
    nose.tools.assert_raises(Exception)
except TypeError:
    # this version of assert_raises doesn't support the 1-arg version
    class AssertRaisesContext(object):
        def __init__(self, expected):
            self.expected = expected

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, tb):
            self.exception = exc_val
            if issubclass(exc_type, self.expected):
                return True
            nose.tools.assert_equal(exc_type, self.expected)
            # if you get to this line, the last assertion must have passed
            # suppress the propagation of this exception
            return True

    def assert_raises_context(exc_type):
        return AssertRaisesContext(exc_type)

    nose.tools.assert_raises = assert_raises_context
|
from __future__ import unicode_literals
"""
Patch courtesy of:
https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/
"""

# code for monkey-patching
import nose.tools

# let's fix nose.tools.assert_raises (which is really unittest.assertRaises)
# so that it always supports context management

# in order for these changes to be available to other modules, you'll need
# to guarantee this module is imported by your fixture before either nose or
# unittest are imported

try:
    nose.tools.assert_raises(Exception)
except TypeError:
    # this version of assert_raises doesn't support the 1-arg version
    class AssertRaisesContext(object):
        def __init__(self, expected):
            self.expected = expected

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, tb):
            self.exception = exc_val
            nose.tools.assert_equal(exc_type, self.expected)
            # if you get to this line, the last assertion must have passed
            # suppress the propagation of this exception
            return True

    def assert_raises_context(exc_type):
        return AssertRaisesContext(exc_type)

    nose.tools.assert_raises = assert_raises_context
|
apache-2.0
|
Python
|
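The fix in isolation: asserting an exact exception type rejects subclasses, while `issubclass` accepts them, matching what `assertRaises` does. A minimal stdlib-only sketch:

```python
class ParentError(Exception):
    pass


class ChildError(ParentError):
    pass


class AssertRaisesContext(object):
    def __init__(self, expected):
        self.expected = expected

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, tb):
        self.exception = exc_val
        # With `exc_type == self.expected` this would fail for ChildError;
        # issubclass accepts any subclass of the expected type.
        assert issubclass(exc_type, self.expected)
        return True  # swallow the exception


with AssertRaisesContext(ParentError):
    raise ChildError('a subclass is still a ParentError')
print('subclass accepted')
```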
fc94bda4cb840b74fbd1226d69bf0aafc5e16e61
|
return when not installed (#283)
|
cebrusfs/217gdb,cebrusfs/217gdb,0xddaa/pwndbg,0xddaa/pwndbg,cebrusfs/217gdb,pwndbg/pwndbg,disconnect3d/pwndbg,cebrusfs/217gdb,anthraxx/pwndbg,pwndbg/pwndbg,pwndbg/pwndbg,disconnect3d/pwndbg,anthraxx/pwndbg,anthraxx/pwndbg,disconnect3d/pwndbg,0xddaa/pwndbg,pwndbg/pwndbg,anthraxx/pwndbg,chubbymaggie/pwndbg,chubbymaggie/pwndbg
|
pwndbg/commands/rop.py
|
pwndbg/commands/rop.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import re
import subprocess
import tempfile

import gdb
import pwndbg.commands
import pwndbg.vmmap

parser = argparse.ArgumentParser(description="Dump ROP gadgets with Jon Salwan's ROPgadget tool.",
                                 epilog="Example: rop --grep 'pop rdi' -- --nojop")
parser.add_argument('--grep', type=str,
                    help='String to grep the output for')
parser.add_argument('argument', nargs='*', type=str,
                    help='Arguments to pass to ROPgadget')


@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWithFile
def rop(grep, argument):
    with tempfile.NamedTemporaryFile() as corefile:
        # If the process is running, dump a corefile so we get actual addresses.
        if pwndbg.proc.alive:
            filename = corefile.name
            gdb.execute('gcore %s' % filename)
        else:
            filename = pwndbg.proc.exe

        # Build up the command line to run
        cmd = ['ROPgadget',
               '--binary',
               filename]
        cmd += argument

        try:
            io = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        except Exception:
            print("Could not run ROPgadget. Please ensure it's installed and in $PATH.")
            return

        (stdout, stderr) = io.communicate()
        stdout = stdout.decode('latin-1')

        if not grep:
            print(stdout)
            return

        for line in stdout.splitlines():
            if re.search(grep, line):
                print(line)


@pwndbg.commands.Command
def ropgadget(*a):
    return rop(*a)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import re
import subprocess
import tempfile

import gdb
import pwndbg.commands
import pwndbg.vmmap

parser = argparse.ArgumentParser(description="Dump ROP gadgets with Jon Salwan's ROPgadget tool.",
                                 epilog="Example: rop --grep 'pop rdi' -- --nojop")
parser.add_argument('--grep', type=str,
                    help='String to grep the output for')
parser.add_argument('argument', nargs='*', type=str,
                    help='Arguments to pass to ROPgadget')


@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWithFile
def rop(grep, argument):
    with tempfile.NamedTemporaryFile() as corefile:
        # If the process is running, dump a corefile so we get actual addresses.
        if pwndbg.proc.alive:
            filename = corefile.name
            gdb.execute('gcore %s' % filename)
        else:
            filename = pwndbg.proc.exe

        # Build up the command line to run
        cmd = ['ROPgadget',
               '--binary',
               filename]
        cmd += argument

        try:
            io = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        except Exception:
            print("Could not run ROPgadget. Please ensure it's installed and in $PATH.")

        (stdout, stderr) = io.communicate()
        stdout = stdout.decode('latin-1')

        if not grep:
            print(stdout)
            return

        for line in stdout.splitlines():
            if re.search(grep, line):
                print(line)


@pwndbg.commands.Command
def ropgadget(*a):
    return rop(*a)
|
mit
|
Python
|
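The bug the commit fixes: without an early `return` after the except block, execution falls through and uses a variable that was never bound. The same pattern in miniature, sketched with `subprocess.run` and `FileNotFoundError`:

```python
import subprocess


def run_tool(cmd):
    try:
        proc = subprocess.run(cmd, capture_output=True)
    except FileNotFoundError:
        print("Could not run %s. Please ensure it's installed and in $PATH."
              % cmd[0])
        return None  # without this, the next line would raise NameError
    return proc.stdout.decode()


print(run_tool(['no-such-binary-hopefully']))  # prints the message, then None
```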
0a5b7c606a711307bdc41179cf94c0a72c15ee92
|
Make BaseCommandTest automatically instantiate commands using decoration magic.
|
google/hypebot
|
hypebot/commands/hypetest.py
|
hypebot/commands/hypetest.py
|
# Copyright 2019 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing commands.
This file will be a dependency of all tests within hypebot, but will not be
included in the main binary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypebot import basebot
from hypebot import hypecore
from hypebot.core import params_lib
from hypebot.interfaces import interface_factory
from hypebot.protos import channel_pb2
TEST_CHANNEL = channel_pb2.Channel(
    id='#test', name='Test', visibility=channel_pb2.Channel.PUBLIC)


def ForCommand(command_cls):
  """Decorator to enable setting the command for each test class."""

  def _Internal(test_cls):
    test_cls._command_cls = command_cls
    return test_cls

  return _Internal


class BaseCommandTestCase(unittest.TestCase):

  # Set the default bot params (used by core) to something sane for testing.
  BOT_PARAMS = params_lib.MergeParams(basebot.BaseBot.DEFAULT_PARAMS, {
      'interface': {
          'type': 'CaptureInterface',
      },
      'storage': {
          'type': 'MemStore',
          'cached_type': 'MemStore',
      },
      'execution_mode': {
          # This currently sets the command prefix to `!`. We should figure out
          # a better long-term solution for the command prefix though since this
          # can in theory change other behavior within core, but currently
          # should have no other impacts.
          'dev': False,
      },
      'commands': {},
      'subscriptions': {},
  })

  @classmethod
  def setUpClass(cls):
    super(BaseCommandTestCase, cls).setUpClass()
    if not hasattr(cls, '_command_cls'):
      raise AttributeError(
          ('%s is missing command initializer. All BaseCommandTestCases must'
           ' be decorated with @ForCommand and given the command they are'
           ' testing. For example:\n\n@ForCommand(simple_commands.HelpCommand'
           ')\nclass HelpCommandTest(BaseCommandTestCase):\n  ...') %
          cls.__name__)

  def setUp(self):
    super(BaseCommandTestCase, self).setUp()
    self.interface = interface_factory.CreateFromParams(
        self.BOT_PARAMS.interface)
    self.core = hypecore.Core(self.BOT_PARAMS, self.interface)
    # We disable ratelimiting for tests.
    self.command = self._command_cls({'ratelimit': {
        'enabled': False
    }}, self.core)
|
# Copyright 2019 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing commands.
This file will be a dependency of all tests within hypebot, but will not be
included in the main binary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypebot import basebot
from hypebot import hypecore
from hypebot.core import params_lib
from hypebot.interfaces import interface_factory
from hypebot.protos import channel_pb2
TEST_CHANNEL = channel_pb2.Channel(
    id='#test', name='Test', visibility=channel_pb2.Channel.PUBLIC)


class BaseCommandTestCase(unittest.TestCase):

  # Set the default bot params (used by core) to something sane for testing.
  BOT_PARAMS = params_lib.MergeParams(basebot.BaseBot.DEFAULT_PARAMS, {
      'interface': {
          'type': 'CaptureInterface',
      },
      'storage': {
          'type': 'MemStore',
          'cached_type': 'MemStore',
      },
      'execution_mode': {
          # This currently sets the command prefix to `!`. We should figure out
          # a better long-term solution for the command prefix though since this
          # can in theory change other behavior within core, but currently
          # should have no other impacts.
          'dev': False,
      },
      'commands': {},
      'subscriptions': {},
  })

  def setUp(self):
    super(BaseCommandTestCase, self).setUp()
    self.interface = interface_factory.CreateFromParams(
        self.BOT_PARAMS.interface)
    self.core = hypecore.Core(self.BOT_PARAMS, self.interface)
|
apache-2.0
|
Python
|
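The "decoration magic" is a class decorator that stamps the class under test onto the TestCase, so `setUp` can instantiate it automatically. A minimal sketch under those assumptions (EchoCommand and the helper names are hypothetical, not hypebot's API):

```python
import unittest


def for_command(command_cls):
    def _internal(test_cls):
        test_cls._command_cls = command_cls
        return test_cls
    return _internal


class BaseCommandTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        if not hasattr(cls, '_command_cls'):
            raise AttributeError(
                '%s must be decorated with @for_command' % cls.__name__)

    def setUp(self):
        self.command = self._command_cls()  # instantiated automatically


class EchoCommand(object):
    def __call__(self, text):
        return text


@for_command(EchoCommand)
class EchoCommandTest(BaseCommandTestCase):
    def test_echo(self):
        self.assertEqual(self.command('hi'), 'hi')


if __name__ == '__main__':
    unittest.main()
```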
6cef7f841fc34321d68e8c85ff7f78682c59eae2
|
Add help and version text; check for IO errors
|
bdesham/py-chrome-bookmarks,bdesham/py-chrome-bookmarks,bdesham/py-chrome-bookmarks
|
py-chrome-bookmarks.py
|
py-chrome-bookmarks.py
|
#!/usr/bin/python

# py-chrome-bookmarks
#
# A script to convert Google Chrome's bookmarks file to the standard HTML-ish
# format.
#
# (c) Benjamin Esham, 2011. See the accompanying README for this file's
# license and other information.

import json, sys, os, re

# html escaping code from http://wiki.python.org/moin/EscapingHtml

html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&#39;",
    ">": "&gt;",
    "<": "&lt;",
    }

def html_escape(text):
    return ''.join(html_escape_table.get(c, c) for c in text)

def sanitize(string):
    res = ''
    string = html_escape(string)
    for i in range(len(string)):
        if ord(string[i]) > 127:
            res += '&#x%x;' % ord(string[i])
        else:
            res += string[i]
    return res

def html_for_node(node):
    if 'url' in node:
        return html_for_url_node(node)
    elif 'children' in node:
        return html_for_parent_node(node)
    else:
        return ''

def html_for_url_node(node):
    if not re.match("javascript:", node['url']):
        return '<dt><a href="%s">%s</a>\n' % (sanitize(node['url']), sanitize(node['name']))
    else:
        return ''

def html_for_parent_node(node):
    return '<dt><h3>%s</h3>\n<dl><p>%s</dl><p>\n' % (sanitize(node['name']),
            ''.join([html_for_node(n) for n in node['children']]))

def version_text():
    old_out = sys.stdout
    sys.stdout = sys.stderr
    print "py-chrome-bookmarks"
    print "(c) 2011, Benjamin Esham"
    print "https://github.com/bdesham/py-chrome-bookmarks"
    sys.stdout = old_out

def help_text():
    version_text()
    old_out = sys.stdout
    sys.stdout = sys.stderr
    print
    print "usage: python py-chrome-bookmarks input-file output-file"
    print "  input-file is the Chrome bookmarks file"
    print "  output-file is the destination for the generated HTML bookmarks file"
    sys.stdout = old_out

# check for help or version requests

if len(sys.argv) != 3 or "-h" in sys.argv or "--help" in sys.argv:
    help_text()
    exit()
if "-v" in sys.argv or "--version" in sys.argv:
    version_text()
    exit()

# the actual code here...

in_file = os.path.expanduser(sys.argv[1])
out_file = os.path.expanduser(sys.argv[2])

try:
    f = open(in_file, 'r')
except IOError, e:
    print >> sys.stderr, "py-chrome-bookmarks: error opening the input file."
    print >> sys.stderr, e
    exit()

j = json.loads(f.read())
f.close()

try:
    out = open(out_file, 'w')
except IOError, e:
    print >> sys.stderr, "py-chrome-bookmarks: error opening the output file."
    print >> sys.stderr, e
    exit()

out.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Bookmarks</title>
<h1>Bookmarks</h1>

<dl><p>

<dl>%(bookmark_bar)s</dl>

<dl>%(other)s</dl>
"""
        % {'bookmark_bar': html_for_node(j['roots']['bookmark_bar']),
           'other': html_for_node(j['roots']['other'])})

out.close()
|
#!/usr/bin/python

# py-chrome-bookmarks
#
# A script to convert Google Chrome's bookmarks file to the standard HTML-ish
# format.
#
# (c) Benjamin Esham, 2011. See the accompanying README for this file's
# license and other information.

import json, sys, os, re

# html escaping code from http://wiki.python.org/moin/EscapingHtml

html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&#39;",
    ">": "&gt;",
    "<": "&lt;",
    }

def html_escape(text):
    return ''.join(html_escape_table.get(c, c) for c in text)

def sanitize(string):
    res = ''
    string = html_escape(string)
    for i in range(len(string)):
        if ord(string[i]) > 127:
            res += '&#x%x;' % ord(string[i])
        else:
            res += string[i]
    return res

def html_for_node(node):
    if 'url' in node:
        return html_for_url_node(node)
    elif 'children' in node:
        return html_for_parent_node(node)
    else:
        return ''

def html_for_url_node(node):
    if not re.match("javascript:", node['url']):
        return '<dt><a href="%s">%s</a>\n' % (sanitize(node['url']), sanitize(node['name']))
    else:
        return ''

def html_for_parent_node(node):
    return '<dt><h3>%s</h3>\n<dl><p>%s</dl><p>\n' % (sanitize(node['name']),
            ''.join([html_for_node(n) for n in node['children']]))

in_file = os.path.expanduser(sys.argv[1])
out_file = os.path.expanduser(sys.argv[2])

f = open(in_file, 'r')
j = json.loads(f.read())
f.close()

out = open(out_file, 'w')
out.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Bookmarks</title>
<h1>Bookmarks</h1>

<dl><p>

<dl>%(bookmark_bar)s</dl>

<dl>%(other)s</dl>
"""
        % {'bookmark_bar': html_for_node(j['roots']['bookmark_bar']),
           'other': html_for_node(j['roots']['other'])})

out.close()
|
isc
|
Python
|
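A Python 3 sketch of the same guarded-open pattern the commit adds, using context managers and `OSError` (the modern alias of `IOError`); the function name is illustrative:

```python
import json
import sys


def read_bookmarks(path):
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except OSError as e:
        print('error opening the input file:', e, file=sys.stderr)
        sys.exit(1)
    except json.JSONDecodeError as e:
        print('input file is not valid JSON:', e, file=sys.stderr)
        sys.exit(1)
```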
98467f55ef8526d343065da7d6a896b16539fa53
|
use consistent hash for etag
|
sseg/hello_world
|
http_agent/utils/etag.py
|
http_agent/utils/etag.py
|
from zlib import adler32


def make_entity_tag(body):
    checksum = adler32(body.encode())
    return '"{checksum}"'.format(checksum=checksum)
|
def make_entity_tag(body):
    checksum = hash(body) + (1 << 64)
    return '"{checksum}"'.format(checksum=checksum)
|
bsd-3-clause
|
Python
|
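Why the swap matters: Python's built-in `hash()` for strings is randomized per process (PYTHONHASHSEED), so two servers would compute different ETags for the same body. `zlib.adler32` is a stable, deterministic checksum:

```python
from zlib import adler32

body = 'hello world'
print(adler32(body.encode()))  # same value in every process: 436929629

# hash('hello world') would usually differ between two interpreter runs
# unless PYTHONHASHSEED is pinned, which breaks ETag-based caching.
```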
ec668c693051f70026360ac2f3bc67ced6c01a21
|
fix little bug
|
shananin/fb_messenger
|
src/fb_messenger/test/test_attachements.py
|
src/fb_messenger/test/test_attachements.py
|
import unittest


class FirstTest(unittest.TestCase):
    def test_first(self):
        self.assertEqual(True, True, 'incorrect types')
|
import unittest


class FirstTest(unittest.TestCase):
    def test_first(self):
        self.assertEqual(True, False, 'incorrect types')
|
mit
|
Python
|
4979e8e5ee8ac6cb86ab260f44f052b27381eeb6
|
bump version
|
weikang9009/giddy,pysal/giddy,sjsrey/giddy
|
giddy/__init__.py
|
giddy/__init__.py
|
__version__ = "2.0.0"
# __version__ has to be defined in the first line
"""
:mod:`giddy` --- Spatial Dynamics and Mobility
==============================================
"""
from . import directional
from . import ergodic
from . import markov
from . import mobility
from . import rank
from . import util
|
__version__ = "1.2.0"
# __version__ has to be defined in the first line
"""
:mod:`giddy` --- Spatial Dynamics and Mobility
==============================================
"""
from . import directional
from . import ergodic
from . import markov
from . import mobility
from . import rank
from . import util
|
bsd-3-clause
|
Python
|
522906d2842d90722776f898015fde060c967401
|
Update cam.py
|
tcwissemann/pyCam
|
pyCam/build_0.3/cam.py
|
pyCam/build_0.3/cam.py
|
import cv2
import numpy as np
from twilio.rest import TwilioRestClient
import time
#importing modules ^^

body_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
#importing cascade-classfier ^^

vc = cv2.VideoCapture(0)
#finding default camera ^^

while -1:
    ret, img = vc.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bodies = body_cascade.detectMultiScale(gray, 1.2, 2)
    #converting img frame by frame to suitable type ^^

    for (x,y,w,h) in bodies:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,171),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        client = TwilioRestClient("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") #account_sid, auth_token for twilio accaount.
        client.messages.create(to="+15122997254", from_="+15125807197", #user number, twilio number
                               body="Alert: person(s) on property.") #messege
        time.sleep(300)
    #look for features, draw box on features, sends sms upon sight of features ^^

    cv2.imshow('WebDetect',img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        vc.release()
        cv2.destroyAllWindows()
    #shows video feed, ESC key kills program ^^
|
import cv2
import numpy as np
from twilio.rest import TwilioRestClient
import time
#importing modules ^^

body_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
#importing cascade-classfiers ^^

vc = cv2.VideoCapture(0)
#finding default camera ^^

while -1:
    ret, img = vc.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bodies = body_cascade.detectMultiScale(gray, 1.2, 2)
    #converting img frame by frame to suitable type ^^

    for (x,y,w,h) in bodies:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,171),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        client = TwilioRestClient("AC47b13b617c5806614265237ce06fa110", "e4e74dbdf6719d769422a90225dd8814") #account_sid, auth_token for twilio accaount.
        client.messages.create(to="+15122997254", from_="+15125807197", #my number, twilio number
                               body="Alert: person(s) on property.") #messege
        time.sleep(300)
    #look for features, draw box on features, sends sms upon sight of features ^^

    cv2.imshow('WebDetect',img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        vc.release()
        cv2.destroyAllWindows()
    #shows video feed, ESC key kills program ^^
|
mit
|
Python
|
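The change above replaces a committed Twilio SID and auth token with placeholders. A common follow-up, sketched here with hypothetical environment-variable names, is to load secrets from the environment so they never enter version control:

```python
import os

# Hypothetical variable names; raises KeyError if the secret is unset,
# which fails fast instead of sending requests with bad credentials.
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']

# client = TwilioRestClient(account_sid, auth_token)
# Secrets now live in the deployment environment, not in the source tree.
```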
64c08dfc40240c7b1b4b876b12bdb57ace22d675
|
remove print statement
|
gipit/gippy,gipit/gippy
|
gippy/__init__.py
|
gippy/__init__.py
|
#!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: [email protected]
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from .version import __version__


def mac_update():
    """ update search path on mac """
    import sys
    if sys.platform == 'darwin':
        import os
        from subprocess import check_output
        lib = 'libgip.so'
        path = os.path.dirname(__file__)
        for f in ['_gippy.so', '_algorithms.so']:
            fin = os.path.join(path, f)
            cmd = ['install_name_tool', '-change', lib, os.path.join(path, lib), fin]
            print cmd
            check_output(cmd)

mac_update()

from gippy import init, DataType, GeoImage, GeoVector, Options

# register GDAL and OGR formats
init()

# cleanup functions
del gippy
del version
del init
del mac_update
|
#!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: [email protected]
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from .version import __version__


def mac_update():
    """ update search path on mac """
    import sys
    print 'here'
    if sys.platform == 'darwin':
        import os
        from subprocess import check_output
        lib = 'libgip.so'
        path = os.path.dirname(__file__)
        for f in ['_gippy.so', '_algorithms.so']:
            fin = os.path.join(path, f)
            cmd = ['install_name_tool', '-change', lib, os.path.join(path, lib), fin]
            print cmd
            check_output(cmd)

mac_update()

from gippy import init, DataType, GeoImage, GeoVector, Options

# register GDAL and OGR formats
init()

# cleanup functions
del gippy
del version
del init
del mac_update
|
apache-2.0
|
Python
|
30f89eacb428af7091d238d39766d6481735c670
|
fix for BASH_FUNC_module on qstat output
|
imperial-genomics-facility/data-management-python,imperial-genomics-facility/data-management-python,imperial-genomics-facility/data-management-python,imperial-genomics-facility/data-management-python,imperial-genomics-facility/data-management-python
|
igf_airflow/hpc/hpc_queue.py
|
igf_airflow/hpc/hpc_queue.py
|
import json
import subprocess
from collections import defaultdict
from tempfile import TemporaryFile

def get_pbspro_job_count(job_name_prefix=''):
    '''
    A function for fetching running and queued job information from a PBSPro HPC cluster

    :param job_name_prefix: A text to filter running jobs, default ''
    :returns: A defaultdict object with the following structure
              { job_name: {'Q': counts, 'R': counts }}
    '''
    try:
        with TemporaryFile() as tmp_file:
            subprocess.\
            check_call(
                'qstat -t -f -F json|grep -v BASH_FUNC_module',  # this can fix or break pipeline as well
                shell=True,
                stdout=tmp_file)
            tmp_file.seek(0)
            json_data = tmp_file.read()
            json_data = json.loads(json_data)
            jobs = json_data.get('Jobs')
            active_jobs = dict()
            if jobs is not None:
                active_jobs = defaultdict(lambda: defaultdict(int))
                if len(jobs) > 0:
                    for _, job_data in jobs.items():
                        job_name = job_data.get('Job_Name')
                        job_state = job_data.get('job_state')
                        if job_name.startswith(job_name_prefix):
                            if job_state == 'Q':
                                active_jobs[job_name]['Q'] += 1
                            if job_state == 'R':
                                active_jobs[job_name]['R'] += 1
            return active_jobs
    except Exception as e:
        raise ValueError('Failed to get job counts from hpc, error: {0}'.format(e))
|
import json
import subprocess
from collections import defaultdict
from tempfile import TemporaryFile

def get_pbspro_job_count(job_name_prefix=''):
    '''
    A function for fetching running and queued job information from a PBSPro HPC cluster

    :param job_name_prefix: A text to filter running jobs, default ''
    :returns: A defaultdict object with the following structure
              { job_name: {'Q': counts, 'R': counts }}
    '''
    try:
        with TemporaryFile() as tmp_file:
            subprocess.\
            check_call(
                ['qstat','-t','-f','-F','json'],
                stdout=tmp_file)
            tmp_file.seek(0)
            json_data = tmp_file.read()
            json_data = json.loads(json_data)
            jobs = json_data.get('Jobs')
            active_jobs = dict()
            if jobs is not None:
                active_jobs = defaultdict(lambda: defaultdict(int))
                if len(jobs) > 0:
                    for _, job_data in jobs.items():
                        job_name = job_data.get('Job_Name')
                        job_state = job_data.get('job_state')
                        if job_name.startswith(job_name_prefix):
                            if job_state == 'Q':
                                active_jobs[job_name]['Q'] += 1
                            if job_state == 'R':
                                active_jobs[job_name]['R'] += 1
            return active_jobs
    except Exception as e:
        raise ValueError('Failed to get job counts from hpc, error: {0}'.format(e))
|
apache-2.0
|
Python
|
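A shell-free sketch of the same fix: run qstat without `shell=True` and drop the BASH_FUNC_module line (an exported bash function that some environments leak into qstat's JSON output) in Python before parsing. This assumes a PBSPro `qstat` on $PATH:

```python
import json
import subprocess


def qstat_json():
    out = subprocess.run(
        ['qstat', '-t', '-f', '-F', 'json'],
        capture_output=True, text=True, check=True).stdout
    # Equivalent of `grep -v BASH_FUNC_module`, without invoking a shell.
    cleaned = '\n'.join(
        line for line in out.splitlines()
        if 'BASH_FUNC_module' not in line)
    return json.loads(cleaned)
```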
8998d0f617791f95b1ed6b4a1fffa0f71752b801
|
Update docs/params for initialization methods.
|
mwhoffman/pybo,jhartford/pybo
|
pybo/bayesopt/inits.py
|
pybo/bayesopt/inits.py
|
"""
Implementation of methods for sampling initial points.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
# local imports
from ..utils import ldsample
# exported symbols
__all__ = ['init_middle', 'init_uniform', 'init_latin', 'init_sobol']
def init_middle(bounds):
"""
Initialize using a single query in the middle of the space.
"""
return np.mean(bounds, axis=1)[None, :]
def init_uniform(bounds, n=None, rng=None):
"""
Initialize using `n` uniformly distributed query points. If `n` is `None`
then use 3D points where D is the dimensionality of the input space.
"""
n = 3*len(bounds) if (n is None) else n
X = ldsample.random(bounds, n, rng)
return X
def init_latin(bounds, n=None, rng=None):
"""
Initialize using a Latin hypercube design of size `n`. If `n` is `None`
then use 3D points where D is the dimensionality of the input space.
"""
n = 3*len(bounds) if (n is None) else n
X = ldsample.latin(bounds, n, rng)
return X
def init_sobol(bounds, n=None, rng=None):
"""
Initialize using a Sobol sequence of length `n`. If `n` is `None` then use
3D points where D is the dimensionality of the input space.
"""
n = 3*len(bounds) if (n is None) else n
X = ldsample.sobol(bounds, n, rng)
return X
|
"""
Implementation of methods for sampling initial points.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
# local imports
from ..utils import ldsample
# exported symbols
__all__ = ['init_middle', 'init_uniform', 'init_latin', 'init_sobol']
def init_middle(bounds):
return np.mean(bounds, axis=1)[None, :]
def init_uniform(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.random(bounds, n, rng)
return X
def init_latin(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.latin(bounds, n, rng)
return X
def init_sobol(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.sobol(bounds, n, rng)
return X
|
bsd-2-clause
|
Python
|
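What a Latin hypercube gives you over i.i.d. uniform sampling: each of the n bins per dimension is hit exactly once. A sketch with scipy's QMC module (scipy >= 1.7; not the pybo `ldsample` helper), mapped onto arbitrary bounds:

```python
import numpy as np
from scipy.stats import qmc

bounds = np.array([[0.0, 1.0], [-5.0, 5.0]])   # one (lo, hi) row per dim
n = 3 * len(bounds)                            # the "3D points" default

sampler = qmc.LatinHypercube(d=len(bounds), seed=0)
unit = sampler.random(n)                       # samples in [0, 1)^d
X = qmc.scale(unit, bounds[:, 0], bounds[:, 1])
print(X.shape)                                 # (6, 2)
```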
cfc13f7e98062a2eb5a9a96298ebc67ee79d9602
|
Use path for urls
|
Clarity-89/clarityv2,Clarity-89/clarityv2,Clarity-89/clarityv2,Clarity-89/clarityv2
|
src/clarityv2/deductions/admin.py
|
src/clarityv2/deductions/admin.py
|
from django.urls import path
from django.contrib import admin
from django.db.models import Sum

from clarityv2.utils.views.private_media import PrivateMediaView

from .models import Deduction


class DeductionPrivateMediaView(PrivateMediaView):
    model = Deduction
    permission_required = 'invoices.can_view_invoice'
    file_field = 'receipt'


@admin.register(Deduction)
class DeductionAdmin(admin.ModelAdmin):
    list_display = ('name', 'date', 'amount')
    search_fields = ('name', 'notes')
    change_list_template = 'admin/deductions/deduction/change_list.html'

    def changelist_view(self, request, extra_context=None):
        response = super().changelist_view(request, extra_context=None)
        if hasattr(response, 'context_data'):
            cl = response.context_data.get('cl')
            if cl:
                queryset = cl.get_queryset(request)
                amount = queryset.aggregate(Sum('amount'))['amount__sum']
                response.context_data['total_amount'] = amount
        return response

    def get_urls(self):
        extra = [
            path(
                '<pk>/file/',
                self.admin_site.admin_view(DeductionPrivateMediaView.as_view()),
                name='deductions_deduction_receipt'
            ),
        ]
        return extra + super().get_urls()
|
from django.conf.urls import url
from django.contrib import admin
from django.db.models import Sum

from clarityv2.utils.views.private_media import PrivateMediaView

from .models import Deduction


class DeductionPrivateMediaView(PrivateMediaView):
    model = Deduction
    permission_required = 'invoices.can_view_invoice'
    file_field = 'receipt'


@admin.register(Deduction)
class DeductionAdmin(admin.ModelAdmin):
    list_display = ('name', 'date', 'amount')
    search_fields = ('name', 'notes')
    change_list_template = 'admin/deductions/deduction/change_list.html'

    def changelist_view(self, request, extra_context=None):
        response = super().changelist_view(request, extra_context=None)
        if hasattr(response, 'context_data'):
            cl = response.context_data.get('cl')
            if cl:
                queryset = cl.get_queryset(request)
                amount = queryset.aggregate(Sum('amount'))['amount__sum']
                response.context_data['total_amount'] = amount
        return response

    def get_urls(self):
        extra = [
            url(
                r'^(?P<pk>.*)/file/$',
                self.admin_site.admin_view(DeductionPrivateMediaView.as_view()),
                name='deductions_deduction_receipt'
            ),
        ]
        return extra + super().get_urls()
|
mit
|
Python
|
a8d7afe076c14115f3282114cecad216e46e7353
|
Update scipy_effects.py
|
jiaaro/pydub
|
pydub/scipy_effects.py
|
pydub/scipy_effects.py
|
"""
This module provides scipy versions of high_pass_filter, and low_pass_filter
as well as an additional band_pass_filter.
Of course, you will need to install scipy for these to work.
When this module is imported the high and low pass filters from this module
will be used when calling audio_segment.high_pass_filter() and
audio_segment.high_pass_filter() instead of the slower, less powerful versions
provided by pydub.effects.
"""
from scipy.signal import butter, sosfilt
from .utils import register_pydub_effect
def _mk_butter_filter(freq, type, order):
"""
Args:
freq: The cutoff frequency for highpass and lowpass filters. For
band filters, a list of [low_cutoff, high_cutoff]
type: "lowpass", "highpass", or "band"
order: nth order butterworth filter (default: 5th order). The
attenuation is -6dB/octave beyond the cutoff frequency (for 1st
order). A Higher order filter will have more attenuation, each level
adding an additional -6dB (so a 3rd order butterworth filter would
be -18dB/octave).
Returns:
function which can filter a mono audio segment
"""
def filter_fn(seg):
assert seg.channels == 1
nyq = 0.5 * seg.frame_rate
try:
freqs = [f / nyq for f in freq]
except TypeError:
freqs = freq / nyq
sos = butter(order, freqs, btype=type, output='sos')
y = sosfilt(sos, seg.get_array_of_samples())
return seg._spawn(y.astype(seg.array_type).tostring())
return filter_fn
@register_pydub_effect
def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def high_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
|
"""
This module provides scipy versions of high_pass_filter, and low_pass_filter
as well as an additional band_pass_filter.
Of course, you will need to install scipy for these to work.
When this module is imported the high and low pass filters are used when calling
audio_segment.high_pass_filter() and audio_segment.high_pass_filter() instead
of the slower, less powerful versions provided by pydub.effects.
"""
from scipy.signal import butter, sosfilt
from .utils import register_pydub_effect
def _mk_butter_filter(freq, type, order):
"""
Args:
freq: The cutoff frequency for highpass and lowpass filters. For
band filters, a list of [low_cutoff, high_cutoff]
type: "lowpass", "highpass", or "band"
order: nth order butterworth filter (default: 5th order). The
attenuation is -6dB/octave beyond the cutoff frequency (for 1st
order). A Higher order filter will have more attenuation, each level
adding an additional -6dB (so a 3rd order butterworth filter would
be -18dB/octave).
Returns:
function which can filter a mono audio segment
"""
def filter_fn(seg):
assert seg.channels == 1
nyq = 0.5 * seg.frame_rate
try:
freqs = [f / nyq for f in freq]
except TypeError:
freqs = freq / nyq
sos = butter(order, freqs, btype=type, output='sos')
y = sosfilt(sos, seg.get_array_of_samples())
return seg._spawn(y.astype(seg.array_type).tostring())
return filter_fn
@register_pydub_effect
def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def high_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
|
mit
|
Python
|
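A minimal standalone use of the same scipy primitives the module wraps: a 5th-order Butterworth band-pass in second-order sections, applied to a synthetic signal (the sample rate and cutoffs are illustrative):

```python
import numpy as np
from scipy.signal import butter, sosfilt

frame_rate = 44100
nyq = 0.5 * frame_rate
sos = butter(5, [300 / nyq, 3000 / nyq], btype='band', output='sos')

t = np.arange(frame_rate) / frame_rate
signal = np.sin(2 * np.pi * 1000 * t) + np.sin(2 * np.pi * 60 * t)
filtered = sosfilt(sos, signal)   # 1 kHz tone passes, 60 Hz hum is cut
```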
7508d20bd3d6af0b2e5a886c8ea2f895d9e69935
|
Bump version: 0.2.1 → 0.2.2
|
apetrynet/pyfilemail
|
pyfilemail/__init__.py
|
pyfilemail/__init__.py
|
# -*- coding: utf-8 -*-

__title__ = 'pyfilemail'
__version__ = '0.2.2'
__author__ = 'Daniel Flehner Heen'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Daniel Flehner Heen'

import os
import logging
from functools import wraps

import appdirs

# Init logger
logger = logging.getLogger('pyfilemail')
level = os.getenv('PYFILEMAÌL_DEBUG') and logging.DEBUG or logging.INFO
logger.setLevel(level)

# Formatter
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)

# File logger setup
datadir = appdirs.user_data_dir(appname='pyfilemail', version=__version__)
if not os.path.exists(datadir):
    os.makedirs(datadir)

logfile = os.path.join(datadir, 'pyfilemail.log')
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(level)
filehandler.setFormatter(formatter)

# Stream logger
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.WARNING)

# Add handler
logger.addHandler(filehandler)
logger.addHandler(streamhandler)

# Decorator to make sure user is logged in
from errors import FMBaseError

def login_required(f):
    """Decorator function to check if user is loged in.

    :raises: :class:`FMBaseError` if not logged in
    """

    @wraps(f)
    def check_login(cls, *args, **kwargs):
        if not cls.logged_in:
            raise FMBaseError('Please login to use this method')

        return f(cls, *args, **kwargs)

    return check_login

from users import User  # lint:ok
from transfer import Transfer  # lint:ok
|
# -*- coding: utf-8 -*-

__title__ = 'pyfilemail'
__version__ = '0.2.1'
__author__ = 'Daniel Flehner Heen'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Daniel Flehner Heen'

import os
import logging
from functools import wraps

import appdirs

# Init logger
logger = logging.getLogger('pyfilemail')
level = os.getenv('PYFILEMAÌL_DEBUG') and logging.DEBUG or logging.INFO
logger.setLevel(level)

# Formatter
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)

# File logger setup
datadir = appdirs.user_data_dir(appname='pyfilemail', version=__version__)
if not os.path.exists(datadir):
    os.makedirs(datadir)

logfile = os.path.join(datadir, 'pyfilemail.log')
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(level)
filehandler.setFormatter(formatter)

# Stream logger
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.WARNING)

# Add handler
logger.addHandler(filehandler)
logger.addHandler(streamhandler)

# Decorator to make sure user is logged in
from errors import FMBaseError

def login_required(f):
    """Decorator function to check if user is loged in.

    :raises: :class:`FMBaseError` if not logged in
    """

    @wraps(f)
    def check_login(cls, *args, **kwargs):
        if not cls.logged_in:
            raise FMBaseError('Please login to use this method')

        return f(cls, *args, **kwargs)

    return check_login

from users import User  # lint:ok
from transfer import Transfer  # lint:ok
|
mit
|
Python
|
f7066d6bdd4fefbf517cd8ab44951955bb9f3a2a
|
Fix min/max for None types
|
mlouhivu/build-recipes,mlouhivu/build-recipes
|
gpaw/setup/gcc.py
|
gpaw/setup/gcc.py
|
#!/usr/bin/env python3
"""Wrapper for the GNU compiler that converts / removes incompatible
compiler options and allows for file-specific tailoring."""

import sys
from subprocess import call

# Default compiler and options
compiler = 'gcc'
args2change = {}
fragile_files = ['c/xc/tpss.c']

# Default optimisation settings
default_level = 3
default_flags = ['-funroll-loops']
fragile_level = 2
fragile_flags = []

# Puhti (Bull Sequana X1000)
if True:
    compiler = 'mpicc'
    default_flags += ['-march=cascadelake']

# Sisu (Cray XC40)
if not True:
    compiler = 'cc'
    default_flags += ['-march=haswell -mtune=haswell -mavx2']
    fragile_files += ['c/xc/revtpss.c']

# Taito (HP cluster)
if not True:
    compiler = 'mpicc'
    default_flags += ['-ffast-math -march=sandybridge -mtune=haswell']

optimise = None   # optimisation level 0/1/2/3 (None == default)
debug = False     # use -g or not
fragile = False   # use special flags for current file?
sandwich = True   # use optimisation flag twice (= no override possible)

# process arguments
args = []
for arg in sys.argv[1:]:
    arg = arg.strip()
    if arg.startswith('-O'):
        level = int(arg.replace('-O', ''))
        if not optimise or level > optimise:
            optimise = level
    elif arg == '-g':
        debug = True
    elif arg in args2change:
        if args2change[arg]:
            args.append(args2change[arg])
    else:
        if arg in fragile_files:
            fragile = True
        args.append(arg)

# set default optimisation level and flags
if fragile:
    if optimise is not None:
        optimise = min(fragile_level, optimise)
    flags = fragile_flags
else:
    if optimise is not None:
        optimise = max(default_level, optimise)
    flags = default_flags

# add optimisation level to flags
if optimise is not None:
    flags.insert(0, '-O{0}'.format(optimise))
    if sandwich:
        args.append('-O{0}'.format(optimise))

# make sure -g is always the _first_ flag, so it doesn't mess e.g. with the
# optimisation level
if debug:
    flags.insert(0, '-g')

# construct and execute the compile command
cmd = '{0} {1} {2}'.format(compiler, ' '.join(flags), ' '.join(args))
print(cmd)
call(cmd, shell=True)
|
#!/usr/bin/env python3
"""Wrapper for the GNU compiler that converts / removes incompatible
compiler options and allows for file-specific tailoring."""

import sys
from subprocess import call

# Default compiler and options
compiler = 'gcc'
args2change = {}
fragile_files = ['c/xc/tpss.c']

# Default optimisation settings
default_level = 3
default_flags = ['-funroll-loops']
fragile_level = 2
fragile_flags = []

# Puhti (Bull Sequana X1000)
if True:
    compiler = 'mpicc'
    default_flags += ['-march=cascadelake']

# Sisu (Cray XC40)
if not True:
    compiler = 'cc'
    default_flags += ['-march=haswell -mtune=haswell -mavx2']
    fragile_files += ['c/xc/revtpss.c']

# Taito (HP cluster)
if not True:
    compiler = 'mpicc'
    default_flags += ['-ffast-math -march=sandybridge -mtune=haswell']

optimise = None   # optimisation level 0/1/2/3 (None == default)
debug = False     # use -g or not
fragile = False   # use special flags for current file?
sandwich = True   # use optimisation flag twice (= no override possible)

# process arguments
args = []
for arg in sys.argv[1:]:
    arg = arg.strip()
    if arg.startswith('-O'):
        level = int(arg.replace('-O', ''))
        if not optimise or level > optimise:
            optimise = level
    elif arg == '-g':
        debug = True
    elif arg in args2change:
        if args2change[arg]:
            args.append(args2change[arg])
    else:
        if arg in fragile_files:
            fragile = True
        args.append(arg)

# set default optimisation level and flags
if fragile:
    optimise = min(fragile_level, optimise)
    flags = fragile_flags
else:
    optimise = max(default_level, optimise)
    flags = default_flags

# add optimisation level to flags
if optimise is not None:
    flags.insert(0, '-O{0}'.format(optimise))
    if sandwich:
        args.append('-O{0}'.format(optimise))

# make sure -g is always the _first_ flag, so it doesn't mess e.g. with the
# optimisation level
if debug:
    flags.insert(0, '-g')

# construct and execute the compile command
cmd = '{0} {1} {2}'.format(compiler, ' '.join(flags), ' '.join(args))
print(cmd)
call(cmd, shell=True)
|
mit
|
Python
|
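The bug in isolation: Python 3 refuses to order `None` against an int, so `min`/`max` need the `is not None` guard that Python 2's arbitrary-type comparisons never forced you to write:

```python
optimise = None
fragile_level = 2

try:
    optimise = min(fragile_level, optimise)
except TypeError as e:
    print(e)  # '<' not supported between instances of 'NoneType' and 'int'

# The guarded form from the fix:
if optimise is not None:
    optimise = min(fragile_level, optimise)
```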
0b6b7ab518362445f3901f8d3b0d6281e2671c3f
|
Make code python3 compatible
|
grandquista/rethinkdb,marshall007/rethinkdb,mquandalle/rethinkdb,ajose01/rethinkdb,KSanthanam/rethinkdb,tempbottle/rethinkdb,mquandalle/rethinkdb,yaolinz/rethinkdb,yaolinz/rethinkdb,sebadiaz/rethinkdb,sontek/rethinkdb,sbusso/rethinkdb,tempbottle/rethinkdb,mbroadst/rethinkdb,alash3al/rethinkdb,catroot/rethinkdb,wujf/rethinkdb,RubenKelevra/rethinkdb,tempbottle/rethinkdb,dparnell/rethinkdb,matthaywardwebdesign/rethinkdb,Wilbeibi/rethinkdb,niieani/rethinkdb,JackieXie168/rethinkdb,rrampage/rethinkdb,jesseditson/rethinkdb,robertjpayne/rethinkdb,scripni/rethinkdb,alash3al/rethinkdb,grandquista/rethinkdb,bpradipt/rethinkdb,urandu/rethinkdb,sebadiaz/rethinkdb,elkingtonmcb/rethinkdb,wojons/rethinkdb,JackieXie168/rethinkdb,jesseditson/rethinkdb,sontek/rethinkdb,alash3al/rethinkdb,sontek/rethinkdb,KSanthanam/rethinkdb,mcanthony/rethinkdb,bchavez/rethinkdb,dparnell/rethinkdb,Wilbeibi/rethinkdb,scripni/rethinkdb,gdi2290/rethinkdb,wojons/rethinkdb,matthaywardwebdesign/rethinkdb,scripni/rethinkdb,AntouanK/rethinkdb,elkingtonmcb/rethinkdb,ayumilong/rethinkdb,catroot/rethinkdb,sbusso/rethinkdb,catroot/rethinkdb,catroot/rethinkdb,dparnell/rethinkdb,marshall007/rethinkdb,alash3al/rethinkdb,grandquista/rethinkdb,jmptrader/rethinkdb,elkingtonmcb/rethinkdb,sebadiaz/rethinkdb,rrampage/rethinkdb,marshall007/rethinkdb,spblightadv/rethinkdb,victorbriz/rethinkdb,jmptrader/rethinkdb,ajose01/rethinkdb,captainpete/rethinkdb,Qinusty/rethinkdb,JackieXie168/rethinkdb,wkennington/rethinkdb,mcanthony/rethinkdb,yakovenkodenis/rethinkdb,wkennington/rethinkdb,greyhwndz/rethinkdb,matthaywardwebdesign/rethinkdb,Wilbeibi/rethinkdb,robertjpayne/rethinkdb,eliangidoni/rethinkdb,pap/rethinkdb,marshall007/rethinkdb,niieani/rethinkdb,yakovenkodenis/rethinkdb,4talesa/rethinkdb,elkingtonmcb/rethinkdb,urandu/rethinkdb,bpradipt/rethinkdb,grandquista/rethinkdb,wkennington/rethinkdb,KSanthanam/rethinkdb,robertjpayne/rethinkdb,spblightadv/rethinkdb,JackieXie168/rethinkdb,grandquista/rethinkdb,captainpete/rethinkdb,jesseditson/rethinkdb,mbroadst/rethinkdb,4talesa/rethinkdb,ajose01/rethinkdb,victorbriz/rethinkdb,AntouanK/rethinkdb,greyhwndz/rethinkdb,yakovenkodenis/rethinkdb,yakovenkodenis/rethinkdb,grandquista/rethinkdb,KSanthanam/rethinkdb,jmptrader/rethinkdb,urandu/rethinkdb,jesseditson/rethinkdb,AntouanK/rethinkdb,greyhwndz/rethinkdb,yakovenkodenis/rethinkdb,losywee/rethinkdb,wojons/rethinkdb,wujf/rethinkdb,alash3al/rethinkdb,bchavez/rethinkdb,gavioto/rethinkdb,eliangidoni/rethinkdb,gavioto/rethinkdb,sbusso/rethinkdb,wkennington/rethinkdb,losywee/rethinkdb,urandu/rethinkdb,eliangidoni/rethinkdb,bchavez/rethinkdb,catroot/rethinkdb,catroot/rethinkdb,mbroadst/rethinkdb,wojons/rethinkdb,niieani/rethinkdb,JackieXie168/rethinkdb,greyhwndz/rethinkdb,mcanthony/rethinkdb,alash3al/rethinkdb,AntouanK/rethinkdb,matthaywardwebdesign/rethinkdb,pap/rethinkdb,yakovenkodenis/rethinkdb,gdi2290/rethinkdb,rrampage/rethinkdb,greyhwndz/rethinkdb,jesseditson/rethinkdb,lenstr/rethinkdb,eliangidoni/rethinkdb,spblightadv/rethinkdb,JackieXie168/rethinkdb,bpradipt/rethinkdb,scripni/rethinkdb,yakovenkodenis/rethinkdb,tempbottle/rethinkdb,pap/rethinkdb,wojons/rethinkdb,ajose01/rethinkdb,mbroadst/rethinkdb,yaolinz/rethinkdb,bpradipt/rethinkdb,gavioto/rethinkdb,JackieXie168/rethinkdb,dparnell/rethinkdb,yaolinz/rethinkdb,jmptrader/rethinkdb,niieani/rethinkdb,KSanthanam/rethinkdb,JackieXie168/rethinkdb,bpradipt/rethinkdb,bchavez/rethinkdb,4talesa/rethinkdb,rrampage/rethinkdb,RubenKelevra/rethinkdb,matthaywardwebdesign/rethinkdb,sebadiaz/rethinkdb,mquandalle/rethinkdb,jesseditson/rethinkdb,yaolinz/rethinkdb,grandquista/rethinkdb,bpradipt/rethinkdb,mquandalle/rethinkdb,spblightadv/rethinkdb,yakovenkodenis/rethinkdb,pap/rethinkdb,Qinusty/rethinkdb,gavioto/rethinkdb,marshall007/rethinkdb,elkingtonmcb/rethinkdb,lenstr/rethinkdb,wkennington/rethinkdb,eliangidoni/rethinkdb,RubenKelevra/rethinkdb,niieani/rethinkdb,jmptrader/rethinkdb,rrampage/rethinkdb,4talesa/rethinkdb,RubenKelevra/rethinkdb,AntouanK/rethinkdb,spblightadv/rethinkdb,mbroadst/rethinkdb,yaolinz/rethinkdb,jmptrader/rethinkdb,wujf/rethinkdb,robertjpayne/rethinkdb,niieani/rethinkdb,mquandalle/rethinkdb,bpradipt/rethinkdb,RubenKelevra/rethinkdb,Wilbeibi/rethinkdb,scripni/rethinkdb,sbusso/rethinkdb,bchavez/rethinkdb,alash3al/rethinkdb,sbusso/rethinkdb,ajose01/rethinkdb,wkennington/rethinkdb,lenstr/rethinkdb,Qinusty/rethinkdb,marshall007/rethinkdb,gavioto/rethinkdb,eliangidoni/rethinkdb,wujf/rethinkdb,AntouanK/rethinkdb,wkennington/rethinkdb,grandquista/rethinkdb,mcanthony/rethinkdb,bpradipt/rethinkdb,pap/rethinkdb,AntouanK/rethinkdb,gavioto/rethinkdb,alash3al/rethinkdb,losywee/rethinkdb,4talesa/rethinkdb,Qinusty/rethinkdb,wojons/rethinkdb,losywee/rethinkdb,greyhwndz/rethinkdb,RubenKelevra/rethinkdb,yaolinz/rethinkdb,lenstr/rethinkdb,eliangidoni/rethinkdb,mbroadst/rethinkdb,captainpete/rethinkdb,victorbriz/rethinkdb,rrampage/rethinkdb,KSanthanam/rethinkdb,JackieXie168/rethinkdb,lenstr/rethinkdb,captainpete/rethinkdb,mbroadst/rethinkdb,jesseditson/rethinkdb,robertjpayne/rethinkdb,4talesa/rethinkdb,KSanthanam/rethinkdb,elkingtonmcb/rethinkdb,captainpete/rethinkdb,wojons/rethinkdb,gdi2290/rethinkdb,sontek/rethinkdb,ajose01/rethinkdb,marshall007/rethinkdb,bchavez/rethinkdb,mbroadst/rethinkdb,scripni/rethinkdb,victorbriz/rethinkdb,captainpete/rethinkdb,mcanthony/rethinkdb,bchavez/rethinkdb,Qinusty/rethinkdb,sebadiaz/rethinkdb,grandquista/rethinkdb,greyhwndz/rethinkdb,dparnell/rethinkdb,sebadiaz/rethinkdb,lenstr/rethinkdb,tempbottle/rethinkdb,jesseditson/rethinkdb,losywee/rethinkdb,losywee/rethinkdb,sontek/rethinkdb,eliangidoni/rethinkdb,gdi2290/rethinkdb,sontek/rethinkdb,sebadiaz/rethinkdb,niieani/rethinkdb,greyhwndz/rethinkdb,lenstr/rethinkdb,mbroadst/rethinkdb,ajose01/rethinkdb,catroot/rethinkdb,robertjpayne/rethinkdb,urandu/rethinkdb,ayumilong/rethinkdb,jmptrader/rethinkdb,pap/rethinkdb,captainpete/rethinkdb,mquandalle/rethinkdb,yaolinz/rethinkdb,mcanthony/rethinkdb,Qinusty/rethinkdb,gdi2290/rethinkdb,tempbottle/rethinkdb,ayumilong/rethinkdb,sbusso/rethinkdb,marshall007/rethinkdb,dparnell/rethinkdb,Qinusty/rethinkdb,victorbriz/rethinkdb,Wilbeibi/rethinkdb,gdi2290/rethinkdb,matthaywardwebdesign/rethinkdb,matthaywardwebdesign/rethinkdb,scripni/rethinkdb,sbusso/rethinkdb,jmptrader/rethinkdb,mcanthony/rethinkdb,gavioto/rethinkdb,mcanthony/rethinkdb,tempbottle/rethinkdb,wujf/rethinkdb,elkingtonmcb/rethinkdb,AntouanK/rethinkdb,matthaywardwebdesign/rethinkdb,robertjpayne/rethinkdb,pap/rethinkdb,dparnell/rethinkdb,losywee/rethinkdb,niieani/rethinkdb,greyhwndz/rethinkdb,robertjpayne/rethinkdb,KSanthanam/rethinkdb,RubenKelevra/rethinkdb,Wilbeibi/rethinkdb,eliangidoni/rethinkdb,victorbriz/rethinkdb,bpradipt/rethinkdb,bchavez/rethinkdb,mquandalle/rethinkdb,ayumilong/rethinkdb,dparnell/rethinkdb,4talesa/rethinkdb,sontek/rethinkdb,scripni/rethinkdb,Wilbeibi/rethinkdb,ayumilong/rethinkdb,ajose01/rethinkdb,lenstr/rethinkdb,RubenKelevra/rethinkdb,bchavez/rethinkdb,rrampage/rethinkdb,pap/rethinkdb,losywee/rethinkdb,elkingtonmcb/rethinkdb,ro
bertjpayne/rethinkdb,gdi2290/rethinkdb,urandu/rethinkdb,Wilbeibi/rethinkdb,sontek/rethinkdb,urandu/rethinkdb,sebadiaz/rethinkdb,sbusso/rethinkdb,ayumilong/rethinkdb,spblightadv/rethinkdb,spblightadv/rethinkdb,victorbriz/rethinkdb,urandu/rethinkdb,mquandalle/rethinkdb,gavioto/rethinkdb,victorbriz/rethinkdb,wkennington/rethinkdb,captainpete/rethinkdb,wujf/rethinkdb,rrampage/rethinkdb,Qinusty/rethinkdb,wujf/rethinkdb,ayumilong/rethinkdb,ayumilong/rethinkdb,spblightadv/rethinkdb
|
drivers/python/setup.py
|
drivers/python/setup.py
|
# Copyright 2010-2012 RethinkDB, all rights reserved.
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsPlatformError, CCompilerError, DistutilsExecError
import sys
class build_ext_nofail(build_ext):
# This class can replace the build_ext command with one that does not fail
# when the extension fails to build.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError as e:
self._failed(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError) as e:
self._failed(e)
else:
try:
import google.protobuf.internal.cpp_message
except ImportError:
print >> sys.stderr, "*** WARNING: The installed protobuf library does not seem to include the C++ extension"
print >> sys.stderr, "*** WARNING: The RethinkDB driver will fallback to using the pure python implementation"
def _failed(self, e):
print >> sys.stderr, "*** WARNING: Unable to compile the C++ extension"
print >> sys.stderr, e
print >> sys.stderr, "*** WARNING: Defaulting to the python implementation"
setup(name="rethinkdb"
,version="1.7.0-2"
,description="This package provides the Python driver library for the RethinkDB database server."
,url="http://rethinkdb.com"
,maintainer="RethinkDB Inc."
,maintainer_email="[email protected]"
,packages=['rethinkdb']
,install_requires=['protobuf']
,ext_modules=[Extension('rethinkdb_pbcpp', sources=['./rethinkdb/pbcpp.cpp', './rethinkdb/ql2.pb.cc'],
include_dirs=['./rethinkdb'], libraries=['protobuf'])]
,cmdclass={"build_ext":build_ext_nofail}
)
|
# Copyright 2010-2012 RethinkDB, all rights reserved.
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsPlatformError, CCompilerError, DistutilsExecError
import sys
class build_ext_nofail(build_ext):
# This class can replace the build_ext command with one that does not fail
# when the extension fails to build.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError, e:
self._failed(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError), e:
self._failed(e)
else:
try:
import google.protobuf.internal.cpp_message
except ImportError:
print >> sys.stderr, "*** WARNING: The installed protobuf library does not seem to include the C++ extension"
print >> sys.stderr, "*** WARNING: The RethinkDB driver will fallback to using the pure python implementation"
def _failed(self, e):
print >> sys.stderr, "*** WARNING: Unable to compile the C++ extension"
print >> sys.stderr, e
print >> sys.stderr, "*** WARNING: Defaulting to the python implementation"
setup(name="rethinkdb"
,version="1.7.0-2"
,description="This package provides the Python driver library for the RethinkDB database server."
,url="http://rethinkdb.com"
,maintainer="RethinkDB Inc."
,maintainer_email="[email protected]"
,packages=['rethinkdb']
,install_requires=['protobuf']
,ext_modules=[Extension('rethinkdb_pbcpp', sources=['./rethinkdb/pbcpp.cpp', './rethinkdb/ql2.pb.cc'],
include_dirs=['./rethinkdb'], libraries=['protobuf'])]
,cmdclass={"build_ext":build_ext_nofail}
)
|
apache-2.0
|
Python
|
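Note on the pattern above: swapping in a cmdclass that swallows compiler errors is a general way to ship an optional C extension with a pure-Python fallback. A minimal self-contained sketch of the same idea; the package name mypkg and source mypkg/_speedups.c are hypothetical, not part of the RethinkDB driver:

# A minimal sketch of the optional-extension pattern; mypkg is hypothetical.
import sys
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError

class optional_build_ext(build_ext):
    # Swallow compiler failures so installation still succeeds.
    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError as e:
            sys.stderr.write("WARNING: skipping C extension: %s\n" % e)
    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsExecError) as e:
            sys.stderr.write("WARNING: skipping C extension: %s\n" % e)

setup(name="mypkg",
      version="0.1",
      packages=["mypkg"],
      ext_modules=[Extension("mypkg._speedups", sources=["mypkg/_speedups.c"])],
      cmdclass={"build_ext": optional_build_ext})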
7418379d959cba0e96161c9e61f340541b82d85f
|
clean up xor a bit
|
MiniLight/DeepCL,hughperkins/DeepCL,hughperkins/DeepCL,MiniLight/DeepCL,MiniLight/DeepCL,MiniLight/DeepCL,hughperkins/DeepCL,hughperkins/DeepCL,hughperkins/DeepCL,MiniLight/DeepCL
|
python/examples/xor.py
|
python/examples/xor.py
|
# Copyright Hugh Perkins 2016
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
"""
Simple example of xor
"""
from __future__ import print_function
import PyDeepCL
import random
import numpy as np
def go():
print('xor')
data = [
{'data': [-1, -1], 'label': 0},
{'data': [1, -1], 'label': 1},
{'data': [-1, 1], 'label': 1},
{'data': [1, 1], 'label': 0}
]
N = len(data)
batchSize = N
planes = 2
size = 1
learningRate = 0.1
numEpochs = 4000
cl = PyDeepCL.DeepCL()
net = PyDeepCL.NeuralNet(cl, planes, size)
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.SoftMaxMaker())
print(net.asString())
images = np.zeros((N, planes, size, size), dtype=np.float32)
labels = np.zeros((N,), dtype=np.int32)
for n in range(N):
for plane in range(planes):
images[n,plane,0,0] = data[n]['data'][plane]
labels[n] = data[n]['label']
sgd = PyDeepCL.SGD(cl, learningRate, 0.0)
netLearner = PyDeepCL.NetLearner(
sgd, net,
N, images.reshape((images.size,)), labels,
N, images.reshape((images.size,)), labels,
batchSize)
netLearner.setSchedule(numEpochs)
netLearner.run()
if __name__ == '__main__':
go()
|
# Copyright Hugh Perkins 2015
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
# import sys
# import array
import PyDeepCL
import random
import numpy as np
def go():
print('xor')
random.seed(1)
cl = PyDeepCL.DeepCL()
net = PyDeepCL.NeuralNet(cl, 2, 1)
# net.addLayer(PyDeepCL.InputLayerMaker().numPlanes(2).imageSize(1))
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
# net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())
# net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())
# net.addLayer( PyDeepCL.FullyConnectedMaker().numPlanes(10).imageSize(1).biased().linear() )
#net.addLayer( PyDeepCL.SquareLossMaker() )
net.addLayer(PyDeepCL.SoftMaxMaker())
print(net.asString())
data = [
{'data': [-1, -1], 'label': 0},
{'data': [1, -1], 'label': 1},
{'data': [-1, 1], 'label': 1},
{'data': [1, 1], 'label': 0}
]
N = len(data)
planes = 2
size = 1
images = np.zeros((N, planes, size, size), dtype=np.float32)
labels = np.zeros((N,), dtype=np.int32)
for n in range(N):
images[n,0,0,0] = data[n]['data'][0]
images[n,1,0,0] = data[n]['data'][1]
labels[n] = data[n]['label']
sgd = PyDeepCL.SGD(cl, 0.1, 0.0)
netLearner = PyDeepCL.NetLearner(
sgd, net,
N, images.reshape((images.size,)), labels,
N, images.reshape((images.size,)), labels,
N)
netLearner.setSchedule(2000)
netLearner.run()
if __name__ == '__main__':
go()
|
mpl-2.0
|
Python
|
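A quick aside on the data packing used in the new xor.py: the (N, planes, size, size) array is flattened before it is handed to NetLearner. A minimal NumPy-only sketch (no PyDeepCL required) that checks the layout:

# Standalone check of the array layout used above; requires only NumPy.
import numpy as np

data = [
    {'data': [-1, -1], 'label': 0},
    {'data': [1, -1], 'label': 1},
    {'data': [-1, 1], 'label': 1},
    {'data': [1, 1], 'label': 0},
]
N, planes, size = len(data), 2, 1
images = np.zeros((N, planes, size, size), dtype=np.float32)
labels = np.zeros((N,), dtype=np.int32)
for n in range(N):
    for plane in range(planes):
        images[n, plane, 0, 0] = data[n]['data'][plane]
    labels[n] = data[n]['label']
assert images.shape == (4, 2, 1, 1)
# NetLearner receives the images flattened to one dimension:
flat = images.reshape((images.size,))
assert flat.shape == (8,)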
9729c3aecccfa8130db7b5942c423c0807726f81
|
Add feature importance bar chart.
|
yarny/gbdt,yarny/gbdt,yarny/gbdt,yarny/gbdt
|
python/gbdt/_forest.py
|
python/gbdt/_forest.py
|
from libgbdt import Forest as _Forest
class Forest:
def __init__(self, forest):
if type(forest) is str or type(forest) is unicode:
self._forest = _Forest(forest)
elif type(forest) is _Forest:
self._forest = forest
else:
raise TypeError, 'Unsupported forest type: {0}'.format(type(forest))
def predict(self, data_store):
"""Computes prediction scores for data_store."""
return self._forest.predict(data_store._data_store)
def feature_importance(self):
"""Outputs list of feature importances in descending order."""
return self._forest.feature_importance()
def feature_importance_bar_chart(self, color='blue'):
try:
from matplotlib import pyplot as plt
import numpy
except ImportError:
raise ImportError('Please install matplotlib and numpy.')
fimps = self.feature_importance()
importances = [v for _, v in fimps]
features = [f for f,_ in fimps]
ind = -numpy.arange(len(fimps))
_, ax = plt.subplots()
plt.barh(ind, importances, align='center', color=color)
ax.set_yticks(ind)
ax.set_yticklabels(features)
ax.set_xlabel('Feature importance')
def __str__(self):
return self._forest.as_json()
|
from libgbdt import Forest as _Forest
class Forest:
def __init__(self, forest):
if type(forest) is str or type(forest) is unicode:
self._forest = _Forest(forest)
elif type(forest) is _Forest:
self._forest = forest
else:
raise TypeError, 'Unsupported forest type: {0}'.format(type(forest))
def predict(self, data_store):
"""Computes prediction scores for data_store."""
return self._forest.predict(data_store._data_store)
def feature_importance(self):
"""Outputs list of feature importances in descending order."""
return self._forest.feature_importance()
def __str__(self):
return self._forest.as_json()
|
apache-2.0
|
Python
|
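The feature_importance_bar_chart helper above boils down to a short matplotlib recipe. A self-contained sketch of the same plot, with made-up (feature, importance) pairs standing in for Forest.feature_importance() output, which this assumes is sorted in descending order:

# Illustrative only: fake importances stand in for a trained forest.
from matplotlib import pyplot as plt
import numpy

fimps = [('age', 0.42), ('income', 0.31), ('zip', 0.18), ('clicks', 0.09)]
importances = [v for _, v in fimps]
features = [f for f, _ in fimps]
ind = -numpy.arange(len(fimps))  # negate so the top feature is drawn first
_, ax = plt.subplots()
plt.barh(ind, importances, align='center', color='blue')
ax.set_yticks(ind)
ax.set_yticklabels(features)
ax.set_xlabel('Feature importance')
plt.show()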
50d44ec25eb102451a495dd645ffb2a6f77012ae
|
Add a shortcut for imports
|
EDITD/queue_util,sujaymansingh/queue_util
|
queue_util/__init__.py
|
queue_util/__init__.py
|
from queue_util.consumer import Consumer
from queue_util.producer import Producer
|
mit
|
Python
|
|
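The effect of the shortcut above is that both classes become importable from the package root. A sketch of the before/after import paths:

# Before the shortcut: import from the submodules directly.
from queue_util.consumer import Consumer
from queue_util.producer import Producer

# After the shortcut: the package root re-exports both names.
from queue_util import Consumer, Producer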
325c5a8f407340fa8901f406c301fa8cbdac4ff8
|
bump version to 0.13.0
|
alex/gunicorn,prezi/gunicorn,beni55/gunicorn,malept/gunicorn,z-fork/gunicorn,prezi/gunicorn,jamesblunt/gunicorn,mvaled/gunicorn,urbaniak/gunicorn,prezi/gunicorn,MrKiven/gunicorn,zhoucen/gunicorn,zhoucen/gunicorn,wong2/gunicorn,jamesblunt/gunicorn,elelianghh/gunicorn,malept/gunicorn,gtrdotmcs/gunicorn,gtrdotmcs/gunicorn,jamesblunt/gunicorn,harrisonfeng/gunicorn,urbaniak/gunicorn,gtrdotmcs/gunicorn,WSDC-NITWarangal/gunicorn,1stvamp/gunicorn,wong2/gunicorn,ephes/gunicorn,keakon/gunicorn,wong2/gunicorn,tejasmanohar/gunicorn,alex/gunicorn,1stvamp/gunicorn,ccl0326/gunicorn,mvaled/gunicorn,ammaraskar/gunicorn,alex/gunicorn,ccl0326/gunicorn,zhoucen/gunicorn,urbaniak/gunicorn,GitHublong/gunicorn,malept/gunicorn,1stvamp/gunicorn,ccl0326/gunicorn,mvaled/gunicorn,tempbottle/gunicorn
|
gunicorn/__init__.py
|
gunicorn/__init__.py
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 13, 0)
__version__ = ".".join(map(str, version_info))
SERVER_SOFTWARE = "gunicorn/%s" % __version__
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 12, 2)
__version__ = ".".join(map(str, version_info))
SERVER_SOFTWARE = "gunicorn/%s" % __version__
|
mit
|
Python
|
0f4d2b75cde58f6926636563691182fb5896c894
|
Add docstring to autocorr and ambiguity functions so the axes and peak location of the result are made clear.
|
ryanvolz/echolect
|
echolect/core/coding.py
|
echolect/core/coding.py
|
# Copyright 2013 Ryan Volz
# This file is part of echolect.
# Echolect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Echolect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with echolect. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from echolect.filtering import filtering
__all__ = ['autocorr', 'ambiguity']
def autocorr(code, nfreq=1):
"""Calculate autocorrelation of code for nfreq frequencies.
If nfreq == 1, the result is a 1-D array with length that is
2*len(code) - 1. The peak value of sum(abs(code)**2) is located
in the middle at index len(code) - 1.
If nfreq > 1, the result is a 2-D array with the first index
corresponding to frequency shift. The code is frequency shifted
by normalized frequencies of range(nfreq)/nfreq and correlated
with the baseband code. The result acorr[0] gives the
autocorrelation with 0 frequency shift, acorr[1] with 1/nfreq
frequency shift, etc. These frequencies are the same as (and
are in the same order as) the FFT frequencies for an nfreq-
length FFT.
****Thus, the peak value is at acorr[0, len(code) - 1]****
To relocate the peak to the middle of the result, use
np.fft.fftshift(acorr, axes=0)
To relocate the peak to the [0, 0] entry, use
np.fft.ifftshift(acorr, axes=1)
"""
# special case because matched_doppler does not handle nfreq < len(code)
if nfreq == 1:
acorr = filtering.matched(code, code)
else:
acorr = filtering.matched_doppler(code, nfreq, code)
return acorr
def ambiguity(code, nfreq=1):
"""Calculate the ambiguity function of code for nfreq frequencies.
The ambiguity function is the square of the autocorrelation,
normalized so the peak value is 1.
See autocorr for details.
"""
acorr = autocorr(code, nfreq)
# normalize so answer at zero delay, zero Doppler is 1
b = len(code)
if nfreq == 1:
acorr = acorr / acorr[b - 1]
else:
acorr = acorr / acorr[0, b - 1]
amb = acorr.real**2 + acorr.imag**2
return amb
|
# Copyright 2013 Ryan Volz
# This file is part of echolect.
# Echolect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Echolect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with echolect. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from echolect.filtering import filtering
__all__ = ['autocorr', 'ambiguity']
def autocorr(code, nfreq=1):
# special case because matched_doppler does not handle nfreq < len(code)
if nfreq == 1:
acorr = filtering.matched(code, code)
else:
acorr = filtering.matched_doppler(code, nfreq, code)
return acorr
def ambiguity(code, nfreq=1):
acorr = autocorr(code, nfreq)
# normalize so answer at zero delay, zero Doppler is 1
b = len(code)
if nfreq == 1:
acorr = acorr / acorr[b - 1]
else:
acorr = acorr / acorr[0, b - 1]
amb = acorr.real**2 + acorr.imag**2
return amb
|
bsd-3-clause
|
Python
|
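The zero-Doppler claims in the new docstring are easy to sanity-check with NumPy alone: the full autocorrelation of a length-n code has length 2n - 1 and peaks at sum(abs(code)**2) in the middle, index n - 1. A standalone sketch using the length-13 Barker code (independent of echolect's filtering module):

import numpy as np

# Length-13 Barker code.
code = np.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1], dtype=float)
acorr = np.correlate(code, code, mode='full')  # zero-Doppler autocorrelation
assert len(acorr) == 2 * len(code) - 1
peak = len(code) - 1
assert acorr[peak] == np.sum(np.abs(code) ** 2)  # peak value 13, in the middle
assert np.argmax(np.abs(acorr)) == peak          # sidelobes are all <= 1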
cc6bc2b9af67c064339371b43795c36ed3e5ddcb
|
use TemplateResponse everywhere
|
ella/ella-galleries,MichalMaM/ella-galleries
|
ella_galleries/views.py
|
ella_galleries/views.py
|
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import ungettext
from django.utils.cache import patch_vary_headers
from ella.core.views import get_templates_from_publishable
def gallery_item_detail(request, context, item_slug=None):
'''
Returns ``GalleryItem`` object by its slug or first one (given by
``GalleryItem``.``order``) from ``Gallery``.
'''
gallery = context['object']
item_sorted_dict = gallery.items
count = len(item_sorted_dict)
count_str = ungettext('%(count)d object total', '%(count)d objects total',
count) % {'count': count}
next = None
previous = None
if count == 0:
# TODO: log empty gallery
raise Http404()
if item_slug is None:
item = item_sorted_dict.value_for_index(0)
if count > 1:
next = item_sorted_dict.value_for_index(1)
position = 1
else:
try:
item = item_sorted_dict[item_slug]
except KeyError:
raise Http404()
item_index = item_sorted_dict.keyOrder.index(item_slug)
if item_index > 0:
previous = item_sorted_dict.value_for_index(item_index - 1)
if (item_index + 1) < count:
next = item_sorted_dict.value_for_index(item_index + 1)
position = item_index + 1
context.update({
'gallery': gallery,
'item': item,
'item_list' : item_sorted_dict.values(),
'next' : next,
'previous' : previous,
'count' : count,
'count_str' : count_str,
'position' : position,
})
if request.is_ajax():
template_name = "item-ajax.html"
else:
template_name = "item.html"
response = TemplateResponse(
request,
get_templates_from_publishable(template_name, context['object']),
context,
)
patch_vary_headers(response, ('X-Requested-With',))
return response
|
from django.http import Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ungettext
from django.utils.cache import patch_vary_headers
from ella.core.views import get_templates_from_publishable
def gallery_item_detail(request, context, item_slug=None):
'''
Returns ``GalleryItem`` object by its slug or first one (given by
``GalleryItem``.``order``) from ``Gallery``.
'''
gallery = context['object']
item_sorted_dict = gallery.items
count = len(item_sorted_dict)
count_str = ungettext('%(count)d object total', '%(count)d objects total',
count) % {'count': count}
next = None
previous = None
if count == 0:
# TODO: log empty gallery
raise Http404()
if item_slug is None:
item = item_sorted_dict.value_for_index(0)
if count > 1:
next = item_sorted_dict.value_for_index(1)
position = 1
else:
try:
item = item_sorted_dict[item_slug]
except KeyError:
raise Http404()
item_index = item_sorted_dict.keyOrder.index(item_slug)
if item_index > 0:
previous = item_sorted_dict.value_for_index(item_index - 1)
if (item_index + 1) < count:
next = item_sorted_dict.value_for_index(item_index + 1)
position = item_index + 1
context.update({
'gallery': gallery,
'item': item,
'item_list' : item_sorted_dict.values(),
'next' : next,
'previous' : previous,
'count' : count,
'count_str' : count_str,
'position' : position,
})
if request.is_ajax():
template_name = "item-ajax.html"
else:
template_name = "item.html"
response = render_to_response(
get_templates_from_publishable(template_name, context['object']),
context,
context_instance=RequestContext(request),
)
patch_vary_headers(response, ('X-Requested-With',))
return response
|
bsd-3-clause
|
Python
|
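The switch above works because TemplateResponse defers rendering until later in the response cycle, so headers can still be patched after the view returns. A minimal sketch of the same pattern for a hypothetical view (template names and context are illustrative):

# Minimal sketch of the TemplateResponse + patch_vary_headers pattern.
from django.template.response import TemplateResponse
from django.utils.cache import patch_vary_headers

def my_view(request):
    template_name = "item-ajax.html" if request.is_ajax() else "item.html"
    response = TemplateResponse(request, template_name, {"greeting": "hi"})
    # Safe to adjust headers here: rendering happens later in the response cycle.
    patch_vary_headers(response, ("X-Requested-With",))
    return response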
63587ab033a0aabd52af6b657600d2d2547f034f
|
Bump release version
|
rigetticomputing/grove,rigetticomputing/grove
|
grove/__init__.py
|
grove/__init__.py
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
__version__ = '1.3.0'
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
__version__ = '1.2.0'
|
apache-2.0
|
Python
|
e0de9a865b731f3f24bc7a42909849abc738217f
|
Increment version
|
johnwlockwood/stream_tap,johnwlockwood/stream_tap,johnwlockwood/karl_data,johnwlockwood/iter_karld_tools
|
karld/_meta.py
|
karld/_meta.py
|
version_info = (0, 2, 1)
version = '.'.join(map(str, version_info))
|
version_info = (0, 2, 0)
version = '.'.join(map(str, version_info))
|
apache-2.0
|
Python
|
c39ce3485af781e8974a70200baa1f51e5c1633b
|
fix imports
|
nixon/gstat
|
gstat/__init__.py
|
gstat/__init__.py
|
from gstat import gstat, gstats, gstat_elapsed, gstat_event
|
mit
|
Python
|
|
2a0e3fe9c83da1d11b892c7c35e367f414329936
|
Update teaching_modules.py
|
sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs
|
src/ensae_teaching_cs/automation/teaching_modules.py
|
src/ensae_teaching_cs/automation/teaching_modules.py
|
# -*- coding: utf-8 -*-
"""
@file
@brief List of modules to maintain for the teachings.
"""
def get_teaching_modules():
"""
List of teachings modules to maintain (CI + documentation).
.. runpython::
:showcode:
from ensae_teaching_cs.automation import get_teaching_modules
print('\\n'.join(sorted(get_teaching_modules())))
"""
return ["pymlbenchmark", "_benchmarks", "ensae_teaching_dl", "machinelearningext",
"lecture_citation", "botadi", "pyquickhelper", "jyquickhelper",
"python3_module_template", "mathenjeu", "pymmails", "pymyinstall",
"pyensae", "pyrsslocal", "pysqllike", "ensae_projects", "ensae_teaching_cs",
"code_beatrix", "actuariat_python", "mlstatpy", "jupytalk", "teachpyx",
"tkinterquickhelper", "cpyquickhelper", "pandas_streaming",
"lightmlboard", "lightmlrestapi", "mlinsights", "pyenbc", "mlprodict",
"papierstat", "sparkouille", "manydataapi", "csharpy", "csharpyml",
"wrapclib", "myblog", "_check_python_install"
]
|
# -*- coding: utf-8 -*-
"""
@file
@brief List of modules to maintain for the teachings.
"""
def get_teaching_modules():
"""
List of teachings modules to maintain (CI + documentation).
.. runpython::
:showcode:
from ensae_teaching_cs.automation import get_teaching_modules
print('\\n'.join(sorted(get_teaching_modules())))
"""
return ["pymlbenchmark", "_benchmarks", "ensae_teaching_dl", "machinelearningext",
"lecture_citation", "botadi", "pyquickhelper", "jyquickhelper",
"python3_module_template", "mathenjeu", "pymmails", "pymyinstall",
"pyensae", "pyrsslocal", "pysqllike", "ensae_projects", "ensae_teaching_cs",
"code_beatrix", "actuariat_python", "mlstatpy", "jupytalk", "teachpyx",
"tkinterquickhelper", "cpyquickhelper", "pandas_streaming",
"lightmlboard", "lightmlrestapi", "mlinsights", "pyenbc", "mlprodict",
"papierstat", "sparkouille", "manydataapi", "csharpy", "csharpyml",
"wrapclib", "myblog", "_check_python_install", "onnxcustom"
]
|
mit
|
Python
|
bc08499fd803278ea502bafdf845dec438f951f3
|
Update range-sum-query-2d-immutable.py
|
jaredkoontz/leetcode,githubutilities/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode
|
Python/range-sum-query-2d-immutable.py
|
Python/range-sum-query-2d-immutable.py
|
# Time: ctor: O(m * n)
# lookup: O(1)
# Space: O(m * n)
#
# Given a 2D matrix matrix, find the sum of the elements inside
# the rectangle defined by its upper left corner (row1, col1)
# and lower right corner (row2, col2).
#
# Range Sum Query 2D
# The above rectangle (with the red border) is defined by
# (row1, col1) = (2, 1) and (row2, col2) = (4, 3),
# which contains sum = 8.
#
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
#
# sumRegion(2, 1, 4, 3) -> 8
# sumRegion(1, 1, 2, 2) -> 11
# sumRegion(1, 2, 2, 4) -> 12
# Note:
# You may assume that the matrix does not change.
# There are many calls to sumRegion function.
# You may assume that row1 <= row2 and col1 <= col2.
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
m, n = len(matrix), len(matrix[0])
self.sums = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
self.sums[i][j] = matrix[i-1][j-1]
self.sums[i][j] += self.sums[i][j-1]
for j in xrange(1, n+1):
for i in xrange(1, m+1):
self.sums[i][j] += self.sums[i-1][j]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sums[row2+1][col2+1] - self.sums[row2+1][col1] - \
self.sums[row1][col2+1] + self.sums[row1][col1]
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix(matrix)
# numMatrix.sumRegion(0, 1, 2, 3)
# numMatrix.sumRegion(1, 2, 3, 4)
|
# Time: ctor: O(m * n)
# lookup: O(1)
# Space: O(m * n)
#
# Given a 2D matrix matrix, find the sum of the elements inside
# the rectangle defined by its upper left corner (row1, col1)
# and lower right corner (row2, col2).
#
# Range Sum Query 2D
# The above rectangle (with the red border) is defined by
# (row1, col1) = (2, 1) and (row2, col2) = (4, 3),
# which contains sum = 8.
#
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
#
# sumRegion(2, 1, 4, 3) -> 8
# sumRegion(1, 1, 2, 2) -> 11
# sumRegion(1, 2, 2, 4) -> 12
# Note:
# You may assume that the matrix does not change.
# There are many calls to sumRegion function.
# You may assume that row1 <= row2 and col1 <= col2.
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
m, n = len(matrix), len(matrix[0])
self.sums = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
self.sums[i][j] = matrix[i-1][j-1]
self.sums[i][j] += self.sums[i][j-1]
for j in xrange(1, n+1):
for i in xrange(1, m+1):
self.sums[i][j] += self.sums[i-1][j]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sums[row2+1][col2+1] - self.sums[row2+1][col1] - \
self.sums[row1][col2+1] + self.sums[row1][col1]
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix(matrix)
# numMatrix.sumRegion(0, 1, 2, 3)
# numMatrix.sumRegion(1, 2, 3, 4)
|
mit
|
Python
|
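The O(1) lookup above rests on a single identity: with cumulative sums S, the region total is S[r2+1][c2+1] - S[r2+1][c1] - S[r1][c2+1] + S[r1][c1]. A standalone sketch that builds the table in one pass (rather than the two passes used above) and verifies the documented answers:

# Standalone check of the inclusion-exclusion lookup, using the matrix
# from the problem statement above.
matrix = [
    [3, 0, 1, 4, 2],
    [5, 6, 3, 2, 1],
    [1, 2, 0, 1, 5],
    [4, 1, 0, 1, 7],
    [1, 0, 3, 0, 5],
]
m, n = len(matrix), len(matrix[0])
sums = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
    for j in range(1, n + 1):
        sums[i][j] = (matrix[i - 1][j - 1] + sums[i - 1][j]
                      + sums[i][j - 1] - sums[i - 1][j - 1])

def sum_region(row1, col1, row2, col2):
    # Four cumulative sums combine via inclusion-exclusion.
    return (sums[row2 + 1][col2 + 1] - sums[row2 + 1][col1]
            - sums[row1][col2 + 1] + sums[row1][col1])

assert sum_region(2, 1, 4, 3) == 8
assert sum_region(1, 1, 2, 2) == 11
assert sum_region(1, 2, 2, 4) == 12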
27f8b342e1a4bea9c807b005d16f932880bb7136
|
Document utils.setup_readline
|
flurischt/jedi,dwillmer/jedi,flurischt/jedi,jonashaag/jedi,tjwei/jedi,WoLpH/jedi,jonashaag/jedi,dwillmer/jedi,mfussenegger/jedi,mfussenegger/jedi,WoLpH/jedi,tjwei/jedi
|
jedi/utils.py
|
jedi/utils.py
|
"""
Utilities for end-users.
"""
import sys
from jedi import Interpreter
def readline_complete(text, state):
"""
Function to be passed to :func:`readline.set_completer`.
Usage::
import readline
readline.set_completer(readline_complete)
"""
ns = vars(sys.modules['__main__'])
completions = Interpreter(text, [ns]).completions()
try:
return text + completions[state].complete
except IndexError:
return None
def setup_readline():
"""
Install Jedi completer to :mod:`readline`.
This function setups :mod:`readline` to use Jedi in Python interactive
shell. If you want to use custom ``PYTHONSTARTUP`` file, you can call
this function like this:
>>> from jedi.utils import setup_readline
>>> setup_readline()
"""
try:
import readline
except ImportError:
print("Module readline not available.")
else:
readline.set_completer(readline_complete)
readline.parse_and_bind("tab: complete")
# Default delimiters minus "()":
readline.set_completer_delims(' \t\n`~!@#$%^&*-=+[{]}\\|;:\'",<>/?')
|
"""
Utilities for end-users.
"""
import sys
from jedi import Interpreter
def readline_complete(text, state):
"""
Function to be passed to :func:`readline.set_completer`.
Usage::
import readline
readline.set_completer(readline_complete)
"""
ns = vars(sys.modules['__main__'])
completions = Interpreter(text, [ns]).completions()
try:
return text + completions[state].complete
except IndexError:
return None
def setup_readline():
"""
Install Jedi completer to :mod:`readline`.
"""
try:
import readline
except ImportError:
print("Module readline not available.")
else:
readline.set_completer(readline_complete)
readline.parse_and_bind("tab: complete")
# Default delimiters minus "()":
readline.set_completer_delims(' \t\n`~!@#$%^&*-=+[{]}\\|;:\'",<>/?')
|
mit
|
Python
|
c9dceb4dc83490ab0eebcd3efc9590d3275f53df
|
tidy up messytables-jts integration
|
mk270/ktbh,mk270/ktbh
|
ktbh/schema.py
|
ktbh/schema.py
|
import unicodecsv
from cStringIO import StringIO
import messytables
import itertools
import slugify
import jsontableschema
from messytables.types import *
from messytables_jts import celltype_as_string
def censor(dialect):
tmp = dict(dialect)
censored = [
"doublequote",
"lineterminator",
"skipinitialspace"
]
[ tmp.pop(i) for i in censored ]
return tmp
def sabotage(d):
[ d.__setitem__(k, d[k].encode('utf-8')) for k in d
if isinstance(d[k], unicode) ]
def get_type_of_column(col):
try:
return celltype_as_string(col)
except:
return "any"
def infer_schema(data, _dialect):
f = StringIO(data)
sabotage(_dialect)
d = unicodecsv.reader(f, dialect=None, **_dialect)
field_names = d.next()
f.seek(0)
dialect = censor(_dialect)
t = messytables.CSVTableSet(f, **dialect).tables[0]
sample = itertools.islice(t, 0, 9)
types = messytables.type_guess(sample)
json_table_schema_types = map(get_type_of_column,
types)
slugs = [ slugify.slugify(i) for i in field_names ]
metadata = zip(slugs, field_names, json_table_schema_types)
sch = jsontableschema.JSONTableSchema()
for field_id, label, field_type in metadata:
sch.add_field(field_id=field_id,
label=label,
field_type=field_type)
return sch.as_json()
|
import unicodecsv
from cStringIO import StringIO
import messytables
import itertools
import slugify
import jsontableschema
from messytables.types import *
from messytables_jts import rowset_as_schema
def censor(dialect):
tmp = dict(dialect)
censored = [
"doublequote",
"lineterminator",
"skipinitialspace"
]
[ tmp.pop(i) for i in censored ]
return tmp
def sabotage(d):
[ d.__setitem__(k, d[k].encode('utf-8')) for k in d
if isinstance(d[k], unicode) ]
def get_type_of_column(col):
try:
return rowset_as_schema(col)
except:
return "any"
def infer_schema(data, _dialect):
f = StringIO(data)
sabotage(_dialect)
d = unicodecsv.reader(f, dialect=None, **_dialect)
field_names = d.next()
f.seek(0)
dialect = censor(_dialect)
t = messytables.CSVTableSet(f, **dialect).tables[0]
sample = itertools.islice(t, 0, 9)
types = messytables.type_guess(sample)
json_table_schema_types = map(get_type_of_column(t),
types)
slugs = [ slugify.slugify(i) for i in field_names ]
metadata = zip(slugs, field_names, json_table_schema_types)
sch = jsontableschema.JSONTableSchema()
for field_id, label, field_type in metadata:
sch.add_field(field_id=field_id,
label=label,
field_type=field_type)
return sch.as_json()
|
agpl-3.0
|
Python
|
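A hypothetical call to the tidied-up infer_schema, for orientation; the dialect keys shown are assumptions about what callers normally pass (they must include the three keys that censor() strips before the rest is forwarded to messytables):

# Hypothetical usage sketch (Python 2, like the module itself).
from ktbh.schema import infer_schema

csv_data = "Name,Date of Birth\r\nAda,1815-12-10\r\nGrace,1906-12-09\r\n"
dialect = {
    "delimiter": u",",
    "quotechar": u'"',
    "doublequote": True,
    "lineterminator": u"\r\n",
    "skipinitialspace": False,
}
schema_json = infer_schema(csv_data, dialect)  # JSON Table Schema as a string
print schema_json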
39de531241f987daf2f417fd419c7bd63248dd9d
|
Bump version number.
|
SunDwarf/Kyoukai
|
kyokai/util.py
|
kyokai/util.py
|
"""
Misc utilities.
"""
import os
import pathlib
VERSION = "1.5.0"
VERSIONT = tuple(map(int, VERSION.split('.')))
HTTP_CODES = {
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
400: "Bad Request",
401: "Unauthorized",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
410: "Gone",
413: "Payload Too Large",
429: "Too Many Requests",
500: "Internal Server Error"
}
def static_filename(filename: str) -> str:
"""
Naive static filename implementation, to allow serving static files.
"""
built = ""
p = pathlib.PurePath(filename)
for part in p.parts:
if part != "..":
built += part + os.path.sep
return built[:-1]
|
"""
Misc utilities.
"""
import os
import pathlib
VERSION = "1.3.8"
VERSIONT = tuple(map(int, VERSION.split('.')))
HTTP_CODES = {
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
400: "Bad Request",
401: "Unauthorized",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
410: "Gone",
413: "Payload Too Large",
429: "Too Many Requests",
500: "Internal Server Error"
}
def static_filename(filename: str) -> str:
"""
Naive static filename implementation, to allow serving static files.
"""
built = ""
p = pathlib.PurePath(filename)
for part in p.parts:
if part != "..":
built += part + os.path.sep
return built[:-1]
|
mit
|
Python
|
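For clarity on what the (unchanged) static_filename helper does: it drops ".." segments outright rather than resolving them. A quick behavioural check, with POSIX path separators assumed:

# Quick behavioural check of static_filename on a POSIX system.
from kyokai.util import static_filename

assert static_filename("css/site.css") == "css/site.css"
# Parent-directory segments are dropped outright, not resolved:
assert static_filename("../../etc/passwd") == "etc/passwd"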
34ca71d5db9c1f17d236e5e49471fb6f2a6e1747
|
Implement Paulo tip about router.
|
aldryn/aldryn-search,nephila/aldryn-search,nephila/aldryn-search,aldryn/aldryn-search,nephila/aldryn-search,aldryn/aldryn-search
|
aldryn_search/router.py
|
aldryn_search/router.py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.translation import get_language
from cms.utils.i18n import alias_from_language
from haystack import routers
from haystack.constants import DEFAULT_ALIAS
class LanguageRouter(routers.BaseRouter):
def for_read(self, **hints):
language = get_language()
alias = alias_from_language(language)
if alias not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return alias
def for_write(self, **hints):
language = get_language()
alias = alias_from_language(language)
if alias not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return alias
|
# -*- coding: utf-8 -*-
from django.conf import settings
from cms.utils.i18n import get_current_language
from haystack import routers
from haystack.constants import DEFAULT_ALIAS
class LanguageRouter(routers.BaseRouter):
def for_read(self, **hints):
language = get_current_language()
if language not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return language
def for_write(self, **hints):
language = get_current_language()
if language not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return language
|
bsd-3-clause
|
Python
|
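For the router above to pick a per-language connection, settings need aliases named after the language codes, with 'default' as the fallback. A sketch of matching settings (the engine choice and languages are illustrative):

# settings.py sketch; the per-language aliases are assumptions.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
    'en': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
    'de': {
        'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
    },
}
HAYSTACK_ROUTERS = ['aldryn_search.router.LanguageRouter']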
e5e523890dd1129402d7a0477468ee47dee3fd91
|
Fix missing part/block conversion.
|
wakermahmud/sync-engine,EthanBlackburn/sync-engine,closeio/nylas,ErinCall/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,nylas/sync-engine,jobscore/sync-engine,Eagles2F/sync-engine,PriviPK/privipk-sync-engine,jobscore/sync-engine,PriviPK/privipk-sync-engine,Eagles2F/sync-engine,gale320/sync-engine,wakermahmud/sync-engine,closeio/nylas,nylas/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,jobscore/sync-engine,nylas/sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,EthanBlackburn/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,jobscore/sync-engine,EthanBlackburn/sync-engine,gale320/sync-engine,closeio/nylas,Eagles2F/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,EthanBlackburn/sync-engine,ErinCall/sync-engine,PriviPK/privipk-sync-engine,gale320/sync-engine,closeio/nylas,PriviPK/privipk-sync-engine
|
inbox/sendmail/smtp/common.py
|
inbox/sendmail/smtp/common.py
|
from inbox.sendmail.base import generate_attachments, SendError
from inbox.sendmail.smtp.postel import BaseSMTPClient
from inbox.sendmail.smtp.message import create_email, create_reply
class SMTPClient(BaseSMTPClient):
""" SMTPClient for Gmail and other providers. """
def _send_mail(self, db_session, message, smtpmsg):
"""Send the email message."""
# Send it using SMTP:
try:
return self._send(smtpmsg.recipients, smtpmsg.msg)
except SendError as e:
self.log.error(str(e))
raise
def send_new(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email from this user account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
blocks = [p.block for p in draft.attachments]
attachments = generate_attachments(blocks)
smtpmsg = create_email(self.sender_name, self.email_address,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
def send_reply(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email reply from this user
account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
blocks = [p.block for p in draft.attachments]
attachments = generate_attachments(blocks)
smtpmsg = create_reply(self.sender_name, self.email_address,
draft.in_reply_to, draft.references,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
|
from inbox.sendmail.base import generate_attachments, SendError
from inbox.sendmail.smtp.postel import BaseSMTPClient
from inbox.sendmail.smtp.message import create_email, create_reply
class SMTPClient(BaseSMTPClient):
""" SMTPClient for Gmail and other providers. """
def _send_mail(self, db_session, message, smtpmsg):
"""Send the email message."""
# Send it using SMTP:
try:
return self._send(smtpmsg.recipients, smtpmsg.msg)
except SendError as e:
self.log.error(str(e))
raise
def send_new(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email from this user account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
attachments = generate_attachments(draft.attachments)
smtpmsg = create_email(self.sender_name, self.email_address,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
def send_reply(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email reply from this user
account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
attachments = generate_attachments(draft.attachments)
smtpmsg = create_reply(self.sender_name, self.email_address,
draft.in_reply_to, draft.references,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
|
agpl-3.0
|
Python
|
872a96b52061bd9ab3a3178aacf3e3d0be2cc498
|
Make field filter errors ValidationErrors
|
limbera/django-nap,MarkusH/django-nap
|
nap/dataviews/fields.py
|
nap/dataviews/fields.py
|
from django.db.models.fields import NOT_PROVIDED
from django.forms import ValidationError
from nap.utils import digattr
class field(property):
'''A base class to compare against.'''
def __get__(self, instance, cls=None):
if instance is None:
return self
return self.fget(instance._obj)
def __set__(self, instance, value):
self.fset(instance._obj, value)
class Field(field):
'''
class V(DataView):
foo = Field('bar', default=1)
'''
def __init__(self, name, default=NOT_PROVIDED, filters=None):
self.name = name
self.default = default
self.filters = filters or []
def __get__(self, instance, cls=None):
if instance is None:
return self
value = getattr(instance._obj, self.name, self.default)
for filt in self.filters:
try:
value = filt.from_python(value)
except (TypeError, ValueError):
raise ValidationError('Invalid value')
return value
def __set__(self, instance, value):
for filt in self.filters[::-1]:
value = filt.to_python(value)
setattr(instance._obj, self.name, value)
class DigField(Field):
def __get__(self, instance, cls=None):
if instance is None:
return self
return digattr(instance._obj, self.name, self.default)
def __set__(self, instance):
raise NotImplementedError
|
from django.db.models.fields import NOT_PROVIDED
from nap.utils import digattr
class field(property):
'''A base class to compare against.'''
def __get__(self, instance, cls=None):
if instance is None:
return self
return self.fget(instance._obj)
def __set__(self, instance, value):
self.fset(instance._obj, value)
class Field(field):
'''
class V(DataView):
foo = Field('bar', default=1)
'''
def __init__(self, name, default=NOT_PROVIDED, filters=None):
self.name = name
self.default = default
self.filters = filters or []
def __get__(self, instance, cls=None):
if instance is None:
return self
value = getattr(instance._obj, self.name, self.default)
for filt in self.filters:
value = filt.from_python(value)
return value
def __set__(self, instance, value):
for filt in self.filters[::-1]:
value = filt.to_python(value)
setattr(instance._obj, self.name, value)
class DigField(Field):
def __get__(self, instance, cls=None):
if instance is None:
return self
return digattr(instance._obj, self.name, self.default)
def __set__(self, instance):
raise NotImplementedError
|
bsd-3-clause
|
Python
|
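In this design a filter is any object exposing from_python/to_python, and Field now converts read-side failures into ValidationError. A minimal standalone sketch, where DateFilter, Obj and View are hypothetical stand-ins for a real filter, model instance and DataView:

# Standalone sketch; requires Django (for ValidationError) plus the Field
# class above. DateFilter, Obj and View are hypothetical.
from datetime import date
from nap.dataviews.fields import Field

class DateFilter(object):
    def from_python(self, value):
        return value.isoformat()                   # object -> serialised form
    def to_python(self, value):
        return date(*map(int, value.split('-')))   # serialised form -> object

class Obj(object):
    when = date(2014, 1, 1)

class View(object):
    when = Field('when', filters=[DateFilter()])
    def __init__(self, obj):
        self._obj = obj

view = View(Obj())
assert view.when == '2014-01-01'            # read passes through from_python
view.when = '2015-06-30'                    # write passes through to_python
assert view._obj.when == date(2015, 6, 30)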
bbe263e8bd9bb12ccef681d4f21f6b90c89f059d
|
Remove some debug logging
|
ErinCall/splinter_demo,ErinCall/splinter_demo
|
flask/test/test_signup.py
|
flask/test/test_signup.py
|
from __future__ import unicode_literals
from test import TestCase
from web import app
from db import session, User
from nose.tools import eq_
class TestSignup(TestCase):
def test_sign_up(self):
app.test_client().post('/', data={'email': '[email protected]'})
users = session().query(User.email).all()
eq_(users, [('[email protected]',)])
self.visit('/')
self.browser.fill('email', '[email protected]')
self.browser.find_by_name('go').click()
assert self.browser.is_text_present('Thanks'), 'rude!'
users = session().query(User.email).all()
eq_(users, [('[email protected]',), ('[email protected]',)])
def test_valid_emails_get_validated(self):
self.visit('/')
self.browser.fill('email', '[email protected]')
assert self.browser.is_text_present('valid'), "didn't get validated"
def test_invalid_emails_get_yelled_about(self):
self.visit('/')
self.browser.fill('email', 'aghlaghlaghl')
assert self.browser.is_text_present('invalid'), "didn't get yelled at"
|
from __future__ import unicode_literals
from test import TestCase
from web import app
from db import session, User
from nose.tools import eq_
class TestSignup(TestCase):
def test_sign_up(self):
app.test_client().post('/', data={'email': '[email protected]'})
users = session().query(User.email).all()
eq_(users, [('[email protected]',)])
self.visit('/')
self.browser.fill('email', '[email protected]')
self.browser.find_by_name('go').click()
assert self.browser.is_text_present('Thanks'), 'rude!'
users = session().query(User.email).all()
eq_(users, [('[email protected]',), ('[email protected]',)])
def test_valid_emails_get_validated(self):
print 'here before visit'
self.visit('/')
print 'here after visit'
self.browser.fill('email', '[email protected]')
assert self.browser.is_text_present('valid'), "didn't get validated"
def test_invalid_emails_get_yelled_about(self):
self.visit('/')
self.browser.fill('email', 'aghlaghlaghl')
assert self.browser.is_text_present('invalid'), "didn't get yelled at"
|
mit
|
Python
|
6d9e8e8831cd08fa358f33f155a760de3ec59f3b
|
document that this file is generated
|
fonttools/fonttools,googlefonts/fonttools
|
Lib/fontTools/ttLib/tables/__init__.py
|
Lib/fontTools/ttLib/tables/__init__.py
|
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
def _moduleFinderHint():
"""Dummy function to let modulefinder know what tables may be
dynamically imported. Generated by MetaTools/buildTableList.py.
"""
import B_A_S_E_
import C_F_F_
import D_S_I_G_
import G_D_E_F_
import G_P_O_S_
import G_S_U_B_
import J_S_T_F_
import L_T_S_H_
import O_S_2f_2
import T_S_I_B_
import T_S_I_D_
import T_S_I_J_
import T_S_I_P_
import T_S_I_S_
import T_S_I_V_
import T_S_I__0
import T_S_I__1
import T_S_I__2
import T_S_I__3
import T_S_I__5
import V_O_R_G_
import _c_m_a_p
import _c_v_t
import _f_p_g_m
import _g_a_s_p
import _g_l_y_f
import _h_d_m_x
import _h_e_a_d
import _h_h_e_a
import _h_m_t_x
import _k_e_r_n
import _l_o_c_a
import _m_a_x_p
import _n_a_m_e
import _p_o_s_t
import _p_r_e_p
import _v_h_e_a
import _v_m_t_x
|
def _moduleFinderHint():
"""Dummy function to let modulefinder know what tables may be
dynamically imported. Generated by MetaTools/buildTableList.py.
"""
import B_A_S_E_
import C_F_F_
import D_S_I_G_
import G_D_E_F_
import G_P_O_S_
import G_S_U_B_
import J_S_T_F_
import L_T_S_H_
import O_S_2f_2
import T_S_I_B_
import T_S_I_D_
import T_S_I_J_
import T_S_I_P_
import T_S_I_S_
import T_S_I_V_
import T_S_I__0
import T_S_I__1
import T_S_I__2
import T_S_I__3
import T_S_I__5
import _c_m_a_p
import _c_v_t
import _f_p_g_m
import _g_a_s_p
import _g_l_y_f
import _h_d_m_x
import _h_e_a_d
import _h_h_e_a
import _h_m_t_x
import _k_e_r_n
import _l_o_c_a
import _m_a_x_p
import _n_a_m_e
import _p_o_s_t
import _p_r_e_p
import _v_h_e_a
import _v_m_t_x
|
mit
|
Python
|
1f977aa5fa28ed1e351f337191291198384abe02
|
Set auth_encryption_key option to be secret
|
noironetworks/heat,cwolferh/heat-scratch,steveb/heat,cwolferh/heat-scratch,steveb/heat,maestro-hybrid-cloud/heat,dragorosson/heat,openstack/heat,jasondunsmore/heat,openstack/heat,dragorosson/heat,miguelgrinberg/heat,takeshineshiro/heat,rh-s/heat,srznew/heat,cryptickp/heat,pratikmallya/heat,noironetworks/heat,gonzolino/heat,rh-s/heat,srznew/heat,maestro-hybrid-cloud/heat,takeshineshiro/heat,miguelgrinberg/heat,dims/heat,pratikmallya/heat,jasondunsmore/heat,gonzolino/heat,cryptickp/heat,dims/heat
|
heat/common/crypt.py
|
heat/common/crypt.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo_config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
secret=True,
default='notgood but just long enough i t',
help="Key used to encrypt authentication info in the database. "
"Length of this key must be 16, 24 or 32 characters.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
def list_opts():
yield None, auth_opts
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo_config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
default='notgood but just long enough i think',
help="Encryption key used for authentication info in database.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
def list_opts():
yield None, auth_opts
|
apache-2.0
|
Python
|
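The point of secret=True is that oslo.config masks the value when registered options are dumped, e.g. via log_opt_values. A minimal sketch with a hypothetical option name:

# Minimal sketch: secret options print as '****' when dumped to a logger.
import logging
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts([cfg.StrOpt('api_password', secret=True, default='s3cret')])
conf(args=[])  # initialise with no command-line arguments
logging.basicConfig(level=logging.DEBUG)
conf.log_opt_values(logging.getLogger(__name__), logging.DEBUG)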
197a0440ddbf31aa87a6a6998b41344be4924076
|
Fix the rows update
|
rrpg/world-editor,rrpg/world-editor
|
gui/placeslist.py
|
gui/placeslist.py
|
# -*- coding: utf8 -*-
from PyQt4 import QtGui
from PyQt4 import QtCore
class placesList(QtGui.QTableWidget):
_columns = ('Name', 'Type', 'X', 'Y', 'Locate')
_app = None
_parent = None
def __init__(self, parent, app):
"""
Initialisation of the window, creates the GUI and displays the window.
"""
self._app = app
QtGui.QTableView.__init__(self, parent)
self._parent = parent
self.setColumnCount(len(self._columns))
self.setHorizontalHeaderLabels(self._columns)
self.verticalHeader().setVisible(False)
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.setData()
def setData(self):
self.clearContents()
nbRowsToInsert = len(self._app.map.places)
for index, row in enumerate(self._app.map.places):
if self.rowCount() < nbRowsToInsert:
self.insertRow(index)
self.setItem(index, 0, QtGui.QTableWidgetItem(row['name']))
self.setItem(index, 1, QtGui.QTableWidgetItem(self._app.map.getPlaceTypesLabels()[row['type']]))
self.setItem(index, 2, QtGui.QTableWidgetItem(str(row['coordinates'][0])))
self.setItem(index, 3, QtGui.QTableWidgetItem(str(row['coordinates'][1])))
self.setCellWidget(index, 4, QtGui.QPushButton("Locate"))
self.resizeColumnsToContents()
|
# -*- coding: utf8 -*-
from PyQt4 import QtGui
from PyQt4 import QtCore
class placesList(QtGui.QTableWidget):
_columns = ('Name', 'Type', 'X', 'Y', 'Locate')
_app = None
_parent = None
def __init__(self, parent, app):
"""
Initialisation of the window, creates the GUI and displays the window.
"""
self._app = app
QtGui.QTableView.__init__(self, parent)
self._parent = parent
self.setColumnCount(len(self._columns))
self.setHorizontalHeaderLabels(self._columns)
self.verticalHeader().setVisible(False)
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.setData()
def setData(self):
for index, row in enumerate(self._app.map.places):
self.insertRow(index)
self.setItem(index, 0, QtGui.QTableWidgetItem(row['name']))
self.setItem(index, 1, QtGui.QTableWidgetItem(self._app.map.getPlaceTypesLabels()[row['type']]))
self.setItem(index, 2, QtGui.QTableWidgetItem(str(row['coordinates'][0])))
self.setItem(index, 3, QtGui.QTableWidgetItem(str(row['coordinates'][1])))
self.setCellWidget(index, 4, QtGui.QPushButton("Locate"))
self.resizeColumnsToContents()
|
mit
|
Python
|
5809fc832340e3ee5d798fa347e4933e874bdb8b
|
Allow older django to find the tests
|
ForumDev/djangocms-applicants,ForumDev/djangocms-applicants
|
voting/tests/__init__.py
|
voting/tests/__init__.py
|
import django
if django.VERSION[0] == 1 and django.VERSION[1] < 6:
from .tests import *
|
mit
|
Python
|
|
9246d33429e1940d5a98c3c16e708159437b88fa
|
enable header modification
|
nipy/nireg,alexis-roche/niseg,alexis-roche/nipy,nipy/nireg,bthirion/nipy,nipy/nipy-labs,bthirion/nipy,alexis-roche/register,alexis-roche/register,alexis-roche/nipy,alexis-roche/register,arokem/nipy,alexis-roche/niseg,bthirion/nipy,arokem/nipy,alexis-roche/nireg,bthirion/nipy,alexis-roche/nireg,arokem/nipy,alexis-roche/nipy,alexis-roche/nipy,nipy/nipy-labs,arokem/nipy
|
lib/neuroimaging/tools/AnalyzeHeaderTool.py
|
lib/neuroimaging/tools/AnalyzeHeaderTool.py
|
import os, sys
from optparse import OptionParser, Option
from neuroimaging.data import DataSource
from neuroimaging.refactoring.analyze import struct_fields, AnalyzeHeader
##############################################################################
class AnalyzeHeaderTool (OptionParser):
"Command-line tool for getting and setting Analyze header values."
_usage= "%prog [options] <hdrfile>\n"+__doc__
options = (
Option('-a', '--attribute', dest="attname",
help="Get or set this attribute"),
Option('-v', '--value', dest="value",
help="Set attribute to this value"))
#-------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
OptionParser.__init__(self, *args, **kwargs)
self.set_usage(self._usage)
self.add_options(self.options)
#-------------------------------------------------------------------------
def _error(self, message):
print message
self.print_help()
sys.exit(0)
#-------------------------------------------------------------------------
def run(self):
options, args = self.parse_args()
if len(args) != 1: self._error("Please provide a header file name")
filename = args[0]
if not DataSource().exists(filename):
self._error("File not found: %s"%filename)
header = AnalyzeHeader(filename)
attname, value = options.attname, options.value
if attname is not None:
if value is not None:
print "before: %s\t%s"%(attname, getattr(header, attname))
setattr(header, attname, value)
print "after: %s\t%s"%(attname, getattr(header, attname))
header.write(filename+".new")
else: print "%s\t%s"%(attname, getattr(header, attname))
elif value is not None:
self._error("Only provide a value when an attribute is provided")
else: print header
if __name__ == "__main__": AnalyzeHeaderTool().run()
|
import os, sys
from optparse import OptionParser, Option
from neuroimaging.data import DataSource
from neuroimaging.refactoring.analyze import struct_fields, AnalyzeHeader
##############################################################################
class AnalyzeHeaderTool (OptionParser):
"Command-line tool for getting and setting Analyze header values."
_usage= "%prog [options] <hdrfile>\n"+__doc__
options = (
Option('-a', '--attribute', dest="attname",
help="Get or set this attribute"),
Option('-v', '--value', dest="value",
help="Set attribute to this value"))
#-------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
OptionParser.__init__(self, *args, **kwargs)
self.set_usage(self._usage)
self.add_options(self.options)
#-------------------------------------------------------------------------
def _error(self, message):
print message
self.print_help()
sys.exit(0)
#-------------------------------------------------------------------------
def run(self):
options, args = self.parse_args()
if len(args) != 1: self._error("Please provide a header file name")
filename = args[0]
if not DataSource().exists(filename):
self._error("File not found: %s"%filename)
header = AnalyzeHeader(filename)
attname, value = options.attname, options.value
if attname is not None:
if value is not None:
print "before: %s = %s"%(attname, getattr(header, attname))
setattr(header, attname, value)
print "after: %s = %s"%(attname, getattr(header, attname))
#write back out
else: print "%s = %s"%(attname, getattr(header, attname))
elif value is not None:
self._error("Only provide a value when an attribute is provided")
else: print header
if __name__ == "__main__": AnalyzeHeaderTool().run()
|
bsd-3-clause
|
Python
|
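One subtlety in the tool above: optparse delivers option values as strings, so setattr() stores a str even when the header field is numeric. A hedged sketch of casting to the attribute's existing type before assignment (Header here is a stand-in for AnalyzeHeader):
class Header(object):
    datatype = 4
header = Header()
attname, value = "datatype", "8"   # as parsed from the command line
current = getattr(header, attname)
# Cast the incoming string to the type of the value already stored.
setattr(header, attname, type(current)(value))
print("%s\t%s" % (attname, getattr(header, attname)))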
1117f6e2d51ed6faf37aa2a6deab8a6ff8fa0e5b
|
test for compiling simple command
|
bwhmather/python-linemode
|
linemode/tests/test_command_list_printer.py
|
linemode/tests/test_command_list_printer.py
|
import unittest
from linemode.drivers.command_list import compile
class TestCommandListPrinter(unittest.TestCase):
def test_simple_command(self):
program = compile([
"reset"
])
self.assertEqual(program, b'reset')
|
import unittest
from linemode.drivers.command_list import CommandListPrinter
class TestCommandListPrinter(unittest.TestCase):
pass
|
bsd-3-clause
|
Python
|
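Note that command_list exports a function named compile, which shadows the builtin inside any module importing it unqualified; aliasing at import time, as sketched below, keeps both usable:
from linemode.drivers.command_list import compile as compile_commands
program = compile_commands(["reset"])
assert program == b"reset"
assert compile("1", "<s>", "eval")  # builtin compile still reachable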
3f0b19d153360ee5cf1fda1acfa0e4ad846b6c86
|
fix admin.py, remove site on AccountAccess and add it on Provider
|
iXioN/django-all-access,iXioN/django-all-access
|
allaccess/admin.py
|
allaccess/admin.py
|
from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
"Admin customization for OAuth providers."
list_display = ('name', 'enabled', 'site',)
list_filter = ('name', 'enabled', 'site', )
class AccountAccessAdmin(admin.ModelAdmin):
"Admin customization for accounts."
list_display = (
'__str__', 'provider', 'user', 'created', 'modified',)
list_filter = ('provider', 'created', 'modified', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
|
from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
"Admin customization for OAuth providers."
list_display = ('name', 'enabled', )
class AccountAccessAdmin(admin.ModelAdmin):
"Admin customization for accounts."
list_display = (
'__str__', 'provider', 'user', 'created', 'modified', 'site',)
list_filter = ('provider', 'created', 'modified', 'site', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
|
bsd-2-clause
|
Python
|
ee6d4f50b4a27e9cc8c3b5f8a821a6d9c0cf4f21
|
remove unwanted changes
|
StrellaGroup/frappe,almeidapaulopt/frappe,yashodhank/frappe,StrellaGroup/frappe,mhbu50/frappe,mhbu50/frappe,yashodhank/frappe,frappe/frappe,almeidapaulopt/frappe,yashodhank/frappe,frappe/frappe,yashodhank/frappe,mhbu50/frappe,StrellaGroup/frappe,mhbu50/frappe,almeidapaulopt/frappe,almeidapaulopt/frappe,frappe/frappe
|
frappe/website/page_renderers/document_page.py
|
frappe/website/page_renderers/document_page.py
|
import frappe
from frappe.model.document import get_controller
from frappe.website.page_renderers.base_template_page import BaseTemplatePage
from frappe.website.utils import cache_html
from frappe.website.router import (get_doctypes_with_web_view,
get_page_info_from_web_page_with_dynamic_routes)
class DocumentPage(BaseTemplatePage):
def can_render(self):
'''
Find a document with matching `route` from all doctypes with `has_web_view`=1
'''
if self.search_in_doctypes_with_web_view():
return True
if self.search_web_page_dynamic_routes():
return True
return False
def search_in_doctypes_with_web_view(self):
for doctype in get_doctypes_with_web_view():
filters = dict(route=self.path)
meta = frappe.get_meta(doctype)
condition_field = self.get_condition_field(meta)
if condition_field:
filters[condition_field] = 1
try:
self.docname = frappe.db.get_value(doctype, filters, 'name')
if self.docname:
self.doctype = doctype
return True
except Exception as e:
if not frappe.db.is_missing_column(e):
raise e
def search_web_page_dynamic_routes(self):
d = get_page_info_from_web_page_with_dynamic_routes(self.path)
if d:
self.doctype = 'Web Page'
self.docname = d.name
return True
else:
return False
def render(self):
html = self.get_html()
html = self.add_csrf_token(html)
return self.build_response(html)
@cache_html
def get_html(self):
self.doc = frappe.get_doc(self.doctype, self.docname)
self.init_context()
self.update_context()
self.post_process_context()
html = frappe.get_template(self.template_path).render(self.context)
return html
def update_context(self):
self.context.doc = self.doc
self.context.update(self.context.doc.as_dict())
self.context.update(self.context.doc.get_page_info())
self.template_path = self.context.template or self.template_path
if not self.template_path:
self.template_path = self.context.doc.meta.get_web_template()
if hasattr(self.doc, "get_context"):
ret = self.doc.get_context(self.context)
if ret:
self.context.update(ret)
for prop in ("no_cache", "sitemap"):
if prop not in self.context:
self.context[prop] = getattr(self.doc, prop, False)
def get_condition_field(self, meta):
condition_field = None
if meta.is_published_field:
condition_field = meta.is_published_field
elif not meta.custom:
controller = get_controller(meta.name)
condition_field = controller.website.condition_field
return condition_field
|
import frappe
from frappe.model.document import get_controller
from frappe.website.page_renderers.base_template_page import BaseTemplatePage
from frappe.website.utils import build_response
from frappe.website.router import (get_doctypes_with_web_view,
get_page_info_from_web_page_with_dynamic_routes)
class DocumentPage(BaseTemplatePage):
def can_render(self):
'''
Find a document with matching `route` from all doctypes with `has_web_view`=1
'''
if self.search_in_doctypes_with_web_view():
return True
if self.search_web_page_dynamic_routes():
return True
return False
def search_in_doctypes_with_web_view(self):
for doctype in get_doctypes_with_web_view():
filters = dict(route=self.path)
meta = frappe.get_meta(doctype)
condition_field = self.get_condition_field(meta)
if condition_field:
filters[condition_field] = 1
try:
self.docname = frappe.db.get_value(doctype, filters, 'name')
if self.docname:
self.doctype = doctype
return True
except Exception as e:
if not frappe.db.is_missing_column(e):
raise e
def search_web_page_dynamic_routes(self):
d = get_page_info_from_web_page_with_dynamic_routes(self.path)
if d:
self.doctype = 'Web Page'
self.docname = d.name
return True
else:
return False
def render(self):
self.doc = frappe.get_doc(self.doctype, self.docname)
self.init_context()
self.update_context()
self.post_process_context()
html = frappe.get_template(self.template_path).render(self.context)
html = self.add_csrf_token(html)
return build_response(self.path, html, self.http_status_code or 200, self.headers)
def update_context(self):
self.context.doc = self.doc
self.context.update(self.context.doc.as_dict())
self.context.update(self.context.doc.get_page_info())
self.template_path = self.context.template or self.template_path
if not self.template_path:
self.template_path = self.context.doc.meta.get_web_template()
if hasattr(self.doc, "get_context"):
ret = self.doc.get_context(self.context)
if ret:
self.context.update(ret)
for prop in ("no_cache", "sitemap"):
if prop not in self.context:
self.context[prop] = getattr(self.doc, prop, False)
def get_condition_field(self, meta):
condition_field = None
if meta.is_published_field:
condition_field = meta.is_published_field
elif not meta.custom:
controller = get_controller(meta.name)
condition_field = controller.website.condition_field
return condition_field
|
mit
|
Python
|
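The refactor above splits render() so that only the HTML-producing step is wrapped by cache_html, while the per-request CSRF token is injected afterwards. A rough, hypothetical sketch of what such a decorator could look like (frappe's real cache_html additionally keys on site and route and honours no_cache):
import functools
def cache_html(func):
    _cache = {}
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        key = (self.doctype, self.docname)
        if key not in _cache:
            _cache[key] = func(self, *args, **kwargs)
        return _cache[key]
    return wrapper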
d80878788ddcc1443c54c11b923da23bc295b496
|
Fix wget -q flag
|
freeekanayaka/charmfixture,freeekanayaka/charm-test
|
charmtest/network.py
|
charmtest/network.py
|
import io
import argparse
class Wget(object):
name = "wget"
def __init__(self, network):
self._network = network
def __call__(self, proc_args):
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-O", dest="output")
parser.add_argument("-q", dest="quiet", action="store_true")
args = parser.parse_args(proc_args["args"][1:])
content = self._network[args.url]
result = {}
if args.output == "-":
result["stdout"] = io.BytesIO(content)
else:
with open(args.output, "wb") as fd:
fd.write(content)
return result
|
import io
import argparse
class Wget(object):
name = "wget"
def __init__(self, network):
self._network = network
def __call__(self, proc_args):
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-O", dest="output")
parser.add_argument("-q", dest="quite")
args = parser.parse_args(proc_args["args"][1:])
content = self._network[args.url]
result = {}
if args.output == "-":
result["stdout"] = io.BytesIO(content)
else:
with open(args.output, "wb") as fd:
fd.write(content)
return result
|
agpl-3.0
|
Python
|
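The bug is visible in isolation: without action="store_true", argparse treats -q as an option that consumes the next token, so wget -q <url> would swallow the URL. A minimal sketch:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("url")
parser.add_argument("-q", dest="quiet", action="store_true")
args = parser.parse_args(["-q", "http://example.com"])
assert args.quiet is True              # -q is now a boolean flag
assert args.url == "http://example.com"  # the URL is no longer consumed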
730e765822932b5b0b00832c41140f39a9ae8d11
|
Bump version
|
thombashi/DateTimeRange
|
datetimerange/__version__.py
|
datetimerange/__version__.py
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.3.6"
__maintainer__ = __author__
__email__ = "[email protected]"
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.3.5"
__maintainer__ = __author__
__email__ = "[email protected]"
|
mit
|
Python
|
55f3e0e222246bfbc9c1a19f68b06941bac6cd70
|
Add an option to include spaces on random string generator
|
magnet-cl/django-project-template-py3,Angoreher/xcero,magnet-cl/django-project-template-py3,Angoreher/xcero,magnet-cl/django-project-template-py3,Angoreher/xcero,Angoreher/xcero,magnet-cl/django-project-template-py3
|
base/utils.py
|
base/utils.py
|
""" Small methods for generic use """
# standard library
import itertools
import random
import re
import string
import unicodedata
# django
from django.utils import timezone
def today():
"""
This method obtains today's date in local time
"""
return timezone.localtime(timezone.now()).date()
# BROKEN
def grouper(iterable, n):
args = [iter(iterable)] * n
return ([e for e in t if e is not None] for t in itertools.izip_longest(
*args
))
def format_rut(rut):
if not rut:
return ''
rut = rut.replace(' ', '').replace('.', '').replace('-', '')
rut = rut[:9]
if not rut:
return ''
verifier = rut[-1]
code = rut[0:-1][::-1]
code = re.sub("(.{3})", "\\1.", code, 0, re.DOTALL)
code = code[::-1]
return '%s-%s' % (code, verifier)
def camel_to_underscore(string):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def underscore_to_camel(word):
return ''.join(x.capitalize() or '_' for x in word.split('_'))
def strip_accents(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# BROKEN
def tz_datetime(s, *args, **kwargs):
"""
Creates a datetime.datetime object but with the current timezone
"""
tz = timezone.get_current_timezone()
naive_dt = timezone.datetime(*args, **kwargs)
return timezone.make_aware(naive_dt, tz)
def random_string(length=6, chars=None, include_spaces=True):
if chars is None:
chars = string.ascii_uppercase + string.digits
if include_spaces:
chars += ' '
return ''.join(random.choice(chars) for x in range(length))
|
""" Small methods for generic use """
# standard library
import itertools
import random
import re
import string
import unicodedata
# django
from django.utils import timezone
def today():
"""
This method obtains today's date in local time
"""
return timezone.localtime(timezone.now()).date()
# BROKEN
def grouper(iterable, n):
args = [iter(iterable)] * n
return ([e for e in t if e is not None] for t in itertools.izip_longest(
*args
))
def format_rut(rut):
if not rut:
return ''
rut = rut.replace(' ', '').replace('.', '').replace('-', '')
rut = rut[:9]
if not rut:
return ''
verifier = rut[-1]
code = rut[0:-1][::-1]
code = re.sub("(.{3})", "\\1.", code, 0, re.DOTALL)
code = code[::-1]
return '%s-%s' % (code, verifier)
def camel_to_underscore(string):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def underscore_to_camel(word):
return ''.join(x.capitalize() or '_' for x in word.split('_'))
def strip_accents(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# BROKEN
def tz_datetime(s, *args, **kwargs):
"""
Creates a datetime.datetime object but with the current timezone
"""
tz = timezone.get_current_timezone()
naive_dt = timezone.datetime(*args, **kwargs)
return timezone.make_aware(naive_dt, tz)
def random_string(length=6, chars=None):
if chars is None:
chars = string.ascii_uppercase + string.digits
return ''.join(random.choice(chars) for x in range(length))
|
mit
|
Python
|
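A short sketch of the effect of the new parameter; note that defaulting include_spaces to True changes the function's existing behaviour, so callers wanting the old alphabet must now opt out. The equivalent choice in isolation:
import random
import string
chars = string.ascii_uppercase + string.digits
token = ''.join(random.choice(chars) for _ in range(6))        # old default
padded = ''.join(random.choice(chars + ' ') for _ in range(6))  # new default
print('%s / %s' % (token, padded))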
5496bd29c4262c252367d7b305d2a78fd1ad2fa7
|
move debug call
|
smnorris/bcdata,smnorris/bcdata
|
bcdata/wcs.py
|
bcdata/wcs.py
|
import logging
import requests
import bcdata
log = logging.getLogger(__name__)
def get_dem(
bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25, interpolation=None
):
"""Get TRIM DEM for provided bounds, write to GeoTIFF.
"""
bbox = ",".join([str(b) for b in bounds])
# do not upsample
if resolution < 25:
raise ValueError("Resolution requested must be 25m or greater")
# if specifying interpolation method, there has to actually be a
# resampling requested - resolution can't be the native 25m
if interpolation and resolution == 25:
raise ValueError("Requested coverage at native resolution, no resampling required, interpolation {} invalid")
# if downsampling, default to bilinear (the server defaults to nearest)
if resolution > 25 and not interpolation:
log.info("Interpolation not specified, defaulting to bilinear")
interpolation = "bilinear"
# make sure interpolation is valid
if interpolation:
valid_interpolations = ["nearest", "bilinear", "bicubic"]
if interpolation not in valid_interpolations:
raise ValueError("Interpolation {} invalid. Valid keys are: {}".format(interpolation, ",".join(valid_interpolations)))
# build request
payload = {
"service": "WCS",
"version": "1.0.0",
"request": "GetCoverage",
"coverage": "pub:bc_elevation_25m_bcalb",
"Format": "GeoTIFF",
"bbox": bbox,
"CRS": src_crs,
"RESPONSE_CRS": dst_crs,
"resx": str(resolution),
"resy": str(resolution),
}
if interpolation:
payload["INTERPOLATION"] = interpolation
# request data from WCS
r = requests.get(bcdata.WCS_URL, params=payload)
# save to tiff
if r.status_code == 200:
with open(out_file, "wb") as file:
file.write(r.content)
return out_file
else:
raise RuntimeError(
"WCS request {} failed with status code {}".format(r.url, str(r.status_code))
)
|
import logging
import requests
import bcdata
log = logging.getLogger(__name__)
def get_dem(
bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25, interpolation=None
):
"""Get TRIM DEM for provided bounds, write to GeoTIFF.
"""
bbox = ",".join([str(b) for b in bounds])
# do not upsample
if resolution < 25:
raise ValueError("Resolution requested must be 25m or greater")
# if specifying interpolation method, there has to actually be a
# resampling requested - resolution can't be the native 25m
if interpolation and resolution == 25:
raise ValueError("Requested coverage at native resolution, no resampling required, interpolation {} invalid")
# if downsampling, default to bilinear (the server defaults to nearest)
if resolution > 25 and not interpolation:
log.info("Interpolation not specified, defaulting to bilinear")
interpolation = "bilinear"
# make sure interpolation is valid
if interpolation:
valid_interpolations = ["nearest", "bilinear", "bicubic"]
if interpolation not in valid_interpolations:
raise ValueError("Interpolation {} invalid. Valid keys are: {}".format(interpolation, ",".join(valid_interpolations)))
# build request
payload = {
"service": "WCS",
"version": "1.0.0",
"request": "GetCoverage",
"coverage": "pub:bc_elevation_25m_bcalb",
"Format": "GeoTIFF",
"bbox": bbox,
"CRS": src_crs,
"RESPONSE_CRS": dst_crs,
"resx": str(resolution),
"resy": str(resolution),
}
if interpolation:
payload["INTERPOLATION"] = interpolation
# request data from WCS
r = requests.get(bcdata.WCS_URL, params=payload)
log.debug(r.url)
# save to tiff
if r.status_code == 200:
with open(out_file, "wb") as file:
file.write(r.content)
return out_file
else:
raise RuntimeError(
"WCS request failed with status code {}".format(str(r.status_code))
)
|
mit
|
Python
|
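The change drops the separate log.debug(r.url) line in favour of embedding the failing URL in the exception itself, so the diagnostic survives even when debug logging is off. A minimal sketch of the pattern:
import requests
r = requests.get("https://example.com/wcs", params={"service": "WCS"})
if r.status_code != 200:
    # The exception now carries the exact request that failed.
    raise RuntimeError(
        "WCS request {} failed with status code {}".format(r.url, r.status_code))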
c4ad9519c117edfdc59f229380fa0797bc6bfffa
|
Update BitshareComFolder.py
|
vuolter/pyload,vuolter/pyload,vuolter/pyload
|
module/plugins/crypter/BitshareComFolder.py
|
module/plugins/crypter/BitshareComFolder.py
|
# -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
class BitshareComFolder(SimpleCrypter):
__name__ = "BitshareComFolder"
__type__ = "crypter"
__version__ = "0.04"
__pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """Bitshare.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "[email protected]")]
LINK_PATTERN = r'<a href="(http://bitshare\.com/files/.+)">.+</a></td>'
NAME_PATTERN = r'View public folder "(?P<N>.+?)"</h1>'
getInfo = create_getInfo(BitshareComFolder)
|
# -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo
class BitshareComFolder(SimpleCrypter):
__name__ = "BitshareComFolder"
__type__ = "crypter"
__version__ = "0.03"
__pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description__ = """Bitshare.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "[email protected]")]
LINK_PATTERN = r'<a href="(http://bitshare\.com/files/.+)">.+</a></td>'
NAME_PATTERN = r'View public folder "(?P<N>.+)"</h1>'
getInfo = create_getInfo(BitshareComFolder)
|
agpl-3.0
|
Python
|
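The NAME_PATTERN change from .+ to .+? matters when the line contains more than one quote: greedy matching runs to the last quote, lazy matching stops at the first. Illustrated:
import re
html = 'View public folder "My Files" rated "ok"</h1>'
greedy = re.search(r'"(?P<N>.+)"', html).group('N')
lazy = re.search(r'"(?P<N>.+?)"', html).group('N')
assert greedy == 'My Files" rated "ok'
assert lazy == 'My Files'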
326f0b881d36ed19d0a37495ae34fc24fc1eb707
|
Load the spotify header file from an absolute path
|
Fornoth/spotify-connect-web,Fornoth/spotify-connect-web,Fornoth/spotify-connect-web,Fornoth/spotify-connect-web,Fornoth/spotify-connect-web
|
connect_ffi.py
|
connect_ffi.py
|
import os
import sys
from cffi import FFI
ffi = FFI()
print "Loading Spotify library..."
#TODO: Use absolute paths for open() and stuff
#Header generated with cpp spotify.h > spotify.processed.h && sed -i 's/__extension__//g' spotify.processed.h
with open(os.path.join(sys.path[0], "spotify.processed.h")) as file:
header = file.read()
ffi.cdef(header)
ffi.cdef("""
void *malloc(size_t size);
void exit(int status);
""")
C = ffi.dlopen(None)
lib = ffi.verify("""
#include "spotify.h"
""", include_dirs=['./'],
library_dirs=['./'],
libraries=[str('spotify_embedded_shared')])
|
from cffi import FFI
ffi = FFI()
print "Loading Spotify library..."
#TODO: Use absolute paths for open() and stuff
#Header generated with cpp spotify.h > spotify.processed.h && sed -i 's/__extension__//g' spotify.processed.h
with open("spotify.processed.h") as file:
header = file.read()
ffi.cdef(header)
ffi.cdef("""
void *malloc(size_t size);
void exit(int status);
""")
C = ffi.dlopen(None)
lib = ffi.verify("""
#include "spotify.h"
""", include_dirs=['./'],
library_dirs=['./'],
libraries=[str('spotify_embedded_shared')])
|
mit
|
Python
|
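The fix works because, when run as a script, sys.path[0] is the directory containing the invoked file, so the header resolves correctly regardless of the caller's working directory. In isolation:
import os
import sys
# Resolves relative to the script's own directory, not the CWD.
header_path = os.path.join(sys.path[0], "spotify.processed.h")
with open(header_path) as f:
    header = f.read()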
278dcb8b2fb3e1f69434ec9c41e566501cdc50bd
|
Remove unused functionality
|
bennylope/django-organizations,bennylope/django-organizations
|
organizations/backends/forms.py
|
organizations/backends/forms.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
class UserRegistrationForm(UserCreationForm):
"""
Form class for completing a user's registration and activating the
User.
The class operates on a user model which is assumed to have the required
fields of a BaseUserModel
"""
# TODO(bennylope): Remove this entirely and replace with base class
def org_registration_form(org_model):
"""
Generates a registration ModelForm for the given organization model class
"""
class OrganizationRegistrationForm(forms.ModelForm):
"""Form class for creating new organizations owned by new users."""
email = forms.EmailField()
class Meta:
model = org_model
exclude = ("is_active", "users")
# def save(self, *args, **kwargs):
# self.instance.is_active = False
# super().save(*args, **kwargs)
return OrganizationRegistrationForm
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019, Ben Lopatin and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
class UserRegistrationForm(forms.ModelForm):
"""
Form class for completing a user's registration and activating the
User.
The class operates on a user model which is assumed to have the required
fields of a BaseUserModel
"""
# TODO decouple first/last names from this form
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
password = forms.CharField(max_length=30, widget=forms.PasswordInput)
password_confirm = forms.CharField(max_length=30, widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.initial["username"] = ""
def clean(self):
password = self.cleaned_data.get("password")
password_confirm = self.cleaned_data.get("password_confirm")
if password != password_confirm or not password:
raise forms.ValidationError(_("Your password entries must match"))
return super().clean()
class Meta:
model = get_user_model()
exclude = (
"is_staff",
"is_superuser",
"is_active",
"last_login",
"date_joined",
"groups",
"user_permissions",
)
def org_registration_form(org_model):
"""
Generates a registration ModelForm for the given organization model class
"""
class OrganizationRegistrationForm(forms.ModelForm):
"""Form class for creating new organizations owned by new users."""
email = forms.EmailField()
class Meta:
model = org_model
exclude = ("is_active", "users")
def save(self, *args, **kwargs):
self.instance.is_active = False
super().save(*args, **kwargs)
return OrganizationRegistrationForm
|
bsd-2-clause
|
Python
|
331fb50e6a4dcef99c8a6806d3efd7531859542f
|
add comments
|
kaduuuken/achievementsystem,kaduuuken/achievementsystem
|
achievements/templatetags/achievement_tags.py
|
achievements/templatetags/achievement_tags.py
|
from django import template
from achievements.models import Category, Trophy
from achievements import settings
register = template.Library()
# call single_category.html with the given parameters
@register.inclusion_tag('achievements/single_category.html')
def render_category(category, user):
return {
'category': category,
'percentage': category.get_complete_percentage(user),
'completed_achievements': category.count_all_complete_achievements(user)
}
# call navigation.html with the given parameters
@register.inclusion_tag('achievements/navigation.html')
def render_navigation(current_category=None):
return {
'categories': Category.objects.filter(parent_category__isnull=True),
'current_category': current_category,
}
# call trophies.html with the given parameters
@register.inclusion_tag('achievements/trophies.html')
def render_trophies(user, takes_context=True):
trophies = [None] * settings.TROPHY_COUNT
# put trophy on the given position in an array
for trophy in Trophy.objects.filter(user=user):
trophies[trophy.position] = trophy
return {'trophies': trophies}
# check type of achievement and return the accordingly render function
@register.simple_tag
def render_subachievement(user, achievement):
if hasattr(achievement, 'progressachievement'):
return achievement.progressachievement.render(user)
if hasattr(achievement, 'taskachievement'):
return achievement.taskachievement.render(user)
if hasattr(achievement, 'collectionachievement'):
return achievement.collectionachievement.render(user)
else:
return ""
|
from django import template
from achievements.models import Category, Trophy
from achievements import settings
register = template.Library()
@register.inclusion_tag('achievements/single_category.html')
def render_category(category, user):
return {
'category': category,
'percentage': category.get_complete_percentage(user),
'completed_achievements': category.count_all_complete_achievements(user)
}
@register.inclusion_tag('achievements/navigation.html')
def render_navigation(current_category=None):
return {
'categories': Category.objects.filter(parent_category__isnull=True),
'current_category': current_category,
}
@register.inclusion_tag('achievements/trophies.html')
def render_trophies(user, takes_context=True):
trophies = [None] * settings.TROPHY_COUNT
for trophy in Trophy.objects.filter(user=user):
trophies[trophy.position] = trophy
return {'trophies': trophies}
@register.simple_tag
def render_subachievement(user, achievement):
if hasattr(achievement, 'progressachievement'):
return achievement.progressachievement.render(user)
if hasattr(achievement, 'taskachievement'):
return achievement.taskachievement.render(user)
if hasattr(achievement, 'collectionachievement'):
return achievement.collectionachievement.render(user)
|
bsd-2-clause
|
Python
|
66fc121dbe0dbb7a69a62bfdaf98838a4f7a0bf3
|
Update yeti.py
|
VirusTotal/misp-modules,MISP/misp-modules,VirusTotal/misp-modules,MISP/misp-modules,MISP/misp-modules,VirusTotal/misp-modules
|
misp_modules/modules/expansion/yeti.py
|
misp_modules/modules/expansion/yeti.py
|
import json
try:
import pyeti
except ImportError:
print("pyeti module not installed.")
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'],
'output': ['hostname', 'domain', 'ip-src', 'ip-dst', 'url']}
# possible module-types: 'expansion', 'hover' or both
moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven',
'description': 'Query on yeti',
'module-type': ['expansion', 'hover']}
moduleconfig = ['apikey', 'url']
class Yeti:
def __init__(self, url, key):
self.api = pyeti.YetiApi(url, api_key=key)
self.dict = {'Ip': 'ip-src', 'Domain': 'domain', 'Hostname': 'hostname'}
def search(self, value):
obs = self.api.observable_search(value=value)
if obs:
return obs
def handler(q=False):
if q is False:
return False
request = json.loads(q)
attribute = request['attribute']
|
import json
try:
import pyeti
except ImportError:
print("pyeti module not installed.")
misperrors = {'error': 'Error'}
mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'],
'output': ['hostname', 'domain', 'ip-src', 'ip-dst', 'url']}
# possible module-types: 'expansion', 'hover' or both
moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven',
'description': 'Query on yeti',
'module-type': ['expansion', 'hover']}
|
agpl-3.0
|
Python
|
c3dffef7869c0ce19801d78393a336b6b6ecbce7
|
stop littering /tmp with temporary resource files
|
gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl,gopro/gopro-lib-node.gl
|
pynodegl-utils/pynodegl_utils/tests/cmp_resources.py
|
pynodegl-utils/pynodegl_utils/tests/cmp_resources.py
|
#!/usr/bin/env python
#
# Copyright 2020 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import csv
import tempfile
from .cmp import CompareSceneBase, get_test_decorator
_COLS = (
'Textures memory',
'Buffers count',
'Buffers total',
'Blocks count',
'Blocks total',
'Medias count',
'Medias total',
'Textures count',
'Textures total',
'Computes',
'GraphicCfgs',
'Renders',
'RTTs',
)
class _CompareResources(CompareSceneBase):
def __init__(self, scene_func, columns=_COLS, **kwargs):
super().__init__(scene_func, width=320, height=240, **kwargs)
# We can't use NamedTemporaryFile because we may not be able to open it
# twice on some systems
fd, self._csvfile = tempfile.mkstemp(suffix='.csv', prefix='ngl-test-resources-')
os.close(fd)
self._columns = columns
self._hud = 1
self._hud_export_filename = self._csvfile
def get_out_data(self, debug=False, debug_func=None):
for frame in self.render_frames():
pass
# filter columns
with open(self._csvfile) as csvfile:
reader = csv.DictReader(csvfile)
data = [self._columns]
for row in reader:
data.append([row[k] for k in self._columns])
# rely on base string diff
ret = ''
for row in data:
ret += ','.join(row) + '\n'
return ret
def __del__(self):
os.remove(self._csvfile)
test_resources = get_test_decorator(_CompareResources)
|
#!/usr/bin/env python
#
# Copyright 2020 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import csv
import tempfile
from .cmp import CompareSceneBase, get_test_decorator
_COLS = (
'Textures memory',
'Buffers count',
'Buffers total',
'Blocks count',
'Blocks total',
'Medias count',
'Medias total',
'Textures count',
'Textures total',
'Computes',
'GraphicCfgs',
'Renders',
'RTTs',
)
class _CompareResources(CompareSceneBase):
def __init__(self, scene_func, columns=_COLS, **kwargs):
super().__init__(scene_func, width=320, height=240, **kwargs)
# We can't use NamedTemporaryFile because we may not be able to open it
# twice on some systems
fd, self._csvfile = tempfile.mkstemp(suffix='.csv', prefix='ngl-test-resources-')
os.close(fd)
self._columns = columns
self._hud = 1
self._hud_export_filename = self._csvfile
def get_out_data(self, debug=False, debug_func=None):
for frame in self.render_frames():
pass
# filter columns
with open(self._csvfile) as csvfile:
reader = csv.DictReader(csvfile)
data = [self._columns]
for row in reader:
data.append([row[k] for k in self._columns])
# rely on base string diff
ret = ''
for row in data:
ret += ','.join(row) + '\n'
os.remove(self._csvfile)
return ret
test_resources = get_test_decorator(_CompareResources)
|
apache-2.0
|
Python
|
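The commit pairs tempfile.mkstemp() with an os.remove() in __del__. Relying on __del__ runs cleanup at an unspecified time; a try/finally (or addCleanup in tests) is the more deterministic shape, sketched here:
import os
import tempfile
fd, path = tempfile.mkstemp(suffix=".csv", prefix="ngl-test-resources-")
os.close(fd)  # keep only the filename; the consumer reopens it by path
try:
    with open(path, "w") as f:
        f.write("Renders,RTTs\n3,1\n")
finally:
    os.remove(path)  # cleanup happens even if processing raises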
deccf656db39ac949f93e562e4f41a32589feb9b
|
Use a more complex and extendable check for shortcuts in StructuredText
|
CybOXProject/python-cybox
|
cybox/common/structured_text.py
|
cybox/common/structured_text.py
|
# Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.cybox_common as common_binding
class StructuredText(cybox.Entity):
_binding = common_binding
_namespace = 'http://cybox.mitre.org/common-2'
def __init__(self, value=None):
self.value = value
self.structuring_format = None
def to_obj(self, structured_text_obj=None):
if not structured_text_obj:
text_obj = common_binding.StructuredTextType()
else:
text_obj = structured_text_obj
text_obj.set_valueOf_(self.value)
if self.structuring_format is not None:
text_obj.set_structuring_format(self.structuring_format)
return text_obj
def to_dict(self):
# Shortcut if structuring_format is not defined.
if self.is_plain():
return self.value
text_dict = {}
text_dict['value'] = self.value
text_dict['structuring_format'] = self.structuring_format
return text_dict
def is_plain(self):
"""Whether this can be represented as a string rather than a dictionary
Subclasses can override this to include their custom fields in this
check:
return (super(..., self).is_plain() and self.other_field is None)
"""
return (self.structuring_format is None)
@classmethod
def from_obj(cls, text_obj, text_class=None):
if not text_obj:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
text.value = text_obj.get_valueOf_()
text.structuring_format = text_obj.get_structuring_format()
return text
@classmethod
def from_dict(cls, text_dict, text_class=None):
if text_dict is None:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
if not isinstance(text_dict, dict):
text.value = text_dict
else:
text.value = text_dict.get('value')
text.structuring_format = text_dict.get('structuring_format')
return text
|
# Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.cybox_common as common_binding
class StructuredText(cybox.Entity):
_binding = common_binding
_namespace = 'http://cybox.mitre.org/common-2'
def __init__(self, value=None):
self.value = value
self.structuring_format = None
def to_obj(self, structured_text_obj=None):
if not structured_text_obj:
text_obj = common_binding.StructuredTextType()
else:
text_obj = structured_text_obj
text_obj.set_valueOf_(self.value)
if self.structuring_format is not None:
text_obj.set_structuring_format(self.structuring_format)
return text_obj
def to_dict(self):
text_dict = {}
text_dict['value'] = self.value
text_dict['structuring_format'] = self.structuring_format
return text_dict
@classmethod
def from_obj(cls, text_obj, text_class=None):
if not text_obj:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
text.value = text_obj.get_valueOf_()
text.structuring_format = text_obj.get_structuring_format()
return text
@classmethod
def from_dict(cls, text_dict, text_class=None):
if text_dict is None:
return None
if not text_class:
text = StructuredText()
else:
text = text_class
if not isinstance(text_dict, dict):
text.value = text_dict
else:
text.value = text_dict.get('value')
text.structuring_format = text_dict.get('structuring_format')
return text
|
bsd-3-clause
|
Python
|
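The docstring's extension hook in action, building on the StructuredText class above: a hypothetical subclass with an extra field folds its own check into the parent's, so to_dict() keeps collapsing to a bare string only when every field is unset.
class LocalizedText(StructuredText):
    def __init__(self, value=None, lang=None):
        super(LocalizedText, self).__init__(value)
        self.lang = lang  # hypothetical extra field
    def is_plain(self):
        # Combine the parent's check with this class's own field.
        return super(LocalizedText, self).is_plain() and self.lang is None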
1eae87ee4435b4dda35d64295de13756394dbce9
|
Add GET to 'Allow-Methods' by default. Fixes #12
|
karan/HNify
|
crossdomain.py
|
crossdomain.py
|
#!/usr/bin/env python
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=['GET'], headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
|
#!/usr/bin/env python
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
|
mit
|
Python
|
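A hedged usage sketch with the decorator defined above: thanks to the new default, a plain @crossdomain(origin='*') now advertises GET without listing methods explicitly (the route and view names here are hypothetical):
from flask import Flask
app = Flask(__name__)
@app.route('/top')
@crossdomain(origin='*')  # Access-Control-Allow-Methods defaults to GET
def top_stories():
    return '{"stories": []}'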
9ec25b6a5f8400b68c51ce9c5667c8c0c1648521
|
Remove unneeded catch
|
davidmogar/cucco,davidmogar/cucco,davidmogar/normalizr
|
cucco/regex.py
|
cucco/regex.py
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
"""
Regular expression to match URLs as seen on http://daringfireball.net/2010/07/improved_regex_for_matching_urls
"""
URL_REGEX = re.compile(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
re.IGNORECASE)
"""
Regular expression to match email addresses as seen on http://www.wellho.net/resources/ex.php4?item=y115/relib.py
"""
EMAIL_REGEX = re.compile(r"[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}", re.IGNORECASE)
EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001F300-\U0001F64F])|([\U0001F680-\U0001F6FF])')
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
"""
Regular expression to match URLs as seen on http://daringfireball.net/2010/07/improved_regex_for_matching_urls
"""
URL_REGEX = re.compile(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
re.IGNORECASE)
"""
Regular expression to match email addresses as seen on http://www.wellho.net/resources/ex.php4?item=y115/relib.py
"""
EMAIL_REGEX = re.compile(r"[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}", re.IGNORECASE)
try:
EMOJI_REGEX = re.compile(u'([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF])')
except re.error:
EMOJI_REGEX = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])')
|
mit
|
Python
|
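The removed fallback targeted narrow Unicode builds, where non-BMP characters were stored as surrogate pairs; on wide builds (and all of Python 3.3+) the \U ranges compile directly, so a single pattern suffices:
import re
EMOJI_REGEX = re.compile(
    u'([\U00002600-\U000027BF])|'
    u'([\U0001F300-\U0001F64F])|'
    u'([\U0001F680-\U0001F6FF])')
assert EMOJI_REGEX.search(u'launch \U0001F680')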
4a83439926181f26e4656d2a2b78021209d3b629
|
fix the dropout to 0.2 because that is what they use
|
snurkabill/pydeeplearn,Warvito/pydeeplearn,mihaelacr/pydeeplearn,Warvito/pydeeplearn,snurkabill/pydeeplearn
|
code/nolearntrail.py
|
code/nolearntrail.py
|
from nolearn.dbn import DBN
from readfacedatabases import *
from sklearn import cross_validation
from sklearn.metrics import zero_one_score
from sklearn.metrics import classification_report
import argparse
import numpy as np
from common import *
parser = argparse.ArgumentParser(description='nolearn test')
parser.add_argument('--equalize',dest='equalize',action='store_true', default=False,
help="if true, the input images are equalized before being fed into the net")
parser.add_argument('--maxEpochs', type=int, default=1000,
help='the maximum number of supervised epochs')
args = parser.parse_args()
def KanadeClassifier():
clf = DBN(
[1200, 1500, 1500, 1500, 7],
learn_rates=0.01,
learn_rates_pretrain=0.05,
learn_rate_decays=0.9,
use_re_lu=True,
nesterov=True,
momentum=0.95,
dropouts=[0.2, 0.5, 0.5, 0.5],
real_valued_vis=True,
minibatch_size=20,
epochs=args.maxEpochs,
verbose=False)
data, labels = readKanade(False, None, equalize=args.equalize)
data = scale(data)
data, labels = shuffle(data, labels)
labels = np.argmax(labels, axis=1)
# Split data for training and testing
kf = cross_validation.KFold(n=len(data), n_folds=5)
for train, test in kf:
break
trainData = data[train]
trainLabels = labels[train]
testData = data[test]
testLabels = labels[test]
clf.fit(trainData, trainLabels)
predictedLabels = clf.predict(testData)
print "testLabels"
print testLabels
print predictedLabels
print "Accuracy:", zero_one_score(testLabels, predictedLabels)
print "Classification report:"
print classification_report(testLabels, predictedLabels)
if __name__ == '__main__':
KanadeClassifier()
|
from nolearn.dbn import DBN
from readfacedatabases import *
from sklearn import cross_validation
from sklearn.metrics import zero_one_score
from sklearn.metrics import classification_report
import argparse
import numpy as np
from common import *
parser = argparse.ArgumentParser(description='nolearn test')
parser.add_argument('--equalize',dest='equalize',action='store_true', default=False,
help="if true, the input images are equalized before being fed into the net")
parser.add_argument('--maxEpochs', type=int, default=1000,
help='the maximum number of supervised epochs')
args = parser.parse_args()
def KanadeClassifier():
clf = DBN(
[1200, 1500, 1500, 1500, 7],
learn_rates=0.01,
learn_rates_pretrain=0.05,
learn_rate_decays=0.9,
use_re_lu=True,
nesterov=True,
momentum=0.95,
dropouts=[0.8, 0.5, 0.5, 0.5],
real_valued_vis=True,
minibatch_size=20,
epochs=args.maxEpochs,
verbose=False)
data, labels = readKanade(False, None, equalize=args.equalize)
data = scale(data)
data, labels = shuffle(data, labels)
labels = np.argmax(labels, axis=1)
# Split data for training and testing
kf = cross_validation.KFold(n=len(data), n_folds=5)
for train, test in kf:
break
trainData = data[train]
trainLabels = labels[train]
testData = data[test]
testLabels = labels[test]
clf.fit(trainData, trainLabels)
predictedLabels = clf.predict(testData)
print "testLabels"
print testLabels
print predictedLabels
print "Accuracy:", zero_one_score(testLabels, predictedLabels)
print "Classification report:"
print classification_report(testLabels, predictedLabels)
if __name__ == '__main__':
KanadeClassifier()
|
bsd-3-clause
|
Python
|
ddc571f32212a57f725101314878d17df9124bb8
|
fix loop range
|
TheReverend403/Pyper,TheReverend403/Pyper
|
commands/cmd_roll.py
|
commands/cmd_roll.py
|
import random
from lib.command import Command
class RollCommand(Command):
name = 'roll'
description = 'Roll some dice.'
def run(self, message, args):
if not args:
self.reply(message, 'No roll specification supplied. Try */roll 3d6*.', parse_mode='Markdown')
return
spec = ''.join(char for char in ''.join(args) if char.isdigit() or char == 'd')
dice_count, __, dice_size = spec.partition('d')
if not dice_count or not dice_size:
self.reply(message, 'Invalid roll specification. Example: */roll 3d6*', parse_mode='Markdown')
return
dice_count = int(''.join(char for char in dice_count if char.isdigit()))
dice_size = int(''.join(char for char in dice_size if char.isdigit()))
if dice_count < 1 or dice_count > 64 or dice_size < 4 or dice_size > 128:
self.reply(message, 'Invalid roll specification. Must be a minimum of *1d4* and a maximum of *64d128*',
parse_mode='Markdown')
return
rolls = [random.SystemRandom().randint(1, dice_size) for _ in range(dice_count)]
self.reply(message, '[{0}] = {1}'.format(', '.join(map(str, rolls)), sum(rolls)))
|
import random
from lib.command import Command
class RollCommand(Command):
name = 'roll'
description = 'Roll some dice.'
def run(self, message, args):
if not args:
self.reply(message, 'No roll specification supplied. Try */roll 3d6*.', parse_mode='Markdown')
return
spec = ''.join(char for char in ''.join(args) if char.isdigit() or char == 'd')
dice_count, __, dice_size = spec.partition('d')
if not dice_count or not dice_size:
self.reply(message, 'Invalid roll specification. Example: */roll 3d6*', parse_mode='Markdown')
return
dice_count = int(''.join(char for char in dice_count if char.isdigit()))
dice_size = int(''.join(char for char in dice_size if char.isdigit()))
if dice_count < 1 or dice_count > 64 or dice_size < 4 or dice_size > 128:
self.reply(message, 'Invalid roll specification. Must be a minimum of *1d4* and a maximum of *64d128*',
parse_mode='Markdown')
return
rolls = [random.SystemRandom().randint(1, dice_size) for _ in dice_count]
self.reply(message, '[{0}] = {1}'.format(', '.join(map(str, rolls)), sum(rolls)))
|
agpl-3.0
|
Python
|
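The one-line bug in isolation: iterating an int raises TypeError, while range() yields the intended indices.
dice_count = 3
rolls = [1 for _ in range(dice_count)]   # ok: three rolls
try:
    rolls = [1 for _ in dice_count]      # the old code path
except TypeError as e:
    print(e)  # 'int' object is not iterable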
fe06c3a839bdc13384250924a4a30d9dd3455fc7
|
fix archive resource unit test
|
pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated-project/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent
|
service/test/unit/resources/test_archive_resource.py
|
service/test/unit/resources/test_archive_resource.py
|
from twisted.trial import unittest
import json
from mockito import mock, when, verify
from test.unit.resources import DummySite
from twisted.web.test.requesthelper import DummyRequest
from pixelated.resources.mails_resource import MailsArchiveResource
from twisted.internet import defer
class TestArchiveResource(unittest.TestCase):
def setUp(self):
self.mail_service = mock()
self.web = DummySite(MailsArchiveResource(self.mail_service))
def test_render_POST_should_archive_mails(self):
request = DummyRequest(['/mails/archive'])
request.method = 'POST'
idents = ['1', '2']
content = mock()
when(content).read().thenReturn(json.dumps({'idents': ['1', '2']}))
d1 = defer.Deferred()
d1.callback(None)
when(self.mail_service).archive_mail('1').thenReturn(d1)
d2 = defer.Deferred()
d2.callback(None)
when(self.mail_service).archive_mail('2').thenReturn(d2)
request.content = content
d = self.web.get(request)
def assert_response(_):
verify(self.mail_service).archive_mail('1')
verify(self.mail_service).archive_mail('2')
d.addCallback(assert_response)
return d
|
import unittest
import json
from mockito import mock, when, verify
from test.unit.resources import DummySite
from twisted.web.test.requesthelper import DummyRequest
from pixelated.resources.mails_resource import MailsArchiveResource
from twisted.internet import defer
class TestArchiveResource(unittest.TestCase):
def setUp(self):
self.mail_service = mock()
self.web = DummySite(MailsArchiveResource(self.mail_service))
def test_render_POST_should_archive_mails(self):
request = DummyRequest(['/mails/archive'])
request.method = 'POST'
content = mock()
when(content).read().thenReturn(json.dumps({'idents': ['1', '2']}))
when(self.mail_service).archive_mail('1').thenReturn(defer.Deferred())
when(self.mail_service).archive_mail('2').thenReturn(defer.Deferred())
request.content = content
d = self.web.get(request)
def assert_response(_):
verify(self.mail_service).archive_mail('1')
verify(self.mail_service).archive_mail('2')
d.addCallback(assert_response)
return d
|
agpl-3.0
|
Python
|
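The key to the fix is handing the mock an already-fired Deferred: callbacks added to a fired Deferred run synchronously, so the verify() assertions see the archive calls without spinning the reactor. In isolation:
from twisted.internet import defer
d = defer.Deferred()
d.callback(None)  # fire before the code under test adds callbacks
ran = []
d.addCallback(lambda _: ran.append('archived'))
assert ran == ['archived']  # the callback ran immediately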
283dd9918bd16202bf799c470e8e5b50d2ef1cd6
|
Increment version number to 0.7.0
|
datajoint/datajoint-python,fabiansinz/datajoint-python,eywalker/datajoint-python,dimitri-yatsenko/datajoint-python
|
datajoint/version.py
|
datajoint/version.py
|
__version__ = "0.7.0"
|
__version__ = "0.6.1"
|
lgpl-2.1
|
Python
|
0f5fe279d6b4641b2a2741271da4f021238f00a1
|
fix import in generator
|
lolporer/anomaly_generator
|
dataset_generator.py
|
dataset_generator.py
|
import csv
import os
# execfile("C:\\Users\\YONI\\Documents\\Projects\\degree\\attack detection methods\\anomaly_generator\\dataset_generator.py")
ROW_NUM = 10
path = "C:\\Users\\YONI\\Documents\\anomally_detector\\data_sets\\example\\"
users_num = 100
features_num = 20
directory = "data_sets\\"
if not os.path.exists(directory):
os.makedirs(directory)
users = []
features = []
for i in range(0,users_num):
users.append('user'+str(i))
for i in range(0,features_num):
features.append('feature'+str(i))
for user in users:
with open("data_sets\\"+user+'.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=features)
writer.writeheader()
for i in range(1,ROW_NUM):
featDic = {}
for feature in features:
featDic[feature] = user + '_' + feature + '_' + str(i)
writer.writerow(featDic)
|
import csv
# execfile("C:\\Users\\YONI\\Documents\\Projects\\degree\\attack detection methods\\anomaly_generator\\dataset_generator.py")
ROW_NUM = 10
path = "C:\\Users\\YONI\\Documents\\anomally_detector\\data_sets\\example\\"
users_num = 100
features_num = 20
directory = "data_sets\\"
if not os.path.exists(directory):
os.makedirs(directory)
users = []
features = []
for i in range(0,users_num):
users.append('user'+str(i))
for i in range(0,features_num):
features.append('feature'+str(i))
for user in users:
with open("data_sets\\"+user+'.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=features)
writer.writeheader()
for i in range(1,ROW_NUM):
featDic = {}
for feature in features:
featDic[feature] = user + '_' + feature + '_' + str(i)
writer.writerow(featDic)
|
mit
|
Python
|
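The missing import aside, the exists-then-makedirs pair has a race between the check and the creation; on Python 3.2+ a single call covers both, as sketched:
import os
os.makedirs("data_sets", exist_ok=True)  # no prior existence check needed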
7ccb9cb0d6e3ce6e3c6c09604af5e2bbdfae63ae
|
update urls.py
|
Connexions/openstax-cms,Connexions/openstax-cms,openstax/openstax-cms,openstax/openstax-cms,openstax/openstax-cms,openstax/openstax-cms
|
openstax/urls.py
|
openstax/urls.py
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailimages import urls as wagtailimages_urls
from .api import api_router
from news.search import search
from api import urls as api_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^accounts/', include('accounts.urls')),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^images/', include(wagtailimages_urls)),
url(r'^api/mail/', include('mail.urls')),
url(r'^api/', include(wagtailapi_urls)),
url(r'^api/', include(api_urls)),
url(r'^api/search/$', search, name='search'),
url(r'^api/v2/', api_router.urls),
url(r'^api/pages/', include('pages.urls')),
url(r'^api/books/', include('books.urls')),
url(r'^api/news/', include('news.urls')),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(
url=settings.STATIC_URL + 'pages/images/favicon.ico'))
]
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailimages import urls as wagtailimages_urls
from .api import api_router
from .functions import S3DocumentServe
from news.search import search
from api import urls as api_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^accounts/', include('accounts.urls')),
url(r'^documents/(?P<document_id>\d+)/(.*)$', S3DocumentServe.as_view(), name='wagtaildocs_serve'),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^images/', include(wagtailimages_urls)),
url(r'^api/mail/', include('mail.urls')),
url(r'^api/v2/', api_router.urls),
url(r'^api/', include(wagtailapi_urls)),
url(r'^api/', include(api_urls)),
url(r'^api/search/$', search, name='search'),
url(r'^api/pages/', include('pages.urls')),
url(r'^api/books/', include('books.urls')),
url(r'^api/news/', include('news.urls')),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import RedirectView
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(
url=settings.STATIC_URL + 'pages/images/favicon.ico'))
]
|
agpl-3.0
|
Python
|
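Django resolves urlpatterns top to bottom, which is why the catch-all url(r'', include(wagtail_urls)) has to stay last in the list above. A toy first-match-wins resolver, independent of Django, sketches the rule:

import re

patterns = [
    (r'^api/search/$', 'search'),
    (r'^api/', 'api'),
    (r'', 'wagtail'),  # catch-all: any pattern placed after this is unreachable
]

def resolve(path):
    for regex, name in patterns:
        if re.match(regex, path):
            return name

print(resolve('api/search/'))  # 'search'
print(resolve('blog/'))        # 'wagtail'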
853135b61f34ece1363da9b53244e775a2ba16a8
|
Add docstring for convert_timezone()
|
ronrest/convenience_py,ronrest/convenience_py
|
datetime/datetime.py
|
datetime/datetime.py
|
import datetime
# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
""" Given a float timestamp it returns the date as a formatted string,
based on the date `pattern` specified """
return datetime.datetime.fromtimestamp(t).strftime(pattern)
# ==============================================================================
# CONVERT_TIMEZONE
# ==============================================================================
# import datetime
from dateutil import tz
def convert_timezone(time, a="UTC", b="local"):
""" Given a datetime object, in timezone a, it changes it to timezone b.
Args:
time: (datetime object)
a: (str) timezone code to set the from time as.
eg:
"UTC"
"Australia/Melbourne"
or..
"local"
b: (str) timezone to set the to time as.
"""
# TIMEZONE OBJECTS
tza = tz.tzlocal(a) if (a=="local") else tz.gettz(a)
tzb = tz.tzlocal(b) if (b=="local") else tz.gettz(b)
# FORMAT TIME WITH FROM TIMEZONE
time = time.replace(tzinfo=tza)
# CHANGE TIME ZONE
newtime = time.astimezone(tzb)
return newtime
|
import datetime
# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
""" Given a float timestamp it returns the date as a formatted string,
based on the date `pattern` specified """
return datetime.datetime.fromtimestamp(t).strftime(pattern)
# import datetime
from dateutil import tz
def convert_timezone(time, a="UTC", b="local"):
# TIMEZONE OBJECTS
tza = tz.tzlocal(a) if (a=="local") else tz.gettz(a)
tzb = tz.tzlocal(b) if (b=="local") else tz.gettz(b)
# FORMAT TIME WITH FROM TIMEZONE
time = time.replace(tzinfo=tza)
# CHANGE TIME ZONE
newtime = time.astimezone(tzb)
return newtime
|
apache-2.0
|
Python
|
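One caveat with the code above: dateutil's tz.tzlocal() takes no arguments, so tz.tzlocal(a) raises TypeError whenever a == "local". A minimal corrected sketch of the same conversion:

import datetime
from dateutil import tz

def convert_timezone(time, a="UTC", b="local"):
    tza = tz.tzlocal() if a == "local" else tz.gettz(a)  # tzlocal() is argument-free
    tzb = tz.tzlocal() if b == "local" else tz.gettz(b)
    return time.replace(tzinfo=tza).astimezone(tzb)

print(convert_timezone(datetime.datetime(2015, 6, 1, 12, 0)))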
9d2ef02367380c76f39c4bd84ea2f35897d0bebf
|
Edit school enrollment management command
|
unicefuganda/edtrac,unicefuganda/edtrac,unicefuganda/edtrac
|
education/management/commands/create_school_enrollment_script.py
|
education/management/commands/create_school_enrollment_script.py
|
'''
Created on May 28, 2013
@author: raybesiga
'''
import datetime
import logging
import itertools
from logging import handlers
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.template import Context, Template
import traceback
from rapidsms.models import Contact, Connection, Backend
from rapidsms_httprouter.models import Message
from django.db import transaction
from rapidsms.messages.outgoing import OutgoingMessage
from script.utils.outgoing import check_progress
from script.models import ScriptProgress, Email, Script, ScriptStep
from poll.models import Poll
from optparse import OptionParser, make_option
class Command(BaseCommand):
help = "Create school enrollment termly script"
def handle(self, **options):
poll0 = Poll.objects.get(name="total_enrollment_girls")
poll1 = Poll.objects.get(name="total_enrollment_boys")
script_school_enrollment_termly = Script.objects.create(
slug="edtrac_school_enrollment_termly",
name="School Enrollment Termly Script",
)
script_school_enrollment_termly.sites.add(Site.objects.get_current())
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_school_enrollment_termly,
poll=poll0,
order=0,
rule = ScriptStep.WAIT_MOVEON,
start_offset=0,
giveup_offset=14400, # we'll give them four hours to respond
))
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_school_enrollment_termly,
poll=poll1,
order=1,
rule=ScriptStep.WAIT_MOVEON, # for polls, this likely means a poll whose answer we aren't particularly concerned with
start_offset=0, #start immediately after the giveup time has elapsed from the previous step
giveup_offset=14400, # we'll give them four hours to respond
))
|
'''
Created on May 28, 2013
@author: raybesiga
'''
import datetime
import logging
import itertools
from logging import handlers
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from django.template import Context, Template
import traceback
from rapidsms.models import Contact, Connection, Backend
from rapidsms_httprouter.models import Message
from django.db import transaction
from rapidsms.messages.outgoing import OutgoingMessage
from script.utils.outgoing import check_progress
from script.models import ScriptProgress, Email, Script, ScriptStep
from poll.models import Poll
from optparse import OptionParser, make_option
class Command(BaseCommand):
help = "Create school enrollment termly polls"
def handle(self, **options):
poll0 = Poll.objects.get(name="total_enrollment_girls")
poll1 = Poll.objects.get(name="total_enrollment_boys")
script_school_enrollment_termly = Script.objects.create(
slug="edtrac_school_enrollment_termly",
name="School Enrollment Termly Script",
)
script_school_enrollment_termly.sites.add(Site.objects.get_current())
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_headteacher_violence_monthly,
poll=poll0,
order=0,
rule = ScriptStep.WAIT_MOVEON,
start_offset=0,
giveup_offset=14400, # we'll give them four hours to respond
))
script_school_enrollment_termly.steps.add(ScriptStep.objects.create(
script=script_headteacher_violence_monthly,
poll=poll1,
order=1,
rule=ScriptStep.WAIT_MOVEON, # for polls, this likely means a poll whose answer we aren't particularly concerned with
start_offset=0, #start immediately after the giveup time has elapsed from the previous step
giveup_offset=14400, # we'll give them four hours to respond
))
|
bsd-3-clause
|
Python
|
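The bug fixed here was a stale copy-pasted variable name (script_headteacher_violence_monthly) inside two nearly identical ScriptStep blocks. Building the steps in a loop removes the duplication that made the slip possible; a plain-data sketch of the idea, with dicts standing in for the rapidsms models:

def build_steps(script_slug, poll_names, giveup_offset=14400):
    """Return one step spec per poll; the script name is written exactly once."""
    return [
        {"script": script_slug, "poll": poll, "order": order,
         "rule": "WAIT_MOVEON", "start_offset": 0, "giveup_offset": giveup_offset}
        for order, poll in enumerate(poll_names)
    ]

for step in build_steps("edtrac_school_enrollment_termly",
                        ["total_enrollment_girls", "total_enrollment_boys"]):
    print(step)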
900ddf92a1cf65270a7b420a848c0f2611647899
|
handle &
|
ScorpionResponse/freelancefinder,ScorpionResponse/freelancefinder,ScorpionResponse/freelancefinder
|
freelancefinder/remotes/sources/workinstartups/workinstartups.py
|
freelancefinder/remotes/sources/workinstartups/workinstartups.py
|
"""Wrapper for the WorkInStartups source."""
import json
import bleach
import maya
import requests
from jobs.models import Post
ADDITIONAL_TAGS = ['p', 'br']
class WorkInStartups(object):
"""Wrapper for the WorkInStartups source."""
json_api_address = 'http://workinstartups.com/job-board/api/api.php?action=getJobs&type=0&category=0&count=100&random=0&days_behind=0&response=json'
def __init__(self, source):
"""Parse the API."""
self.api_response = requests.get(self.json_api_address)
self.source = source
def jobs(self):
"""Iterate through all available jobs."""
# Remove the 'var jobs = ' at the beginning and the ';' at the end
response_json = json.loads(self.api_response.text[len("var jobs = "):-1])
for job_info in response_json:
post = self.parse_job_to_post(job_info)
yield post
def parse_job_to_post(self, job_info):
"""Convert from the rss feed format to a Post."""
created = maya.parse(job_info['mysql_date']).datetime()
job_url = 'http://workinstartups.com/job-board/job/{}/{}/'.format(job_info['id'], job_info['url_title'])
post = Post(
url=job_url,
source=self.source,
title=job_info['type_name'] + " - " + bleach.clean(job_info['title'].replace("&", "&"), strip=True),
description=bleach.clean(job_info['description'], tags=bleach.ALLOWED_TAGS + ADDITIONAL_TAGS, strip=True),
unique=job_info['id'],
created=created,
subarea='all',
)
return post
|
"""Wrapper for the WorkInStartups source."""
import json
import bleach
import maya
import requests
from jobs.models import Post
ADDITIONAL_TAGS = ['p', 'br']
class WorkInStartups(object):
"""Wrapper for the WorkInStartups source."""
json_api_address = 'http://workinstartups.com/job-board/api/api.php?action=getJobs&type=0&category=0&count=100&random=0&days_behind=0&response=json'
def __init__(self, source):
"""Parse the API."""
self.api_response = requests.get(self.json_api_address)
self.source = source
def jobs(self):
"""Iterate through all available jobs."""
# Remove the 'var jobs = ' at the beginning and the ';' at the end
response_json = json.loads(self.api_response.text[len("var jobs = "):-1])
for job_info in response_json:
post = self.parse_job_to_post(job_info)
yield post
def parse_job_to_post(self, job_info):
"""Convert from the rss feed format to a Post."""
created = maya.parse(job_info['mysql_date']).datetime()
job_url = 'http://workinstartups.com/job-board/job/{}/{}/'.format(job_info['id'], job_info['url_title'])
post = Post(
url=job_url,
source=self.source,
title=job_info['type_name'] + " - " + bleach.clean(job_info['title'], strip=True),
description=bleach.clean(job_info['description'], tags=bleach.ALLOWED_TAGS + ADDITIONAL_TAGS, strip=True),
unique=job_info['id'],
created=created,
subarea='all',
)
return post
|
bsd-3-clause
|
Python
|
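The fix above unescapes only &amp; with str.replace(); the stdlib html module handles the full set of HTML entities, which would be a more general option for feed titles like these:

import html

print(html.unescape("Tech &amp; Product"))         # Tech & Product
print(html.unescape("R&amp;D &ndash; Engineer"))   # R&D – Engineer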
2c53bc17f98a3e9fdc71ba77f1ab9c1c06f82509
|
remove test param on srvy
|
andrewlrogers/srvy
|
collection/srvy.py
|
collection/srvy.py
|
#!/usr/bin/python
import sys
import time
from time import sleep
from datetime import datetime
import random
import sqlite3
import csv
from configparser import ConfigParser
from gpiozero import Button
import pygame
# VARIABLES
question_csv_location = '../archive/questions.csv'
sqlite_file = '../archive/srvy.db'
yes_button = Button(26)
no_button = Button(19)
# FUNCTIONS
def module_installed(module):
if module in sys.modules:
return True
else:
return False
def get_current_questions(file_location):
"""Add each question from a text file to a list. Questions should be separated by newlines."""
with open(file_location, 'r') as csv_file:
readCSV = csv.reader(csv_file, delimiter=',', quotechar='"')
questions = []
for row in readCSV:
if row:
question = row[0]
questions.append(question)
return questions
def random_questions():
"""pulls returns a random question into main loop."""
question = get_current_questions(question_csv_location)
return random.choice(question)
def add_response_to_database(question, opinion):
"""Add response to SQLite 3 database"""
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
current_date = datetime.now()
current_unix_time = time.time()
try:
c.execute('''INSERT INTO responses (pythonDateTime, unixTime, question, opinion) VALUES (?,?,?,?)''',
(current_date, current_unix_time, question, opinion))
print("Successfully added response to database.")
print("Thank you!")
except Exception as e:
print(e)
conn.commit()
conn.close()
main()
def main():
qs = random_questions() # calls questions function that returns random question.
print(qs)
while True:
opinion = input("Opinion [y/n]: ")
if opinion == "y":
sleep(.5)
opinion = 1
add_response_to_database(qs, opinion)
elif opinion == "n":
sleep(.5)
opinion = -1
add_response_to_database(qs, opinion)
main()
|
#!/usr/bin/python
import sys
import time
from time import sleep
from datetime import datetime
import random
import sqlite3
import csv
from configparser import ConfigParser
if __name__ == '__main__':
# Check if running on a Raspberry Pi
try:
from gpiozero import Button
except ImportError:
print("gpiozero is not installed.")
pass
try:
import pygame
except ImportError:
print("pygame is not installed.")
pass
# VARIABLES
question_csv_location = '../archive/questions.csv'
sqlite_file = '../archive/srvy.db'
yes_button = Button(26)
no_button = Button(19)
# FUNCTIONS
def module_installed(module):
if module in sys.modules:
return True
else:
return False
def get_current_questions(file_location):
"""Add each question from a text file to a list. Questions should be separated by newlines."""
with open(file_location, 'r') as csv_file:
readCSV = csv.reader(csv_file, delimiter=',', quotechar='"')
questions = []
for row in readCSV:
if row:
question = row[0]
questions.append(question)
return questions
def random_questions():
"""pulls returns a random question into main loop."""
question = get_current_questions(question_csv_location)
return random.choice(question)
def add_response_to_database(question, opinion):
"""Add response to SQLite 3 database"""
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
current_date = datetime.now()
current_unix_time = time.time()
try:
c.execute('''INSERT INTO responses (pythonDateTime, unixTime, question, opinion) VALUES (?,?,?,?)''',
(current_date, current_unix_time, question, opinion))
print("Successfully added response to database.")
print("Thank you!")
except Exception as e:
print(e)
conn.commit()
conn.close()
main()
def main():
qs = random_questions() # calls questions function that returns random question.
print(qs)
while True:
opinion = input("Opinion [y/n]: ")
if opinion == "y":
sleep(.5)
opinion = 1
add_response_to_database(qs, opinion)
elif opinion == "n":
sleep(.5)
opinion = -1
add_response_to_database(qs, opinion)
main()
|
mit
|
Python
|
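The removed version guarded the gpiozero/pygame imports inside if __name__ == '__main__', yet the module-level Button(26) call below the guard still raised NameError whenever the import had failed or the file was imported as a module; the commit drops the guard and imports unconditionally. If the optional-import pattern is wanted, it has to cover the usage too; a minimal sketch:

try:
    from gpiozero import Button          # present only on a Raspberry Pi
except ImportError:
    Button = None

def make_buttons():
    if Button is None:
        raise RuntimeError("gpiozero unavailable; not running on a Pi?")
    return Button(26), Button(19)        # same pins as the script above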
a2d9edbe8b154858fe89be12ca281a926ad46ac7
|
Remove double negative
|
alexisrolland/data-quality,alexisrolland/data-quality,alexisrolland/data-quality,alexisrolland/data-quality
|
api/init/health/routes.py
|
api/init/health/routes.py
|
import os
from flask import jsonify
from flask_restplus import Resource, Namespace
# pylint: disable=unused-variable
def register_health(namespace: Namespace):
"""Method used to register the health check namespace and endpoint."""
@namespace.route('/health')
@namespace.doc()
class Health(Resource):
def get(self):
"""
Get API health status
Use this endpoint to get the health status of this API.
"""
is_debug = os.environ.get('FLASK_DEBUG')
mode = 'debug' if is_debug else 'production'
message = {'message': f'MobyDQ API running in {mode} mode'}
return jsonify(message)
|
import os
from flask import jsonify
from flask_restplus import Resource, Namespace
# pylint: disable=unused-variable
def register_health(namespace: Namespace):
"""Method used to register the health check namespace and endpoint."""
@namespace.route('/health')
@namespace.doc()
class Health(Resource):
def get(self):
"""
Get API health status
Use this endpoint to get the health status of this API.
"""
is_debug = os.environ.get('FLASK_DEBUG')
mode = 'production' if not is_debug else 'debug'
message = {'message': f'MobyDQ API running in {mode} mode'}
return jsonify(message)
|
apache-2.0
|
Python
|
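os.environ.get('FLASK_DEBUG') returns a string, so values like '0' or 'false' are still truthy and would report debug mode. A small parser, under the assumption that the usual truthy spellings should count:

import os

def env_flag(name, default=False):
    """Interpret an environment variable as a boolean; env values are always str."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ("1", "true", "yes", "on")

os.environ["FLASK_DEBUG"] = "0"
print(env_flag("FLASK_DEBUG"))  # False, whereas bool(os.environ.get("FLASK_DEBUG")) is True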
a51a089e90719dfda2e6164b0f4c1aec50c26534
|
Add ordering
|
robdmc/django-entity,ambitioninc/django-entity,wesokes/django-entity
|
entity/migrations/0006_entity_relationship_unique.py
|
entity/migrations/0006_entity_relationship_unique.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-12 18:20
from __future__ import unicode_literals
from django.db import migrations, connection
from django.db.models import Count, Max
def disable_triggers(apps, schema_editor):
"""
Temporarily disable user triggers on the relationship table. We do not want things
like entity history to attach onto these migrations as this is a core bug where duplicates
should not exist
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship DISABLE TRIGGER USER;
"""
)
def enable_triggers(apps, schema_editor):
"""
Re-enable the triggers (if any)
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship ENABLE TRIGGER USER;
"""
)
def remove_duplicates(apps, schema_editor):
"""
Remove any duplicates from the entity relationship table
:param apps:
:param schema_editor:
:return:
"""
# Get the model
EntityRelationship = apps.get_model('entity', 'EntityRelationship')
# Find the duplicates
duplicates = EntityRelationship.objects.all().order_by(
'sub_entity_id',
'super_entity_id'
).values(
'sub_entity_id',
'super_entity_id'
).annotate(
Count('sub_entity_id'),
Count('super_entity_id'),
max_id=Max('id')
).filter(
super_entity_id__count__gt=1
)
# Loop over the duplicates and delete
for duplicate in duplicates:
EntityRelationship.objects.filter(
sub_entity_id=duplicate['sub_entity_id'],
super_entity_id=duplicate['super_entity_id']
).exclude(
id=duplicate['max_id']
).delete()
class Migration(migrations.Migration):
dependencies = [
('entity', '0005_remove_entitygroup_entities'),
]
operations = [
migrations.RunPython(disable_triggers),
migrations.RunPython(remove_duplicates),
migrations.RunPython(enable_triggers),
migrations.AlterUniqueTogether(
name='entityrelationship',
unique_together=set([('sub_entity', 'super_entity')]),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-12-12 18:20
from __future__ import unicode_literals
from django.db import migrations, connection
from django.db.models import Count, Max
def disable_triggers(apps, schema_editor):
"""
Temporarily disable user triggers on the relationship table. We do not want things
like entity history to attach onto these migrations as this is a core bug where duplicates
should not exist
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship DISABLE TRIGGER USER;
"""
)
def enable_triggers(apps, schema_editor):
"""
Re-enable the triggers (if any)
:param apps:
:param schema_editor:
:return:
"""
with connection.cursor() as cursor:
cursor.execute(
"""
ALTER TABLE entity_entityrelationship ENABLE TRIGGER USER;
"""
)
def remove_duplicates(apps, schema_editor):
"""
Remove any duplicates from the entity relationship table
:param apps:
:param schema_editor:
:return:
"""
# Get the model
EntityRelationship = apps.get_model('entity', 'EntityRelationship')
# Find the duplicates
duplicates = EntityRelationship.objects.all().order_by().values(
'sub_entity_id', 'super_entity_id'
).annotate(
Count('sub_entity_id'),
Count('super_entity_id'),
max_id=Max('id')
).filter(
super_entity_id__count__gt=1
)
# Loop over the duplicates and delete
for duplicate in duplicates:
EntityRelationship.objects.filter(
sub_entity_id=duplicate['sub_entity_id'],
super_entity_id=duplicate['super_entity_id']
).exclude(
id=duplicate['max_id']
).delete()
class Migration(migrations.Migration):
dependencies = [
('entity', '0005_remove_entitygroup_entities'),
]
operations = [
migrations.RunPython(disable_triggers),
migrations.RunPython(remove_duplicates),
migrations.RunPython(enable_triggers),
migrations.AlterUniqueTogether(
name='entityrelationship',
unique_together=set([('sub_entity', 'super_entity')]),
),
]
|
mit
|
Python
|
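The ordering matters because, for values().annotate() queries, Django adds any order_by() fields to the GROUP BY clause; an empty order_by() or one restricted to the grouped columns keeps the duplicate counts correct. The in-memory analogue of the dedup step, using Counter:

from collections import Counter

rows = [
    {"id": 1, "sub_entity_id": 10, "super_entity_id": 20},
    {"id": 2, "sub_entity_id": 10, "super_entity_id": 20},  # duplicate pair
    {"id": 3, "sub_entity_id": 11, "super_entity_id": 21},
]

counts = Counter((r["sub_entity_id"], r["super_entity_id"]) for r in rows)
for pair, n in counts.items():
    if n > 1:
        dupes = [r for r in rows if (r["sub_entity_id"], r["super_entity_id"]) == pair]
        keep = max(dupes, key=lambda r: r["id"])      # mirrors max_id=Max('id')
        rows = [r for r in rows if r is keep or r not in dupes]

print([r["id"] for r in rows])  # [2, 3]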
d9044815f4034a51d27e4949ffcd153e253cc882
|
use double quotes
|
yashodhank/frappe,frappe/frappe,StrellaGroup/frappe,frappe/frappe,StrellaGroup/frappe,almeidapaulopt/frappe,almeidapaulopt/frappe,yashodhank/frappe,mhbu50/frappe,mhbu50/frappe,almeidapaulopt/frappe,frappe/frappe,yashodhank/frappe,yashodhank/frappe,mhbu50/frappe,almeidapaulopt/frappe,mhbu50/frappe,StrellaGroup/frappe
|
frappe/integrations/doctype/google_settings/test_google_settings.py
|
frappe/integrations/doctype/google_settings/test_google_settings.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from .google_settings import get_file_picker_settings
class TestGoogleSettings(unittest.TestCase):
def setUp(self):
settings = frappe.get_single("Google Settings")
settings.client_id = "test_client_id"
settings.app_id = "test_app_id"
settings.api_key = "test_api_key"
settings.save()
def test_picker_disabled(self):
"""Google Drive Picker should be disabled if it is not enabled in Google Settings."""
frappe.db.set_value("Google Settings", None, "enable", 1)
frappe.db.set_value("Google Settings", None, "google_drive_picker_enabled", 0)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_google_disabled(self):
"""Google Drive Picker should be disabled if Google integration is not enabled."""
frappe.db.set_value("Google Settings", None, "enable", 0)
frappe.db.set_value("Google Settings", None, "google_drive_picker_enabled", 1)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_picker_enabled(self):
"""If picker is enabled, get_file_picker_settings should return the credentials."""
frappe.db.set_value("Google Settings", None, "enable", 1)
frappe.db.set_value("Google Settings", None, "google_drive_picker_enabled", 1)
settings = get_file_picker_settings()
self.assertEqual(True, settings.get("enabled", False))
self.assertEqual("test_client_id", settings.get("clientId", ""))
self.assertEqual("test_app_id", settings.get("appId", ""))
self.assertEqual("test_api_key", settings.get("developerKey", ""))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from .google_settings import get_file_picker_settings
class TestGoogleSettings(unittest.TestCase):
def setUp(self):
settings = frappe.get_single('Google Settings')
settings.client_id = 'test_client_id'
settings.app_id = 'test_app_id'
settings.api_key = 'test_api_key'
settings.save()
def test_picker_disabled(self):
"""Google Drive Picker should be disabled if it is not enabled in Google Settings."""
frappe.db.set_value('Google Settings', None, 'enable', 1)
frappe.db.set_value('Google Settings', None, 'google_drive_picker_enabled', 0)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_google_disabled(self):
"""Google Drive Picker should be disabled if Google integration is not enabled."""
frappe.db.set_value('Google Settings', None, 'enable', 0)
frappe.db.set_value('Google Settings', None, 'google_drive_picker_enabled', 1)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_picker_enabled(self):
"""If picker is enabled, get_file_picker_settings should return the credentials."""
frappe.db.set_value('Google Settings', None, 'enable', 1)
frappe.db.set_value('Google Settings', None, 'google_drive_picker_enabled', 1)
settings = get_file_picker_settings()
self.assertEqual(True, settings.get('enabled', False))
self.assertEqual('test_client_id', settings.get('clientId', ''))
self.assertEqual('test_app_id', settings.get('appId', ''))
self.assertEqual('test_api_key', settings.get('developerKey', ''))
|
mit
|
Python
|
f83076f722d66ebc27d66bc13798d4e5bc9cc27a
|
Fix `TypeError: must use keyword argument for key function` on Python 3
|
almarklein/scikit-image,vighneshbirodkar/scikit-image,ofgulban/scikit-image,emmanuelle/scikits.image,almarklein/scikit-image,bennlich/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,SamHames/scikit-image,robintw/scikit-image,ajaybhat/scikit-image,Hiyorimi/scikit-image,blink1073/scikit-image,GaZ3ll3/scikit-image,warmspringwinds/scikit-image,chintak/scikit-image,rjeli/scikit-image,emon10005/scikit-image,oew1v07/scikit-image,paalge/scikit-image,bsipocz/scikit-image,ofgulban/scikit-image,Midafi/scikit-image,emmanuelle/scikits.image,emmanuelle/scikits.image,GaZ3ll3/scikit-image,vighneshbirodkar/scikit-image,chriscrosscutler/scikit-image,WarrenWeckesser/scikits-image,Britefury/scikit-image,warmspringwinds/scikit-image,newville/scikit-image,paalge/scikit-image,keflavich/scikit-image,Hiyorimi/scikit-image,rjeli/scikit-image,ajaybhat/scikit-image,juliusbierk/scikit-image,youprofit/scikit-image,SamHames/scikit-image,emon10005/scikit-image,SamHames/scikit-image,youprofit/scikit-image,almarklein/scikit-image,keflavich/scikit-image,paalge/scikit-image,emmanuelle/scikits.image,michaelpacer/scikit-image,blink1073/scikit-image,oew1v07/scikit-image,dpshelio/scikit-image,chriscrosscutler/scikit-image,Britefury/scikit-image,jwiggins/scikit-image,jwiggins/scikit-image,michaelaye/scikit-image,juliusbierk/scikit-image,SamHames/scikit-image,vighneshbirodkar/scikit-image,michaelpacer/scikit-image,michaelaye/scikit-image,almarklein/scikit-image,Midafi/scikit-image,WarrenWeckesser/scikits-image,bsipocz/scikit-image,bennlich/scikit-image,newville/scikit-image,ClinicalGraphics/scikit-image,ofgulban/scikit-image,robintw/scikit-image,pratapvardhan/scikit-image,pratapvardhan/scikit-image,chintak/scikit-image,chintak/scikit-image,rjeli/scikit-image,chintak/scikit-image
|
scikits/image/transform/tests/test_hough_transform.py
|
scikits/image/transform/tests/test_hough_transform.py
|
import numpy as np
from numpy.testing import *
import scikits.image.transform as tf
import scikits.image.transform.hough_transform as ht
from scikits.image.transform import probabilistic_hough
def append_desc(func, description):
"""Append the test function ``func`` and append
``description`` to its name.
"""
func.description = func.__module__ + '.' + func.func_name + description
return func
from scikits.image.transform import *
def test_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 1
out, angles, d = tf.hough(img)
y, x = np.where(out == out.max())
dist = d[y[0]]
theta = angles[x[0]]
assert_equal(dist > 70, dist < 72)
assert_equal(theta > 0.78, theta < 0.79)
def test_hough_angles():
img = np.zeros((10, 10))
img[0, 0] = 1
out, angles, d = tf.hough(img, np.linspace(0, 360, 10))
assert_equal(len(angles), 10)
def test_py_hough():
ht._hough, fast_hough = ht._py_hough, ht._hough
yield append_desc(test_hough, '_python')
yield append_desc(test_hough_angles, '_python')
tf._hough = fast_hough
def test_probabilistic_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 100
img[i, i] = 100
    # decrease the default theta sampling because similar orientations may be
    # confused, as mentioned in the article by Galambos et al.
    theta = np.linspace(0, np.pi, 45)
lines = probabilistic_hough(img, theta=theta, threshold=10, line_length=10, line_gap=1)
# sort the lines according to the x-axis
sorted_lines = []
for line in lines:
line = list(line)
line.sort(key=lambda x: x[0])
sorted_lines.append(line)
assert([(25, 75), (74, 26)] in sorted_lines)
assert([(25, 25), (74, 74)] in sorted_lines)
if __name__ == "__main__":
run_module_suite()
|
import numpy as np
from numpy.testing import *
import scikits.image.transform as tf
import scikits.image.transform.hough_transform as ht
from scikits.image.transform import probabilistic_hough
def append_desc(func, description):
"""Append the test function ``func`` and append
``description`` to its name.
"""
func.description = func.__module__ + '.' + func.func_name + description
return func
from scikits.image.transform import *
def test_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 1
out, angles, d = tf.hough(img)
y, x = np.where(out == out.max())
dist = d[y[0]]
theta = angles[x[0]]
assert_equal(dist > 70, dist < 72)
assert_equal(theta > 0.78, theta < 0.79)
def test_hough_angles():
img = np.zeros((10, 10))
img[0, 0] = 1
out, angles, d = tf.hough(img, np.linspace(0, 360, 10))
assert_equal(len(angles), 10)
def test_py_hough():
ht._hough, fast_hough = ht._py_hough, ht._hough
yield append_desc(test_hough, '_python')
yield append_desc(test_hough_angles, '_python')
tf._hough = fast_hough
def test_probabilistic_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 100
img[i, i] = 100
    # decrease the default theta sampling because similar orientations may be
    # confused, as mentioned in the article by Galambos et al.
    theta = np.linspace(0, np.pi, 45)
lines = probabilistic_hough(img, theta=theta, threshold=10, line_length=10, line_gap=1)
# sort the lines according to the x-axis
sorted_lines = []
for line in lines:
line = list(line)
line.sort(lambda x,y: cmp(x[0], y[0]))
sorted_lines.append(line)
assert([(25, 75), (74, 26)] in sorted_lines)
assert([(25, 25), (74, 74)] in sorted_lines)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
Python
|
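Python 3 dropped both the cmp argument of list.sort() and the cmp() builtin, hence the switch to key= above; functools.cmp_to_key gives a mechanical port when a comparator must be kept. (The func.func_name attribute used further up in this file is likewise Python 2-only; func.__name__ works on both.) A small sketch:

from functools import cmp_to_key

line = [(74, 26), (25, 75)]
line.sort(key=lambda pt: pt[0])            # idiomatic on Python 2 and 3
print(line)                                # [(25, 75), (74, 26)]

def by_x(a, b):
    return (a[0] > b[0]) - (a[0] < b[0])   # stand-in for the removed cmp()

line.sort(key=cmp_to_key(by_x))            # mechanical port of a comparator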
f3cada11b253ceea129342040f7e3d75f4f0cf15
|
use assertions with the form elements in test_new instead of the regex on the html content
|
axelhodler/notesdude,axelhodler/notesdude
|
test_notes.py
|
test_notes.py
|
from webtest import TestApp
import os
import re
import notes
import dbaccessor
DB = 'notes.db'
class TestWebserver():
def test_index(self):
dba = dbaccessor.DbAccessor(DB)
dba.addNote('eins', 'lorem ipsum')
dba.addNote('zwei', 'blabla')
bottle = TestApp(notes.app)
result = bottle.get('/')
assert result.status == '200 OK'
match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
assert match
def test_new(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
assert result.status == '200 OK'
form = result.form
assert form.action == '/new'
assert form.method == 'GET'
assert form['title'].value == ''
assert form['content'].value == ''
def test_adding_new_note(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
form = result.form
form['title'] = "testtitle"
form['content'] = "testcontent"
result = form.submit('save')
assert result.status == '200 OK'
def tearDown(self):
if os.path.isfile(DB):
os.remove(DB)
|
from webtest import TestApp
import os
import re
import notes
import dbaccessor
DB = 'notes.db'
class TestWebserver():
def test_index(self):
dba = dbaccessor.DbAccessor(DB)
dba.addNote('eins', 'lorem ipsum')
dba.addNote('zwei', 'blabla')
bottle = TestApp(notes.app)
result = bottle.get('/')
assert result.status == '200 OK'
match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
assert match
def test_new(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
assert result.status == '200 OK'
match = re.search(r'<input type="text" size="100" maxlength="100" name="content">', result.body)
assert match
def test_adding_new_note(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
form = result.form
form['title'] = "testtitle"
form['content'] = "testcontent"
result = form.submit('save')
assert result.status == '200 OK'
def tearDown(self):
if os.path.isfile(DB):
os.remove(DB)
|
mit
|
Python
|
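Asserting on the parsed form is sturdier than regexing raw HTML, since attribute order or whitespace changes no longer break the test. A minimal self-contained sketch of the same pattern against a throwaway WSGI app (requires the webtest package):

from webtest import TestApp

def app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/html')])
    return [b'<form action="/new" method="GET">'
            b'<input type="text" name="title">'
            b'<input type="text" name="content"></form>']

result = TestApp(app).get('/')
form = result.form
assert form.action == '/new'
assert form['title'].value == ''
assert form['content'].value == ''
print('form assertions passed')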
77e3f0da9bec64c2bf0f34faec735a29f1a74284
|
remove test for Google+
|
runningwolf666/you-get,linhua55/you-get,power12317/you-get,tigerface/you-get,chares-zhang/you-get,forin-xyz/you-get,specter4mjy/you-get,fffonion/you-get,xyuanmu/you-get,dream1986/you-get,zmwangx/you-get,linhua55/you-get,Red54/you-get,jindaxia/you-get,XiWenRen/you-get,cnbeining/you-get,CzBiX/you-get,lilydjwg/you-get,shanyimin/you-get,qzane/you-get,kzganesan/you-get,xyuanmu/you-get,j4s0nh4ck/you-get,pitatensai/you-get,zmwangx/you-get,FelixYin66/you-get,flwh/you-get,pastebt/you-get,rain1988/you-get,smart-techs/you-get,qzane/you-get,smart-techs/you-get,candlewill/you-get,lilydjwg/you-get,cnbeining/you-get
|
tests/test.py
|
tests/test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from you_get import *
from you_get.__main__ import url_to_module
def test_urls(urls):
for url in urls:
url_to_module(url).download(url, info_only = True)
class YouGetTests(unittest.TestCase):
def test_freesound(self):
test_urls([
"http://www.freesound.org/people/Corsica_S/sounds/184419/",
])
def test_jpopsuki(self):
test_urls([
"http://jpopsuki.tv/video/Dragon-Ash---Run-to-the-Sun/8ad7aec604badd0b0798cd999b63ae17",
])
def test_mixcloud(self):
test_urls([
"http://www.mixcloud.com/beatbopz/beat-bopz-disco-mix/",
"http://www.mixcloud.com/beatbopz/tokyo-taste-vol4/",
"http://www.mixcloud.com/DJVadim/north-america-are-you-ready/",
])
def test_vimeo(self):
test_urls([
"http://vimeo.com/56810854",
])
def test_xiami(self):
test_urls([
"http://www.xiami.com/song/1769835121",
])
def test_youtube(self):
test_urls([
"http://www.youtube.com/watch?v=pzKerr0JIPA",
"http://youtu.be/pzKerr0JIPA",
])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from you_get import *
from you_get.__main__ import url_to_module
def test_urls(urls):
for url in urls:
url_to_module(url).download(url, info_only = True)
class YouGetTests(unittest.TestCase):
def test_freesound(self):
test_urls([
"http://www.freesound.org/people/Corsica_S/sounds/184419/",
])
def test_googleplus(self):
test_urls([
"http://plus.google.com/102663035987142737445/posts/jJRu43KQFT5",
"http://plus.google.com/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/posts/jJRu43KQFT5",
"http://plus.google.com/+平田梨奈/posts/jJRu43KQFT5",
"http://plus.google.com/photos/102663035987142737445/albums/5844078581209509505/5844078587839097874",
"http://plus.google.com/photos/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/albums/5844078581209509505/5844078587839097874",
"http://plus.google.com/photos/+平田梨奈/albums/5844078581209509505/5844078587839097874",
])
def test_jpopsuki(self):
test_urls([
"http://jpopsuki.tv/video/Dragon-Ash---Run-to-the-Sun/8ad7aec604badd0b0798cd999b63ae17",
])
def test_mixcloud(self):
test_urls([
"http://www.mixcloud.com/beatbopz/beat-bopz-disco-mix/",
"http://www.mixcloud.com/beatbopz/tokyo-taste-vol4/",
"http://www.mixcloud.com/DJVadim/north-america-are-you-ready/",
])
def test_vimeo(self):
test_urls([
"http://vimeo.com/56810854",
])
def test_xiami(self):
test_urls([
"http://www.xiami.com/song/1769835121",
])
def test_youtube(self):
test_urls([
"http://www.youtube.com/watch?v=pzKerr0JIPA",
"http://youtu.be/pzKerr0JIPA",
])
|
mit
|
Python
|
bbbd535ecabc6017aec6a3549c917d26036aff3b
|
Remove checks for testGotTrace unit test until trace event importer is implemented.
|
ChromiumWebApps/chromium,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,Jonekee/chromium.src,markYoungH/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,pozdnyakov/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,ondra-novak/chromium.src,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,littlstar/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,dednal/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,ltilve/chromium,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,hujiajie/pa-chromium,ltilve/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,mogoweb/chromium-crosswalk,anirudhSK/chromium,dushu1203/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hujiajie/pa-chromium,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,dushu1203/chromium.src,mogoweb/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,Chilledheart/chromium,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,littlstar/chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,axinging/chromium-crosswalk,pozdnyakov/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,mogoweb/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Just-D/chromium-1,bright-sparks/chromium-spacewalk,patrickm/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,littlstar/chromium.src,M4sse/chromium.src,dednal/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,littlstar/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,anirudhSK/chromium,patrickm/chromium.src,Just-D/chromium-1,markYoungH/chromium.src,M4sse/chromium.src,hujiajie/pa-chromium,M4sse/chromium.src,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,krieger-od/nwjs_chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,Jonekee/chromium.src,ChromiumWebApps/chromium,ltilve/chromium,krieger-od/nwjs_chromium.src,Just-D/chromium-1,patrickm/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,ltilve/chromium,hujiajie/pa-chromium,Just-D/chromium-1,ltilve/chromium,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,littlstar/chromium.src,pozdnyakov/chromium-crosswalk,fujunwei/chromium-crosswalk,jaruba/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,ondra-novak/chromium.src,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk
|
tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py
|
tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import unittest
from telemetry.core import util
from telemetry.core.chrome import tracing_backend
from telemetry.test import tab_test_case
class TracingBackendTest(tab_test_case.TabTestCase):
def _StartServer(self):
base_dir = os.path.dirname(__file__)
self._browser.SetHTTPServerDirectories(
os.path.join(base_dir, '..', '..', '..', 'unittest_data'))
def _WaitForAnimationFrame(self):
def _IsDone():
js_is_done = """done"""
return bool(self._tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 5)
def testGotTrace(self):
if not self._browser.supports_tracing:
logging.warning('Browser does not support tracing, skipping test.')
return
self._StartServer()
self._browser.StartTracing()
self._browser.StopTracing()
# TODO(tengs): check model for correctness after trace_event_importer
# is implemented (crbug.com/173327).
class TracingResultImplTest(unittest.TestCase):
def testWrite1(self):
ri = tracing_backend.TraceResultImpl([])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], [])
def testWrite2(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], ['foo', 'bar'])
def testWrite3(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"',
'"baz"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'],
['foo', 'bar', 'baz'])
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import unittest
from telemetry.core import util
from telemetry.core.chrome import tracing_backend
from telemetry.test import tab_test_case
class TracingBackendTest(tab_test_case.TabTestCase):
def _StartServer(self):
base_dir = os.path.dirname(__file__)
self._browser.SetHTTPServerDirectories(
os.path.join(base_dir, '..', '..', '..', 'unittest_data'))
def _WaitForAnimationFrame(self):
def _IsDone():
js_is_done = """done"""
return bool(self._tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 5)
def testGotTrace(self):
if not self._browser.supports_tracing:
logging.warning('Browser does not support tracing, skipping test.')
return
self._StartServer()
self._browser.StartTracing()
self._browser.StopTracing()
model = self._browser.GetTraceResultAndReset().AsTimelineModel()
events = model.GetAllEvents()
assert len(events) > 0
class TracingResultImplTest(unittest.TestCase):
def testWrite1(self):
ri = tracing_backend.TraceResultImpl([])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], [])
def testWrite2(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], ['foo', 'bar'])
def testWrite3(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"',
'"baz"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'],
['foo', 'bar', 'baz'])
|
bsd-3-clause
|
Python
|
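Returning early when tracing is unsupported makes the test count as a pass; unittest's skip machinery records it as skipped instead, which keeps the gap visible in the test report. A minimal sketch of the same guard:

import unittest

class TracingBackendSketchTest(unittest.TestCase):
    def test_got_trace(self):
        supports_tracing = False   # stand-in for self._browser.supports_tracing
        if not supports_tracing:
            self.skipTest("Browser does not support tracing")
        # ...start/stop tracing and check the trace here...

if __name__ == "__main__":
    unittest.main()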
a440cef4140a4225fc093c9143d7cfd1a0c4e917
|
Update metric through API
|
Thib17/biggraphite,criteo/biggraphite,criteo/biggraphite,Thib17/biggraphite,Thib17/biggraphite,iksaif/biggraphite,criteo/biggraphite,criteo/biggraphite,iksaif/biggraphite,iksaif/biggraphite,iksaif/biggraphite,Thib17/biggraphite
|
biggraphite/cli/web/namespaces/biggraphite.py
|
biggraphite/cli/web/namespaces/biggraphite.py
|
#!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigGraphite API."""
from __future__ import absolute_import
from flask import request
import flask_restplus as rp
from biggraphite import metric as bg_metric
from biggraphite.cli.web import context
# TODO:
# - Add the equivalent of what the accessor provides
# - Add the ability to get/set points.
api = rp.Namespace("biggraphite", description="BigGraphite API")
metric_metadata = api.model(
"MetricMetadata",
{
"aggregator": rp.fields.String(description="The metric aggregator"),
"retention": rp.fields.String(description="The metric retention"),
"carbon_xfilesfactor": rp.fields.Float(description="The metric carbon xfiles factor"),
}
)
metric = api.model(
"Metric",
{
"id": rp.fields.String(readOnly=True, description="The metric identifier"),
"name": rp.fields.String(description="The metric name"),
"metadata": rp.fields.Nested(metric_metadata, description="The metric metadata"),
"created_on": rp.fields.DateTime(),
"updated_on": rp.fields.DateTime(),
"read_on": rp.fields.DateTime(),
},
)
@api.route("/metric/<string:name>")
@api.doc("Operations on metrics.")
@api.param("name", "The metric name")
class MetricResource(rp.Resource):
"""A Metric."""
@api.doc("Get a metric by name.")
@api.marshal_with(metric)
def get(self, name):
"""Get a metric."""
m = context.accessor.get_metric(name)
if not m:
rp.abort(404)
return m.as_string_dict()
@api.doc("Update a metric.")
@api.expect(metric_metadata)
def post(self, name):
"""Update a metric."""
if not context.accessor.has_metric(name):
return "Unknown metric: '%s'" % name, 404
payload = request.json
metadata = bg_metric.MetricMetadata(
aggregator=bg_metric.Aggregator.from_config_name(payload["aggregator"]),
retention=bg_metric.Retention.from_string(payload["retention"]),
carbon_xfilesfactor=payload["carbon_xfilesfactor"]
)
context.accessor.update_metric(name, metadata)
return '', 204
|
#!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigGraphite API."""
from __future__ import absolute_import
import flask_restplus as rp
from biggraphite.cli.web import context
# TODO:
# - Add the equivalent of what the accessor provides
# - Add the ability to get/set points.
api = rp.Namespace("biggraphite", description="BigGraphite API")
metric = api.model(
"Metric",
{
"id": rp.fields.String(readOnly=True, description="The metric identifier"),
"name": rp.fields.String(description="The metric name"),
"metadata": rp.fields.Raw(description="The metric metadata"),
"created_on": rp.fields.DateTime(),
"updated_on": rp.fields.DateTime(),
"read_on": rp.fields.DateTime(),
},
)
@api.route("/metric/<string:name>")
@api.doc("Operations on metrics.")
@api.param("name", "The metric name")
class MetricResource(rp.Resource):
"""A Metric."""
@api.doc("Get a metric by name.")
@api.marshal_with(metric)
def get(self, name):
"""Get a metric."""
m = context.accessor.get_metric(name)
if not m:
rp.abort(404)
return m.as_string_dict()
|
apache-2.0
|
Python
|
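A hypothetical client call against the new POST endpoint; the base URL, metric name, and the aggregator/retention values are illustrative assumptions, not taken from the BigGraphite docs:

import requests

payload = {
    "aggregator": "average",
    "retention": "1440*60s:30*86400s",
    "carbon_xfilesfactor": 0.5,
}
resp = requests.post("http://localhost:5000/biggraphite/metric/foo.bar",
                     json=payload)
print(resp.status_code)  # 204 on success, 404 for an unknown metric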
1dbb1e0f8751f37271178665a727c4eefc49a88c
|
Remove subclassing of exception, since there is only one.
|
BT-fgarbely/partner-contact,diagramsoftware/partner-contact,andrius-preimantas/partner-contact,BT-jmichaud/partner-contact,Antiun/partner-contact,Therp/partner-contact,idncom/partner-contact,gurneyalex/partner-contact,Endika/partner-contact,Ehtaga/partner-contact,QANSEE/partner-contact,raycarnes/partner-contact,open-synergy/partner-contact,BT-ojossen/partner-contact,charbeljc/partner-contact,sergiocorato/partner-contact,alanljj/oca-partner-contact,acsone/partner-contact,akretion/partner-contact
|
partner_firstname/exceptions.py
|
partner_firstname/exceptions.py
|
# -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import _, exceptions
class EmptyNames(exceptions.ValidationError):
def __init__(self, record, value=_("No name is set.")):
self.record = record
self._value = value
self.name = _("Error(s) with partner %d's name.") % record.id
|
# -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import _, exceptions
class PartnerNameError(exceptions.ValidationError):
def __init__(self, record, value=None):
self.record = record
self._value = value
self.name = _("Error(s) with partner %d's name.") % record.id
@property
def value(self):
raise NotImplementedError()
class EmptyNames(PartnerNameError):
@property
def value(self):
return _("No name is set.")
|
agpl-3.0
|
Python
|
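With only one concrete subclass left, the abstract PartnerNameError layer added nothing, so the commit folds it away. The same shape in plain stdlib terms, with ValueError standing in for Odoo's ValidationError:

class EmptyNames(ValueError):
    def __init__(self, record_id, value="No name is set."):
        self.value = value
        super(EmptyNames, self).__init__(
            "Error(s) with partner %d's name." % record_id)

try:
    raise EmptyNames(42)
except EmptyNames as e:
    print(e, "-", e.value)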
017889913a1dba443022ee032535bdc4cb40ddb6
|
Make nodepool git repo caching more robust
|
coolsvap/project-config,osrg/project-config,osrg/project-config,open-switch/infra_project-config,dongwenjuan/project-config,coolsvap/project-config,citrix-openstack/project-config,noorul/os-project-config,Tesora/tesora-project-config,anbangr/osci-project-config,anbangr/osci-project-config,dongwenjuan/project-config,noorul/os-project-config,openstack-infra/project-config,citrix-openstack/project-config,Tesora/tesora-project-config,openstack-infra/project-config,open-switch/infra_project-config
|
modules/openstack_project/files/nodepool/scripts/cache_git_repos.py
|
modules/openstack_project/files/nodepool/scripts/cache_git_repos.py
|
#!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import re
import shutil
import urllib2
from common import run_local
URL = ('http://git.openstack.org/cgit/openstack-infra/config/plain/'
'modules/openstack_project/files/review.projects.yaml')
PROJECT_RE = re.compile('^-?\s+project:\s+(.*)$')
def clone_repo(project):
remote = 'git://git.openstack.org/%s.git' % project
# Clear out any existing target directory first, in case of a retry.
try:
shutil.rmtree(os.path.join('/opt/git', project))
except OSError:
pass
# Try to clone the requested git repository.
(status, out) = run_local(['git', 'clone', remote, project],
status=True, cwd='/opt/git')
# If it claims to have worked, make sure we can list branches.
if status == 0:
(status, moreout) = run_local(['git', 'branch', '-a'], status=True,
cwd=os.path.join('/opt/git', project))
out = '\n'.join((out, moreout))
# If that worked, try resetting to HEAD to make sure it's there.
if status == 0:
(status, moreout) = run_local(['git', 'reset', '--hard', 'HEAD'],
status=True,
cwd=os.path.join('/opt/git', project))
out = '\n'.join((out, moreout))
    # Status of 0 implies all the above worked, 1 means something failed.
return (status, out)
def main():
# TODO(jeblair): use gerrit rest api when available
data = urllib2.urlopen(URL).read()
for line in data.split('\n'):
# We're regex-parsing YAML so that we don't have to depend on the
# YAML module which is not in the stdlib.
m = PROJECT_RE.match(line)
if m:
(status, out) = clone_repo(m.group(1))
print out
if status != 0:
print 'Retrying to clone %s' % m.group(1)
(status, out) = clone_repo(m.group(1))
print out
if status != 0:
raise Exception('Failed to clone %s' % m.group(1))
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import urllib2
from common import run_local
URL = ('http://git.openstack.org/cgit/openstack-infra/config/plain/'
'modules/openstack_project/files/review.projects.yaml')
PROJECT_RE = re.compile('^-?\s+project:\s+(.*)$')
def main():
# TODO(jeblair): use gerrit rest api when available
data = urllib2.urlopen(URL).read()
for line in data.split('\n'):
# We're regex-parsing YAML so that we don't have to depend on the
# YAML module which is not in the stdlib.
m = PROJECT_RE.match(line)
if m:
project = 'git://git.openstack.org/%s' % m.group(1)
print run_local(['git', 'clone', project, m.group(1)],
cwd='/opt/git')
if __name__ == '__main__':
main()
|
apache-2.0
|
Python
|
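The fix above makes the repo cache robust by probing a fresh clone (branch listing, hard reset) and retrying once before failing. A minimal standalone sketch of the same verify-and-retry pattern, using plain subprocess calls instead of the repo's run_local helper (the helper itself is not reproduced here):
import shutil
import subprocess
def clone_verified(remote, dest, retries=1):
    """Clone `remote` into `dest` and verify the result; retry on failure."""
    for attempt in range(retries + 1):
        # Clear leftovers from a previous failed attempt.
        shutil.rmtree(dest, ignore_errors=True)
        ok = subprocess.call(['git', 'clone', remote, dest]) == 0
        # A clone can "succeed" yet be unusable; probe it before trusting it.
        ok = ok and subprocess.call(['git', '-C', dest, 'branch', '-a']) == 0
        ok = ok and subprocess.call(['git', '-C', dest, 'reset', '--hard', 'HEAD']) == 0
        if ok:
            return True
    raise RuntimeError('Failed to clone %s after %d attempts' % (remote, retries + 1))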
b0133c948555c821a9dcae1df4119a2bfcc19304
|
fix building
|
DeadSix27/python_cross_compile_script
|
packages/dependencies/librubberband.py
|
packages/dependencies/librubberband.py
|
{
'repo_type' : 'git',
'url' : 'https://github.com/breakfastquay/rubberband.git',
'download_header' : [
'https://raw.githubusercontent.com/DeadSix27/python_cross_compile_script/master/additional_headers/ladspa.h',
],
'env_exports' : {
'AR': '{cross_prefix_bare}ar',
'CC': '{cross_prefix_bare}gcc',
'PREFIX': '{target_prefix}',
'RANLIB': '{cross_prefix_bare}ranlib',
'LD': '{cross_prefix_bare}ld',
'STRIP': '{cross_prefix_bare}strip',
'CXX': '{cross_prefix_bare}g++',
# 'PKG_CONFIG': 'pkg-config --static',
'SNDFILE_LIBS': '-lsndfile -lopus -lFLAC -lvorbis -lvorbisenc -logg -lspeex',
},
'configure_options' : '--host={target_host} --prefix={target_prefix}',
'build_options' : '{make_prefix_options}',
'needs_make_install' : False,
'run_post_build' : [
'cp -fv lib/* "{target_prefix}/lib"',
'cp -frv rubberband "{target_prefix}/include"',
'cp -fv rubberband.pc.in "{pkg_config_path}/rubberband.pc"',
'sed -i.bak "s|%PREFIX%|{target_prefix_sed_escaped}|" "{pkg_config_path}/rubberband.pc"',
'sed -i.bak \'s/-lrubberband *$/-lrubberband -lfftw3 -lsamplerate -lstdc++/\' "{pkg_config_path}/rubberband.pc"',
],
'depends_on' : [
'libsndfile',
],
'_info' : { 'version' : '1.8.1', 'fancy_name' : 'librubberband' },
}
|
{
'repo_type' : 'git',
'url' : 'https://github.com/breakfastquay/rubberband.git',
'download_header' : [
'https://raw.githubusercontent.com/DeadSix27/python_cross_compile_script/master/additional_headers/ladspa.h',
],
'env_exports' : {
'AR' : '{cross_prefix_bare}ar',
'CC' : '{cross_prefix_bare}gcc',
'PREFIX' : '{target_prefix}',
'RANLIB' : '{cross_prefix_bare}ranlib',
'LD' : '{cross_prefix_bare}ld',
'STRIP' : '{cross_prefix_bare}strip',
'CXX' : '{cross_prefix_bare}g++',
},
'configure_options' : '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static',
'build_options' : '{make_prefix_options}',
'needs_make_install' : False,
'run_post_build' : [
'cp -fv lib/* "{target_prefix}/lib"',
'cp -frv rubberband "{target_prefix}/include"',
'cp -fv rubberband.pc.in "{pkg_config_path}/rubberband.pc"',
'sed -i.bak "s|%PREFIX%|{target_prefix_sed_escaped}|" "{pkg_config_path}/rubberband.pc"',
'sed -i.bak \'s/-lrubberband *$/-lrubberband -lfftw3 -lsamplerate -lstdc++/\' "{pkg_config_path}/rubberband.pc"',
],
'depends_on' : [
'libsndfile',
],
'_info' : { 'version' : '1.8.1', 'fancy_name' : 'librubberband' },
}
|
mpl-2.0
|
Python
|
a96cb89524f2fa17a015011d972d396e509a1079
|
Add code for getting and releasing a database connection
|
rivese/learning_journal,EyuelAbebe/learning_journal,EyuelAbebe/learning_journal
|
journal.py
|
journal.py
|
# -*- coding: utf-8 -*-
from flask import Flask
import os
import psycopg2
from contextlib import closing
from flask import g
DB_SCHEMA = """
DROP TABLE IF EXISTS entries;
CREATE TABLE entries (
id serial PRIMARY KEY,
title VARCHAR (127) NOT NULL,
text TEXT NOT NULL,
created TIMESTAMP NOT NULL
)
"""
app = Flask(__name__)
@app.route('/')
def hello():
return u'Hello world!'
app.config['DATABASE'] = os.environ.get(
'DATABASE_URL', 'dbname=learning_journal user=elizabethrives'
)
def connect_db():
"""Return a connection to the configured database"""
return psycopg2.connect(app.config['DATABASE'])
def init_db():
"""Initialize the database using DB_SCHEMA
WARNING: executing this function will drop existing tables.
"""
with closing(connect_db()) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
def get_database_connection():
db = getattr(g, 'db', None)
if db is None:
g.db = db = connect_db()
return db
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
if exception and isinstance(exception, psycopg2.Error):
db.rollback()
else:
db.commit()
db.close()
if __name__ == '__main__':
app.run(debug=True)
|
# -*- coding: utf-8 -*-
from flask import Flask
import os
import psycopg2
from contextlib import closing
DB_SCHEMA = """
DROP TABLE IF EXISTS entries;
CREATE TABLE entries (
id serial PRIMARY KEY,
title VARCHAR (127) NOT NULL,
text TEXT NOT NULL,
created TIMESTAMP NOT NULL
)
"""
app = Flask(__name__)
@app.route('/')
def hello():
return u'Hello world!'
app.config['DATABASE'] = os.environ.get(
'DATABASE_URL', 'dbname=learning_journal user=elizabethrives'
)
def connect_db():
"""Return a connection to the configured database"""
return psycopg2.connect(app.config['DATABASE'])
def init_db():
"""Initialize the database using DB_SCHEMA
WARNING: executing this function will drop existing tables.
"""
with closing(connect_db()) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
if __name__ == '__main__':
app.run(debug=True)
|
mit
|
Python
|
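The journal.py change above caches one psycopg2 connection per request on Flask's g object and commits or rolls back in a teardown handler. An illustrative view built on the record's app and get_database_connection() helper (the /add route itself is hypothetical, not part of the record):
from flask import request
@app.route('/add', methods=['POST'])
def add_entry():
    cur = get_database_connection().cursor()
    cur.execute(
        "INSERT INTO entries (title, text, created) VALUES (%s, %s, NOW())",
        (request.form['title'], request.form['text']),
    )
    # No explicit commit here: teardown_request commits on success
    # and rolls back when the request raised a psycopg2 error.
    return 'Saved', 201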
69705079398391cdc392b18dcd440fbc3b7404fd
|
Set celery to ignore results
|
puruckertom/ubertool_ecorest,puruckertom/ubertool_ecorest,quanted/ubertool_ecorest,quanted/ubertool_ecorest,puruckertom/ubertool_ecorest,quanted/ubertool_ecorest,quanted/ubertool_ecorest,puruckertom/ubertool_ecorest
|
celery_cgi.py
|
celery_cgi.py
|
import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
'hms_flask.modules.hms_controller',
'pram_flask.tasks'
]
redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)
celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_IGNORE_RESULT=True,
CELERY_TRACK_STARTED=True,
)
|
import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
'hms_flask.modules.hms_controller',
'pram_flask.tasks'
]
redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)
celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_IGNORE_RESULT=False,
CELERY_TRACK_STARTED=True,
)
|
unlicense
|
Python
|
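CELERY_IGNORE_RESULT=True above turns result storage off globally. When only some tasks should skip the backend, the same behavior can be applied per task through the task decorator; a minimal sketch (broker URL and task bodies are illustrative):
from celery import Celery
app = Celery('example', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')
@app.task(ignore_result=True)   # this task never writes to the result backend
def fire_and_forget(x):
    print(x)
@app.task                       # this one still stores its return value
def compute(x, y):
    return x + y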
9bdc1dbc37a67d726f808e724b862e7de84fa06a
|
Change function name
|
ricaportela/convert-data-nbf,ricaportela/convert-data-nbf
|
changedate.py
|
changedate.py
|
""" Calcular Data a partir de uma quantidade de minutos """
def change_date(dataEnt, op, minutosEnt):
""" Calcular nova data """
dataEnt, horaEnt = dataEnt.split(" ", 2)
diaIni, mesIni, anoIni = dataEnt.split("/", 3)
horaIni, minuIni = horaEnt.split(":", 2)
# transformar tudo em minutos
# converter horas em minutos totais
minutosTotais = (int(horaIni) * 60) + int(minuIni) + minutosEnt
print("Total de Minutos: ", minutosTotais)
# 5415 / 60 minutos = 90.25 => separar inteiro de casas decimais 0.25 * 60 = 15
horas_minutos_conv = minutosTotais / 60
print(int(horas_minutos_conv))
# 90h e 15 min
i, d = divmod(horas_minutos_conv, 1)
resto_minutos = d * 60
print(int(resto_minutos))
# 90h / 24h = 3.75 => separar inteiro de casas decimais = 0.75 / 24
total_dias = horas_minutos_conv / 24
print(total_dias)
i, d = divmod(total_dias, 1)
xtotal_dias = i
xtotal_minutos = d
print("Total Dias", int(xtotal_dias))
# 3d 3.75 (0.75 * 24) = 18 h
minutosHora = xtotal_minutos * 24
print(int(xtotal_dias), " Dias", int(minutosHora), " horas", int(resto_minutos), " minutos")
# data_alterada = '01/01/2012 12:00' essa data sera calculada
# print(data_alterada)
if __name__ == ("__main__"):
change_date("31/12/2016 23:35", "+", 200)
|
""" Calcular Data a partir de uma quantidade de minutos """
def alterar_data(dataEnt, op, minutosEnt):
""" Calcular nova data """
dataEnt, horaEnt = dataEnt.split(" ", 2)
diaIni, mesIni, anoIni = dataEnt.split("/", 3)
horaIni, minuIni = horaEnt.split(":", 2)
# transformar tudo em minutos
# converter horas em minutos totais
minutosTotais = (int(horaIni) * 60) + int(minuIni) + minutosEnt
print("Total de Minutos: ", minutosTotais)
# 5415 / 60 minutos = 90.25 => separar inteiro de casas decimais 0.25 * 60 = 15
horas_minutos_conv = minutosTotais / 60
print(int(horas_minutos_conv))
# 90h e 15 min
i, d = divmod(horas_minutos_conv, 1)
resto_minutos = d * 60
print(int(resto_minutos))
# 90h / 24h = 3.75 => separar inteiro de casas decimais = 0.75 / 24
total_dias = horas_minutos_conv / 24
print(total_dias)
i, d = divmod(total_dias, 1)
xtotal_dias = i
xtotal_minutos = d
print("Total Dias", int(xtotal_dias))
# 3d 3.75 (0.75 * 24) = 18 h
minutosHora = xtotal_minutos * 24
print(int(xtotal_dias), " Dias", int(minutosHora), " horas", int(resto_minutos), " minutos")
# data_alterada = '01/01/2012 12:00' essa data sera calculada
# print(data_alterada)
if __name__ == ("__main__"):
alterar_data("31/12/2016 23:35", "+", 25)
|
mit
|
Python
|
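The hand-rolled minute arithmetic above never rebuilds a calendar date (note the commented-out data_alterada). For reference, the standard library covers the whole computation, including day, month, and year rollover; a minimal sketch:
from datetime import datetime, timedelta
def change_date(data_ent, op, minutos):
    """Shift a 'DD/MM/YYYY HH:MM' timestamp by `minutos` minutes."""
    dt = datetime.strptime(data_ent, '%d/%m/%Y %H:%M')
    delta = timedelta(minutes=minutos)
    dt = dt + delta if op == '+' else dt - delta
    return dt.strftime('%d/%m/%Y %H:%M')
print(change_date('31/12/2016 23:35', '+', 200))  # 01/01/2017 02:55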
a10729414971ee454276960fcc1a736c08b3aef7
|
Fix syntax error
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
corehq/tests/noseplugins/uniformresult.py
|
corehq/tests/noseplugins/uniformresult.py
|
r"""A plugin to format test names uniformly for easy comparison
Usage:
# collect django tests
COLLECT_ONLY=1 ./manage.py test -v2 --settings=settings 2> tests-django.txt
# collect nose tests
./manage.py test -v2 --collect-only 2> tests-nose.txt
# clean up django test output: s/skipped\ \'.*\'$/ok/
# sort each output file
# diff tests-django.txt tests-nose.txt
"""
from inspect import isfunction
from types import ModuleType
from nose.case import FunctionTestCase
from nose.plugins import Plugin
def uniform_description(test):
if type(test).__name__ == "DocTestCase":
return test._dt_test.name
if isinstance(test, ModuleType):
return test.__name__
if isinstance(test, type) or isfunction(test):
return "%s:%s" % (test.__module__, test.__name__)
if isinstance(test, FunctionTestCase):
descriptor = test.descriptor or test.test
return "%s:%s %s" % (
descriptor.__module__,
descriptor.__name__,
test.arg,
)
name = "%s:%s.%s" % (
test.__module__,
type(test).__name__,
test._testMethodName
)
return name
#return sys.modules[test.__module__].__file__
class UniformTestResultPlugin(Plugin):
"""Format test descriptions for easy comparison
"""
name = "uniform-results"
enabled = True
def configure(self, options, conf):
"""Do not call super (always enabled)"""
def describeTest(self, test):
return uniform_description(test.test)
|
"""A plugin to format test names uniformly for easy comparison
Usage:
# collect django tests
COLLECT_ONLY=1 ./manage.py test -v2 --settings=settings 2> tests-django.txt
# collect nose tests
./manage.py test -v2 --collect-only 2> tests-nose.txt
# clean up django test output: s/skipped\ \'.*\'$/ok/
# sort each output file
# diff tests-django.txt tests-nose.txt
"""
from inspect import isfunction
from types import ModuleType
from nose.case import FunctionTestCase
from nose.plugins import Plugin
def uniform_description(test):
if type(test).__name__ == "DocTestCase":
return test._dt_test.name
if isinstance(test, ModuleType):
return test.__name__
if isinstance(test, type) or isfunction(test):
return "%s:%s" % (test.__module__, test.__name__)
if isinstance(test, FunctionTestCase):
descriptor = test.descriptor or test.test
return "%s:%s %s" % (
descriptor.__module__,
descriptor.__name__,
test.arg,
)
name = "%s:%s.%s" % (
test.__module__,
type(test).__name__,
test._testMethodName
)
return name
#return sys.modules[test.__module__].__file__
class UniformTestResultPlugin(Plugin):
"""Format test descriptions for easy comparison
"""
name = "uniform-results"
enabled = True
def configure(self, options, conf):
"""Do not call super (always enabled)"""
def describeTest(self, test):
return uniform_description(test.test)
|
bsd-3-clause
|
Python
|
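The only change in the record above is the r prefix on the module docstring: sequences like backslash-space and backslash-quote inside a normal string literal are invalid escapes that newer CPython versions warn about, while the embedded sed command needs its backslashes verbatim. A tiny self-contained demonstration of the difference:
# A normal string needs every backslash doubled by hand;
# a raw string keeps each backslash exactly as written.
plain = 's/skipped\\ \\\'.*\\\'$/ok/'
raw = r"s/skipped\ \'.*\'$/ok/"
assert plain == raw
print(raw)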
50805c2da2889c13485096f53de27af27a06391a
|
Implement tree preorder traversal
|
arvinsim/hackerrank-solutions
|
all-domains/data-structures/trees/tree-order-traversal/solution.py
|
all-domains/data-structures/trees/tree-order-traversal/solution.py
|
# https://www.hackerrank.com/challenges/tree-preorder-traversal
# Python 2
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
def preOrder(tree):
if tree is None: return
print(tree.data),
return preOrder(tree.left) or preOrder(tree.right)
"""
class Node:
def __init__(self, left=None, right=None, data=None):
self.left = left
self.right = right
self.data = data
one = Node(data=1)
four = Node(data=4)
six = Node(data=6)
five = Node(left=one, right=four, data=5)
two = Node(left=six, data=2)
three = Node(left=five, right=two, data=3)
preOrder(three)
"""
|
# https://www.hackerrank.com/challenges/tree-preorder-traversal
# Python 2
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
def preOrder(tree):
if tree is None: return
print(tree.data)
return preOrder(tree.left) or preOrder(tree.right)
class Node:
def __init__(self, left=None, right=None, data=None):
self.left = left
self.right = right
self.data = data
one = Node(data=1)
four = Node(data=4)
six = Node(data=6)
five = Node(left=one, right=four, data=5)
two = Node(left=six, data=2)
three = Node(left=five, right=two, data=3)
preOrder(three)
|
mit
|
Python
|
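The recursive preOrder above relies on Python 2's print-with-trailing-comma plus or-chaining for the recursive calls, and can hit the recursion limit on deep trees. An equivalent iterative sketch with an explicit stack, reusing the same Node shape as the commented-out harness:
def pre_order_iterative(root):
    """Yield node values in preorder without recursion."""
    stack = [root] if root else []
    while stack:
        node = stack.pop()
        yield node.data
        # Push right first so the left subtree is visited first.
        if node.right:
            stack.append(node.right)
        if node.left:
            stack.append(node.left)
class Node(object):
    def __init__(self, left=None, right=None, data=None):
        self.left = left
        self.right = right
        self.data = data
root = Node(left=Node(data=1), right=Node(data=4), data=5)
print(list(pre_order_iterative(root)))  # [5, 1, 4]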
1097889524cf7deb4b87722d3aedd27c071117c1
|
Simplify exception logging in template render method.
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
app/soc/views/template.py
|
app/soc/views/template.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct templates
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
import logging
from django.template import loader
from soc.views.helper import context as context_helper
class Template(object):
"""Template class that facilitates the rendering of templates.
"""
def __init__(self, data):
self.data = data
def render(self):
"""Renders the template to a string.
Uses the context method to retrieve the appropriate context, uses the
self.templatePath() method to retrieve the template that should be used.
"""
try:
context = context_helper.default(self.data)
context.update(self.context())
rendered = loader.render_to_string(self.templatePath(), dictionary=context)
except Exception, e:
logging.exception(e)
raise e
return rendered
def context(self):
"""Returns the context for the current template.
"""
return {}
def templatePath(self):
"""Returns the path to the template that should be used in render().
Subclasses should override this method.
"""
raise NotImplementedError()
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct templates
"""
__authors__ = [
'"Sverre Rabbelier" <[email protected]>',
]
import logging
import traceback
from django.template import loader
from soc.views.helper import context as context_helper
class Template(object):
"""Template class that facilitates the rendering of templates.
"""
def __init__(self, data):
self.data = data
def render(self):
"""Renders the template to a string.
Uses the context method to retrieve the appropriate context, uses the
self.templatePath() method to retrieve the template that should be used.
"""
try:
context = context_helper.default(self.data)
context.update(self.context())
rendered = loader.render_to_string(self.templatePath(), dictionary=context)
except Exception, e:
logging.error(traceback.format_exc(e))
raise e
return rendered
def context(self):
"""Returns the context for the current template.
"""
return {}
def templatePath(self):
"""Returns the path to the template that should be used in render().
Subclasses should override this method.
"""
raise NotImplementedError()
|
apache-2.0
|
Python
|
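The simplification above works because logging.exception() already appends the active traceback, so the old traceback.format_exc(e) call was redundant (and passed an exception where format_exc expects an optional limit). A minimal sketch of the behavior:
import logging
logging.basicConfig(level=logging.INFO)
try:
    1 / 0
except Exception:
    # Must be called from an exception handler: logs at ERROR level
    # and appends the current traceback automatically.
    logging.exception("render failed")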
a6ed56b37bba3f5abff73c297a8a20271d73cab2
|
Add configure call to random_agent
|
openai/universe,rht/universe
|
example/random-agent/random-agent.py
|
example/random-agent/random-agent.py
|
#!/usr/bin/env python
import argparse
import logging
import sys
import gym
import universe # register the universe environments
from universe import wrappers
logger = logging.getLogger()
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
args = parser.parse_args()
if args.verbosity == 0:
logger.setLevel(logging.INFO)
elif args.verbosity >= 1:
logger.setLevel(logging.DEBUG)
env = gym.make('flashgames.NeonRace-v0')
env.configure(remotes=1) # automatically creates a local docker container
# Restrict the valid random actions. (Try removing this and see
# what happens when the agent is given full control of the
# keyboard/mouse.)
env = wrappers.SafeActionSpace(env)
observation_n = env.reset()
while True:
# your agent here
#
# Try sending this instead of a random action: ('KeyEvent', 'ArrowUp', True)
action_n = [env.action_space.sample() for ob in observation_n]
observation_n, reward_n, done_n, info = env.step(action_n)
env.render()
return 0
if __name__ == '__main__':
sys.exit(main())
|
#!/usr/bin/env python
import argparse
import logging
import sys
import gym
import universe # register the universe environments
from universe import wrappers
logger = logging.getLogger()
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
args = parser.parse_args()
if args.verbosity == 0:
logger.setLevel(logging.INFO)
elif args.verbosity >= 1:
logger.setLevel(logging.DEBUG)
env = gym.make('flashgames.NeonRace-v0')
# Restrict the valid random actions. (Try removing this and see
# what happens when the agent is given full control of the
# keyboard/mouse.)
env = wrappers.SafeActionSpace(env)
observation_n = env.reset()
while True:
# your agent here
#
# Try sending this instead of a random action: ('KeyEvent', 'ArrowUp', True)
action_n = [env.action_space.sample() for ob in observation_n]
observation_n, reward_n, done_n, info = env.step(action_n)
env.render()
return 0
if __name__ == '__main__':
sys.exit(main())
|
mit
|
Python
|
cd23780fdc39003f2affe7352bc3253f958faaa5
|
Change assertion so it works with pytest (don't know what its problem is...)
|
ZeitOnline/zeit.content.cp,ZeitOnline/zeit.content.cp
|
src/zeit/content/cp/browser/tests/test_centerpage.py
|
src/zeit/content/cp/browser/tests/test_centerpage.py
|
import mock
import zeit.cms.testing
import zeit.content.cp
import zope.testbrowser.testing
class PermissionsTest(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.cp.testing.layer
def setUp(self):
super(PermissionsTest, self).setUp()
zeit.content.cp.browser.testing.create_cp(self.browser)
self.browser.getLink('Checkin').click()
self.producing = zope.testbrowser.testing.Browser()
self.producing.addHeader('Authorization', 'Basic producer:producerpw')
def test_normal_user_may_not_delete(self):
b = self.browser
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertNotIn('island/@@delete.html', b.contents)
def test_producing_may_delete(self):
b = self.producing
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertEllipsis('...<a...island/@@delete.html...', b.contents)
def test_normal_user_may_not_retract(self):
b = self.browser
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertNotIn('island/@@retract', b.contents)
def test_producing_may_retract(self):
b = self.producing
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertEllipsis('...<a...island/@@retract...', b.contents)
|
import mock
import zeit.cms.testing
import zeit.content.cp
import zope.testbrowser.testing
class PermissionsTest(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.cp.testing.layer
def setUp(self):
super(PermissionsTest, self).setUp()
zeit.content.cp.browser.testing.create_cp(self.browser)
self.browser.getLink('Checkin').click()
self.producing = zope.testbrowser.testing.Browser()
self.producing.addHeader('Authorization', 'Basic producer:producerpw')
def test_normal_user_may_not_delete(self):
b = self.browser
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertNotEllipsis('...<a...island/@@delete.html...', b.contents)
def test_producing_may_delete(self):
b = self.producing
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertEllipsis('...<a...island/@@delete.html...', b.contents)
def test_normal_user_may_not_retract(self):
b = self.browser
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertNotEllipsis('...<a...island/@@retract...', b.contents)
def test_producing_may_retract(self):
b = self.producing
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertEllipsis('...<a...island/@@retract...', b.contents)
|
bsd-3-clause
|
Python
|
fed1475564f7ca8a496d50446e4e5924befe8628
|
Update function output type annotation
|
tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model
|
tensorflow/core/function/capture/free_vars_detect.py
|
tensorflow/core/function/capture/free_vars_detect.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An independent module to detect free vars inside a function."""
import types
from typing import List
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.util import tf_inspect
def _parse_and_analyze(func):
"""Parse and analyze Python Function code."""
node, source = parser.parse_entity(func, future_features=())
node = qual_names.resolve(node)
entity_info = transformer.EntityInfo(
name=func.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node
def detect_function_free_vars(func: types.FunctionType) -> List[str]:
"""Detect free vars in any Python function."""
assert isinstance(
func, types.FunctionType
), f"The input should be of Python function type. Got type: {type(func)}."
node = _parse_and_analyze(func)
scope = anno.getanno(node, anno.Static.SCOPE)
free_vars_all = list(scope.free_vars)
globals_dict = func.__globals__
filtered = []
for var in free_vars_all:
base = str(var.qn[0])
if base in globals_dict:
obj = globals_dict[base]
if tf_inspect.ismodule(obj):
continue
if (tf_inspect.isclass(obj) or
tf_inspect.ismethod(obj) or
tf_inspect.isfunction(obj)):
if obj.__module__ != func.__module__:
continue
# Only keep free vars without subscript for simplicity
if not var.has_subscript():
filtered.append(str(var))
else:
if not var.has_subscript():
filtered.append(str(var))
return sorted(filtered)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An independent module to detect free vars inside a function."""
import types
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.util import tf_inspect
def _parse_and_analyze(func):
"""Parse and analyze Python Function code."""
node, source = parser.parse_entity(func, future_features=())
node = qual_names.resolve(node)
entity_info = transformer.EntityInfo(
name=func.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node
def detect_function_free_vars(
func: types.FunctionType) -> tuple[list[str], list[str], list[int]]:
"""Detect free vars in any Python function."""
assert isinstance(
func, types.FunctionType
), f"The input should be of Python function type. Got type: {type(func)}."
node = _parse_and_analyze(func)
scope = anno.getanno(node, anno.Static.SCOPE)
free_vars_all = list(scope.free_vars)
globals_dict = func.__globals__
filtered = []
for var in free_vars_all:
base = str(var.qn[0])
if base in globals_dict:
obj = globals_dict[base]
if tf_inspect.ismodule(obj):
continue
if (tf_inspect.isclass(obj) or
tf_inspect.ismethod(obj) or
tf_inspect.isfunction(obj)):
if obj.__module__ != func.__module__:
continue
# Only keep free vars without subscript for simplicity
if not var.has_subscript():
filtered.append(str(var))
else:
if not var.has_subscript():
filtered.append(str(var))
return sorted(filtered)
|
apache-2.0
|
Python
|
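The record above walks the AST to find free variables so that modules, globals, and same-module callables can be filtered. For true closures, CPython already exposes the answer directly on the code object; a small sketch of that cheaper path, which the AST approach generalizes beyond (the sample function is illustrative):
def outer():
    captured = 41
    def inner(x):
        # unbound_global is only resolved when inner() is called
        return captured + x + unbound_global
    return inner
fn = outer()
print(fn.__code__.co_freevars)  # ('captured',) closure cells only
print(fn.__code__.co_names)     # ('unbound_global',) global/builtin lookups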
cbfbc2dbeeb8a03cd96ef2756185099a9be9b714
|
Update data_provider_test.py
|
tensorflow/gan,tensorflow/gan
|
tensorflow_gan/examples/esrgan/data_provider_test.py
|
tensorflow_gan/examples/esrgan/data_provider_test.py
|
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfgan.examples.esrgan.data_provider"""
import collections
from absl.testing import absltest
import tensorflow as tf
import data_provider
hparams = collections.namedtuple('hparams', ['hr_dimension',
'scale',
'batch_size',
'data_dir'])
class DataProviderTest(tf.test.TestCase, absltest.TestCase):
def setUp(self):
super(DataProviderTest, self).setUp()
self.hparams = hparams(256, 4, 32, '/content/')
self.dataset = data_provider.get_div2k_data(self.hparams)
self.mock_lr = tf.random.normal([32, 64, 64, 3])
self.mock_hr = tf.random.normal([32, 256, 256, 3])
def test_dataset(self):
self.assertIsInstance(self.dataset, tf.data.Dataset)
with self.cached_session() as sess:
lr_image, hr_image = next(iter(self.dataset))
sess.run(tf.compat.v1.global_variables_initializer())
self.assertEqual(type(self.mock_lr), type(lr_image))
self.assertEqual(self.mock_lr.shape, lr_image.shape)
self.assertEqual(type(self.mock_hr), type(hr_image))
self.assertEqual(self.mock_hr.shape, hr_image.shape)
if __name__ == '__main__':
tf.test.main()
|
# coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfgan.examples.esrgan.data_provider"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
import tensorflow as tf
import data_provider
import collections
Params = collections.namedtuple('HParams', ['hr_dimension',
'scale',
'batch_size',
'data_dir'])
class DataProviderTest(tf.test.TestCase, absltest.TestCase):
def setUp(self):
super(DataProviderTest, self).setUp()
self.HParams = Params(256, 4, 32, '/content/')
self.dataset = data_provider.get_div2k_data(self.HParams)
self.mock_lr = tf.random.normal([32, 64, 64, 3])
self.mock_hr = tf.random.normal([32, 256, 256, 3])
def test_dataset(self):
with self.cached_session() as sess:
self.assertIsInstance(self.dataset, tf.data.Dataset)
lr_image, hr_image = next(iter(self.dataset))
sess.run(tf.compat.v1.global_variables_initializer())
self.assertEqual(type(self.mock_lr), type(lr_image))
self.assertEqual(self.mock_lr.shape, lr_image.shape)
self.assertEqual(type(self.mock_hr), type(hr_image))
self.assertEqual(self.mock_hr.shape, hr_image.shape)
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
Python
|
ef0d0fa26bfd22c281c54bc348877afd0a7ee9d7
|
Use regex to match user metrics
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
|
tests/integration/blueprints/metrics/test_metrics.py
|
tests/integration/blueprints/metrics/test_metrics.py
|
"""
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import re
import pytest
# To be overridden by test parametrization
@pytest.fixture
def config_overrides():
return {}
@pytest.fixture
def client(admin_app, config_overrides, make_admin_app):
app = make_admin_app(**config_overrides)
with app.app_context():
yield app.test_client()
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': True}])
def test_metrics(client):
response = client.get('/metrics')
assert response.status_code == 200
assert response.content_type == 'text/plain; version=0.0.4; charset=utf-8'
assert response.mimetype == 'text/plain'
# Not a full match as there can be other metrics, too.
regex = re.compile(
'users_active_count \\d+\n'
'users_uninitialized_count \\d+\n'
'users_suspended_count \\d+\n'
'users_deleted_count \\d+\n'
'users_total_count \\d+\n'
)
assert regex.search(response.get_data(as_text=True)) is not None
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': False}])
def test_disabled_metrics(client):
response = client.get('/metrics')
assert response.status_code == 404
|
"""
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
# To be overridden by test parametrization
@pytest.fixture
def config_overrides():
return {}
@pytest.fixture
def client(admin_app, config_overrides, make_admin_app):
app = make_admin_app(**config_overrides)
with app.app_context():
yield app.test_client()
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': True}])
def test_metrics(client):
response = client.get('/metrics')
assert response.status_code == 200
assert response.content_type == 'text/plain; version=0.0.4; charset=utf-8'
assert response.mimetype == 'text/plain'
assert response.get_data(as_text=True) == (
'users_active_count 0\n'
'users_uninitialized_count 0\n'
'users_suspended_count 0\n'
'users_deleted_count 0\n'
'users_total_count 0\n'
)
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': False}])
def test_disabled_metrics(client):
response = client.get('/metrics')
assert response.status_code == 404
|
bsd-3-clause
|
Python
|
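The test above switches from an exact body comparison to regex.search, so extra metrics and nonzero counts no longer break it. A standalone sketch of the same matching logic against a fake scrape body (the metric names come from the record; the values are made up):
import re
body = (
    'some_other_metric 7\n'
    'users_active_count 3\n'
    'users_uninitialized_count 0\n'
    'users_suspended_count 1\n'
    'users_deleted_count 0\n'
    'users_total_count 4\n'
)
# Not a full match: other metrics may precede or follow the block.
regex = re.compile(
    'users_active_count \\d+\n'
    'users_uninitialized_count \\d+\n'
    'users_suspended_count \\d+\n'
    'users_deleted_count \\d+\n'
    'users_total_count \\d+\n'
)
assert regex.search(body) is not None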
cc1b63e76a88fd589bfe3fce2f6cbe5becf995bc
|
use no_backprop_mode
|
yuyu2172/chainercv,pfnet/chainercv,chainer/chainercv,yuyu2172/chainercv,chainer/chainercv
|
tests/links_tests/model_tests/vgg_tests/test_vgg16.py
|
tests/links_tests/model_tests/vgg_tests/test_vgg16.py
|
import unittest
import numpy as np
import chainer
from chainer.initializers import Zero
from chainer import testing
from chainer.testing import attr
from chainer import Variable
from chainercv.links import VGG16
@testing.parameterize(
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'pool5', 'shapes': (1, 512, 7, 7), 'n_class': None},
{'pick': ['conv5_3', 'conv4_2'],
'shapes': ((1, 512, 14, 14), (1, 512, 28, 28)), 'n_class': None},
)
class TestVGG16Call(unittest.TestCase):
def setUp(self):
self.link = VGG16(
n_class=self.n_class, pretrained_model=None,
initialW=Zero())
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
with chainer.no_backprop_mode():
features = self.link(x1)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
|
import unittest
import numpy as np
from chainer.initializers import Zero
from chainer import testing
from chainer.testing import attr
from chainer import Variable
from chainercv.links import VGG16
@testing.parameterize(
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'pool5', 'shapes': (1, 512, 7, 7), 'n_class': None},
{'pick': ['conv5_3', 'conv4_2'],
'shapes': ((1, 512, 14, 14), (1, 512, 28, 28)), 'n_class': None},
)
class TestVGG16Call(unittest.TestCase):
def setUp(self):
self.link = VGG16(
n_class=self.n_class, pretrained_model=None,
initialW=Zero())
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
features = self.link(x1)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
|
mit
|
Python
|
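Wrapping the forward pass in chainer.no_backprop_mode() above skips construction of the backward graph, cutting memory in inference-style tests. A minimal standalone sketch of the context manager's effect (shapes are arbitrary):
import chainer
import chainer.functions as F
import numpy as np
x = chainer.Variable(np.random.rand(2, 3).astype(np.float32))
y = F.sum(x * 2)          # graph is recorded; y.backward() would work
with chainer.no_backprop_mode():
    z = F.sum(x * 2)      # no graph is recorded: z has no creator
print(y.creator is not None, z.creator is None)  # True True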
b635d3bb0a0de01539d66dda4555b306c59082ee
|
fix version number
|
MacHu-GWU/constant2-project
|
constant2/__init__.py
|
constant2/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from ._constant2 import Constant
except: # pragma: no cover
pass
__version__ = "0.0.10"
__short_description__ = "provide extensive way of managing your constant variable."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "[email protected]"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "[email protected]"
__github_username__ = "MacHu-GWU"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from ._constant2 import Constant
except: # pragma: no cover
pass
__version__ = "0.0.9"
__short_description__ = "provide extensive way of managing your constant variable."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "[email protected]"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "[email protected]"
__github_username__ = "MacHu-GWU"
|
mit
|
Python
|
770c4fd0b282ee355d2ea3e662786113dd6b4e74
|
add 1.4.2 (#26472)
|
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/py-nipype/package.py
|
var/spack/repos/builtin/packages/py-nipype/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNipype(PythonPackage):
"""Neuroimaging in Python: Pipelines and Interfaces."""
homepage = "https://nipy.org/nipype"
pypi = "nipype/nipype-1.6.0.tar.gz"
version('1.6.1', sha256='8428cfc633d8e3b8c5650e241e9eedcf637b7969bcd40f3423334d4c6b0992b5')
version('1.6.0', sha256='bc56ce63f74c9a9a23c6edeaf77631377e8ad2bea928c898cc89527a47f101cf')
version('1.4.2', sha256='069dcbb0217f13af6ee5a7f1e58424b9061290a3e10d7027d73bf44e26f820db')
depends_on('[email protected]:', when='@1.5:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-networkx@2:', when='@1.6:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='^[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.5: ^python@:3.6', type=('build', 'run'))
depends_on('[email protected]:', when='^python@:3.6', type=('build', 'run'))
depends_on('py-packaging', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pydotplus', when='@:1.5', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-rdflib@5:', when='@1.5:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:4,5.1:', type=('build', 'run'))
depends_on('py-filelock@3:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.5:', type=('build', 'run'))
depends_on('py-etelemetry', type=('build', 'run'))
depends_on('py-sphinxcontrib-napoleon', type='test')
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNipype(PythonPackage):
"""Neuroimaging in Python: Pipelines and Interfaces."""
homepage = "https://nipy.org/nipype"
pypi = "nipype/nipype-1.6.0.tar.gz"
version('1.6.1', sha256='8428cfc633d8e3b8c5650e241e9eedcf637b7969bcd40f3423334d4c6b0992b5')
version('1.6.0', sha256='bc56ce63f74c9a9a23c6edeaf77631377e8ad2bea928c898cc89527a47f101cf')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='^python@:3.6')
depends_on('[email protected]:', type=('build', 'run'), when='^[email protected]:')
depends_on('py-packaging', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:4,5.1:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
|
lgpl-2.1
|
Python
|
63d4d37c9194aacd783e911452a34ca78a477041
|
add latest version 1.2.0 (#23528)
|
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/py-vermin/package.py
|
var/spack/repos/builtin/packages/py-vermin/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyVermin(PythonPackage):
"""Concurrently detect the minimum Python versions needed to run code."""
homepage = "https://github.com/netromdk/vermin"
url = "https://github.com/netromdk/vermin/archive/v1.2.0.tar.gz"
maintainers = ['netromdk']
version('1.2.0', sha256='a3ab6dc6608b859f301b9a77d5cc0d03335aae10c49d47a91b82be5be48c4f1f')
version('1.1.1', sha256='d13b2281ba16c9d5b0913646483771789552230a9ed625e2cd92c5a112e4ae80')
version('1.1.0', sha256='62d9f1b6694f50c22343cead2ddb6e2b007d24243fb583f61ceed7540fbe660b')
version('1.0.3', sha256='1503be05b55cacde1278a1fe55304d8ee889ddef8ba16e120ac6686259bec95c')
version('1.0.2', sha256='e999d5f5455e1116b366cd1dcc6fecd254c7ae3606549a61bc044216f9bb5b55')
version('1.0.1', sha256='c06183ba653b9d5f6687a6686da8565fb127fab035f9127a5acb172b7c445079')
version('1.0.0', sha256='e598e9afcbe3fa6f3f3aa894da81ccb3954ec9c0783865ecead891ac6aa57207')
version('0.10.5', sha256='00601356e8e10688c52248ce0acc55d5b45417b462d5aa6887a6b073f0d33e0b')
version('0.10.4', sha256='bd765b84679fb3756b26f462d2aab4af3183fb65862520afc1517f6b39dea8bf')
version('0.10.0', sha256='3458a4d084bba5c95fd7208888aaf0e324a07ee092786ee4e5529f539ab4951f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
@run_after('build')
@on_package_attributes(run_tests=True)
def build_test(self):
make('test')
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyVermin(PythonPackage):
"""Concurrently detect the minimum Python versions needed to run code."""
homepage = "https://github.com/netromdk/vermin"
url = "https://github.com/netromdk/vermin/archive/v1.1.1.tar.gz"
maintainers = ['netromdk']
version('1.1.1', sha256='d13b2281ba16c9d5b0913646483771789552230a9ed625e2cd92c5a112e4ae80')
version('1.1.0', sha256='62d9f1b6694f50c22343cead2ddb6e2b007d24243fb583f61ceed7540fbe660b')
version('1.0.3', sha256='1503be05b55cacde1278a1fe55304d8ee889ddef8ba16e120ac6686259bec95c')
version('1.0.2', sha256='e999d5f5455e1116b366cd1dcc6fecd254c7ae3606549a61bc044216f9bb5b55')
version('1.0.1', sha256='c06183ba653b9d5f6687a6686da8565fb127fab035f9127a5acb172b7c445079')
version('1.0.0', sha256='e598e9afcbe3fa6f3f3aa894da81ccb3954ec9c0783865ecead891ac6aa57207')
version('0.10.5', sha256='00601356e8e10688c52248ce0acc55d5b45417b462d5aa6887a6b073f0d33e0b')
version('0.10.4', sha256='bd765b84679fb3756b26f462d2aab4af3183fb65862520afc1517f6b39dea8bf')
version('0.10.0', sha256='3458a4d084bba5c95fd7208888aaf0e324a07ee092786ee4e5529f539ab4951f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
@run_after('build')
@on_package_attributes(run_tests=True)
def build_test(self):
make('test')
|
lgpl-2.1
|
Python
|
c0627c6d8d11a9b9597b8fecd10b562d46a71521
|
Send fio results to fio.sc.couchbase.com
|
couchbase/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner
|
perfrunner/tests/fio.py
|
perfrunner/tests/fio.py
|
from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
TRACKER = 'fio.sc.couchbase.com'
TEMPLATE = {
'group': '{}, random mixed reads and writes, IOPS',
'metric': None,
'value': None,
}
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, test_config, verbose)
@staticmethod
def _parse(results):
"""Terse output parsing is based on the following guide:
https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
def _post(self, data):
data = pretty_dict(data)
logger.info('Posting: {}'.format(data))
requests.post('http://{}/api/v1/benchmarks'.format(self.TRACKER),
data=data)
def _report_kpi(self, stats):
for host, iops in stats.items():
data = self.TEMPLATE.copy()
data['group'] = data['group'].format(self.cluster_spec.name.title())
data['metric'] = host
data['value'] = iops
self._post(data)
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
self._report_kpi(self._parse(stats))
|
from collections import defaultdict
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, test_config, verbose)
@staticmethod
def _parse(results):
"""Terse output parsing is based on the following guide:
https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
logger.info('IOPS: {}'.format(pretty_dict(self._parse(stats))))
|
apache-2.0
|
Python
|
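The parser above indexes fio's semicolon-separated terse format (zero-indexed fields 7 and 48 are read and write IOPS, per the HOWTO cited in the docstring). A standalone sketch of the aggregation against a synthetic terse line (field layout matches the record's indices; the numbers are made up):
from collections import defaultdict
def parse_terse(results):
    """Sum read+write IOPS per host from fio terse output lines."""
    stats = defaultdict(int)
    for host, output in results.items():
        for job in output.split():
            fields = job.split(';')
            stats[host] += int(fields[7])   # read IOPS
            stats[host] += int(fields[48])  # write IOPS
    return dict(stats)
fake = ['0'] * 60
fake[7], fake[48] = '1200', '800'
print(parse_terse({'host1': ';'.join(fake)}))  # {'host1': 2000}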
69038348a0e029d2b06c2753a0dec9b2552ed820
|
Add license header to __init__.py
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
openquake/__init__.py
|
openquake/__init__.py
|
"""
OpenGEM is an open-source platform for the calculation of hazard, risk,
and socio-economic impact. It is a project of the Global Earthquake Model,
and may be extended by other organizations to address additional classes
of peril.
For more information, please see the website at http://www.globalquakemodel.org
This software may be downloaded at http://github.com/gem/openquake
The continuous integration server is at http://openquake.globalquakemodel.org
Up-to-date sphinx documentation is at http://openquake.globalquakemodel.org/docs
This software is licensed under the LGPL license, for more details
please see the LICENSE file.
Copyright (c) 2010, GEM Foundation.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
|
"""
OpenGEM is an open-source platform for the calculation of hazard, risk,
and socio-economic impact. It is a project of the Global Earthquake Model,
and may be extended by other organizations to address additional classes
of peril.
For more information, please see the website at http://www.globalquakemodel.org
This software may be downloaded at http://github.com/gem/openquake
The continuous integration server is at http://openquake.globalquakemodel.org
Up-to-date sphinx documentation is at http://openquake.globalquakemodel.org/docs
This software is licensed under the LGPL license, for more details
please see the LICENSE file.
Copyright (c) 2010, GEM Foundation.
"""
|
agpl-3.0
|
Python
|