| commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang |
|---|---|---|---|---|---|---|---|---|
| stringlengths 40–40 | stringlengths 4–1.73k | stringlengths 5–127k | stringlengths 2–751 | stringlengths 2–751 | stringlengths 1–8.98k | stringlengths 0–6.59k | stringclasses 13 values | stringclasses 23 values |
cd7b72e67a3af4184ccaf3e3dce231c227392f45
|
Update Keras.py
|
paperrune/Neural-Networks,paperrune/Neural-Networks
|
History/Nesterov-Accelerated-Gradient/Keras.py
|
History/Nesterov-Accelerated-Gradient/Keras.py
|
import keras
from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
batch_size = 128
epochs = 30
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512,
activation='relu',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None),
bias_initializer='zeros'))
model.add(Dense(512,
activation='relu',
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None),
bias_initializer='zeros'))
model.add(Dense(num_classes,
activation='softmax',
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None),
bias_initializer='zeros'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
shuffle=False,
validation_data=(x_test, y_test))
|
import keras
from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
batch_size = 128
epochs = 30
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512,
activation='relu',
input_shape=(784,),
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None),
bias_initializer='zeros'))
model.add(Dense(512,
activation='relu',
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None),
bias_initializer='zeros'))
model.add(Dense(num_classes,
activation='softmax',
kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None),
bias_initializer='zeros'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=0.1, momentum = 0.9, nesterov=True),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
shuffle=False,
validation_data=(x_test, y_test))
|
mit
|
Python
|
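The only change in this row is whitespace normalization in the optimizer call (`momentum = 0.9` becomes `momentum=0.9`); the script itself trains MNIST with Nesterov-accelerated SGD. A minimal NumPy sketch of the standard Nesterov update, for reference (Keras' internal formulation may differ slightly):

```python
import numpy as np

def nag_step(theta, velocity, grad_fn, lr=0.1, momentum=0.9):
    """One Nesterov step: evaluate the gradient at the look-ahead point."""
    lookahead = theta + momentum * velocity        # peek ahead along the velocity
    velocity = momentum * velocity - lr * grad_fn(lookahead)
    return theta + velocity, velocity

# toy quadratic f(x) = x**2 with gradient 2x
theta, v = np.array([5.0]), np.zeros(1)
for _ in range(50):
    theta, v = nag_step(theta, v, lambda x: 2 * x)
print(theta)  # oscillates toward 0
```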
b8c18068c2cc2afe169c750f25318c6ba92e2763
|
use Spack compilers and remove x86_64 opts from Makefile (#13877)
|
iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack
|
var/spack/repos/builtin/packages/prank/package.py
|
var/spack/repos/builtin/packages/prank/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prank(Package):
"""A powerful multiple sequence alignment browser."""
homepage = "http://wasabiapp.org/software/prank/"
url = "http://wasabiapp.org/download/prank/prank.source.170427.tgz"
version('170427', sha256='623eb5e9b5cb0be1f49c3bf715e5fabceb1059b21168437264bdcd5c587a8859')
depends_on('mafft')
depends_on('exonerate')
depends_on('bpp-suite') # for bppancestor
conflicts('%[email protected]', when='@:150803')
def install(self, spec, prefix):
with working_dir('src'):
filter_file('gcc', '{0}'.format(spack_cc),
'Makefile', string=True)
filter_file('g++', '{0}'.format(spack_cxx),
'Makefile', string=True)
if not spec.target.family == 'x86_64':
filter_file('-m64', '', 'Makefile', string=True)
filter_file('-pipe', '', 'Makefile', string=True)
make()
mkdirp(prefix.bin)
install('prank', prefix.bin)
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prank(Package):
"""A powerful multiple sequence alignment browser."""
homepage = "http://wasabiapp.org/software/prank/"
url = "http://wasabiapp.org/download/prank/prank.source.170427.tgz"
version('170427', sha256='623eb5e9b5cb0be1f49c3bf715e5fabceb1059b21168437264bdcd5c587a8859')
depends_on('mafft')
depends_on('exonerate')
depends_on('bpp-suite') # for bppancestor
conflicts('%[email protected]', when='@:150803')
def install(self, spec, prefix):
with working_dir('src'):
make()
mkdirp(prefix.bin)
install('prank', prefix.bin)
|
lgpl-2.1
|
Python
|
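The updated recipe rewrites the Makefile so the build uses Spack's compiler wrappers (`spack_cc`/`spack_cxx`) instead of hard-coded `gcc`/`g++`, and strips the `-m64` and `-pipe` flags on non-x86_64 targets. A rough stand-in for what `filter_file(..., string=True)` does here (Spack's real helper also supports regex mode and backups; the wrapper paths below are illustrative):

```python
def filter_file_literal(old, new, path):
    """Literal in-place substitution, roughly like Spack's filter_file."""
    with open(path) as fh:
        text = fh.read()
    with open(path, "w") as fh:
        fh.write(text.replace(old, new))

# e.g. point the build at the wrapped compilers:
# filter_file_literal("gcc", "/opt/spack/lib/spack/env/cc", "Makefile")
# filter_file_literal("g++", "/opt/spack/lib/spack/env/c++", "Makefile")
```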
26933550f7a3c195669c61539151c5fedf26aaad
|
add version 1.0.0 to r-hms (#21045)
|
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/r-hms/package.py
|
var/spack/repos/builtin/packages/r-hms/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RHms(RPackage):
"""Pretty Time of Day
Implements an S3 class for storing and formatting time-of-day values, based
on the 'difftime' class."""
homepage = "https://cloud.r-project.org/package=hms"
url = "https://cloud.r-project.org/src/contrib/hms_0.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/hms"
version('1.0.0', sha256='9704e903d724f0911d46e5ad18b469a7ed419c5b1f388bd064fd663cefa6c962')
version('0.5.0', sha256='a87872665c3bf3901f597d78c152e7805f7129e4dbe27397051de4cf1a76561b')
version('0.3', sha256='9368259cbc1094ce0e4cf61544875ec30088ef690d6667e6b0b564218ab3ff88')
depends_on('r-ellipsis', when='@1.0.0:', type=('build', 'run'))
depends_on('r-lifecycle', when='@1.0.0:', type=('build', 'run'))
depends_on('r-pkgconfig', when='@0.5.0:', type=('build', 'run'))
depends_on('r-rlang', when='@0.5.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.5.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RHms(RPackage):
"""Implements an S3 class for storing and formatting time-of-day values,
based on the 'difftime' class."""
homepage = "https://cloud.r-project.org/package=hms"
url = "https://cloud.r-project.org/src/contrib/hms_0.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/hms"
version('0.5.0', sha256='a87872665c3bf3901f597d78c152e7805f7129e4dbe27397051de4cf1a76561b')
version('0.3', sha256='9368259cbc1094ce0e4cf61544875ec30088ef690d6667e6b0b564218ab3ff88')
depends_on('r-pkgconfig', when='@0.5.0:', type=('build', 'run'))
depends_on('r-rlang', when='@0.5.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.5.0:', type=('build', 'run'))
|
lgpl-2.1
|
Python
|
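Adding hms 1.0.0 pulls in two new dependencies (`r-ellipsis`, `r-lifecycle`) gated with `when='@1.0.0:'`, Spack's open-ended version range, and adds a tighter `[email protected]:` constraint for the new release. A toy illustration of that open-ended range check (Spack's spec logic is far richer than this):

```python
def in_range(version, minimum):
    """True if a dotted version string is at or above the minimum."""
    def as_tuple(v):
        return tuple(int(part) for part in v.split("."))
    return as_tuple(version) >= as_tuple(minimum)

print(in_range("1.0.0", "1.0.0"))  # True  -> r-ellipsis/r-lifecycle required
print(in_range("0.5.0", "1.0.0"))  # False -> constraint does not apply
```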
91cb70a94cd41bb6404fb6f21361bb8a7f01c9d5
|
Rework thread model
|
job/irrexplorer,job/irrexplorer,job/irrexplorer,job/irrexplorer
|
irrexplorer.py
|
irrexplorer.py
|
#!/usr/bin/env python
# Copyright (c) 2015, Job Snijders
#
# This file is part of IRR Explorer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from irrexplorer import config
from irrexplorer import nrtm
from threading import Thread
from radix import Radix
from Queue import Queue
databases = config('irrexplorer_config.yml').databases
nrtm_queue = Queue()
def connect_nrtm(config, nrtm_queue):
feed = nrtm.client(**config)
for cmd, serial, obj in feed.get():
if not obj:
continue
# print cmd, obj
nrtm_queue.put((cmd, serial, obj, config['dbname']))
def radix_maintainer(nrtm_queue):
import time
time.sleep(15)
while True:
update = nrtm_queue.get()
print update
nrtm_queue.task_done()
for dbase in databases:
name = dbase.keys().pop()
client_config = dict(d.items()[0] for d in dbase[name])
print client_config
worker = Thread(target=connect_nrtm, args=(client_config, nrtm_queue))
worker.setDaemon(True)
worker.start()
worker = Thread(target=radix_maintainer, args=(nrtm_queue,))
worker.setDaemon(True)
worker.start()
nrtm_queue.join()
"""
from irrexplorer.nrtm import client
a = client(nrtmhost='whois.radb.net',
nrtmport=43,
serial='ftp://ftp.radb.net/radb/dbase/RADB.CURRENTSERIAL',
dump='ftp://ftp.radb.net/radb/dbase/radb.db.gz',
dbase="RADB")
while True:
for i in a.get():
print i
"""
|
#!/usr/bin/env python
# Copyright (c) 2015, Job Snijders
#
# This file is part of IRR Explorer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from irrexplorer import config
from irrexplorer import nrtm
from threading import Thread
from radix import Radix
from Queue import Queue
databases = config('irrexplorer_config.yml').databases
def connect_nrtm(config):
feed = nrtm.client(**config)
for cmd, serial, obj in feed.get():
if not obj:
continue
print obj
print cmd, serial, len(obj), config['dbname']
def radix_maintainer():
for dbase in databases:
name = dbase.keys().pop()
client_config = dict(d.items()[0] for d in dbase[name])
print client_config
worker = Thread(target=connect_nrtm, args=(client_config,))
worker.setDaemon(True)
worker.start()
"""
from irrexplorer.nrtm import client
a = client(nrtmhost='whois.radb.net',
nrtmport=43,
serial='ftp://ftp.radb.net/radb/dbase/RADB.CURRENTSERIAL',
dump='ftp://ftp.radb.net/radb/dbase/radb.db.gz',
dbase="RADB")
while True:
for i in a.get():
print i
"""
|
bsd-2-clause
|
Python
|
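The rework stops printing inside each NRTM client thread and instead pushes `(cmd, serial, obj, dbname)` tuples onto a shared queue drained by a single `radix_maintainer` thread. A Python 3 sketch of that producer/consumer shape (the source is Python 2, hence `Queue` and bare `print` statements; the feed names below are made up):

```python
import queue
import threading

updates = queue.Queue()

def feed(dbname):                          # stand-in for one NRTM client
    for serial in range(3):
        updates.put(("ADD", serial, dbname))

def maintainer():                          # single consumer, as in the rework
    while True:
        cmd, serial, dbname = updates.get()
        print(cmd, serial, dbname)         # apply the update to the radix tree here
        updates.task_done()

threading.Thread(target=maintainer, daemon=True).start()
feeds = [threading.Thread(target=feed, args=(name,), daemon=True)
         for name in ("RADB", "RIPE")]
for t in feeds:
    t.start()
for t in feeds:
    t.join()                               # all updates queued...
updates.join()                             # ...and all of them consumed
```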
03f33b099ec9adc480f599338b61214e870fedf6
|
Update iypm_domain export name to use a valid format
|
MinnSoe/ifyoupayme,MinnSoe/ifyoupayme,MinnSoe/ifyoupayme
|
iypm_domain.py
|
iypm_domain.py
|
import sys
try:
from troposphere import Join, Sub, Output, Export
from troposphere import Parameter, Ref, Template
from troposphere.route53 import HostedZone
from troposphere.certificatemanager import Certificate
except ImportError:
sys.exit('Unable to import troposphere. '
'Try "pip install troposphere[policy]".')
t = Template()
t.add_description(
'Template for creating a DNS Zone and SSL Certificate. '
'Note: Stack creation will block until domain ownership is verified.')
zone_name = t.add_parameter(Parameter(
'ZoneName',
Description='The name of the DNS Zone to create (example.com).',
Type='String'
))
hosted_zone = t.add_resource(HostedZone('DNSZone', Name=Ref(zone_name)))
acm_certificate = t.add_resource(Certificate(
'Certificate',
DomainName=Ref(zone_name),
SubjectAlternativeNames=[Sub('*.${ZoneName}')]
))
t.add_output([
Output(
'ZoneId',
Description='Route53 Zone ID',
Value=Ref(hosted_zone),
Export=Export(Sub('${AWS::StackName}-R53Zone'))
),
Output(
'CertificateId',
Description='ACM Certificate ARN',
Value=Ref(acm_certificate),
Export=Export(Sub('${AWS::StackName}-CertARN'))
)
])
print(t.to_json())
|
import sys
try:
from troposphere import Join, Sub, Output, Export
from troposphere import Parameter, Ref, Template
from troposphere.route53 import HostedZone
from troposphere.certificatemanager import Certificate
except ImportError:
sys.exit('Unable to import troposphere. '
'Try "pip install troposphere[policy]".')
t = Template()
t.add_description(
'Template for creating a DNS Zone and SSL Certificate. '
'Note: Stack creation will block until domain ownership is verified.')
zone_name = t.add_parameter(Parameter(
'ZoneName',
Description='The name of the DNS Zone to create (example.com).',
Type='String'
))
hosted_zone = t.add_resource(HostedZone('DNSZone', Name=Ref(zone_name)))
acm_certificate = t.add_resource(Certificate(
'Certificate',
DomainName=Ref(zone_name),
SubjectAlternativeNames=[Sub('*.${ZoneName}')]
))
t.add_output([
Output(
'ZoneId',
Description='Route53 Zone ID',
Value=Ref(hosted_zone),
Export=Export(Sub('${AWS::StackName}-${ZoneName}-R53Zone'))
),
Output(
'CertificateId',
Description='ACM Certificate ARN',
Value=Ref(acm_certificate),
Export=Export(Sub('${AWS::StackName}-${ZoneName}-CertARN'))
)
])
print(t.to_json())
|
mpl-2.0
|
Python
|
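CloudFormation export names may contain only alphanumeric characters, colons, and hyphens (my reading of the constraint the commit message refers to), so interpolating `${ZoneName}`, a domain name containing dots, produced invalid export names; the fix keeps only `${AWS::StackName}` in the export. A hedged validity check:

```python
import re

# Assumed character set for CloudFormation export names.
VALID_EXPORT = re.compile(r"^[A-Za-z0-9:-]+$")

print(bool(VALID_EXPORT.match("mystack-R53Zone")))              # True
print(bool(VALID_EXPORT.match("mystack-example.com-R53Zone")))  # False: dots
```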
0665beccbca954df9a477119bb976441c29dd5eb
|
fix test
|
sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana
|
test/sphinxext/test_sphinxext.py
|
test/sphinxext/test_sphinxext.py
|
import tempfile
import os
from sequana.sphinxext import snakemakerule
from sphinx.application import Sphinx
def test_doc():
res = snakemakerule.get_rule_doc("dag")
res = snakemakerule.get_rule_doc("fastqc_dynamic")
try:
res = snakemakerule.get_rule_doc("dummy")
assert False
except FileNotFoundError:
assert True
except:
assert False
with tempfile.TemporaryDirectory() as tmpdir:
# Create the conf and index in tmpdir
with open(tmpdir+os.sep+"index.rst", "w") as fh:
fh.write(".. snakemakerule:: dag\n")
with open(tmpdir+os.sep+"conf.py", "w") as fh:
print(fh.name)
fh.write("""
import sys, os
import sphinx
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
"sequana.sphinxext.snakemakerule"
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = "sequana"
copyright = "2016"
version = '1.0'
release = "1.0"
exclude_patterns = []
add_module_names = False
pygments_style = 'sphinx'
intersphinx_mapping = {}
""")
# srcdir, confdir, outdir, doctreedir, buildername
app = Sphinx(tmpdir, tmpdir, tmpdir, tmpdir, "html")
app.build()
|
import tempfile
import os
from sequana.sphinxext import snakemakerule
from sphinx.application import Sphinx
def test_doc():
res = snakemakerule.get_rule_doc("dag")
res = snakemakerule.get_rule_doc("fastqc")
try:
res = snakemakerule.get_rule_doc("dummy")
assert False
except FileNotFoundError:
assert True
except:
assert False
with tempfile.TemporaryDirectory() as tmpdir:
# Create the conf and index in tmpdir
with open(tmpdir+os.sep+"index.rst", "w") as fh:
fh.write(".. snakemakerule:: dag\n")
with open(tmpdir+os.sep+"conf.py", "w") as fh:
print(fh.name)
fh.write("""
import sys, os
import sphinx
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
"sequana.sphinxext.snakemakerule"
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = "sequana"
copyright = "2016"
version = '1.0'
release = "1.0"
exclude_patterns = []
add_module_names = False
pygments_style = 'sphinx'
intersphinx_mapping = {}
""")
# srcdir, confdir, outdir, doctreedir, buildername
app = Sphinx(tmpdir, tmpdir, tmpdir, tmpdir, "html")
app.build()
|
bsd-3-clause
|
Python
|
30e1f6ca2224cba216c2e08f2600ae55ba43cebb
|
update comment
|
amplifylitco/asiaq,amplifylitco/asiaq,amplifylitco/asiaq
|
test/unit/test_disco_aws_util.py
|
test/unit/test_disco_aws_util.py
|
"""
Tests of disco_aws_util
"""
from unittest import TestCase
from disco_aws_automation import disco_aws_util
class DiscoAWSUtilTests(TestCase):
'''Test disco_aws_util.py'''
def test_size_as_rec_map_with_none(self):
"""size_as_recurrence_map works with None"""
self.assertEqual(disco_aws_util.size_as_recurrence_map(None), {"": None})
self.assertEqual(disco_aws_util.size_as_recurrence_map(''), {"": None})
def test_size_as_rec_map_with_int(self):
"""size_as_recurrence_map works with simple integer"""
self.assertEqual(disco_aws_util.size_as_recurrence_map(5, sentinel="0 0 * * *"),
{"0 0 * * *": 5})
def test_size_as_rec_map_with_map(self):
"""size_as_recurrence_map works with a map"""
map_as_string = "2@1 0 * * *:3@6 0 * * *"
map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3}
self.assertEqual(disco_aws_util.size_as_recurrence_map(map_as_string), map_as_dict)
def test_size_as_rec_map_with_duped_map(self):
"""size_as_recurrence_map works with a duped map"""
map_as_string = "2@1 0 * * *:3@6 0 * * *:3@6 0 * * *"
map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3}
self.assertEqual(disco_aws_util.size_as_recurrence_map(map_as_string), map_as_dict)
|
"""
Tests of disco_aws_util
"""
from unittest import TestCase
from disco_aws_automation import disco_aws_util
class DiscoAWSUtilTests(TestCase):
'''Test disco_aws_util.py'''
def test_size_as_rec_map_with_none(self):
"""_size_as_recurrence_map works with None"""
self.assertEqual(disco_aws_util.size_as_recurrence_map(None), {"": None})
self.assertEqual(disco_aws_util.size_as_recurrence_map(''), {"": None})
def test_size_as_rec_map_with_int(self):
"""_size_as_recurrence_map works with simple integer"""
self.assertEqual(disco_aws_util.size_as_recurrence_map(5, sentinel="0 0 * * *"),
{"0 0 * * *": 5})
def test_size_as_rec_map_with_map(self):
"""_size_as_recurrence_map works with a map"""
map_as_string = "2@1 0 * * *:3@6 0 * * *"
map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3}
self.assertEqual(disco_aws_util.size_as_recurrence_map(map_as_string), map_as_dict)
def test_size_as_rec_map_with_duped_map(self):
"""_size_as_recurrence_map works with a duped map"""
map_as_string = "2@1 0 * * *:3@6 0 * * *:3@6 0 * * *"
map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3}
self.assertEqual(disco_aws_util.size_as_recurrence_map(map_as_string), map_as_dict)
|
bsd-2-clause
|
Python
|
0df416d66ee6c28512295de297f44597b45acf7a
|
Bump version for release
|
pfmoore/pip,sbidoul/pip,pypa/pip,sbidoul/pip,xavfernandez/pip,rouge8/pip,pradyunsg/pip,pfmoore/pip,rouge8/pip,rouge8/pip,pypa/pip,pradyunsg/pip,xavfernandez/pip,xavfernandez/pip
|
src/pip/__init__.py
|
src/pip/__init__.py
|
__version__ = "19.2"
|
__version__ = "19.2.dev0"
|
mit
|
Python
|
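A one-line release commit: PEP 440 orders a `.devN` tag before the corresponding final version, so dropping the suffix is exactly the "bump for release" switch. Illustrated with the `packaging` library (assumed installed here; pip itself vendors its own copy):

```python
from packaging.version import Version

# Dev releases sort before the final release they lead up to.
assert Version("19.2.dev0") < Version("19.2")
print(Version("19.2.dev0").is_devrelease, Version("19.2").is_devrelease)
# True False
```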
e2ce9caa84d0932b72894f17dc2c4884cc285bb0
|
update test case for jaccard
|
thongdong7/subfind,thongdong7/subfind,thongdong7/subfind,thongdong7/subfind
|
tests/TestReleaseScoringAlice.py
|
tests/TestReleaseScoringAlice.py
|
from pprint import pprint
from subfind.release.alice import ReleaseScoringAlice
__author__ = 'hiepsimu'
import logging
import unittest
logging.basicConfig(level=logging.DEBUG)
class ReleaseScoringAliceTestCase(unittest.TestCase):
def test_01(self):
"""
Release which match the movie title should be the higher priority
:return:
:rtype:
"""
scoring = ReleaseScoringAlice()
input_release_name = 'Survivor.2014.1080p.BluRay.H264.AAC-RARBG'
found_releases = [
{'name': 'The.Hobbit.The.Battle.of.the.Five.Armies.2014.1080p.BluRay.H264.AAC-RARBG'},
{'name': 'Survivor.2015.1080p.BluRay.H264.AAC-RARBG'},
]
scoring.sort(input_release_name, found_releases)
self.assertEqual('Survivor.2015.1080p.BluRay.H264.AAC-RARBG', found_releases[0]['name'])
def test_02(self):
"""
Test 100% match
:return:
:rtype:
"""
scoring = ReleaseScoringAlice()
input_release_name = '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'
found_releases = [
{'name': '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'},
{'name': '400.Days.2015.720p.BluRay.H264.AAC-RARBG'},
{'name': '400.Days.2015.BRRip.XviD.AC3-RARBG'},
{'name': '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'},
{'name': '400.Days.2015.720p.BluRay.x264.[YTS.AG]'},
]
scoring.sort(input_release_name, found_releases)
# pprint(found_releases)
self.assertEqual('400.Days.2015.1080p.BluRay.H264.AAC-RARBG', found_releases[0]['name'])
if __name__ == '__main__':
unittest.main()
|
from subfind.release.alice import ReleaseScoringAlice
__author__ = 'hiepsimu'
import logging
import unittest
logging.basicConfig(level=logging.DEBUG)
class ReleaseScoringAliceTestCase(unittest.TestCase):
def test_01(self):
"""
Release which match the movie title should be the higher priority
:return:
:rtype:
"""
scoring = ReleaseScoringAlice()
input_release_name = 'Survivor.2014.1080p.BluRay.H264.AAC-RARBG'
found_releases = [
{'name': 'The.Hobbit.The.Battle.of.the.Five.Armies.2014.1080p.BluRay.H264.AAC-RARBG'},
{'name': 'Survivor.2015.1080p.BluRay.H264.AAC-RARBG'},
]
scoring.sort(input_release_name, found_releases)
self.assertEqual('Survivor.2015.1080p.BluRay.H264.AAC-RARBG', found_releases[0]['name'])
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
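The new `test_02` pins exact-match behavior: a release identical to the input name must sort first. The commit message points at Jaccard similarity; below is a classic token-set Jaccard over release names (the library's actual scorer may tokenize and weigh differently):

```python
def jaccard(a, b):
    """Similarity of two dot-separated release names, in [0, 1]."""
    ta, tb = set(a.lower().split(".")), set(b.lower().split("."))
    return len(ta & tb) / len(ta | tb)

query = "400.Days.2015.1080p.BluRay.H264.AAC-RARBG"
print(jaccard(query, "400.Days.2015.1080p.BluRay.H264.AAC-RARBG"))  # 1.0
print(jaccard(query, "400.Days.2015.720p.BluRay.x264.[YTS.AG]"))    # well below 1.0
```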
c535c22884dbb0df227d4ad142e4d4515415ca29
|
Switch to wav test files for gstreamer tests
|
dbrgn/mopidy,jcass77/mopidy,woutervanwijk/mopidy,jcass77/mopidy,tkem/mopidy,kingosticks/mopidy,ali/mopidy,diandiankan/mopidy,hkariti/mopidy,bacontext/mopidy,rawdlite/mopidy,jcass77/mopidy,bacontext/mopidy,pacificIT/mopidy,ZenithDK/mopidy,adamcik/mopidy,mopidy/mopidy,dbrgn/mopidy,mokieyue/mopidy,hkariti/mopidy,woutervanwijk/mopidy,kingosticks/mopidy,SuperStarPL/mopidy,tkem/mopidy,tkem/mopidy,adamcik/mopidy,jodal/mopidy,dbrgn/mopidy,swak/mopidy,diandiankan/mopidy,ZenithDK/mopidy,bencevans/mopidy,vrs01/mopidy,adamcik/mopidy,jodal/mopidy,priestd09/mopidy,glogiotatidis/mopidy,ali/mopidy,diandiankan/mopidy,bencevans/mopidy,kingosticks/mopidy,bencevans/mopidy,swak/mopidy,quartz55/mopidy,ali/mopidy,ZenithDK/mopidy,jmarsik/mopidy,priestd09/mopidy,jmarsik/mopidy,SuperStarPL/mopidy,liamw9534/mopidy,ZenithDK/mopidy,liamw9534/mopidy,quartz55/mopidy,SuperStarPL/mopidy,jmarsik/mopidy,rawdlite/mopidy,priestd09/mopidy,tkem/mopidy,hkariti/mopidy,rawdlite/mopidy,mopidy/mopidy,swak/mopidy,quartz55/mopidy,abarisain/mopidy,bacontext/mopidy,jmarsik/mopidy,pacificIT/mopidy,pacificIT/mopidy,abarisain/mopidy,hkariti/mopidy,mopidy/mopidy,bacontext/mopidy,glogiotatidis/mopidy,vrs01/mopidy,vrs01/mopidy,mokieyue/mopidy,glogiotatidis/mopidy,diandiankan/mopidy,dbrgn/mopidy,pacificIT/mopidy,rawdlite/mopidy,mokieyue/mopidy,swak/mopidy,SuperStarPL/mopidy,glogiotatidis/mopidy,jodal/mopidy,mokieyue/mopidy,vrs01/mopidy,quartz55/mopidy,bencevans/mopidy,ali/mopidy
|
tests/backends/gstreamer_test.py
|
tests/backends/gstreamer_test.py
|
import unittest
import os
from mopidy.models import Playlist, Track
from mopidy.backends.gstreamer import GStreamerBackend
from tests.backends.base import (BasePlaybackControllerTest,
BaseCurrentPlaylistControllerTest)
folder = os.path.dirname(__file__)
folder = os.path.join(folder, '..', 'data')
folder = os.path.abspath(folder)
song = os.path.join(folder, 'song%s.wav')
song = 'file://' + song
# FIXME can be switched to generic test
class GStreamerCurrentPlaylistHandlerTest(BaseCurrentPlaylistControllerTest, unittest.TestCase):
tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
backend_class = GStreamerBackend
class GStreamerPlaybackControllerTest(BasePlaybackControllerTest, unittest.TestCase):
tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
backend_class = GStreamerBackend
if __name__ == '__main__':
unittest.main()
|
import unittest
import os
from mopidy.models import Playlist, Track
from mopidy.backends.gstreamer import GStreamerBackend
from tests.backends.base import (BasePlaybackControllerTest,
BaseCurrentPlaylistControllerTest)
folder = os.path.dirname(__file__)
folder = os.path.join(folder, '..', 'data')
folder = os.path.abspath(folder)
song = os.path.join(folder, 'song%s.mp3')
song = 'file://' + song
# FIXME can be switched to generic test
class GStreamerCurrentPlaylistHandlerTest(BaseCurrentPlaylistControllerTest, unittest.TestCase):
tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
backend_class = GStreamerBackend
class GStreamerPlaybackControllerTest(BasePlaybackControllerTest, unittest.TestCase):
tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
backend_class = GStreamerBackend
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
Python
|
9e0725483e80a4e98d2635b90a268d00e4eae9f3
|
Update insertion-sort-1.py
|
manasRK/algorithms-practice
|
hackerrank/insertion-sort-1.py
|
hackerrank/insertion-sort-1.py
|
'''
https://www.hackerrank.com/challenges/insertionsort1
Sorting
One common task for computers is to sort data. For example, people might want to see all their files on a computer sorted by size. Since sorting is a simple problem with many different possible solutions, it is often used to introduce the study of algorithms.
Insertion Sort
These challenges will cover Insertion Sort, a simple and intuitive sorting algorithm. We will first start with an already sorted list.
Insert element into sorted list
Given a sorted list with an unsorted number in the rightmost cell, can you write some simple code to insert into the array so that it remains sorted?
Print the array every time a value is shifted in the array until the array is fully sorted. The goal of this challenge is to follow the correct order of insertion sort.
Guideline: You can copy the value of to a variable and consider its cell "empty". Since this leaves an extra cell empty on the right, you can shift everything over until can be inserted. This will create a duplicate of each value, but when you reach the right spot, you can replace it with .
Input Format
There will be two lines of input:
SIZE - the size of the array
ARR - the unsorted array of integers
Output Format
On each line, output the entire array every time an item is shifted in it.
'''
import sys
def insertion_sort(ar):
if len(ar) == 1:
print(' '.join(map(str, ar)))
return(ar)
else:
x = ar[-1]
for i in reversed(range(len(ar) - 1)):
if x < ar[i]:
ar[i + 1] = ar[i]
print(' '.join(map(str, ar)))
if i == 0:
ar[0] = x
print(' '.join(map(str, ar)))
break
else:
ar[i + 1] = x
print(' '.join(map(str, ar)))
break
return(ar)
if __name__ == '__main__':
s = int(sys.stdin.readline())
ar = list(map(int, sys.stdin.readline().split()))
insertion_sort(ar)
|
'''
https://www.hackerrank.com/challenges/insertionsort1
Sorting
One common task for computers is to sort data. For example, people might want to see all their files on a computer sorted by size. Since sorting is a simple problem with many different possible solutions, it is often used to introduce the study of algorithms.
Insertion Sort
These challenges will cover Insertion Sort, a simple and intuitive sorting algorithm. We will first start with an already sorted list.
Insert element into sorted list
Given a sorted list with an unsorted number in the rightmost cell, can you write some simple code to insert into the array so that it remains sorted?
Print the array every time a value is shifted in the array until the array is fully sorted. The goal of this challenge is to follow the correct order of insertion sort.
Guideline: You can copy the value of to a variable and consider its cell "empty". Since this leaves an extra cell empty on the right, you can shift everything over until can be inserted. This will create a duplicate of each value, but when you reach the right spot, you can replace it with .
Input Format
There will be two lines of input:
SIZE - the size of the array
ARR - the unsorted array of integers
Output Format
On each line, output the entire array every time an item is shifted in it.
'''
import sys
def insertion_sort(ar):
if len(ar) == 1:
print(' '.join(map(str, ar)))
return(ar)
else:
x = ar[-1]
for i in reversed(range(len(ar) - 1)):
if x < ar[i]:
ar[i + 1] = ar[i]
print(' '.join(map(str, ar)))
if i == 0:
ar[0] = x
print_list(ar)
break
else:
ar[i + 1] = x
print_list(ar)
break
return(ar)
if __name__ == '__main__':
s = int(sys.stdin.readline())
ar = list(map(int, sys.stdin.readline().split()))
inserti
|
mit
|
Python
|
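The old revision called an undefined `print_list` helper and was cut off mid-identifier (`inserti`); the fix inlines the printing so the file runs end to end. The same shift-and-insert step in a compact form, assuming the challenge's print-after-every-shift output format:

```python
def insert_last(ar):
    """Insert ar[-1] into the sorted prefix ar[:-1], printing each shift."""
    x = ar[-1]
    i = len(ar) - 2
    while i >= 0 and ar[i] > x:
        ar[i + 1] = ar[i]                  # shift the larger value right
        print(" ".join(map(str, ar)))
        i -= 1
    ar[i + 1] = x
    print(" ".join(map(str, ar)))

insert_last([2, 4, 6, 8, 3])
# 2 4 6 8 8 / 2 4 6 6 8 / 2 4 4 6 8 / 2 3 4 6 8
```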
4d1fa4bee77eba19cb0a4c80032f30dcc89e6b98
|
Fix date check
|
DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs
|
dcache-billing/python/download_billing_logs.py
|
dcache-billing/python/download_billing_logs.py
|
#!/usr/bin/env python
import sys
import urllib2
import argparse
FAXBOX_PROCESSED_CSV_URL = "http://login.usatlas.org/logs/mwt2/dcache-billing/processed/"
FAXBOX_RAW_CSV_URL = "http://login.usatlas.org/logs/mwt2/dcache-billing/raw/"
def download_log(date_string):
"""
Download job log files from Amazon EC2 machines
parameters:
date_string - date to start download
"""
file_urls = []
url_file = "billing-{0}".format(date_string)
file_url = "{0}/{1}/{2}".format(FAXBOX_PROCESSED_CSV_URL,
date_string[0:4],
url_file)
file_urls.append((url_file, file_url))
url_file = "billing-error-{0}".format(date_string)
file_url = "{0}/{1}/{2}".format(FAXBOX_PROCESSED_CSV_URL,
date_string[0:4],
url_file)
file_urls.append((url_file, file_url))
for file_info in file_urls:
try:
url = file_info[1]
request = urllib2.urlopen(url)
if request.getcode() != 200:
sys.stderr.write("Can't download {0}".format(url))
return None
except urllib2.HTTPError:
sys.stderr.write("Can't download {0}".format(url))
return False
output_file = open(file_info[0], 'w')
for line in request:
output_file.write(line)
output_file.close()
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download DCache billing records')
parser.add_argument('--date', dest='date', default=None, required=True,
help='Date to download')
args = parser.parse_args(sys.argv[1:])
if len(args.date) != 10:
sys.stderr.write("Invalid date argument: {0}\n".format(args.date))
try:
int(args.date)
except ValueError:
sys.stderr.write("Invalid date argument: {0}\n".format(args.date))
download_log(args.date)
|
#!/usr/bin/env python
import sys
import urllib2
import argparse
FAXBOX_PROCESSED_CSV_URL = "http://login.usatlas.org/logs/mwt2/dcache-billing/processed/"
FAXBOX_RAW_CSV_URL = "http://login.usatlas.org/logs/mwt2/dcache-billing/raw/"
def download_log(date_string):
"""
Download job log files from Amazon EC2 machines
parameters:
date_string - date to start download
"""
file_urls = []
url_file = "billing-{0}".format(date_string)
file_url = "{0}/{1}/{2}".format(FAXBOX_PROCESSED_CSV_URL,
date_string[0:4],
url_file)
file_urls.append((url_file, file_url))
url_file = "billing-error-{0}".format(date_string)
file_url = "{0}/{1}/{2}".format(FAXBOX_PROCESSED_CSV_URL,
date_string[0:4],
url_file)
file_urls.append((url_file, file_url))
for file_info in file_urls:
try:
url = file_info[1]
request = urllib2.urlopen(url)
if request.getcode() != 200:
sys.stderr.write("Can't download {0}".format(url))
return None
except urllib2.HTTPError:
sys.stderr.write("Can't download {0}".format(url))
return False
output_file = open(file_info[0], 'w')
for line in request:
output_file.write(line)
output_file.close()
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download DCache billing records')
parser.add_argument('--date', dest='date', default=None, required=True,
help='Date to download')
args = parser.parse_args(sys.argv[1:])
if len(args.date) != 8:
sys.stderr.write("Invalid date argument: {0}\n".format(args.date))
try:
int(args.date)
except ValueError:
sys.stderr.write("Invalid date argument: {0}\n".format(args.date))
download_log(args.date)
|
apache-2.0
|
Python
|
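The fix raises the expected length from 8 to 10, which suggests a dashed date such as `2015-01-02` rather than `20150102`; note that the follow-up `int(args.date)` check still rejects dashed input, and neither check aborts before `download_log` runs. A hedged alternative that validates format and digits in one step (the exact format is an assumption):

```python
from datetime import datetime
import sys

def check_date(date_string, fmt="%Y-%m-%d"):
    """True if date_string parses under fmt; warn on stderr otherwise."""
    try:
        datetime.strptime(date_string, fmt)
        return True
    except ValueError:
        sys.stderr.write("Invalid date argument: {0}\n".format(date_string))
        return False

print(check_date("2015-01-02"))  # True
print(check_date("20150102"))    # False under the dashed format
```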
6ed3d0d8f554e578b65db89e5c5f88cd14bfaea4
|
Update tools/hcluster_sg_parser/hcluster_sg_parser.py
|
TGAC/earlham-galaxytools,TGAC/earlham-galaxytools,anilthanki/tgac-galaxytools,anilthanki/tgac-galaxytools,TGAC/earlham-galaxytools,anilthanki/tgac-galaxytools,anilthanki/tgac-galaxytools,TGAC/earlham-galaxytools,TGAC/earlham-galaxytools,anilthanki/tgac-galaxytools
|
tools/hcluster_sg_parser/hcluster_sg_parser.py
|
tools/hcluster_sg_parser/hcluster_sg_parser.py
|
"""
A simple parser to convert the hcluster_sg output into lists of IDs, one list for each cluster.
When a minimum and/or maximum number of cluster elements are specified, the IDs contained in the filtered-out clusters are collected in the "discarded IDS" output dataset.
Usage:
python hcluster_sg_parser.py [-m <N>] [-M <N>] <file> <discarded_out>
"""
import optparse
import os
import sys
def main():
parser = optparse.OptionParser()
parser.add_option('-m', '--min', type='int', default=0, help='Minimum number of cluster elements')
parser.add_option('-M', '--max', type='int', default=sys.maxsize, help='Maximum number of cluster elements')
parser.add_option('-d', '--dir', type='string', help="Absolute or relative path to output directory. If the directory does not exist, it will be created")
options, args = parser.parse_args()
if options.dir and not os.path.exists(options.dir):
os.mkdir(options.dir)
with open(args[2], 'w') as discarded_max_out:
with open(args[1], 'w') as discarded_min_out:
with open(args[0]) as fh:
for line in fh:
line = line.rstrip()
line_cols = line.split('\t')
cluster_id = line_cols[0]
n_ids = int(line_cols[-2])
id_list = line_cols[-1].replace(',', '\n')
if n_ids < options.min:
discarded_min_out.write(id_list)
elif n_ids > options.max:
discarded_max_out.write(id_list)
else:
outfile = cluster_id + '_output.txt'
if options.dir:
outfile = os.path.join(options.dir, outfile)
with open(outfile, 'w') as f:
f.write(id_list)
if __name__ == "__main__":
main()
|
"""
A simple parser to convert the hcluster_sg output into lists of IDs, one list for each cluster.
When a minimum and/or maximum number of cluster elements are specified, the IDs contained in the filtered-out clusters are collected in the "discarded IDS" output dataset.
Usage:
python hcluster_sg_parser.py [-m <N>] [-M <N>] <file> <discarded_out>
"""
import optparse
import os
import sys
def main():
parser = optparse.OptionParser()
parser.add_option('-m', '--min', type='int', default=0, help='Minimum number of cluster elements')
parser.add_option('-M', '--max', type='int', default=sys.maxsize, help='Maximum number of cluster elements')
parser.add_option('-d', '--dir', type='string', help="Absolute or relative path to output directory. If the directory does not exist, it will be created")
options, args = parser.parse_args()
if options.dir and not os.path.exists(options.dir):
os.mkdir(options.dir)
with open(args[2], 'w') as discarded_max_out:
with open(args[1], 'w') as discarded_min_out:
with open(args[0]) as fh:
for line in fh:
line = line.rstrip()
line_cols = line.split('\t')
cluster_id = line_cols[0]
n_ids = int(line_cols[-2])
id_list = line_cols[-1].replace(',', '\n')
if n_ids < options.min:
discarded_min_out.write(id_list)
elif n_ids > options.max:
discarded_max_out.write(id_list)
else:
outfile = cluster_id + '_output.txt'
if options.dir != "":
outfile = options.dir + "/" + cluster_id + '_output.txt'
with open(outfile, 'w') as f:
f.write(id_list)
if __name__ == "__main__":
main()
|
mit
|
Python
|
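The change swaps a manual `options.dir + "/" + ...` concatenation and an `!= ""` test for `os.path.join` and a truthiness check. With optparse, an unset string option defaults to `None`, so the old comparison passed and the concatenation would have raised `TypeError`. A small demonstration of the guard:

```python
import os.path

for d in (None, "", "out"):
    if d:                                      # new-style check: skips None and ""
        print(os.path.join(d, "c1_output.txt"))
    else:
        print("c1_output.txt")                 # fall back to the working directory
```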
0f7cb25ea5a3fbb3c88f4fd7207144f29140f69c
|
Change happy_numbers to check for number 4.
|
bm5w/codeeval
|
happy_numbers/happy_numbers.py
|
happy_numbers/happy_numbers.py
|
"""
Happy numbers solution, code eval.
https://www.codeeval.com/open_challenges/39/
A happy number is defined by the following process. Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers, while those that do not end in 1 are unhappy numbers.
INPUT SAMPLE:
The first argument is the pathname to a file which contains test data, one test case per line. Each line contains a positive integer. E.g.
1
7
22
OUTPUT SAMPLE:
If the number is a happy number, print out 1. If not, print out 0. E.g
1
1
0
For the curious, here's why 7 is a happy number: 7->49->97->130->10->1. Here's why 22 is NOT a happy number: 22->8->64->52->29->85->89->145->42->20->4->16->37->58->89 ...
"""
import sys
def happy_number(num):
if num == 1:
return 1
elif num == 4: # all unhappy numbers end up in cycle including 4
return 0
else:
num = sum([int(x)**2 for x in str(num)])
return happy_number(num)
def main(input_file):
with open(input_file, 'r') as f:
for line in f:
print happy_number(int(line.strip()))
if __name__ == '__main__':
input_file = sys.argv[1]
main(input_file)
|
"""
Happy numbers solution, code eval.
https://www.codeeval.com/open_challenges/39/
A happy number is defined by the following process. Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers, while those that do not end in 1 are unhappy numbers.
INPUT SAMPLE:
The first argument is the pathname to a file which contains test data, one test case per line. Each line contains a positive integer. E.g.
1
7
22
OUTPUT SAMPLE:
If the number is a happy number, print out 1. If not, print out 0. E.g
1
1
0
For the curious, here's why 7 is a happy number: 7->49->97->130->10->1. Here's why 22 is NOT a happy number: 22->8->64->52->29->85->89->145->42->20->4->16->37->58->89 ...
"""
import sys
def happy_number(num, past=[]):
if num == 1:
return 1
elif num in past:
return 0
else:
past.append(num)
num = sum([int(x)**2 for x in str(num)])
return happy_number(num, past)
def main(input_file):
with open(input_file, 'r') as f:
for line in f:
print happy_number(int(line.strip()))
if __name__ == '__main__':
input_file = sys.argv[1]
main(input_file)
|
mit
|
Python
|
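The rewrite drops the `past=[]` accumulator in favor of testing for 4, relying on the fact that every unhappy number eventually enters the cycle 4→16→37→58→89→145→42→20→4 (visible in the docstring's trace for 22). It also removes a mutable-default-argument bug: the shared `past` list persisted across calls, so a happy input repeated later in the file (say 7 twice) would be reported unhappy the second time. The pitfall in isolation:

```python
def buggy(num, past=[]):
    """The default list is created once and shared by every call."""
    past.append(num)
    return past

print(buggy(7))   # [7]
print(buggy(22))  # [7, 22]  <- remembers the previous call
```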
5af16432976f72de1d86f1d725205c4ec6a6caa2
|
Add warning when entity not found in reproduce_state
|
xifle/home-assistant,oandrew/home-assistant,tinloaf/home-assistant,home-assistant/home-assistant,nugget/home-assistant,emilhetty/home-assistant,Julian/home-assistant,Duoxilian/home-assistant,tchellomello/home-assistant,mikaelboman/home-assistant,Duoxilian/home-assistant,ewandor/home-assistant,keerts/home-assistant,ma314smith/home-assistant,kyvinh/home-assistant,emilhetty/home-assistant,tboyce021/home-assistant,bdfoster/blumate,xifle/home-assistant,mKeRix/home-assistant,sdague/home-assistant,hmronline/home-assistant,mikaelboman/home-assistant,sander76/home-assistant,lukas-hetzenecker/home-assistant,sffjunkie/home-assistant,deisi/home-assistant,Smart-Torvy/torvy-home-assistant,soldag/home-assistant,morphis/home-assistant,DavidLP/home-assistant,miniconfig/home-assistant,Zac-HD/home-assistant,coteyr/home-assistant,turbokongen/home-assistant,robjohnson189/home-assistant,hexxter/home-assistant,jnewland/home-assistant,MartinHjelmare/home-assistant,jaharkes/home-assistant,mikaelboman/home-assistant,deisi/home-assistant,qedi-r/home-assistant,Zac-HD/home-assistant,tinloaf/home-assistant,miniconfig/home-assistant,dmeulen/home-assistant,Theb-1/home-assistant,florianholzapfel/home-assistant,molobrakos/home-assistant,joopert/home-assistant,stefan-jonasson/home-assistant,Theb-1/home-assistant,alexmogavero/home-assistant,nkgilley/home-assistant,ct-23/home-assistant,alanbowman/home-assistant,leoc/home-assistant,open-homeautomation/home-assistant,leoc/home-assistant,mikaelboman/home-assistant,florianholzapfel/home-assistant,MungoRae/home-assistant,dmeulen/home-assistant,pottzer/home-assistant,nugget/home-assistant,happyleavesaoc/home-assistant,mahendra-r/home-assistant,jawilson/home-assistant,sander76/home-assistant,varunr047/homefile,aoakeson/home-assistant,caiuspb/home-assistant,tmm1/home-assistant,DavidLP/home-assistant,kennedyshead/home-assistant,badele/home-assistant,eagleamon/home-assistant,caiuspb/home-assistant,dmeulen/home-assistant,tomduijf/home-assistant,philipbl/home-assistant,alanbowman/home-assistant,sander76/home-assistant,lukas-hetzenecker/home-assistant,persandstrom/home-assistant,kyvinh/home-assistant,jamespcole/home-assistant,leoc/home-assistant,instantchow/home-assistant,mikaelboman/home-assistant,kennedyshead/home-assistant,luxus/home-assistant,Julian/home-assistant,JshWright/home-assistant,morphis/home-assistant,lukas-hetzenecker/home-assistant,persandstrom/home-assistant,kyvinh/home-assistant,xifle/home-assistant,happyleavesaoc/home-assistant,HydrelioxGitHub/home-assistant,rohitranjan1991/home-assistant,alanbowman/home-assistant,morphis/home-assistant,varunr047/homefile,shaftoe/home-assistant,oandrew/home-assistant,tomduijf/home-assistant,hmronline/home-assistant,shaftoe/home-assistant,aequitas/home-assistant,toddeye/home-assistant,oandrew/home-assistant,happyleavesaoc/home-assistant,aronsky/home-assistant,persandstrom/home-assistant,balloob/home-assistant,auduny/home-assistant,alexmogavero/home-assistant,tboyce021/home-assistant,sdague/home-assistant,jawilson/home-assistant,mezz64/home-assistant,sfam/home-assistant,pottzer/home-assistant,MungoRae/home-assistant,FreekingDean/home-assistant,mKeRix/home-assistant,home-assistant/home-assistant,alexmogavero/home-assistant,JshWright/home-assistant,nevercast/home-assistant,devdelay/home-assistant,eagleamon/home-assistant,g12mcgov/home-assistant,g12mcgov/home-assistant,dorant/home-assistant,LinuxChristian/home-assistant,justyns/home-assistant,PetePriority/home-assistant,jabesq/home-assistant,pschmitt/home-assistant,nevercast/home-assistant,keerts/home-assistant,Danielhiversen/home-assistant,Cinntax/home-assistant,w1ll1am23/home-assistant,titilambert/home-assistant,florianholzapfel/home-assistant,leppa/home-assistant,shaftoe/home-assistant,partofthething/home-assistant,devdelay/home-assistant,tinloaf/home-assistant,leoc/home-assistant,betrisey/home-assistant,jaharkes/home-assistant,ct-23/home-assistant,hexxter/home-assistant,Duoxilian/home-assistant,alexkolar/home-assistant,PetePriority/home-assistant,sffjunkie/home-assistant,aronsky/home-assistant,miniconfig/home-assistant,postlund/home-assistant,justyns/home-assistant,nugget/home-assistant,emilhetty/home-assistant,florianholzapfel/home-assistant,mezz64/home-assistant,sfam/home-assistant,hmronline/home-assistant,HydrelioxGitHub/home-assistant,hexxter/home-assistant,MungoRae/home-assistant,jaharkes/home-assistant,auduny/home-assistant,keerts/home-assistant,eagleamon/home-assistant,deisi/home-assistant,caiuspb/home-assistant,Theb-1/home-assistant,leppa/home-assistant,tboyce1/home-assistant,sfam/home-assistant,Zac-HD/home-assistant,philipbl/home-assistant,partofthething/home-assistant,molobrakos/home-assistant,hexxter/home-assistant,Smart-Torvy/torvy-home-assistant,nnic/home-assistant,rohitranjan1991/home-assistant,robjohnson189/home-assistant,shaftoe/home-assistant,morphis/home-assistant,fbradyirl/home-assistant,sffjunkie/home-assistant,instantchow/home-assistant,justyns/home-assistant,mKeRix/home-assistant,ma314smith/home-assistant,dorant/home-assistant,aoakeson/home-assistant,xifle/home-assistant,tchellomello/home-assistant,tmm1/home-assistant,Danielhiversen/home-assistant,kyvinh/home-assistant,Smart-Torvy/torvy-home-assistant,luxus/home-assistant,badele/home-assistant,Zac-HD/home-assistant,philipbl/home-assistant,dorant/home-assistant,Julian/home-assistant,w1ll1am23/home-assistant,tomduijf/home-assistant,sffjunkie/home-assistant,LinuxChristian/home-assistant,open-homeautomation/home-assistant,rohitranjan1991/home-assistant,Zyell/home-assistant,stefan-jonasson/home-assistant,oandrew/home-assistant,balloob/home-assistant,nkgilley/home-assistant,badele/home-assistant,fbradyirl/home-assistant,deisi/home-assistant,instantchow/home-assistant,soldag/home-assistant,robjohnson189/home-assistant,varunr047/homefile,molobrakos/home-assistant,Zyell/home-assistant,tboyce1/home-assistant,Duoxilian/home-assistant,srcLurker/home-assistant,GenericStudent/home-assistant,auduny/home-assistant,srcLurker/home-assistant,ct-23/home-assistant,jaharkes/home-assistant,nnic/home-assistant,persandstrom/home-assistant,happyleavesaoc/home-assistant,Zyell/home-assistant,hmronline/home-assistant,devdelay/home-assistant,varunr047/homefile,adrienbrault/home-assistant,ma314smith/home-assistant,sffjunkie/home-assistant,pottzer/home-assistant,betrisey/home-assistant,jabesq/home-assistant,postlund/home-assistant,coteyr/home-assistant,tmm1/home-assistant,stefan-jonasson/home-assistant,emilhetty/home-assistant,titilambert/home-assistant,ct-23/home-assistant,miniconfig/home-assistant,coteyr/home-assistant,mKeRix/home-assistant,FreekingDean/home-assistant,robjohnson189/home-assistant,mahendra-r/home-assistant,Teagan42/home-assistant,MungoRae/home-assistant,ct-23/home-assistant,luxus/home-assistant,nevercast/home-assistant,balloob/home-assistant,dmeulen/home-assistant,alexkolar/home-assistant,bdfoster/blumate,Cinntax/home-assistant
|
homeassistant/helpers/state.py
|
homeassistant/helpers/state.py
|
"""
homeassistant.helpers.state
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helpers that help with state related things.
"""
import logging
from homeassistant.core import State
import homeassistant.util.dt as dt_util
from homeassistant.const import (
STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods, attribute-defined-outside-init
class TrackStates(object):
"""
Records the time when the with-block is entered. Will add all states
that have changed since the start time to the return list when with-block
is exited.
"""
def __init__(self, hass):
self.hass = hass
self.states = []
def __enter__(self):
self.now = dt_util.utcnow()
return self.states
def __exit__(self, exc_type, exc_value, traceback):
self.states.extend(get_changed_since(self.hass.states.all(), self.now))
def get_changed_since(states, utc_point_in_time):
"""
Returns all states that have been changed since utc_point_in_time.
"""
point_in_time = dt_util.strip_microseconds(utc_point_in_time)
return [state for state in states if state.last_updated >= point_in_time]
def reproduce_state(hass, states, blocking=False):
""" Takes in a state and will try to have the entity reproduce it. """
if isinstance(states, State):
states = [states]
for state in states:
current_state = hass.states.get(state.entity_id)
if current_state is None:
_LOGGER.warning('reproduce_state: Unable to find entity %s',
state.entity_id)
continue
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
else:
_LOGGER.warning("reproduce_state: Unable to reproduce state %s",
state)
continue
service_data = dict(state.attributes)
service_data[ATTR_ENTITY_ID] = state.entity_id
hass.services.call(state.domain, service, service_data, blocking)
|
"""
homeassistant.helpers.state
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helpers that help with state related things.
"""
import logging
from homeassistant.core import State
import homeassistant.util.dt as dt_util
from homeassistant.const import (
STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods, attribute-defined-outside-init
class TrackStates(object):
"""
Records the time when the with-block is entered. Will add all states
that have changed since the start time to the return list when with-block
is exited.
"""
def __init__(self, hass):
self.hass = hass
self.states = []
def __enter__(self):
self.now = dt_util.utcnow()
return self.states
def __exit__(self, exc_type, exc_value, traceback):
self.states.extend(get_changed_since(self.hass.states.all(), self.now))
def get_changed_since(states, utc_point_in_time):
"""
Returns all states that have been changed since utc_point_in_time.
"""
point_in_time = dt_util.strip_microseconds(utc_point_in_time)
return [state for state in states if state.last_updated >= point_in_time]
def reproduce_state(hass, states, blocking=False):
""" Takes in a state and will try to have the entity reproduce it. """
if isinstance(states, State):
states = [states]
for state in states:
current_state = hass.states.get(state.entity_id)
if current_state is None:
continue
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
else:
_LOGGER.warning("Unable to reproduce state for %s", state)
continue
service_data = dict(state.attributes)
service_data[ATTR_ENTITY_ID] = state.entity_id
hass.services.call(state.domain, service, service_data, blocking)
|
mit
|
Python
|
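Note on the home-assistant row above: the diff adds a `_LOGGER.warning` when `hass.states.get` returns `None` (previously the loop skipped missing entities silently) and prefixes both warnings with `reproduce_state:` so the log source is unambiguous; no behavioral change beyond logging.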
a2cbf6a4b500fa657a83f0504dc777603590372b
|
Reduce logging severity
|
phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,verycumbersome/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,the-blue-alliance/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,synth3tk/the-blue-alliance,nwalters512/the-blue-alliance,phil-lopreiato/the-blue-alliance,fangeugene/the-blue-alliance,tsteward/the-blue-alliance,verycumbersome/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,jaredhasenklein/the-blue-alliance,verycumbersome/the-blue-alliance,bdaroz/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,phil-lopreiato/the-blue-alliance,synth3tk/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,the-blue-alliance/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,synth3tk/the-blue-alliance,jaredhasenklein/the-blue-alliance,nwalters512/the-blue-alliance,synth3tk/the-blue-alliance,the-blue-alliance/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,synth3tk/the-blue-alliance,jaredhasenklein/the-blue-alliance,tsteward/the-blue-alliance,fangeugene/the-blue-alliance,phil-lopreiato/the-blue-alliance,nwalters512/the-blue-alliance,fangeugene/the-blue-alliance,synth3tk/the-blue-alliance,verycumbersome/the-blue-alliance,bdaroz/the-blue-alliance,fangeugene/the-blue-alliance,nwalters512/the-blue-alliance,tsteward/the-blue-alliance,tsteward/the-blue-alliance
|
helpers/notification_sender.py
|
helpers/notification_sender.py
|
import hashlib
import json
import logging
import urllib2
from controllers.gcm.gcm import GCMConnection
class NotificationSender(object):
WEBHOOK_VERSION = 1
@classmethod
def send_gcm(cls, notification):
gcm_connection = GCMConnection()
gcm_connection.notify_device(notification)
@classmethod
def send_ios(cls, notification):
pass
@classmethod
def send_webhook(cls, message, keys):
payload = json.dumps(message, ensure_ascii=True)
invalid_urls = []
for client in keys:
url = client[0]
secret = client[1]
ch = hashlib.sha1()
ch.update(secret)
ch.update(payload)
checksum = ch.hexdigest()
request = urllib2.Request(url, payload)
request.add_header("Content-Type", 'application/json; charset="utf-8"')
request.add_header("X-TBA-Checksum", checksum)
request.add_header("X-TBA-Version", '{}'.format(cls.WEBHOOK_VERSION))
try:
resp = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 400:
logging.warning('400, Invalid message: ' + repr(gcm_post_json_str))
elif e.code == 401:
logging.warning('401, Webhook unauthorized')
elif e.code == 404:
invalid_urls.append(url)
elif e.code == 500:
logging.warning('500, Internal error on server sending message')
else:
logging.exception('Unexpected HTTPError: ' + str(e.code) + " " + e.msg + " " + e.read())
except Exception, ex:
logging.error("Other Exception: {}".format(str(ex)))
if invalid_urls:
logging.warning("Invalid urls while sending webhook: {}".format(str(invalid_urls)))
return False
return True
|
import hashlib
import json
import logging
import urllib2
from controllers.gcm.gcm import GCMConnection
class NotificationSender(object):
WEBHOOK_VERSION = 1
@classmethod
def send_gcm(cls, notification):
gcm_connection = GCMConnection()
gcm_connection.notify_device(notification)
@classmethod
def send_ios(cls, notification):
pass
@classmethod
def send_webhook(cls, message, keys):
payload = json.dumps(message, ensure_ascii=True)
invalid_urls = []
for client in keys:
url = client[0]
secret = client[1]
ch = hashlib.sha1()
ch.update(secret)
ch.update(payload)
checksum = ch.hexdigest()
request = urllib2.Request(url, payload)
request.add_header("Content-Type", 'application/json; charset="utf-8"')
request.add_header("X-TBA-Checksum", checksum)
request.add_header("X-TBA-Version", '{}'.format(cls.WEBHOOK_VERSION))
try:
resp = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 400:
logging.error('400, Invalid message: ' + repr(gcm_post_json_str))
elif e.code == 401:
logging.error('401, Webhook unauthorized')
elif e.code == 404:
invalid_urls.append(url)
elif e.code == 500:
logging.error('500, Internal error on server sending message')
else:
logging.exception('Unexpected HTTPError: ' + str(e.code) + " " + e.msg + " " + e.read())
except Exception, ex:
logging.error("Other Exception: {}".format(str(ex)))
if invalid_urls:
logging.warning("Invalid urls while sending webhook: {}".format(str(invalid_urls)))
return False
return True
|
mit
|
Python
|
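Expected webhook failures (bad request, unauthorized, server error) now log at WARNING instead of ERROR, reserving ERROR for the genuinely unexpected. Also worth noting: the 400 branch in both versions formats `repr(gcm_post_json_str)`, a name never defined in this file, so reaching that branch would raise `NameError`. The severity ordering behind the change:

```python
import logging

# WARNING (30) sorts below ERROR (40), so these expected failures stop
# surfacing at error level in log aggregation.
print(logging.WARNING < logging.ERROR)  # True
```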
0efb8c4347b944c692e3352382bf36de1c9f5ef4
|
Fix test_client with no webpack manifest
|
DirkHoffmann/indico,indico/indico,DirkHoffmann/indico,ThiefMaster/indico,ThiefMaster/indico,indico/indico,DirkHoffmann/indico,indico/indico,pferreir/indico,ThiefMaster/indico,pferreir/indico,DirkHoffmann/indico,pferreir/indico,pferreir/indico,indico/indico,ThiefMaster/indico
|
indico/testing/fixtures/app.py
|
indico/testing/fixtures/app.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import pytest
from flask_webpackext.ext import _FlaskWebpackExtState
from indico.web.flask.app import make_app
from indico.web.flask.wrappers import IndicoFlask
@pytest.fixture(scope='session')
def app(request, redis_proc):
"""Create the flask app."""
config_override = {
'BASE_URL': 'http://localhost',
'SMTP_SERVER': ('localhost', 0), # invalid port - just in case so we NEVER send emails!
'TEMP_DIR': request.config.indico_temp_dir.strpath,
'CACHE_DIR': request.config.indico_temp_dir.strpath,
'REDIS_CACHE_URL': f'redis://{redis_proc.host}:{redis_proc.port}/0',
'STORAGE_BACKENDS': {'default': 'mem:'},
'PLUGINS': request.config.indico_plugins,
'ENABLE_ROOMBOOKING': True,
'SECRET_KEY': os.urandom(16),
'SMTP_USE_CELERY': False,
}
return make_app(set_path=True, testing=True, config_override=config_override)
@pytest.fixture(autouse=True)
def app_context(app):
"""Create a flask app context."""
with app.app_context():
yield app
@pytest.fixture
def request_context(app_context):
"""Create a flask request context."""
with app_context.test_request_context():
yield
@pytest.fixture
def test_client(app, mocker):
"""Create a flask request context."""
mocker.patch.object(_FlaskWebpackExtState, 'manifest')
mocker.patch.object(IndicoFlask, 'manifest')
with app.test_client() as c:
yield c
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import pytest
from indico.web.flask.app import make_app
from indico.web.flask.wrappers import IndicoFlask
@pytest.fixture(scope='session')
def app(request, redis_proc):
"""Create the flask app."""
config_override = {
'BASE_URL': 'http://localhost',
'SMTP_SERVER': ('localhost', 0), # invalid port - just in case so we NEVER send emails!
'TEMP_DIR': request.config.indico_temp_dir.strpath,
'CACHE_DIR': request.config.indico_temp_dir.strpath,
'REDIS_CACHE_URL': f'redis://{redis_proc.host}:{redis_proc.port}/0',
'STORAGE_BACKENDS': {'default': 'mem:'},
'PLUGINS': request.config.indico_plugins,
'ENABLE_ROOMBOOKING': True,
'SECRET_KEY': os.urandom(16),
'SMTP_USE_CELERY': False,
}
return make_app(set_path=True, testing=True, config_override=config_override)
@pytest.fixture(autouse=True)
def app_context(app):
"""Create a flask app context."""
with app.app_context():
yield app
@pytest.fixture
def request_context(app_context):
"""Create a flask request context."""
with app_context.test_request_context():
yield
@pytest.fixture
def test_client(app, mocker):
"""Create a flask request context."""
mocker.patch.object(IndicoFlask, 'manifest')
with app.test_client() as c:
yield c
|
mit
|
Python
|
6102f840c68e98a6c09aeb30055d6e58fa9c5006
|
Put temporary files in system's tempdir
|
atmtools/typhon,atmtools/typhon
|
typhon/tests/files/test_utils.py
|
typhon/tests/files/test_utils.py
|
from tempfile import gettempdir, NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile(dir=gettempdir()) as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile(dir=gettempdir()) as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile(dir=gettempdir()) as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile(dir=gettempdir()) as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
from tempfile import NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile() as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile() as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
mit
|
Python
|
cfbe7778e441f5851dc0efbacdfebd5209c31742
|
bump version
|
cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy
|
cupy/_version.py
|
cupy/_version.py
|
__version__ = '11.0.0rc1'
|
__version__ = '11.0.0b3'
|
mit
|
Python
|
eac2f296e855f92d040321edee943ad5f8a8fb39
|
Add filtering to view (nc-463)
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
nodeconductor/events/views.py
|
nodeconductor/events/views.py
|
from rest_framework import generics, response
from nodeconductor.events import elasticsearch_client
class EventListView(generics.GenericAPIView):
def list(self, request, *args, **kwargs):
order_by = request.GET.get('o', '-@timestamp')
event_types = request.GET.getlist('event_type')
search_text = request.GET.get('search_text')
elasticsearch_list = elasticsearch_client.ElasticsearchResultList(
user=request.user, sort=order_by, event_types=event_types, search_text=search_text)
page = self.paginate_queryset(elasticsearch_list)
if page is not None:
return self.get_paginated_response(page)
return response.Response(elasticsearch_list)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
|
from rest_framework import generics, response
from nodeconductor.events import elasticsearch_client
class EventListView(generics.GenericAPIView):
def list(self, request, *args, **kwargs):
order_by = request.GET.get('o', '-@timestamp')
elasticsearch_list = elasticsearch_client.ElasticsearchResultList(user=request.user, sort=order_by)
page = self.paginate_queryset(elasticsearch_list)
if page is not None:
return self.get_paginated_response(page)
return response.Response(elasticsearch_list)
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
|
mit
|
Python
|
d62f3bc97bd318ebaf68e97ccc2629d9f8f246b5
|
Correct the pyproj minimum version.
|
girder/large_image,DigitalSlideArchive/large_image,DigitalSlideArchive/large_image,DigitalSlideArchive/large_image,girder/large_image,girder/large_image
|
sources/mapnik/setup.py
|
sources/mapnik/setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def prerelease_local_scheme(version):
"""
Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
"""
from setuptools_scm.version import get_local_node_and_date
if os.getenv('CIRCLE_BRANCH') in ('master', ):
return ''
else:
return get_local_node_and_date(version)
setup(
name='large-image-source-mapnik',
use_scm_version={'root': '../..', 'local_scheme': prerelease_local_scheme},
setup_requires=['setuptools-scm'],
description='A Mapnik/GDAL tilesource for large_image',
long_description='See the large-image package for more details.',
author='Kitware, Inc.',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=[
'large-image>=1.0.0',
'gdal',
'mapnik',
'palettable',
'pyproj>=2.2.0',
],
extras_require={
'girder': 'girder-large-image>=1.0.0',
},
license='Apache Software License 2.0',
keywords='large_image, tile source',
packages=find_packages(exclude=['test', 'test.*']),
url='https://github.com/girder/large_image',
entry_points={
'large_image.source': [
'mapnik = large_image_source_mapnik:MapnikFileTileSource'
],
'girder_large_image.source': [
'mapnik = large_image_source_mapnik.girder_source:MapnikGirderTileSource'
]
},
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def prerelease_local_scheme(version):
"""
Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
"""
from setuptools_scm.version import get_local_node_and_date
if os.getenv('CIRCLE_BRANCH') in ('master', ):
return ''
else:
return get_local_node_and_date(version)
setup(
name='large-image-source-mapnik',
use_scm_version={'root': '../..', 'local_scheme': prerelease_local_scheme},
setup_requires=['setuptools-scm'],
description='A Mapnik/GDAL tilesource for large_image',
long_description='See the large-image package for more details.',
author='Kitware, Inc.',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=[
'large-image>=1.0.0',
'gdal',
'mapnik',
'palettable',
'pyproj>=2.0.0',
],
extras_require={
'girder': 'girder-large-image>=1.0.0',
},
license='Apache Software License 2.0',
keywords='large_image, tile source',
packages=find_packages(exclude=['test', 'test.*']),
url='https://github.com/girder/large_image',
entry_points={
'large_image.source': [
'mapnik = large_image_source_mapnik:MapnikFileTileSource'
],
'girder_large_image.source': [
'mapnik = large_image_source_mapnik.girder_source:MapnikGirderTileSource'
]
},
)
|
apache-2.0
|
Python
|
f70421a0c3143648f7dd2491ad031e62ca92792a
|
increment version for password reset admin form fix
|
foundertherapy/django-users-plus,foundertherapy/django-users-plus
|
accountsplus/__init__.py
|
accountsplus/__init__.py
|
__version__ = '1.3.2'
default_app_config = 'accountsplus.apps.AccountsConfig'
|
__version__ = '1.3.1'
default_app_config = 'accountsplus.apps.AccountsConfig'
|
mit
|
Python
|
bb165b4f8fc88ab3de26b0b52f07ada612e87f2b
|
Fix test cases related to getting tags and fields
|
amancevice/influxalchemy
|
tests/client_test.py
|
tests/client_test.py
|
""" InfluxAlchemy client tests. """
import mock
import influxdb
from influxalchemy.client import InfluxAlchemy
from influxalchemy.measurement import Measurement
from influxalchemy.query import InfluxDBQuery
@mock.patch("influxdb.InfluxDBClient")
def test_query(mock_flux):
db = influxdb.InfluxDBClient(database="fizz")
db.query.side_effect = influxdb.exceptions.InfluxDBClientError(None)
client = InfluxAlchemy(db)
query = client.query(Measurement.new("buzz"))
assert str(query) == "SELECT * FROM buzz;"
@mock.patch("influxdb.InfluxDBClient.query")
def test_measurements(mock_flux):
mock_res = mock.MagicMock()
mock_res.get_points.return_value = [{"name": "fizz"}]
mock_flux.return_value = mock_res
db = influxdb.InfluxDBClient(database="fizz")
client = InfluxAlchemy(db)
measurements = list(client.measurements())
mock_flux.assert_called_once_with("SHOW MEASUREMENTS;")
@mock.patch("influxdb.InfluxDBClient.query")
def test_tags(mock_flux):
mock_res = mock.MagicMock()
mock_res.get_points.return_value = [{'tagKey': 'sensor_id'}]
mock_flux.return_value = mock_res
db = influxdb.InfluxDBClient(database="fizz")
client = InfluxAlchemy(db)
assert client.tags(Measurement.new("environment")) == ["sensor_id"]
@mock.patch("influxdb.InfluxDBClient.query")
def test_fields(mock_flux):
mock_res = mock.MagicMock()
mock_res.get_points.return_value = [
{'fieldKey': 'humidity', 'fieldType': 'float'},
{'fieldKey': 'temperature', 'fieldType': 'float'}
]
mock_flux.return_value = mock_res
db = influxdb.InfluxDBClient(database="fizz")
client = InfluxAlchemy(db)
assert client.fields(Measurement.new("environment")) == ["humidity", "temperature"]
|
""" InfluxAlchemy client tests. """
import mock
import influxdb
from influxalchemy.client import InfluxAlchemy
from influxalchemy.measurement import Measurement
from influxalchemy.query import InfluxDBQuery
@mock.patch("influxdb.InfluxDBClient")
def test_query(mock_flux):
db = influxdb.InfluxDBClient(database="fizz")
db.query.side_effect = influxdb.exceptions.InfluxDBClientError(None)
client = InfluxAlchemy(db)
query = client.query(Measurement.new("buzz"))
assert str(query) == "SELECT * FROM buzz;"
@mock.patch("influxdb.InfluxDBClient.query")
def test_measurements(mock_flux):
mock_res = mock.MagicMock()
mock_res.get_points.return_value = [{"name": "fizz"}]
mock_flux.return_value = mock_res
db = influxdb.InfluxDBClient(database="fizz")
client = InfluxAlchemy(db)
measurements = list(client.measurements())
mock_flux.assert_called_once_with("SHOW MEASUREMENTS;")
@mock.patch("influxdb.InfluxDBClient.query")
def test_tags(mock_flux):
mock_res = mock.MagicMock()
mock_res.get_points.return_value = [{"name": "fizz"}]
mock_flux.return_value = mock_res
db = influxdb.InfluxDBClient(database="fizz")
client = InfluxAlchemy(db)
assert client.tags(Measurement.new("foo")) == ["fizz"]
@mock.patch("influxdb.InfluxDBClient.query")
def test_fields(mock_flux):
mock_res = mock.MagicMock()
mock_res.get_points.return_value = [{"name": "fizz"}]
mock_flux.return_value = mock_res
db = influxdb.InfluxDBClient(database="fizz")
client = InfluxAlchemy(db)
assert client.fields(Measurement.new("foo")) == ["fizz"]
|
mit
|
Python
|
499a74ff3256b3c6fb6a0ca4e2fd9578f2948cc8
|
correct variable names
|
tmthydvnprt/eugene,tmthydvnprt/eugene
|
tests/eguene_test.py
|
tests/eguene_test.py
|
"""
eugene_test.py
"""
import os
import sys
import numpy as np
import pandas as pd
sys.path.append(os.path.expanduser('~/GitHub/eugene'))
import eugene.Config
from eugene.Population import Population
# Set up variable and truth configuration
eugene.Config.VAR['x'] = np.linspace(0, 8.0 * np.pi, 1024)
eugene.Config.TRUTH = eugene.Config.VAR['x'] * np.sin(eugene.Config.VAR['x']) + eugene.Config.VAR['x']/2.0 + 1.61
# @profile
def error_and_complexity(gene_expression, scale):
"""user fitness function, weighted combination of error and complexity"""
weights = np.array([0.95, 0.025, 0.025])
scaled_gene_expression = 1.0 / (gene_expression / scale)
return np.dot(scaled_gene_expression, weights)
# Setup Population
P = Population(
init_population_size=1000,
objective_function=error_and_complexity,
max_generations=100,
init_tree_size=2,
target=eugene.Config.TRUTH,
pruning=False
)
# Initialize Population
P.initialize()
# Run the Population
P.run(20)
|
"""
eugene_test.py
"""
import os
import sys
import numpy as np
import pandas as pd
sys.path.append('~/GitHub/eugene')
import eugene.Config
from eugene.Population import Population
# Set up variable and truth configuration
eugene.Config.var['x'] = np.linspace(0, 8.0 * np.pi, 1024)
eugene.Config.truth = eugene.Config.var['x'] * np.sin(eugene.Config.var['x']) + eugene.Config.var['x']/2.0 + 1.61
@profile
def error_and_complexity(gene_expression, scale):
"""user fitness function, weighted combination of error and complexity"""
weights = np.array([0.95, 0.025, 0.025])
scaled_gene_expression = 1.0 / (gene_expression / scale)
return np.dot(scaled_gene_expression, weights)
# Setup Population
P = Population(
init_population_size=1000,
objective_function=error_and_complexity,
max_generations=100,
init_tree_size=2,
target=eugene.Config.truth,
pruning=False
)
# Initialize Population
P.initialize()
# Run the Population
P.run(20)
|
mit
|
Python
|
b4439ef76148f73581e6df0bf593504ae796578a
|
correct a bug in geo to country code.
|
mpetyx/DarwinsMusic,mpetyx/DarwinsMusic,mpetyx/DarwinsMusic
|
dbpedia/geoToCountry.py
|
dbpedia/geoToCountry.py
|
from urllib2 import urlopen
def getCountry(lat, lng):
url = "http://ws.geonames.org/countryCode?lng=" + str(lng) + "&lat=" + str(lat)
country = urlopen(url).read().strip()
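# geonames returns a two-letter ISO country code; anything else indicates a failed lookup.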
if len(country) != 2:
return "Unknown"
return country
|
from urllib2 import urlopen
def getCountry(lat, lng):
url = "http://ws.geonames.org/countryCode?lng=" + str(lng) + "&lat=" + str(lat)
country = urlopen(url).read().strip()
return country
|
agpl-3.0
|
Python
|
9c8bfff17254cf88e11517a278bb60ad4c83e41b
|
Add revised alg_strongly_connected_components.py
|
bowen0701/algorithms_data_structures
|
alg_strongly_connected_components.py
|
alg_strongly_connected_components.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def strongly_connected_components():
pass
def main():
# 3 strongly connected graphs: {A, B, D, E, G}, {C}, {F, H, I}.
adj_dict = {
'A': ['B'],
'B': ['C', 'E'],
'C': ['C', 'F'],
'D': ['B', 'G'],
'E': ['A', 'D'],
'F': ['H'],
'G': ['E'],
'H': ['I'],
'I': ['F']
}
strongly_connected_components(adj_dict)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def dfs_recur(adj_dict, start_vertex, visited_set,
discover_ls, finish_ls):
visited_set.add(start_vertex)
discover_ls.append(start_vertex)
for neighbor_vertex in adj_dict[start_vertex]:
if neighbor_vertex not in visited_set:
dfs_recur(adj_dict, neighbor_vertex, visited_set,
discover_ls, finish_ls)
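# Prepend on exit so finish_ls ends up in decreasing finish-time order.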
finish_ls.insert(0, start_vertex)
def traverse_dfs_recur(adj_dict):
visited_set = set()
discover_ls = []
finish_ls = []
for vertex in adj_dict:
if vertex not in visited_set:
dfs_recur(adj_dict, vertex, visited_set,
discover_ls, finish_ls)
return discover_ls, finish_ls
def transpose_graph(adj_dict):
tr_adj_dict = {}
for vertex in adj_dict:
tr_adj_dict[vertex] = []
for vertex in adj_dict:
for neighbor_vertex in adj_dict[vertex]:
tr_adj_dict[neighbor_vertex].append(vertex)
return tr_adj_dict
def strongly_connected_components(adj_dict):
"""Find strongly connected graphs by Kosaraju's Algorithm."""
discover_ls, finish_ls = traverse_dfs_recur(adj_dict)
print('discover_ls for G: {}'.format(discover_ls))
print('finish_ls for G: {}'.format(finish_ls))
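# Step 2: transpose G by reversing every edge.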
tr_adj_dict = transpose_graph(adj_dict)
print('G^T: {}'.format(tr_adj_dict))
print('strongly connected components:')
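# Step 3: DFS on G^T in decreasing finish-time order; each DFS tree is one strongly connected component.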
scc_visited_set = set()
for vertex in finish_ls:
scc_discover_ls = []
scc_finish_ls = []
if vertex not in scc_visited_set:
dfs_recur(tr_adj_dict, vertex, scc_visited_set,
scc_discover_ls, scc_finish_ls)
print('scc_discover_ls: {}'.format(scc_discover_ls))
def main():
# 3 strongly connected graphs: {A, B, D, E, G}, {C}, {F, H, I}.
adj_dict = {
'A': ['B'],
'B': ['C', 'E'],
'C': ['C', 'F'],
'D': ['B', 'G'],
'E': ['A', 'D'],
'F': ['H'],
'G': ['E'],
'H': ['I'],
'I': ['F']
}
strongly_connected_components(adj_dict)
if __name__ == '__main__':
main()
|
bsd-2-clause
|
Python
|
687bb616deca1372d69ba0781c61a8ea62112426
|
Allow RO commands in oq workers
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
openquake/commands/workers.py
|
openquake/commands/workers.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017-2019 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import getpass
from openquake.baselib import sap, config, workerpool
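# Read-only commands are allowed for any user; everything else needs single-user mode.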
ro_commands = ('status', 'inspect')
@sap.script
def workers(cmd):
"""
start/stop/restart the workers, or return their status
"""
if (cmd not in ro_commands and config.dbserver.multi_user and
getpass.getuser() != 'openquake'):
sys.exit('oq workers only works in single user mode')
master = workerpool.WorkerMaster(config.dbserver.host,
**config.zworkers)
print(getattr(master, cmd)())
workers.arg('cmd', 'command',
choices='start stop status restart inspect'.split())
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017-2019 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import getpass
from openquake.baselib import sap, config, workerpool
@sap.script
def workers(cmd):
"""
start/stop/restart the workers, or return their status
"""
if config.dbserver.multi_user and getpass.getuser() != 'openquake':
sys.exit('oq workers only works in single user mode')
master = workerpool.WorkerMaster(config.dbserver.host,
**config.zworkers)
print(getattr(master, cmd)())
workers.arg('cmd', 'command',
choices='start stop status restart inspect'.split())
|
agpl-3.0
|
Python
|
19b0b1ed7e94ae4bb05f57baf3163850a64df8f9
|
test exports
|
oscarlorentzon/OpenSfM,oscarlorentzon/OpenSfM,oscarlorentzon/OpenSfM,oscarlorentzon/OpenSfM,oscarlorentzon/OpenSfM,mapillary/OpenSfM,mapillary/OpenSfM,mapillary/OpenSfM,mapillary/OpenSfM,mapillary/OpenSfM
|
opensfm/test/test_commands.py
|
opensfm/test/test_commands.py
|
import argparse
from opensfm import commands
from opensfm.test import data_generation
def run_command(command, args):
parser = argparse.ArgumentParser()
command.add_arguments(parser)
parsed_args = parser.parse_args(args)
command.run(parsed_args)
def test_run_all(tmpdir):
data = data_generation.create_berlin_test_folder(tmpdir)
run_all_commands = [
commands.extract_metadata,
commands.detect_features,
commands.match_features,
commands.create_tracks,
commands.reconstruct,
commands.bundle,
commands.mesh,
commands.undistort,
commands.compute_depthmaps,
commands.export_ply,
commands.export_visualsfm,
commands.export_openmvs,
commands.export_pmvs,
commands.export_bundler,
commands.export_colmap
]
for module in run_all_commands:
command = module.Command()
run_command(command, [data.data_path])
reconstruction = data.load_reconstruction()
assert len(reconstruction[0].shots) == 3
assert len(reconstruction[0].points) > 1000
|
import argparse
from opensfm import commands
from opensfm.test import data_generation
def run_command(command, args):
parser = argparse.ArgumentParser()
command.add_arguments(parser)
parsed_args = parser.parse_args(args)
command.run(parsed_args)
def test_run_all(tmpdir):
data = data_generation.create_berlin_test_folder(tmpdir)
run_all_commands = [
commands.extract_metadata,
commands.detect_features,
commands.match_features,
commands.create_tracks,
commands.reconstruct,
commands.bundle,
commands.mesh,
commands.undistort,
commands.compute_depthmaps,
commands.export_ply,
commands.export_visualsfm,
]
for module in run_all_commands:
command = module.Command()
run_command(command, [data.data_path])
reconstruction = data.load_reconstruction()
assert len(reconstruction[0].shots) == 3
assert len(reconstruction[0].points) > 1000
|
bsd-2-clause
|
Python
|
0c30226cf6037ce6a3938cfb1e8b98fe5ef4d767
|
Test for misconfigured skeletor cfg
|
krak3n/Facio,krak3n/Facio,krak3n/Facio,krak3n/Facio,krak3n/Facio
|
tests/test_config.py
|
tests/test_config.py
|
import sys
from skeletor.config import Config
from .base import BaseTestCase
from .helpers import nostdout
class ConfigTests(BaseTestCase):
""" Argument Passing & Config Tests. """
base_args = ['-n', 'test_skeleton']
def _set_cli_args(self, args):
with nostdout():
sys.argv = sys.argv + args
self.config = Config()
def should_exit_with_no_arguments(self):
try:
with nostdout():
Config()
except SystemExit:
assert True
def ensure_valid_project_name(self):
self._set_cli_args(['-n', 'this_is_valid'])
self.assertEquals(self.config.project_name, 'this_is_valid')
self._set_cli_args(['-n', 'Thisisvalid'])
self.assertEquals(self.config.project_name, 'Thisisvalid')
def should_exit_on_invalid_name(self):
try:
self._set_cli_args(['-n', 'this_is_not-valid'])
except SystemExit:
assert True
try:
self._set_cli_args(['-n', 'this_is not_valid'])
except SystemExit:
assert True
try:
self._set_cli_args(['-n', '*this_is_not_valid'])
except SystemExit:
assert True
def ensure_template_var_is_set_from_cli(self):
self._set_cli_args(self.base_args + ['--template', self.test_tpl_path])
self.assertEquals(self.config.template, self.test_tpl_path)
def should_raise_exit_if_template_section_is_not_list(self):
try:
self._set_cli_args(self.base_args)
self.config.set_template_options('this is not a list')
except SystemExit:
assert True
else:
assert False
def should_exit_if_skeletor_cfg_is_miss_configured(self):
try:
with nostdout():
self._set_cli_args(self.base_args)
self.config.set_attributes('not valid', {'not': 'valid'})
except SystemExit:
assert True
else:
assert False
|
import sys
from skeletor.config import Config
from .base import BaseTestCase
from .helpers import nostdout
class ConfigTests(BaseTestCase):
""" Argument Passing & Config Tests. """
base_args = ['-n', 'test_skeleton']
def _set_cli_args(self, args):
with nostdout():
sys.argv = sys.argv + args
self.config = Config()
def should_exit_with_no_arguments(self):
try:
with nostdout():
Config()
except SystemExit:
assert True
def ensure_valid_project_name(self):
self._set_cli_args(['-n', 'this_is_valid'])
self.assertEquals(self.config.project_name, 'this_is_valid')
self._set_cli_args(['-n', 'Thisisvalid'])
self.assertEquals(self.config.project_name, 'Thisisvalid')
def should_exit_on_invalid_name(self):
try:
self._set_cli_args(['-n', 'this_is_not-valid'])
except SystemExit:
assert True
try:
self._set_cli_args(['-n', 'this_is not_valid'])
except SystemExit:
assert True
try:
self._set_cli_args(['-n', '*this_is_not_valid'])
except SystemExit:
assert True
def ensure_template_var_is_set_from_cli(self):
self._set_cli_args(self.base_args + ['--template', self.test_tpl_path])
self.assertEquals(self.config.template, self.test_tpl_path)
def should_raise_exit_if_template_section_is_not_list(self):
self._set_cli_args(self.base_args)
try:
self.config.set_template_options('this is not a list')
except SystemExit:
assert True
|
bsd-3-clause
|
Python
|
b3e7bfab5920c45a19ba0ca67a8c0119714579ad
|
Update dtruss() tests
|
rodionovd/cuckoo-osx-analyzer,cuckoobox/cuckoo,cuckoobox/cuckoo,cuckoobox/cuckoo,cuckoobox/cuckoo,cuckoobox/cuckoo,rodionovd/cuckoo-osx-analyzer,rodionovd/cuckoo-osx-analyzer
|
tests/test_dtrace.py
|
tests/test_dtrace.py
|
#!/usr/bin/env python
# Copyright (C) 2015 Dmitry Rodionov
# This file is part of my GSoC'15 project for Cuckoo Sandbox:
# http://www.cuckoosandbox.org
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import os
import sys
import unittest
import subprocess
from dtrace.dtruss import *
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestDtrace(unittest.TestCase):
def setUp(self):
build_target(self.current_target())
def tearDown(self):
cleanup_target(self.current_target())
def current_target(self):
return self._testMethodName
def test_dtruss_helloworld(self):
# given
print_hello_world_syscall = ('write_nocancel', ['0x1', 'Hello, world!\\n\\0', '0xE'], 14, 0)
# when
output = dtruss("./tests/assets/"+self.current_target())
#then
self.assertIn(print_hello_world_syscall, output)
self.assertEqual(sum(x.name == "write_nocancel" for x in output), 1)
def build_target(target):
# clang -arch x86_64 -o $target_name $target_name.c
output = executable_name_for_target(target)
source = sourcefile_name_for_target(target)
subprocess.check_call(["clang", "-arch", "x86_64", "-o", output, source])
def cleanup_target(target):
os.remove(executable_name_for_target(target))
def sourcefile_name_for_target(target):
return "%s/assets/%s.c" % (TESTS_DIR, target)
def executable_name_for_target(target):
return "%s/assets/%s" % (TESTS_DIR, target)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# Copyright (C) 2015 Dmitry Rodionov
# This file is part of my GSoC'15 project for Cuckoo Sandbox:
# http://www.cuckoosandbox.org
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import os
import sys
import unittest
import subprocess
from dtrace.dtruss import *
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestDtrace(unittest.TestCase):
def setUp(self):
build_target(self.current_target())
def tearDown(self):
cleanup_target(self.current_target())
def current_target(self):
return self._testMethodName
def test_dtruss_helloworld(self):
# given
print_hello_world_syscall = ('write_nocancel', ['0x1', 'Hello, world!\\n\\0', '0xE'], 14, 0)
# when
output = dtruss("./tests/assets/"+self.current_target())
#then
self.assertIn(print_hello_world_syscall, output)
def build_target(target):
# clang -arch x86_64 -o $target_name $target_name.c
output = executable_name_for_target(target)
source = sourcefile_name_for_target(target)
subprocess.check_call(["clang", "-arch", "x86_64", "-o", output, source])
def cleanup_target(target):
os.remove(executable_name_for_target(target))
def sourcefile_name_for_target(target):
return "%s/assets/%s.c" % (TESTS_DIR, target)
def executable_name_for_target(target):
return "%s/assets/%s" % (TESTS_DIR, target)
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
52d8442068af3fbd848c32334327e48e623769c2
|
Change test class name
|
rafaellott/python_utils
|
tests/test_helper.py
|
tests/test_helper.py
|
# -*- coding: utf-8 -*-
"""Tests for _Helper class."""
# from python_utils.helper import _Helper
# from python_utils import helper
import unittest
from python_utils.helper import _Helper
class TestPythonUtils(unittest.TestCase):
"""Add documentation here."""
def setUp(self):
"""Add documentation here."""
self.helper = _Helper()
def tearDown(self):
"""Add documentation here."""
del self.helper
def test__1_add_section(self):
"""Add documentation here."""
self.helper.add_section_name("TESTE")
print self.helper.name
|
# -*- coding: utf-8 -*-
# from python_utils.helper import _Helper
#from python_utils import helper
import unittest
import python_utils
class TestPprint(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
|
mit
|
Python
|
6b2f403ce33205ec681ba1a511c2d52db02f6a36
|
Use pipelines for cache busting scans for better performance
|
dailymuse/oz,dailymuse/oz,dailymuse/oz
|
oz/plugins/aws_cdn/actions.py
|
oz/plugins/aws_cdn/actions.py
|
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
# Module for generating hashes for files that match a glob, and putting that
# hash in redis to allow us to generate cache-busting URLs later
import os
import oz
import oz.app
import oz.plugins.redis
import oz.plugins.aws_cdn
@oz.action
def cache_busting_scan(*prefixes):
"""
(Re-)generates the cache buster values for all files with the specified
prefixes.
"""
settings = oz.app.settings
redis = oz.plugins.redis.create_connection()
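# Batch all cache-buster writes on one pipeline so Redis is hit in a single round trip.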
pipe = redis.pipeline()
# Get all items that match any of the patterns. Put it in a set to
# prevent duplicates.
if settings["s3_bucket"]:
bucket = oz.plugins.aws_cdn.get_bucket()
matches = set([oz.plugins.aws_cdn.S3File(key) for prefix in prefixes for key in bucket.list(prefix)])
else:
matches = set([])
static_path = settings["static_path"]
for root, _, filenames in os.walk(static_path):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename), static_path)
for prefix in prefixes:
if path.startswith(prefix):
matches.add(oz.plugins.aws_cdn.LocalFile(static_path, path))
break
# Set the cache busters
for f in matches:
file_hash = f.hash(override=settings.get("hash_override", ""))
print(file_hash, f.path())
oz.plugins.aws_cdn.set_cache_buster(pipe, f.path(), file_hash)
pipe.execute()
|
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
# Module for generating hashes for files that match a glob, and putting that
# hash in redis to allow us to generate cache-busting URLs later
import os
import oz
import oz.app
import oz.plugins.redis
import oz.plugins.aws_cdn
@oz.action
def cache_busting_scan(*prefixes):
"""
(Re-)generates the cache buster values for all files with the specified
prefixes.
"""
settings = oz.app.settings
redis = oz.plugins.redis.create_connection()
# Get all items that match any of the patterns. Put it in a set to
# prevent duplicates.
if settings["s3_bucket"]:
bucket = oz.plugins.aws_cdn.get_bucket()
matches = set([oz.plugins.aws_cdn.S3File(key) for prefix in prefixes for key in bucket.list(prefix)])
else:
matches = set([])
static_path = settings["static_path"]
for root, _, filenames in os.walk(static_path):
for filename in filenames:
path = os.path.relpath(os.path.join(root, filename), static_path)
for prefix in prefixes:
if path.startswith(prefix):
matches.add(oz.plugins.aws_cdn.LocalFile(static_path, path))
break
# Set the cache busters
for f in matches:
file_hash = f.hash(override=settings.get("hash_override", ""))
print(file_hash, f.path())
oz.plugins.aws_cdn.set_cache_buster(redis, f.path(), file_hash)
|
bsd-3-clause
|
Python
|
b587557ab27598d7b1d273fbc445f27b40613a29
|
Update production bucket name.
|
us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite
|
us_ignite/settings/production.py
|
us_ignite/settings/production.py
|
# Production settings for us_ignite
import datetime
import os
import urlparse
from us_ignite.settings import *
# Sensitive values are saved as env variables:
env = os.getenv
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
# settings is one directory up now
here = lambda *x: os.path.join(PROJECT_ROOT, '..', *x)
SITE_URL = 'https://us-ignite.herokuapp.com'
ALLOWED_HOSTS = [
'us-ignite.herokuapp.com',
]
# HTTPS configuration:
SESSION_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 60 * 5
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = env('SECRET_KEY')
# Remote storage settings:
STATICFILES_STORAGE = 'us_ignite.common.storage.StaticS3Storage'
DEFAULT_FILE_STORAGE = 'us_ignite.common.storage.MediaS3Storage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'us-ignite-dot-org'
expire_date = datetime.date.today() + datetime.timedelta(days=365)
expire_seconds = 30 * 24 * 60 * 60
AWS_HEADERS = {
'Expires': expire_date.strftime('%a, %d %b %Y 00:00:00 GMT'),
'Cache-Control': 'max-age=%s' % expire_seconds,
}
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = '//%s/static/' % AWS_S3_CUSTOM_DOMAIN
redis_url = urlparse.urlparse(env('REDISTOGO_URL'))
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '%s:%s' % (redis_url.hostname, redis_url.port),
'OPTIONS': {
'DB': 0,
'PASSWORD': redis_url.password,
}
}
}
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = env('EMAIL_HOST')
EMAIL_PORT = env('EMAIL_PORT')
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
# Twitter API:
TWITTER_API_KEY = env('TWITTER_API_KEY')
TWITTER_API_SECRET = env('TWITTER_API_SECRET')
# WP email
WP_EMAIL = env('WP_EMAIL')
# Enable dummy content generation on this build:
ENABLE_DUMMY = True
if ENABLE_DUMMY:
INSTALLED_APPS += ('us_ignite.dummy', )
# List of words:
WORDS_PATH = here('..', 'words')
MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY')
MAILCHIMP_LIST = env('MAILCHIMP_LIST')
|
# Production settings for us_ignite
import datetime
import os
import urlparse
from us_ignite.settings import *
# Sensitive values are saved as env variables:
env = os.getenv
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
# settings is one directory up now
here = lambda *x: os.path.join(PROJECT_ROOT, '..', *x)
SITE_URL = 'https://us-ignite.herokuapp.com'
ALLOWED_HOSTS = [
'us-ignite.herokuapp.com',
]
# HTTPS configuration:
SESSION_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 60 * 5
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = env('SECRET_KEY')
# Remote storage settings:
STATICFILES_STORAGE = 'us_ignite.common.storage.StaticS3Storage'
DEFAULT_FILE_STORAGE = 'us_ignite.common.storage.MediaS3Storage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'local-us-ignite-dot-org'
expire_date = datetime.date.today() + datetime.timedelta(days=365)
expire_seconds = 30 * 24 * 60 * 60
AWS_HEADERS = {
'Expires': expire_date.strftime('%a, %d %b %Y 00:00:00 GMT'),
'Cache-Control': 'max-age=%s' % expire_seconds,
}
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = '//%s/static/' % AWS_S3_CUSTOM_DOMAIN
redis_url = urlparse.urlparse(env('REDISTOGO_URL'))
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '%s:%s' % (redis_url.hostname, redis_url.port),
'OPTIONS': {
'DB': 0,
'PASSWORD': redis_url.password,
}
}
}
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = env('EMAIL_HOST')
EMAIL_PORT = env('EMAIL_PORT')
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
# Twitter API:
TWITTER_API_KEY = env('TWITTER_API_KEY')
TWITTER_API_SECRET = env('TWITTER_API_SECRET')
# WP email
WP_EMAIL = env('WP_EMAIL')
# Enable dummy content generation on this build:
ENABLE_DUMMY = True
if ENABLE_DUMMY:
INSTALLED_APPS += ('us_ignite.dummy', )
# List of words:
WORDS_PATH = here('..', 'words')
MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY')
MAILCHIMP_LIST = env('MAILCHIMP_LIST')
|
bsd-3-clause
|
Python
|
9c61b0d27873c8c1ea2ba2311f547625a83bf7be
|
Add cached_function to API
|
Lodifice/mfnf-pdf-export,Lodifice/mfnf-pdf-export,Lodifice/mfnf-pdf-export
|
tests/test_parser.py
|
tests/test_parser.py
|
import shelve
from unittest import TestCase
import requests
import yaml
from mfnf.api import HTTPMediaWikiAPI
from mfnf.parser import HTML2JSONParser, ArticleContentParser
from mfnf.utils import CachedFunction
class TestParser(TestCase):
@classmethod
def setUpClass(cls):
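# Cache API responses in a shelve file so repeated test runs avoid network calls.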
cls.database = shelve.open(".cache.db", "c")
cached_function = CachedFunction(cls.database)
class CachedMediaWikiAPI(HTTPMediaWikiAPI):
@cached_function
def get_content(self, title):
return super().get_content(title)
@cached_function
def convert_text_to_html(self, title, text):
return super().convert_text_to_html(title, text)
cls.api = CachedMediaWikiAPI(requests.Session())
@classmethod
def tearDownClass(cls):
cls.database.close()
def setUp(self):
self.title = "Mathe für Nicht-Freaks: Analysis 1"
self.maxDiff = None
def parse(self, text):
return ArticleContentParser(api=self.api, title=self.title)(text)
def test_html2json_parser(self):
with open("docs/html.spec.yml") as spec_file:
spec = yaml.load(spec_file)
for html, target_json in ((x["in"], x["out"]) for x in spec):
with self.subTest(html=html):
parser = HTML2JSONParser()
parser.feed(html)
self.assertListEqual(parser.content, target_json, msg=html)
def test_parsing_block_elements(self):
with open("docs/mfnf-block-elements.spec.yml") as spec_file:
spec = yaml.load(spec_file)
for text, target in ((x["in"], x["out"]) for x in spec):
with self.subTest(text=text):
self.assertListEqual(self.parse(text), target, msg=text)
def test_parsing_inline_elements(self):
with open("docs/mfnf-inline-elements.spec.yml") as spec_file:
spec = yaml.load(spec_file)
for text, target in ((x["in"], x["out"]) for x in spec):
with self.subTest(text=text):
target = [{"type": "paragraph", "content": [target]}]
self.assertListEqual(self.parse(text), target, msg=text)
|
import requests
import yaml
from unittest import TestCase
from mfnf.api import HTTPMediaWikiAPI
from mfnf.parser import HTML2JSONParser, ArticleContentParser
class TestParser(TestCase):
def setUp(self):
self.api = HTTPMediaWikiAPI(requests.Session())
self.title = "Mathe für Nicht-Freaks: Analysis 1"
self.maxDiff = None
def parse(self, text):
return ArticleContentParser(api=self.api, title=self.title)(text)
def test_html2json_parser(self):
with open("docs/html.spec.yml") as spec_file:
spec = yaml.load(spec_file)
for html, target_json in ((x["in"], x["out"]) for x in spec):
with self.subTest(html=html):
parser = HTML2JSONParser()
parser.feed(html)
self.assertListEqual(parser.content, target_json, msg=html)
def test_parsing_block_elements(self):
with open("docs/mfnf-block-elements.spec.yml") as spec_file:
spec = yaml.load(spec_file)
for text, target in ((x["in"], x["out"]) for x in spec):
with self.subTest(text=text):
self.assertListEqual(self.parse(text), target, msg=text)
def test_parsing_inline_elements(self):
with open("docs/mfnf-inline-elements.spec.yml") as spec_file:
spec = yaml.load(spec_file)
for text, target in ((x["in"], x["out"]) for x in spec):
with self.subTest(text=text):
target = [{"type": "paragraph", "content": [target]}]
self.assertListEqual(self.parse(text), target, msg=text)
|
apache-2.0
|
Python
|
6ddaf77adb3a3d1ad42eee06aae657fe15f77fa7
|
revert to assertions
|
delgadom/clatter
|
tests/test_readme.py
|
tests/test_readme.py
|
import doctest
def test_readme():
errs, _ = doctest.testfile('../README.rst', report=True)
assert not errs
|
import doctest
def test_readme():
errs, _ = doctest.testfile('../README.rst', report=True)
if errs > 0:
raise ValueError(
'{} errors encountered in README.rst'.format(
errs))
|
mit
|
Python
|
7ef0fe9f1a2b91c72c2709ed025780547e329403
|
Update test
|
gatechzhu/ricker
|
tests/test_ricker.py
|
tests/test_ricker.py
|
import pytest
from ricker.ricker import ricker
class TestRicker:
def test_default_output(self):
dt = 0.002
length = 1
s = ricker(len=length, dt=dt)
assert len(s) == int(length / dt)
def test_input_check_f(self):
with pytest.raises(ValueError):
ricker(f=0)
def test_input_check_len(self):
with pytest.raises(ValueError):
ricker(len=0)
def test_input_check_dt(self):
with pytest.raises(ValueError):
ricker(dt=0)
def test_input_len_peak_loc(self):
with pytest.warns(UserWarning):
ricker(len=1, peak_loc=2)
|
import pytest
from ricker.ricker import ricker
class TestRicker:
def test_output_number(self):
assert len(ricker()) == 2
def test_default_output(self):
t, s = ricker()
assert len(t) == len(s)
def test_error(self):
with pytest.raises(ValueError):
ricker(f=0)
|
mit
|
Python
|
298b85a7c36e536a985b7ccffc8fefa135baa187
|
Fix TestRunner test case
|
bosondata/badwolf,bosondata/badwolf,bosondata/badwolf
|
tests/test_runner.py
|
tests/test_runner.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import tempfile
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
from badwolf.bitbucket import PullRequest, Changesets
@pytest.fixture(scope='function')
def push_context():
return TestContext(
'deepanalyzer/badwolf',
'[email protected]:deepanalyzer/badwolf.git',
{},
'commit',
'Update',
{
'branch': {'name': 'master'},
'commit': {'hash': '2cedc1af762'},
}
)
@pytest.fixture(scope='function')
def push_runner(push_context):
runner = TestRunner(push_context, Lock())
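# Place the clone in a per-task temp directory so concurrent runs do not collide.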
runner.clone_path = os.path.join(
tempfile.gettempdir(),
'badwolf',
runner.task_id,
runner.repo_name
)
return runner
def test_clone_repo_failed(app, push_runner):
with mock.patch.object(push_runner, 'update_build_status') as status, \
mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
mock.patch.object(push_runner, 'validate_settings') as validate_settings, \
mock.patch.object(PullRequest, 'comment') as pr_comment, \
mock.patch.object(Changesets, 'comment') as cs_comment:
status.return_value = None
clone_repo.side_effect = git.GitCommandError('git clone', 1)
pr_comment.return_value = None
cs_comment.return_value = None
push_runner.run()
validate_settings.assert_not_called()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from multiprocessing import Lock
import git
import mock
import pytest
from badwolf.runner import TestContext, TestRunner
@pytest.fixture(scope='function')
def push_context():
return TestContext(
'deepanalyzer/badwolf',
'[email protected]:deepanalyzer/badwolf.git',
{},
'commit',
'Update',
{
'branch': {'name': 'master'},
'commit': {'hash': '2cedc1af762'},
}
)
@pytest.fixture(scope='function')
def push_runner(push_context):
return TestRunner(push_context, Lock())
def test_clone_repo_failed(app, push_runner):
with mock.patch.object(push_runner, 'update_build_status') as status, \
mock.patch.object(push_runner, 'clone_repository') as clone_repo, \
mock.patch.object(push_runner, 'validate_settings') as validate_settings:
status.return_value = None
clone_repo.side_effect = git.GitCommandError('git clone', 1)
push_runner.run()
validate_settings.assert_not_called()
|
mit
|
Python
|
050e6ee000e89fb0ebeff5dcb2b6d79b10e92069
|
Fix monkeypatching for older scipy versions
|
audiolabs/stft
|
tests/test_things.py
|
tests/test_things.py
|
from __future__ import division
import stft
import numpy
import pytest
@pytest.fixture(params=[1, 2])
def channels(request):
return request.param
@pytest.fixture(params=[0, 1, 4])
def padding(request):
return request.param
@pytest.fixture(params=[2048])
def length(request):
return request.param
@pytest.fixture
def signal(channels, length):
return numpy.squeeze(numpy.random.random((length, channels)))
@pytest.fixture(params=[512])
def framelength(request):
return request.param
def test_shape(length, framelength):
a = numpy.squeeze(numpy.random.random((length, 1)))
x = stft.spectrogram(a, framelength=framelength, halved=True)
assert x.shape[0] == framelength / 2 + 1
x_2 = stft.spectrogram(a, framelength=framelength, halved=False)
assert x_2.shape[0] == framelength
def test_windowlength_errors():
"""
Test if way too short signals can be transformed
"""
siglen = 512
framelen = 2048
stft.spectrogram(numpy.random.random(siglen), framelength=framelen)
def test_precision(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.allclose(a, y)
def test_rms(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.sqrt(numpy.mean((a - y) ** 2)) < 1e-8
def test_maxdim():
a = numpy.random.random((512, 2, 2))
with pytest.raises(ValueError):
stft.spectrogram(a)
b = numpy.random.random((512, 2, 2, 3))
with pytest.raises(ValueError):
stft.ispectrogram(b)
def test_issue1():
a = numpy.random.random((512, 1))
b = stft.spectrogram(a)
assert b.ndim == 2
def raiser(*args):
raise AttributeError
def test_fallback(monkeypatch):
# Try monkeypatching signal.cosine away.
# Ignore AttributeErrors during monkeypatching, for older scipy versions
try:
import scipy.signal
monkeypatch.setattr("scipy.signal.cosine", raiser)
except AttributeError:
pass
return test_windowlength_errors()
|
from __future__ import division
import stft
import numpy
import pytest
@pytest.fixture(params=[1, 2])
def channels(request):
return request.param
@pytest.fixture(params=[0, 1, 4])
def padding(request):
return request.param
@pytest.fixture(params=[2048])
def length(request):
return request.param
@pytest.fixture
def signal(channels, length):
return numpy.squeeze(numpy.random.random((length, channels)))
@pytest.fixture(params=[512])
def framelength(request):
return request.param
def test_shape(length, framelength):
a = numpy.squeeze(numpy.random.random((length, 1)))
x = stft.spectrogram(a, framelength=framelength, halved=True)
assert x.shape[0] == framelength / 2 + 1
x_2 = stft.spectrogram(a, framelength=framelength, halved=False)
assert x_2.shape[0] == framelength
def test_windowlength_errors():
"""
Test if way too short signals can be transformed
"""
siglen = 512
framelen = 2048
stft.spectrogram(numpy.random.random(siglen), framelength=framelen)
def test_precision(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.allclose(a, y)
def test_rms(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.sqrt(numpy.mean((a - y) ** 2)) < 1e-8
def test_maxdim():
a = numpy.random.random((512, 2, 2))
with pytest.raises(ValueError):
stft.spectrogram(a)
b = numpy.random.random((512, 2, 2, 3))
with pytest.raises(ValueError):
stft.ispectrogram(b)
def test_issue1():
a = numpy.random.random((512, 1))
b = stft.spectrogram(a)
assert b.ndim == 2
def raiser(*args):
raise AttributeError
def test_fallback(monkeypatch):
import scipy.signal
monkeypatch.setattr("scipy.signal.cosine", raiser)
return test_windowlength_errors()
|
mit
|
Python
|
893e09b14eabff3a6ec2ff87db0499bc3fd2a213
|
fix tests to use forced aligner
|
lowerquality/gentle,lowerquality/gentle,lowerquality/gentle,lowerquality/gentle
|
tests/transcriber.py
|
tests/transcriber.py
|
import os
import unittest
class Aligner(unittest.TestCase):
audio = 'examples/data/lucier.mp3'
transcript = "i am sitting in a room"
def test_resources(self):
from gentle import Resources
from gentle.util.paths import get_binary
resources = Resources()
k3 = get_binary("ext/k3")
model = get_binary("exp/tdnn_7b_chain_online/final.mdl" )
self.assertEqual(os.path.exists(self.audio), True)
self.assertEqual(os.path.exists(k3), True)
self.assertEqual(os.path.exists(model), True)
def test_aligner(self):
import subprocess
from gentle import resampled, standard_kaldi, Resources
from gentle.forced_aligner import ForcedAligner
from gentle.transcription import Word
standard_kaldi.STDERR = subprocess.STDOUT
resources = Resources()
align = ForcedAligner(resources, self.transcript, nthreads=1)
with resampled(self.audio, 5.0, 5.0) as filename:
transcription = align.transcribe(filename)
words = transcription.words
self.assertEqual(words[0].word, "i")
self.assertEqual(words[1].word, "am")
self.assertEqual(words[1].case, Word.SUCCESS)
|
import os
import unittest
class Transcriber(unittest.TestCase):
audio = 'examples/data/lucier.mp3'
def test_resources(self):
from gentle import Resources
from gentle.util.paths import get_binary
resources = Resources()
k3 = get_binary("ext/k3")
self.assertEqual(os.path.exists(resources.full_hclg_path), True)
self.assertEqual(os.path.exists(self.audio), True)
self.assertEqual(os.path.exists(k3), True)
def test_transcriber(self):
import subprocess
from gentle import resampled, kaldi_queue, standard_kaldi, Resources
from gentle.transcriber import MultiThreadedTranscriber
standard_kaldi.STDERR = subprocess.STDOUT
resources = Resources()
k_queue = kaldi_queue.build(resources, 1)
trans = MultiThreadedTranscriber(k_queue)
with resampled(self.audio, 10.5, 2.5) as filename:
words, duration = trans.transcribe(filename)
self.assertEqual(words[0].word, "different")
|
mit
|
Python
|
72e948719145579eb7dfb9385b921f8eb6ea1384
|
Add more exemplar primitive generators
|
maxalbert/tohu
|
tests/v4/conftest.py
|
tests/v4/conftest.py
|
from .context import tohu
from tohu.v4.primitive_generators import *
from tohu.v4.derived_generators import *
__all__ = ['EXEMPLAR_GENERATORS', 'EXEMPLAR_PRIMITIVE_GENERATORS', 'EXEMPLAR_DERIVED_GENERATORS']
def add(x, y):
return x + y
EXEMPLAR_PRIMITIVE_GENERATORS = [
Boolean(p=0.3),
Constant("quux"),
FakerGenerator(method="name"),
Float(12.34, 56.78),
HashDigest(length=6),
Integer(100, 200),
IterateOver('abcdefghijklmnopqrstuvwxyz'),
SelectOne('abcdefghijklmnopqrstuvwxyz'),
SelectOne('abcde', p=[0.1, 0.05, 0.7, 0.03, 0.12]),
Timestamp(date='2018-01-01'),
]
EXEMPLAR_DERIVED_GENERATORS = [
Apply(add, Integer(100, 200), Integer(300, 400)),
Apply(add, Apply(add, Integer(100, 200), Integer(300, 400)), Apply(add, Integer(500, 600), Integer(700, 800))),
]
EXEMPLAR_CUSTOM_GENERATORS = []
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
|
from .context import tohu
from tohu.v4.primitive_generators import *
from tohu.v4.derived_generators import *
__all__ = ['EXEMPLAR_GENERATORS', 'EXEMPLAR_PRIMITIVE_GENERATORS', 'EXEMPLAR_DERIVED_GENERATORS']
def add(x, y):
return x + y
EXEMPLAR_PRIMITIVE_GENERATORS = [
Constant("quux"),
Integer(100, 200),
HashDigest(length=6),
FakerGenerator(method="name"),
IterateOver('abcdefghijklmnopqrstuvwxyz'),
SelectOne('abcdefghijklmnopqrstuvwxyz'),
SelectOne('abcde', p=[0.1, 0.05, 0.7, 0.03, 0.12]),
Timestamp(date='2018-01-01'),
]
EXEMPLAR_DERIVED_GENERATORS = [
Apply(add, Integer(100, 200), Integer(300, 400)),
Apply(add, Apply(add, Integer(100, 200), Integer(300, 400)), Apply(add, Integer(500, 600), Integer(700, 800))),
]
EXEMPLAR_CUSTOM_GENERATORS = []
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
|
mit
|
Python
|
e6305725a57bd6daca24e66699a8e3b0ead8d866
|
Split long line
|
cgeoffroy/son-analyze,cgeoffroy/son-analyze
|
utils/ci/topology_integration.py
|
utils/ci/topology_integration.py
|
#!/usr/bin/env python2
# pylint: disable=missing-docstring
import time
import signal
import threading
from emuvim.dcemulator.net import DCNetwork
from mininet.node import RemoteController
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
class SigTermCatcher:
def __init__(self, net):
self.net = net
signal.signal(signal.SIGTERM, self.stop_containernet)
signal.signal(signal.SIGINT, self.stop_containernet)
def stop_containernet(self, signum, frame):
self.net.stop()
time.sleep(2)
exit(1)
def _in_separate_thread(net):
net.start()
def setup_topology(net):
dc = net.addDatacenter("dc") # pylint: disable=invalid-name
# add the SONATA dummy gatekeeper to each DC
sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
sdkg1.connectDatacenter(dc)
# run the dummy gatekeeper (in another thread, don't block)
sdkg1.start()
def main():
net = DCNetwork(controller=RemoteController,
monitor=True,
enable_learning=True)
SigTermCatcher(net)
setup_topology(net)
sub_thread = threading.Thread(target=_in_separate_thread, args=(net,))
sub_thread.start()
while True:
time.sleep(120)
exit(2)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python2
# pylint: disable=missing-docstring
import time
import signal
import threading
from emuvim.dcemulator.net import DCNetwork
from mininet.node import RemoteController
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
class SigTermCatcher:
def __init__(self, net):
self.net = net
signal.signal(signal.SIGTERM, self.stop_containernet)
signal.signal(signal.SIGINT, self.stop_containernet)
def stop_containernet(self, signum, frame):
self.net.stop()
time.sleep(2)
exit(1)
def _in_separate_thread(net):
net.start()
def setup_topology(net):
dc = net.addDatacenter("dc") # pylint: disable=invalid-name
# add the SONATA dummy gatekeeper to each DC
sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
sdkg1.connectDatacenter(dc)
# run the dummy gatekeeper (in another thread, don't block)
sdkg1.start()
def main():
net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
SigTermCatcher(net)
setup_topology(net)
sub_thread = threading.Thread(target=_in_separate_thread, args=(net,))
sub_thread.start()
while True:
time.sleep(120)
exit(2)
if __name__ == "__main__":
main()
|
apache-2.0
|
Python
|
d49997058c54bfeabe21a7284bdf3cf07c76075b
|
add doc
|
tencentyun/Cloud-Image-Migration-Tool,tencentyun/Cloud-Image-Migration-Tool,JamisHoo/Cloud-Image-Migration-Tool,JamisHoo/Cloud-Image-Migration-Tool
|
usr/sbin/local_fs_job_manager.py
|
usr/sbin/local_fs_job_manager.py
|
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2015 Tencent Inc.
# Distributed under the MIT license
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
#
# Project: Cloud Image Migration Tool
# Filename: local_fs_job_manager.py
# Version: 2.0
# Author: Jamis Hoo
# E-mail: [email protected]
# Date: Sep 7, 2015
# Time: 14:29:44
###############################################################################
from base_job_manager import BaseJobManager
import os
class LocalFSJobManager(BaseJobManager):
"""
Derived class of BaseJobManager.
Traverse local files and submit.
Attributes:
mandatory_options: Configuration options required by this class. This is
a list of tuples each of which contains two strings, section name and
property name, both of which are case-insensitive.
"""
mandatory_options = [
("local", "local.image_root_path"),
]
def __init__(self, config):
"""
Initialize base class.
"""
super(LocalFSJobManager, self).__init__(config)
@staticmethod
def check_config(config):
"""
Check whether all required options are provided.
Also check the validity of some options.
Args:
config: configuration dict
Returns:
Returns string containing error message if there are some errors.
Returns none otherwise.
"""
for section, option in LocalFSJobManager.mandatory_options:
if section not in config or option not in config[section]:
return "Error: Option %s.%s is required. " % (section, option)
if not os.path.isabs(config["local"]["local.image_root_path"]):
return "Error: Image root path %s is not absolute path. " % config["local"]["local.image_root_path"]
if not os.path.isdir(config["local"]["local.image_root_path"]):
return "Error: Image root path %s is not directory. " % config["local"]["local.image_root_path"]
def do(self):
"""
Implementation of abstract method.
Traverse a directory and submit each file, with relative path as its
file id and absolute path as its src.
"""
image_root_path = self.config["local"]["local.image_root_path"]
for dirpath, dirs, files in os.walk(image_root_path, followlinks = True):
for filename in files:
full_name = os.path.join(dirpath, filename)
fileid = os.path.relpath(full_name, image_root_path)
self.submit(fileid, "file://%s" % full_name)
|
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2015 Tencent Inc.
# Distributed under the MIT license
# (See accompanying file LICENSE or copy at http://opensource.org/licenses/MIT)
#
# Project: Cloud Image Migration Tool
# Filename: local_fs_job_manager.py
# Version: 2.0
# Author: Jamis Hoo
# E-mail: [email protected]
# Date: Sep 7, 2015
# Time: 14:29:44
# Description: derived job manager for local FS
###############################################################################
from base_job_manager import BaseJobManager
import os
class LocalFSJobManager(BaseJobManager):
mandatory_options = [
("local", "local.image_root_path"),
]
def __init__(self, config):
super(LocalFSJobManager, self).__init__(config)
@staticmethod
def check_config(config):
for section, option in LocalFSJobManager.mandatory_options:
if section not in config or option not in config[section]:
return "Error: Option %s.%s is required. " % (section, option)
if not os.path.isabs(config["local"]["local.image_root_path"]):
return "Error: Image root path %s is not absolute path. " % config["local"]["local.image_root_path"]
if not os.path.isdir(config["local"]["local.image_root_path"]):
return "Error: Image root path %s is not directory. " % config["local"]["local.image_root_path"]
# implementation of abstract method
def do(self):
image_root_path = self.config["local"]["local.image_root_path"]
for dirpath, dirs, files in os.walk(image_root_path, followlinks = True):
for filename in files:
full_name = os.path.join(dirpath, filename)
fileid = os.path.relpath(full_name, image_root_path)
self.submit(fileid, "file://%s" % full_name)
|
mit
|
Python
|
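A short usage sketch of the manager above, with a hypothetical image root path — it is not taken from the record. check_config is called first so a bad root fails fast, then do() walks the tree and submits one job per file.
config = {"local": {"local.image_root_path": "/data/images"}}  # hypothetical
error = LocalFSJobManager.check_config(config)
if error:
    raise SystemExit(error)
manager = LocalFSJobManager(config)  # BaseJobManager is assumed to wire up submit()
manager.do()  # submits each file as (relative path, file://absolute-path)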
12d22221df5786caee510cc167c9ef29f9155488
|
Correct name of output file
|
DanielAndreasen/FASMA-web,DanielAndreasen/FASMA-web,DanielAndreasen/FASMA-web,DanielAndreasen/FASMA-web
|
var/www/cgi-bin/abundanceConf.py
|
var/www/cgi-bin/abundanceConf.py
|
#!/home/daniel/Software/anaconda3/bin/python
# Import modules for CGI handling
import cgi, cgitb
from abundanceDriver import abundancedriver
from emailSender import sendEmail
def cgi2dict(form):
"""Convert the form from cgi.FieldStorage to a python dictionary"""
params = {}
for key in form.keys():
params[key] = form[key].value
return params
def abundance(form):
"""Create the configuration file for running the abundance driver"""
# Make the StarMe_abundance.cfg
fout = '/tmp/linelist.moog {Teff} {logg} {feh} {vt}'.format(**form)
with open('/tmp/StarMe_abundance.cfg', 'w') as f:
f.writelines(fout+'\n')
abundancedriver('/tmp/StarMe_abundance.cfg')
if __name__ == '__main__':
# Enable debugging
cgitb.enable()
form = cgi.FieldStorage()
# Run ARES for one or several line lists
formDict = cgi2dict(form)
abundance(formDict)
sendEmail(to=formDict['email'], driver='abundances', data='/tmp/abundresults.dat')
# Show the finished html page
print "Content-type: text/html\n\n"
with open('../html/finish.html', 'r') as lines:
for line in lines:
print line
|
#!/home/daniel/Software/anaconda3/bin/python
# Import modules for CGI handling
import cgi, cgitb
from abundanceDriver import abundancedriver
from emailSender import sendEmail
def cgi2dict(form):
"""Convert the form from cgi.FieldStorage to a python dictionary"""
params = {}
for key in form.keys():
params[key] = form[key].value
return params
def abundance(form):
"""Create the configuration file for running the abundance driver"""
# Make the StarMe_abundance.cfg
fout = '/tmp/linelist.moog {Teff} {logg} {feh} {vt}'.format(**form)
with open('/tmp/StarMe_abundance.cfg', 'w') as f:
f.writelines(fout+'\n')
abundancedriver('/tmp/StarMe_abundance.cfg')
if __name__ == '__main__':
# Enable debugging
import os
os.system('touch /tmp/test1')
cgitb.enable()
form = cgi.FieldStorage()
# Run ARES for one or several line lists
formDict = cgi2dict(form)
abundance(formDict)
sendEmail(to=formDict['email'], driver='abundances', data='/tmp/abundances.dat')
# Show the finished html page
print "Content-type: text/html\n\n"
with open('../html/finish.html', 'r') as lines:
for line in lines:
print line
|
mit
|
Python
|
ced3fd5fc8945fbb0ac79b3e90833173b1c72e93
|
disable not callable
|
djaodjin/djaodjin-pages,djaodjin/djaodjin-pages,smirolo/djaodjin-pages,smirolo/djaodjin-pages,djaodjin/djaodjin-pages
|
pages/tasks.py
|
pages/tasks.py
|
from celery import task
from pages.models import UploadedImage
from pages.settings import IMG_PATH
# XXX - not callable on pylint!
@task()#pylint: disable=not-callable
def upload_to_s3(img, account, tags, filename):
img_obj = UploadedImage(
img=img,
account=account,
tags=tags
)
img_obj.save()
print filename
full_path = IMG_PATH + account.slug + '/' + filename
UploadedImage.objects.filter(
img=full_path).order_by('-created_at')[0].delete()
|
from celery import task
from pages.models import UploadedImage
from pages.settings import IMG_PATH
@task()
def upload_to_s3(img, account, tags, filename):
img_obj = UploadedImage(
img=img,
account=account,
tags=tags
)
img_obj.save()
print filename
full_path = IMG_PATH + account.slug + '/' + filename
UploadedImage.objects.filter(
img=full_path).order_by('-created_at')[0].delete()
|
bsd-2-clause
|
Python
|
08b5ccc5ff94ced8d582d1f023901d2ea25aca53
|
Disable timeout on reindex
|
davidbgk/udata,grouan/udata,davidbgk/udata,grouan/udata,davidbgk/udata,jphnoel/udata,etalab/udata,jphnoel/udata,etalab/udata,opendatateam/udata,jphnoel/udata,opendatateam/udata,opendatateam/udata,etalab/udata,grouan/udata
|
udata/core/search/commands.py
|
udata/core/search/commands.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from udata.commands import manager
from udata.search import es, adapter_catalog
log = logging.getLogger(__name__)
@manager.option('-t', '--type', dest='doc_type', default=None, help='Only reindex a given type')
def reindex(doc_type=None):
'''Reindex models'''
for model, adapter in adapter_catalog.items():
if not doc_type or doc_type == adapter.doc_type():
print 'Reindexing {0} objects'.format(model.__name__)
if es.indices.exists_type(index=es.index_name, doc_type=adapter.doc_type()):
es.indices.delete_mapping(index=es.index_name, doc_type=adapter.doc_type())
es.indices.put_mapping(index=es.index_name, doc_type=adapter.doc_type(), body=adapter.mapping)
qs = model.objects.visible() if hasattr(model.objects, 'visible') else model.objects
for obj in qs.timeout(False):
es.index(index=es.index_name, doc_type=adapter.doc_type(), id=obj.id, body=adapter.serialize(obj))
es.indices.refresh(index=es.index_name)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from udata.commands import manager
from udata.search import es, adapter_catalog
log = logging.getLogger(__name__)
@manager.option('-t', '--type', dest='doc_type', default=None, help='Only reindex a given type')
def reindex(doc_type=None):
'''Reindex models'''
for model, adapter in adapter_catalog.items():
if not doc_type or doc_type == adapter.doc_type():
print 'Reindexing {0} objects'.format(model.__name__)
if es.indices.exists_type(index=es.index_name, doc_type=adapter.doc_type()):
es.indices.delete_mapping(index=es.index_name, doc_type=adapter.doc_type())
es.indices.put_mapping(index=es.index_name, doc_type=adapter.doc_type(), body=adapter.mapping)
qs = model.objects.visible() if hasattr(model.objects, 'visible') else model.objects
for obj in qs:
es.index(index=es.index_name, doc_type=adapter.doc_type(), id=obj.id, body=adapter.serialize(obj))
es.indices.refresh(index=es.index_name)
|
agpl-3.0
|
Python
|
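The one-line change above hinges on MongoDB cursor behaviour: a long reindex can outlive the server's default cursor idle timeout, so the query set is iterated with timeout(False). A minimal sketch of that idiom, assuming a hypothetical Dataset model and a local MongoDB:
from mongoengine import Document, StringField, connect

class Dataset(Document):
    title = StringField()

connect('example')  # hypothetical database name
# timeout(False) asks the server not to expire the cursor while the
# (potentially slow) indexing loop is still consuming it.
for obj in Dataset.objects.timeout(False):
    print(obj.title)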
f577ff84c7231dff4fe01e7e9b73f1b497993b41
|
remove commented out code and debug statements.
|
openxc/openxc-python,openxc/openxc-python,openxc/openxc-python
|
openxc/tools/obd2scanner.py
|
openxc/tools/obd2scanner.py
|
"""
This module contains the methods for the ``openxc-obd2scanner`` command line
program.
`main` is executed when ``openxc-obd2scanner`` is run, and all other callables in this
module are internal only.
"""
import argparse
from .common import device_options, configure_logging, select_device
import json
def scan(controller, bus=None):
# TODO could read the response from the "PIDs supported" requests to see
# what the vehicle reports that it *should* support.
print("Beginning sequential scan of all OBD-II PIDs")
for pid in range(0xd0ff, 0xd101):
response = controller.create_diagnostic_request(0x7d0, mode=0x22, bus=bus,
wait_for_first_response=True, pid=pid)
if response is not None:
no_response = True
for item in response[1]:
if 'success' in item:
no_response = False
print(("PID 0x%x responded with: %s" % (pid, item)))
if (no_response == True):
print(("PID 0x%x did not respond" % pid))
def parse_options():
parser = argparse.ArgumentParser(description="Send requests for all "
"OBD-II PIDs sequentially to see what actually responds",
parents=[device_options()])
parser.add_argument("--bus")
return parser.parse_args()
def main():
configure_logging()
arguments = parse_options()
controller_class, controller_kwargs = select_device(arguments)
controller = controller_class(**controller_kwargs)
controller.start()
scan(controller, arguments.bus)
|
"""
This module contains the methods for the ``openxc-obd2scanner`` command line
program.
`main` is executed when ``openxc-obd2scanner`` is run, and all other callables in this
module are internal only.
"""
import argparse
from .common import device_options, configure_logging, select_device
import json
def scan(controller, bus=None):
# TODO could read the response from the "PIDs supported" requests to see
# what the vehicle reports that it *should* support.
print("Beginning sequential scan of all OBD-II PIDs")
for pid in range(0, 0x88):
response = controller.create_diagnostic_request(0x7df, mode=0x1, bus=bus,
wait_for_first_response=True, pid=pid)
if response is not None:
no_response = True
for item in response[1]:
if 'success' in item:
no_response = False
print(("PID 0x%x responded with: %s" % (pid, item)))
# if item['success']:
# if 'name' in item:
# print('found success true response at ' + item['name'])
# elif 'id' in item:
# print('found success true response at id ' + str(item['id']))
# else:
# # print('idk')
# print(("PID 0x%x responded with: %s" % (pid, response)))
if (no_response == True):
print(("PID 0x%x did not respond" % pid))
# else:
# print(("PID 0x%x did not respond" % pid))
def parse_options():
parser = argparse.ArgumentParser(description="Send requests for all "
"OBD-II PIDs sequentially to see what actually responds",
parents=[device_options()])
parser.add_argument("--bus")
return parser.parse_args()
def main():
configure_logging()
arguments = parse_options()
controller_class, controller_kwargs = select_device(arguments)
controller = controller_class(**controller_kwargs)
controller.start()
scan(controller, arguments.bus)
|
bsd-3-clause
|
Python
|
24b8e2f7440926d6d1c384a7289dfb5d1124e82f
|
Add article on core admin
|
YACOWS/opps,opps/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,opps/opps,jeanmask/opps,opps/opps,williamroot/opps
|
opps/core/admin/__init__.py
|
opps/core/admin/__init__.py
|
# -*- coding: utf-8 -*-
from opps.core.admin.article import *
from opps.core.admin.channel import *
from opps.core.admin.profile import *
from opps.core.admin.source import *
|
# -*- coding: utf-8 -*-
from opps.core.admin.channel import *
from opps.core.admin.profile import *
from opps.core.admin.source import *
|
mit
|
Python
|
cf096184562d723d321f179732aa25f03be35c6d
|
build graphs without saving the dents
|
thammi/digger
|
auto_fetch.py
|
auto_fetch.py
|
#!/usr/bin/env python
###############################################################################
##
## digger - Digging into some data mines
## Copyright (C) 2010 Thammi
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
from os.path import join, exists
from microblogging import *
from test import *
def search(service, query):
urls = {
'identica' : "http://identi.ca/api/search.json",
'twitter' : "http://search.twitter.com/search.json",
}
if service not in urls:
raise UnknownServiceException(service)
url_parts = {
'query': urllib.urlencode({'q': query}),
'url': urls[service],
}
res = urllib.urlopen("{url}?{query}".format(**url_parts))
if res.getcode() < 300:
return json.load(res)
else:
msg = "Unable to fetch: %i" % res.getcode()
raise ServiceFailedException(msg)
def user_exists(service, user):
return exists(user_path(service, user))
def user_path(service, user):
return join('auto', service, user)
def main(argv):
service = argv[0]
tags = argv[1:]
updates = search(service, ' '.join('#' + tag for tag in tags))['results']
users = set(update['from_user'] for update in updates)
users = filter(lambda u: not user_exists(service, u), users)
print "Fetching: " + ', '.join(users)
for user in users:
try:
print "==> Fetching '%s'" % user
updates = get_statuses(service, user, 1000)
blob_graph(updates, user_path(service, user), microblogging_date)
except Exception as e:
# the show must go on ...
print e
if __name__ == '__main__':
import sys
main(sys.argv[1:])
|
#!/usr/bin/env python
###############################################################################
##
## digger - Digging into some data mines
## Copyright (C) 2010 Thammi
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
from microblogging import *
def search(service, query):
urls = {
'identica' : "http://identi.ca/api/search.json",
'twitter' : "http://search.twitter.com/search.json",
}
if service not in urls:
raise UnknownServiceException(service)
url_parts = {
'query': urllib.urlencode({'q': query}),
'url': urls[service],
}
res = urllib.urlopen("{url}?{query}".format(**url_parts))
if res.getcode() < 300:
return json.load(res)
else:
msg = "Unable to fetch: %i" % res.getcode()
raise ServiceFailedException(msg)
def main(argv):
service = argv[0]
tags = argv[1:]
updates = search(service, ' '.join('#' + tag for tag in tags))['results']
users = set(update['from_user'] for update in updates)
save_users(service, users)
if __name__ == '__main__':
import sys
main(sys.argv[1:])
|
agpl-3.0
|
Python
|
ea416504c287bc5a3716289b57ebfd15bb770b9d
|
Use a string instead of a file
|
eXcomm/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,studio666/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com
|
sql/branch.py
|
sql/branch.py
|
import sys
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
# Temporary, will fill with actual values when running script
email_txt = """
[email protected]
[email protected]
"""
emails = [email.strip() for email in email_txt.split()]
assert len(emails) == 176
participants = []
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('double_emails')
print("Done")
sys.exit()
|
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
emails = [line.rstrip() for line in f]
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE email_address IN %s
""", (tuple(emails), ))
for p in participants:
p.queue_email('double_emails')
|
mit
|
Python
|
36e3ee242098f1768e009fca320c5d94142529d1
|
set debug to false
|
tmthyjames/Achoo,tmthyjames/Achoo,tmthyjames/Achoo,tmthyjames/Achoo,tmthyjames/Achoo
|
ui/app.py
|
ui/app.py
|
from flask import Flask, redirect, url_for, flash, g, config, session
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, current_user, login_user
from werkzeug.security import generate_password_hash, check_password_hash
from flask_restful import Resource
from flask import request
# views
from app.views.views import main as views_blueprints
# forms
from app.forms.forms import LoginForm
# api
from app.api.api import Prediction, Admin
# models
from app.models.models import User
# app
from app import app
# config
from app.config import Config
app.register_blueprint(views_blueprints)
login_manager = LoginManager(app)
login_manager.init_app(app)
@app.before_request
def inject_globals():
with app.app_context():
session['VERSION'] = Config.VERSION
session['MSG'] = Config.MSG
return None
@login_manager.user_loader
# @app.before_request
def load_user(user_id):
return User.query.get(user_id)
api = Api(app)
api.add_resource(Prediction, '/api/1.0/prediction/')
api.add_resource(Admin, '/api/1.0/user/')
if __name__ == '__main__':
app.run()
|
from flask import Flask, redirect, url_for, flash, g, config, session
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, current_user, login_user
from werkzeug.security import generate_password_hash, check_password_hash
from flask_restful import Resource
from flask import request
# views
from app.views.views import main as views_blueprints
# forms
from app.forms.forms import LoginForm
# api
from app.api.api import Prediction, Admin
# models
from app.models.models import User
# app
from app import app
# config
from app.config import Config
app.register_blueprint(views_blueprints)
login_manager = LoginManager(app)
login_manager.init_app(app)
@app.before_request
def inject_globals():
with app.app_context():
session['VERSION'] = Config.VERSION
session['MSG'] = Config.MSG
return None
@login_manager.user_loader
# @app.before_request
def load_user(user_id):
return User.query.get(user_id)
api = Api(app)
api.add_resource(Prediction, '/api/1.0/prediction/')
api.add_resource(Admin, '/api/1.0/user/')
if __name__ == '__main__':
app.run(debug=True)
|
mit
|
Python
|
6f155538c02a25fcdd038c703d78f8785dfc9da8
|
Add most cache functionality
|
bewt85/genbankfs
|
genbankfs/cache.py
|
genbankfs/cache.py
|
import hashlib
import os
import socket
import urllib
from Queue import Queue, Full, Empty
from StringIO import StringIO
from threading import Lock, Thread
# Set download timeout
socket.setdefaulttimeout(600)
class DownloadError(Exception):
pass
class DownloadWithExceptions(urllib.FancyURLopener):
def error(self, *args, **kwargs):
raise DownloadError("There was a problem with your download")
http_error_401 = error
http_error_403 = error
http_error_404 = error
def create_warning_file(root_dir, filename_prefix, message):
message_digest = hashlib.md5(message).hexdigest()
file_path = os.path.join(root_dir, 'tmp', "%s_%s.tmp" % (filename_prefix,
message_digest))
if not os.path.isdir(os.path.join(root_dir, 'tmp')):
os.mkdir(os.path.join(root_dir, 'tmp'), 0755)
if not os.path.isfile(file_path):
with open(file_path, 'w') as f:
f.write(message)
return os.path.abspath(file_path)
download_queue_warning = """\
WARNING: You seem to be downloading a lot!
To protect you from accidentally downloading all of
the internet at once, we've implemented a queue
system which means that you can only request up to
%(max_downloads)s downloads at once. If you ask
for more than this, the first %(max_downloads)s
are downloaded and this message is temporarily
returned.
To get the files you want, simply wait a few
minutes and retry by which time you should be able
to get a few more of them.
Apologies for the inconvenience
"""
download_timeout_warning = """\
WARNING: The download timed out
We couldn't find this file in our cache so tried
to download it. Unfortunately the download timed
out. Please try again later
"""
download_error = """\
WARNING: There was a problem downloading this file
Please try again later
"""
class GenbankCache(object):
def __init__(self, root_dir, lookup_func, max_queue=100, concurent_downloads=2):
self.lookup = lookup_func
self.max_queue = max_queue
self.root_dir = os.path.realpath(root_dir)
self.download_queue = Queue(maxsize=max_queue)
self.rwlock = Lock()
self.threads = [Thread(target=self._download_queued, args=(self.download_queue,))
for i in xrange(concurent_downloads)]
for thread in self.threads:
thread.daemon = True
thread.start()
# TODO: create root dir if missing
self.warning_files = {
'queue': create_warning_file(self.root_dir, 'download_queue_warning',
download_queue_warning % {'max_downloads': self.max_queue}),
'timeout': create_warning_file(self.root_dir, 'download_timeout_warning', download_timeout_warning),
'error': create_warning_file(self.root_dir, 'download_error', download_error)
}
def open(self, path, flags):
cache_path = os.path.join(self.root_dir, path)
self._check_in_root(cache_path)
# TODO: what if path includes a directory which doesn't exist?
try:
return os.open(cache_path, flags)
except OSError:
pass
try:
origin_path = self.lookup(path)
except:
raise IOError('%s not found and not available for download' % path)
return self.download(cache_path, origin_path, flags)
def read(self, size, offset, fh):
with self.rwlock:
os.lseek(fh, offset, 0)
return os.read(fh, size)
def download(self, cache_path, origin_path, flags, timeout=600):
result = Queue()
try:
self.download_queue.put_nowait((cache_path, origin_path, flags, result))
output_file = result.get(timeout=timeout)
return output_file
except Full:
return os.open(self.warning_files['queue'], flags)
except Empty:
return os.open(self.warning_files['timeout'], flags)
def _download_queued(self, queue):
downloader = DownloadWithExceptions()
while True:
cache_path, origin_path, flags, result = queue.get()
try:
# TODO: what if it was downloaded since it was originally queued?
# TODO: what if the target directory doesn't exist?
download_path, status = downloader.retrieve(origin_path, cache_path)
result.put(os.open(download_path, flags))
except:
result.put(os.open(self.warning_files['error'], flags))
finally:
queue.task_done()
def _check_in_root(self, path):
if not os.path.realpath(path).startswith(self.root_dir):
raise IOError("Relative links in path would take us outside the root dir: %s not in %s" % (os.path.realpath(path), self.root_dir))
|
class GenbankCache(object):
pass
|
mit
|
Python
|
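The cache above leans on one idiom worth isolating: put_nowait on a bounded Queue raises Full immediately instead of blocking, which is what lets open() fall back to the "download queue" warning file. A standalone sketch, written with the Python 3 queue module rather than the record's Python 2 Queue:
from queue import Queue, Full

q = Queue(maxsize=2)
for job in ("a", "b", "c"):
    try:
        q.put_nowait(job)  # raises Full once maxsize is reached
    except Full:
        print("queue full; would hand back the warning file for", job)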
bd1ae8fbcbcdfc649c765259f543f52a5a21c303
|
Reset root logger before setting up logging
|
agdsn/hades,agdsn/hades,agdsn/hades,agdsn/hades,agdsn/hades
|
src/hades/common/cli.py
|
src/hades/common/cli.py
|
import argparse
import logging
import os
import sys
from gettext import gettext as _
from hades import constants
class ArgumentParser(argparse.ArgumentParser):
"""
ArgumentParser subclass that exits with os.EX_USAGE exit code if parsing
fails.
"""
def error(self, message):
self.print_usage(sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(os.EX_USAGE, _('%(prog)s: error: %(message)s\n') % args)
parser = ArgumentParser(add_help=False)
parser.add_argument('-c', '--config', default=None, help="Path to config file")
parser.add_argument('-v', '--verbose', dest='verbosity',
default=None, action='count', help='Be more verbose')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0, help='Be quiet')
parser.add_argument('-V', '--version', action='version',
version=constants.PACKAGE_VERSION)
VERBOSITY_LEVELS = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
DEFAULT_VERBOSITY = 1
def setup_cli_logging(program, args):
"""
Setup logging for CLI applications, that do not configure logging
themselves.
Flask and Celery are quite opinionated about logging, so this function
should probably not be called in their launchers.
:param program: The name of the program
:param args: The parsed arguments of the program
"""
reset_cli_logging()
if args.verbosity is None:
verbosity = os.environ.get('HADES_VERBOSITY', DEFAULT_VERBOSITY)
try:
verbosity = int(verbosity)
except ValueError:
verbosity = DEFAULT_VERBOSITY
else:
verbosity = args.verbosity
effective_verbosity = max(0, min(len(VERBOSITY_LEVELS) - 1, verbosity))
level = VERBOSITY_LEVELS[effective_verbosity]
if level <= logging.DEBUG:
fmt = ("[%(asctime)s] %(levelname)s in %(filename)s:%(lineno)d: "
"%(message)s")
else:
fmt = "%(message)s"
logging.basicConfig(level=level, style='%', format=fmt, stream=sys.stderr)
def reset_cli_logging():
"""Reset root logger configuration"""
root = logging.root
for h in root.handlers:
try:
h.acquire()
h.flush()
h.close()
except (OSError, ValueError):
pass
finally:
h.release()
root.removeHandler(h)
for f in root.filters:
root.removeFilter(f)
|
import argparse
import logging
import os
import sys
from gettext import gettext as _
from hades import constants
class ArgumentParser(argparse.ArgumentParser):
"""
ArgumentParser subclass that exits with os.EX_USAGE exit code if parsing
fails.
"""
def error(self, message):
self.print_usage(sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(os.EX_USAGE, _('%(prog)s: error: %(message)s\n') % args)
parser = ArgumentParser(add_help=False)
parser.add_argument('-c', '--config', default=None, help="Path to config file")
parser.add_argument('-v', '--verbose', dest='verbosity',
default=None, action='count', help='Be more verbose')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0, help='Be quiet')
parser.add_argument('-V', '--version', action='version',
version=constants.PACKAGE_VERSION)
VERBOSITY_LEVELS = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
DEFAULT_VERBOSITY = 1
def setup_cli_logging(program, args):
"""
Setup logging for CLI applications, that do not configure logging
themselves.
Flask and Celery are quite opinionated about logging, so this function
should probably not be called in their launchers.
:param program: The name of the program
:param args: The parsed arguments of the program
"""
if args.verbosity is None:
verbosity = os.environ.get('HADES_VERBOSITY', DEFAULT_VERBOSITY)
try:
verbosity = int(verbosity)
except ValueError:
verbosity = DEFAULT_VERBOSITY
else:
verbosity = args.verbosity
effective_verbosity = max(0, min(len(VERBOSITY_LEVELS) - 1, verbosity))
level = VERBOSITY_LEVELS[effective_verbosity]
if level <= logging.DEBUG:
fmt = ("[%(asctime)s] %(levelname)s in %(filename)s:%(lineno)d: "
"%(message)s")
else:
fmt = "%(message)s"
logging.basicConfig(level=level, style='%', format=fmt, stream=sys.stderr)
|
mit
|
Python
|
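Why the added reset matters: logging.basicConfig is a no-op whenever the root logger already has handlers, so without closing and removing them first, a second call cannot change the level or format. A self-contained sketch of that interaction:
import logging
import sys

logging.basicConfig(level=logging.INFO)  # installs a root handler
for handler in list(logging.root.handlers):
    handler.close()
    logging.root.removeHandler(handler)  # what reset_cli_logging() does, minus locking
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr,
                    format="%(levelname)s: %(message)s")
logging.debug("visible only because the old handler was removed")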
e0280150917934e9ba051966e8aa966d4cd43a6d
|
fix flake warning try 2
|
persandstrom/python-verisure
|
verisure/devices/smartplug.py
|
verisure/devices/smartplug.py
|
"""
Smartplug device
"""
import time
from .overview import Overview
OVERVIEW_URL = '/overview/smartplug'
COMMAND_URL = '/smartplugs/onoffplug.cmd'
DETAILS_URL = '/smarthome/{}/details'
class Smartplug(object):
""" Smartplug device
Args:
session (verisure.session): Current session
"""
def __init__(self, session):
self._session = session
def get(self):
""" Get device overview """
status = self._session.get(OVERVIEW_URL)
return [Overview('smartplug', val) for val in status]
def set(self, device_id, value):
""" Set device status
Args:
device_id (str): Id of the smartplug
value (str): new status, 'on' or 'off'
"""
data = {
'targetDeviceLabel': device_id,
'targetOn': value
}
return not self._session.post(COMMAND_URL, data)
def get_details(self, device_id):
""" Get details from a smartplug
Args:
device_id (str): Id of the smartplug
"""
return self._session.get(DETAILS_URL.format(
device_id.upper().replace(' ', '%20')))
def set_location(self, device_id, location):
""" Set smartplug location
Args:
device_id (str): Id of the smartplug
location (str): New location
"""
details_url = DETAILS_URL.format(
device_id.upper().replace(' ', '%20'))
details = self._session.get(details_url)
details['location'] = location
self._session.put(details_url, details)
def wait_while_updating(self, device_id, value, max_request_count=100):
""" Wait for device status to update
Args:
device_id (str): Id of the smartplug
value (str): status to wait for, 'on' or 'off'
max_request_count (int): maximum number of post requests
Returns: retries if success else -1
"""
for counter in range(max_request_count):
if [overview for overview in self.get()
if overview.id == device_id and overview.status == value]:
return counter
time.sleep(1)
return -1
|
"""
Smartplug device
"""
import time
from .overview import Overview
OVERVIEW_URL = '/overview/smartplug'
COMMAND_URL = '/smartplugs/onoffplug.cmd'
DETAILS_URL = '/smarthome/{}/details'
class Smartplug(object):
""" Smartplug device
Args:
session (verisure.session): Current session
"""
def __init__(self, session):
self._session = session
def get(self):
""" Get device overview """
status = self._session.get(OVERVIEW_URL)
return [Overview('smartplug', val) for val in status]
def set(self, device_id, value):
""" Set device status
Args:
device_id (str): Id of the smartplug
value (str): new status, 'on' or 'off'
"""
data = {
'targetDeviceLabel': device_id,
'targetOn': value
}
return not self._session.post(COMMAND_URL, data)
def get_details(self, device_id):
""" Get details from a smartplug
Args:
device_id (str): Id of the smartplug
"""
return self._session.get(DETAILS_URL.format(
device_id.upper().replace(' ', '%20')))
def set_location(self, device_id, location):
""" Set smartplug location
Args:
device_id (str): Id of the smartplug
location (str): New location
"""
details_url = DETAILS_URL.format(
device_id.upper().replace(' ', '%20'))
details = self._session.get(details_url)
details['location'] = location
self._session.put(details_url, details)
def wait_while_updating(self, device_id, value, max_request_count=100):
""" Wait for device status to update
Args:
device_id (str): Id of the smartplug
value (str): status to wait for, 'on' or 'off'
max_request_count (int): maximum number of post requests
Returns: retries if success else -1
"""
for counter in range(max_request_count):
if [overview for overview in self.get()
if (overview.id == device_id
and overview.status == value)]:
return counter
time.sleep(1)
return -1
|
mit
|
Python
|
ec2dfafd0501b29c7ab70489349dd0d0cf048328
|
cover exception case
|
thatcr/knowed
|
src/area51/nowd/test/test_args_fib.py
|
src/area51/nowd/test/test_args_fib.py
|
from pytest import raises
from .. import nowd, NowdObject, LoggingScope
class FibThing(NowdObject):
@nowd
def Fib(self, x):
if x < 0:
raise ValueError('cannot calculate fib < 0')
if x == 0 or x == 1:
return 1
return self.Fib(x-1) + self.Fib(x-2)
def test_args_fib():
thing = FibThing()
print(FibThing.Fib)
with LoggingScope():
with raises(ValueError):
thing.Fib(-1)
assert thing.Fib(0) == 1
assert thing.Fib(1) == 1
assert thing.Fib(2) == 2
assert thing.Fib(3) == 3
|
from .. import nowd, NowdObject, LoggingScope
class FibThing(NowdObject):
@nowd
def Fib(self, x):
if x < 0:
raise ValueError('cannot calculate fib < 0')
if x == 0 or x == 1:
return 1
return self.Fib(x-1) + self.Fib(x-2)
def test_args_fib():
thing = FibThing()
print(FibThing.Fib)
with LoggingScope():
assert thing.Fib(0) == 1
assert thing.Fib(1) == 1
assert thing.Fib(2) == 2
assert thing.Fib(3) == 3
|
mit
|
Python
|
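The added coverage uses the standard pytest.raises context manager; stripped of the nowd machinery, the pattern looks like this (a plain recursive fib stands in for FibThing.Fib):
import pytest

def fib(x):
    if x < 0:
        raise ValueError('cannot calculate fib < 0')
    if x in (0, 1):
        return 1
    return fib(x - 1) + fib(x - 2)

def test_fib_rejects_negative():
    # pytest.raises fails the test unless the block raises ValueError
    with pytest.raises(ValueError):
        fib(-1)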
d301cfc0e8f76c94f8f3bcd1b0263f9bd6e1604c
|
Add call_decorator to RedisManager.
|
vishwaprakashmishra/xmatrix,harrissoerja/vumi,TouK/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,TouK/vumi
|
vumi/persist/redis_manager.py
|
vumi/persist/redis_manager.py
|
# -*- test-case-name: vumi.persist.tests.test_redis_manager -*-
import redis
from vumi.persist.redis_base import Manager
from vumi.persist.fake_redis import FakeRedis
from vumi.persist.riak_manager import flatten_generator
class RedisManager(Manager):
call_decorator = staticmethod(flatten_generator)
@classmethod
def _fake_manager(cls, key_prefix, client=None):
if client is None:
client = FakeRedis()
manager = cls(client, key_prefix)
# Because ._close() assumes a real connection.
manager._close = client.teardown
return manager
@classmethod
def _manager_from_config(cls, config, key_prefix):
"""Construct a manager from a dictionary of options.
:param dict config:
Dictionary of options for the manager.
:param str key_prefix:
Key prefix for namespacing.
"""
return cls(redis.Redis(**config), key_prefix)
def _close(self):
"""Close redis connection."""
pass
def _purge_all(self):
"""Delete *ALL* keys whose names start with this manager's key prefix.
Use only in tests.
"""
for key in self.keys():
self.delete(key)
def _make_redis_call(self, call, *args, **kw):
"""Make a redis API call using the underlying client library.
"""
return getattr(self._client, call)(*args, **kw)
def _filter_redis_results(self, func, results):
"""Filter results of a redis call.
"""
return func(results)
|
# -*- test-case-name: vumi.persist.tests.test_redis_manager -*-
import redis
from vumi.persist.redis_base import Manager
from vumi.persist.fake_redis import FakeRedis
class RedisManager(Manager):
@classmethod
def _fake_manager(cls, key_prefix, client=None):
if client is None:
client = FakeRedis()
manager = cls(client, key_prefix)
# Because ._close() assumes a real connection.
manager._close = client.teardown
return manager
@classmethod
def _manager_from_config(cls, config, key_prefix):
"""Construct a manager from a dictionary of options.
:param dict config:
Dictionary of options for the manager.
:param str key_prefix:
Key prefix for namespacing.
"""
return cls(redis.Redis(**config), key_prefix)
def _close(self):
"""Close redis connection."""
pass
def _purge_all(self):
"""Delete *ALL* keys whose names start with this manager's key prefix.
Use only in tests.
"""
for key in self.keys():
self.delete(key)
def _make_redis_call(self, call, *args, **kw):
"""Make a redis API call using the underlying client library.
"""
return getattr(self._client, call)(*args, **kw)
def _filter_redis_results(self, func, results):
"""Filter results of a redis call.
"""
return func(results)
|
bsd-3-clause
|
Python
|
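A note on the staticmethod(...) wrapper introduced above: assigning a bare function to a class attribute makes instance attribute access bind it as a method (and, under the Python 2 that vumi targets, even class access yields an unbound method), so staticmethod keeps it usable as a plain decorator. An illustrative sketch with a made-up decorator:
def exclaim(func):
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs) + "!"
    return wrapper

class Manager(object):
    call_decorator = staticmethod(exclaim)  # stays a plain callable

wrapped = Manager.call_decorator(lambda name: "hello " + name)
print(wrapped("redis"))  # hello redis!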
8320b81b7355c0158231b4a3c7ac40c49872f7b1
|
handle partial pbuttons in ss tab
|
murrayo/yape,murrayo/yape,murrayo/yape
|
scripts/ss_tab.py
|
scripts/ss_tab.py
|
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
import sqlite3
import holoviews as hv
hv.extension('bokeh')
from bokeh.plotting import Figure
from bokeh.models import (CategoricalColorMapper, HoverTool,
ColumnDataSource, Panel,
FuncTickFormatter, SingleIntervalTicker, LinearAxis)
from bokeh.models.widgets import (CheckboxGroup, Slider, RangeSlider,
Tabs, CheckboxButtonGroup,
TableColumn, DataTable, Select,PreText)
from bokeh.layouts import column, row, WidgetBox
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from .generic_tab import generic_tab
def ss_tab(db):
ss_1_tab = generic_tab(db,"ss1")
ss_2_tab = generic_tab(db,"ss2")
ss_3_tab = generic_tab(db,"ss3")
ss_4_tab = generic_tab(db,"ss4")
ts = [ss_1_tab,ss_2_tab,ss_3_tab,ss_4_tab]
tabs = Tabs(tabs = list(filter(None.__ne__, ts)))
tab = Panel(child=tabs, title = "%SS")
#tab = Panel(child=layout, title = mode)
return tab
|
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
import sqlite3
import holoviews as hv
hv.extension('bokeh')
from bokeh.plotting import Figure
from bokeh.models import (CategoricalColorMapper, HoverTool,
ColumnDataSource, Panel,
FuncTickFormatter, SingleIntervalTicker, LinearAxis)
from bokeh.models.widgets import (CheckboxGroup, Slider, RangeSlider,
Tabs, CheckboxButtonGroup,
TableColumn, DataTable, Select,PreText)
from bokeh.layouts import column, row, WidgetBox
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from .generic_tab import generic_tab
def ss_tab(db):
ss_1_tab = generic_tab(db,"ss1")
ss_2_tab = generic_tab(db,"ss2")
ss_3_tab = generic_tab(db,"ss3")
ss_4_tab = generic_tab(db,"ss4")
tabs = Tabs(tabs = [ss_1_tab,ss_2_tab,ss_3_tab,ss_4_tab])
tab = Panel(child=tabs, title = "%SS")
#tab = Panel(child=layout, title = mode)
return tab
|
mit
|
Python
|
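The fix above assumes generic_tab can return None for a pbuttons section that is absent from a partial profile; filter(None.__ne__, ts) drops those entries before Tabs is built. The idiom in isolation:
ts = ["ss1", None, "ss3", None]  # stand-ins for generic_tab results
present = list(filter(None.__ne__, ts))  # keeps only non-None tabs
assert present == ["ss1", "ss3"]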
64967f3d262a1823a5d0cd22fb096f9529e6aa98
|
resolve flake8 linting errors
|
watsonpy/watson-validators
|
watson/validators/__init__.py
|
watson/validators/__init__.py
|
# -*- coding: utf-8 -*-
__version__ = '1.0.6'
try:
# Fix for setup.py version import
from watson.validators.numeric import Range
from watson.validators.string import Length, Required, RegEx, Csrf
__all__ = ['Range', 'Length', 'Required', 'RegEx', 'Csrf']
except: # noqa, pragma: no cover
pass # pragma: no cover
|
# -*- coding: utf-8 -*-
__version__ = '1.0.6'
try:
# Fix for setup.py version import
from watson.validators.numeric import Range
from watson.validators.string import Length, Required, RegEx, Csrf
__all__ = ['Range', 'Length', 'Required', 'RegEx', 'Csrf']
except: # pragma: no cover
pass # pragma: no cover
|
bsd-3-clause
|
Python
|
5aa594d02bfcdfe5eefe90fad1e464070b6ae9b5
|
Update common.py
|
suzannerohrback/somaticCNVpipeline,suzannerohrback/somaticCNVpipeline
|
bin/common.py
|
bin/common.py
|
#!/usr/bin/python
import os
import multiprocessing as mp
import subprocess as sub
import shlex
def fixDirName(dirpath):
if dirpath[-1] != '/':
dirpath += '/'
return dirpath
def makeDir(dirpath):
if not os.path.exists(dirpath):
os.mkdir(dirpath)
return 0
def importSampleList(infile):
if os.path.exists(infile):
files = []
with open(infile, 'r') as IN:
for x in IN:
files.append(x.rstrip())
else:
errorText = '\nERROR: the specified sample name file does not exist, please fix\n\t' + infile + '\n'
print(errorText)
raise SystemExit
if len(files) == 0:
errorText = '\nERROR: The sample name file does not contain any sample names, please fix\n'
print(errorText)
raise SystemExit
return files
###daemon to run multiprocessing and parallelize tasks###
def daemon(target, argList, name, cpuPerProcess=1):
print( str( '\t' + str(len(argList)) + ' processes to run to ' + name ) )
numCPU = mp.cpu_count()
numWorkers = min( [int(numCPU / cpuPerProcess), len(argList)] )
pool = mp.Pool(numWorkers)
processes = [pool.apply_async(target, args=x) for x in argList]
pool.close()
for i,j in enumerate(processes):
j.wait()
if not j.successful():
pool.terminate()
print '\n\n\nprocessing failed, getting traceback now...'
p = mp.Process(target=target, args=argList[i])
p.start()
p.join()
# else:
# print( str( '\t\t' + str(i+1) + ' of ' + str(len(argList)) + ' processes complete' ) )
print( str( '\tAll processing to ' + name + ' complete\n' ) )
def zipping(filepath, gunzip=True):
if filepath.split('.')[-1] != 'gz' and gunzip:
return filepath
elif filepath.split('.')[-1] == 'gz' and not gunzip:
return filepath
if gunzip:
cmd = 'gunzip ' + filepath
fixname = filepath[:-3]
else:
cmd = 'gzip ' + filepath
fixname = filepath + '.gz'
cmd = shlex.split(cmd)
p = sub.Popen(cmd)
p.wait()
return fixname
|
#!/usr/bin/python
import os
import multiprocessing as mp
import subprocessing as sub
import shlex
def fixDirName(dirpath):
if dirpath[-1] != '/':
dirpath += '/'
return dirpath
def makeDir(dirpath):
if not os.path.exists(dirpath):
os.mkdir(dirpath)
return 0
def importSampleList(infile):
if os.path.exists(infile):
files = []
with open(infile, 'r') as IN:
for x in IN:
files.append(x.rstrip())
else:
errorText = '\nERROR: the specified sample name file does not exist, please fix\n\t' + infile + '\n'
print(errorText)
raise SystemExit
if len(files) == 0:
errorText = '\nERROR: The sample name file does not contain any sample names, please fix\n'
print(errorText)
raise SystemExit
return files
###daemon to run multiprocessing and parallelize tasks###
def daemon(target, argList, name, cpuPerProcess=1):
print( str( '\t' + str(len(argList)) + ' processes to run to ' + name ) )
numCPU = mp.cpu_count()
numWorkers = min( [int(numCPU / cpuPerProcess), len(argList)] )
pool = mp.Pool(numWorkers)
processes = [pool.apply_async(target, args=x) for x in argList]
pool.close()
for i,j in enumerate(processes):
j.wait()
if not j.successful():
pool.terminate()
print '\n\n\nprocessing failed, getting traceback now...'
p = mp.Process(target=target, args=argList[i])
p.start()
p.join()
# else:
# print( str( '\t\t' + str(i+1) + ' of ' + str(len(argList)) + ' processes complete' ) )
print( str( '\tAll processing to ' + name + ' complete\n' ) )
def zipping(filepath, gunzip=True):
if filepath.split('.')[-1] != 'gz' and gunzip:
return filepath
elif filepath.split('.')[-1] == 'gz' and not gunzip:
return filepath
if gunzip:
cmd = 'gunzip ' + filepath
fixname = filepath[:-3]
else:
cmd = 'gzip ' + filepath
fixname = filepath + '.gz'
cmd = shlex.split(cmd)
p = sub.popen(cmd)
p.wait()
return fixname
|
mit
|
Python
|
534d66ff92e6fd00ea92a08c76f39614b9977967
|
make it compatible to python3
|
crackwitz/uuhash
|
uuhash.py
|
uuhash.py
|
#!/usr/bin/env python3
import os
import hashlib
import binascii
import struct
__all__ = ["UUHash"]
# https://en.wikipedia.org/wiki/UUHash
# MLDonkey source code, file src/utils/lib/fst_hash.c, retrieved 2014-08-20
# http://sourceforge.net/projects/mldonkey/files/mldonkey/3.1.5/mldonkey-3.1.5.tar.bz2
# http://www.opensource.apple.com/source/xnu/xnu-1456.1.26/bsd/libkern/crc32.c
def UUHash(fobj):
chunksize = 307200
fobj.seek(0, os.SEEK_END)
filesize = fobj.tell()
fobj.seek(0)
chunk = fobj.read(chunksize)
md5hash = hashlib.md5(chunk).digest()
smallhash = 0
if filesize > chunksize:
lastpos = fobj.tell()
offset = 0x100000
while offset + 2*chunksize < filesize: # yes, LESS than, not equal
fobj.seek(offset)
chunk = fobj.read(chunksize)
smallhash = binascii.crc32(chunk, smallhash)
lastpos = offset + chunksize
offset <<= 1
endlen = filesize - lastpos
if endlen > chunksize:
endlen = chunksize
fobj.seek(filesize-endlen)
chunk = fobj.read(endlen)
smallhash = binascii.crc32(chunk, smallhash)
smallhash = ((~smallhash) ^ filesize) % 2**32
return md5hash + struct.pack("<I", smallhash)
if __name__ == '__main__':
import sys
import glob
import base64
import time
files = []
for globbable in sys.argv[1:]:
files += glob.glob(globbable) or [globbable]
for fname in files:
if not os.path.isfile(fname): continue
t0 = time.time()
hash = UUHash(open(fname, 'rb'))
t1 = time.time()
encoded = base64.b64encode(hash)
print("{} {} {}".format(encoded.decode(), hash.hex(), fname))
|
#!/usr/bin/env python
import os
import hashlib
import binascii
import struct
__all__ = ["UUHash"]
# https://en.wikipedia.org/wiki/UUHash
# MLDonkey source code, file src/utils/lib/fst_hash.c, retrieved 2014-08-20
# http://sourceforge.net/projects/mldonkey/files/mldonkey/3.1.5/mldonkey-3.1.5.tar.bz2
# http://www.opensource.apple.com/source/xnu/xnu-1456.1.26/bsd/libkern/crc32.c
def UUHash(fobj):
chunksize = 307200
fobj.seek(0, os.SEEK_END)
filesize = fobj.tell()
fobj.seek(0)
chunk = fobj.read(chunksize)
md5hash = hashlib.md5(chunk).digest()
smallhash = 0
if filesize > chunksize:
lastpos = fobj.tell()
offset = 0x100000
while offset + 2*chunksize < filesize: # yes, LESS than, not equal
fobj.seek(offset)
chunk = fobj.read(chunksize)
smallhash = binascii.crc32(chunk, smallhash)
lastpos = offset + chunksize
offset <<= 1
endlen = filesize - lastpos
if endlen > chunksize:
endlen = chunksize
fobj.seek(filesize-endlen)
chunk = fobj.read(endlen)
smallhash = binascii.crc32(chunk, smallhash)
smallhash = ((~smallhash) ^ filesize) % 2**32
return md5hash + struct.pack("<I", smallhash)
if __name__ == '__main__':
import sys
import glob
import base64
import time
files = []
for globbable in sys.argv[1:]:
files += glob.glob(globbable) or [globbable]
for fname in files:
if not os.path.isfile(fname): continue
t0 = time.time()
hash = UUHash(file(fname, 'rb'))
t1 = time.time()
encoded = base64.b64encode(hash)
print "%-28s" % encoded, hash.encode('hex').upper(), fname
|
mit
|
Python
|
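Central to UUHash above is binascii.crc32's second argument, which lets a checksum be continued across chunks without concatenating them. A quick standalone check of that property:
import binascii

crc = 0
for chunk in (b"hello ", b"world"):
    crc = binascii.crc32(chunk, crc)  # feed the running value back in
assert crc == binascii.crc32(b"hello world")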
ae179cf964939a97184402039574d7ee9b2e62da
|
Add BlogPost update test
|
andreagrandi/bloggato,andreagrandi/bloggato
|
blog/tests.py
|
blog/tests.py
|
from django.test import TestCase
from .models import BlogPost
from django.contrib.auth.models import User
class BlogTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username = "user001", email = "[email protected]", password = "password123456")
def test_post_creation(self):
blogpost = BlogPost()
blogpost.user = self.user
blogpost.title = "Title Test"
blogpost.text = "Lorem ipsum tarapia tapioco..."
blogpost.save()
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
def test_post_update(self):
blogpost = BlogPost()
blogpost.user = self.user
blogpost.title = "Title Test"
blogpost.text = "Lorem ipsum tarapia tapioco..."
blogpost.save()
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
blogpost.title = "Title Test - modified"
blogpost.save()
blogpost_id = blogpost.id
blogpost_saved = BlogPost.objects.get(id = blogpost_id)
self.assertEquals(blogpost_saved.title, blogpost.title, "BlogPost updated correctly")
|
from django.test import TestCase
from .models import BlogPost
from django.contrib.auth.models import User
class BlogTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username = "user001", email = "[email protected]", password = "password123456")
def test_post_creation(self):
blogpost = BlogPost()
blogpost.user = self.user
blogpost.title = "Title Test"
blogpost.text = "Lorem ipsum tarapia tapioco..."
blogpost.save()
self.assertTrue(blogpost.id > 0, "BlogPost created correctly")
|
mit
|
Python
|
59851b283b8cb6f92895090ba23e71be48f1a990
|
remove distinction between sfa aggregate and geni_aggregate
|
yippeecw/sfa,yippeecw/sfa,onelab-eu/sfa,onelab-eu/sfa,yippeecw/sfa,onelab-eu/sfa
|
sfa/methods/GetVersion.py
|
sfa/methods/GetVersion.py
|
from sfa.util.faults import *
from sfa.util.namespace import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter
class GetVersion(Method):
"""
Returns this GENI Aggregate Manager's Version Information
@return version
"""
interfaces = ['registry','aggregate', 'slicemgr', 'component']
accepts = []
returns = Parameter(dict, "Version information")
def call(self):
self.api.logger.info("interface: %s\tmethod-name: %s" % (self.api.interface, self.name))
manager = self.api.get_manager()
return manager.get_version()
|
from sfa.util.faults import *
from sfa.util.namespace import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter
class GetVersion(Method):
"""
Returns this GENI Aggregate Manager's Version Information
@return version
"""
interfaces = ['geni_am','registry']
accepts = []
returns = Parameter(dict, "Version information")
def call(self):
self.api.logger.info("interface: %s\tmethod-name: %s" % (self.api.interface, self.name))
manager_base = 'sfa.managers'
if self.api.interface in ['geni_am']:
mgr_type = self.api.config.SFA_GENI_AGGREGATE_TYPE
manager_module = manager_base + ".geni_am_%s" % mgr_type
manager = __import__(manager_module, fromlist=[manager_base])
return manager.GetVersion()
if self.api.interface in ['registry']:
mgr_type = self.api.config.SFA_REGISTRY_TYPE
manager_module = manager_base + ".slice_manager_%s" % mgr_type
manager = __import__(manager_module, fromlist=[manager_base])
return manager.GetVersion()
return {}
|
mit
|
Python
|
7dd344129e2ae30a857f72fdf61dca6e40768983
|
fix city field duplication (#17919)
|
ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo
|
addons/base_address_city/models/res_partner.py
|
addons/base_address_city/models/res_partner.py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from odoo import api, models, fields
class Partner(models.Model):
_inherit = 'res.partner'
country_enforce_cities = fields.Boolean(related='country_id.enforce_cities')
city_id = fields.Many2one('res.city', string='Company')
@api.onchange('city_id')
def _onchange_city_id(self):
self.city = self.city_id.name
self.zip = self.city_id.zipcode
self.state_id = self.city_id.state_id
@api.model
def _fields_view_get_address(self, arch):
arch = super(Partner, self)._fields_view_get_address(arch)
if not self._context.get('no_address_format'):
return arch
# render the partner address accordingly to address_view_id
doc = etree.fromstring(arch)
for city_node in doc.xpath("//field[@name='city']"):
replacement_xml = """
<div>
<field name="country_enforce_cities" invisible="1"/>
<field name='city' attrs="{'invisible': [('country_enforce_cities', '=', True), ('city_id', '!=', False)], 'readonly': [('type', '=', 'contact'), ('parent_id', '!=', False)]}"/>
<field name='city_id' attrs="{'invisible': [('country_enforce_cities', '=', False)], 'readonly': [('type', '=', 'contact'), ('parent_id', '!=', False)]}" context="{'default_country_id': country_id}" domain="[('country_id', '=', country_id)]"/>
</div>
"""
city_id_node = etree.fromstring(replacement_xml)
city_node.getparent().replace(city_node, city_id_node)
arch = etree.tostring(doc)
return arch
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from odoo import api, models, fields
class Partner(models.Model):
_inherit = 'res.partner'
country_enforce_cities = fields.Boolean(related='country_id.enforce_cities')
city_id = fields.Many2one('res.city', string='Company')
@api.onchange('city_id')
def _onchange_city_id(self):
self.city = self.city_id.name
self.zip = self.city_id.zipcode
self.state_id = self.city_id.state_id
@api.model
def _fields_view_get_address(self, arch):
arch = super(Partner, self)._fields_view_get_address(arch)
# render the partner address accordingly to address_view_id
doc = etree.fromstring(arch)
for city_node in doc.xpath("//field[@name='city']"):
replacement_xml = """
<div>
<field name="country_enforce_cities" invisible="1"/>
<field name='city' attrs="{'invisible': [('country_enforce_cities', '=', True), ('city_id', '!=', False)], 'readonly': [('type', '=', 'contact'), ('parent_id', '!=', False)]}"/>
<field name='city_id' attrs="{'invisible': [('country_enforce_cities', '=', False)], 'readonly': [('type', '=', 'contact'), ('parent_id', '!=', False)]}" context="{'default_country_id': country_id}" domain="[('country_id', '=', country_id)]"/>
</div>
"""
city_id_node = etree.fromstring(replacement_xml)
city_node.getparent().replace(city_node, city_id_node)
arch = etree.tostring(doc)
return arch
|
agpl-3.0
|
Python
|
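The view patching above is ordinary lxml surgery: parse the arch, locate the city field by XPath, and swap in a replacement node. A minimal sketch of just that mechanic, with a toy form standing in for the real view:
from lxml import etree

arch = "<form><field name='city'/></form>"  # toy stand-in for the real arch
doc = etree.fromstring(arch)
for node in doc.xpath("//field[@name='city']"):
    replacement = etree.fromstring("<div><field name='city_id'/></div>")
    node.getparent().replace(node, replacement)
print(etree.tostring(doc).decode())  # city node swapped for the div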
7a105fe9201882749a7415bff580b9588b7f9a46
|
update version number
|
scigghia/account-invoicing,archetipo/account-invoicing,acsone/account-invoicing,kmee/account-invoicing,gurneyalex/account-invoicing,brain-tec/account-invoicing,Noviat/account-invoicing,acsone/account-invoicing,open-synergy/account-invoicing,akretion/account-invoicing,kmee/account-invoicing,eezee-it/account-invoicing,brain-tec/account-invoicing,sergiocorato/account-invoicing
|
account_invoice_line_description/__openerp__.py
|
account_invoice_line_description/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Account invoice line description",
'version': '8.0.1.0.1',
'category': 'Generic Modules/Accounting',
'author': "Agile Business Group, Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": [
'account',
],
"data": [
'security/invoice_security.xml',
'res_config_view.xml',
],
"installable": True
}
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Account invoice line description",
'version': '8.0.1.0.0',
'category': 'Generic Modules/Accounting',
'author': "Agile Business Group, Odoo Community Association (OCA)",
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": [
'account',
],
"data": [
'security/invoice_security.xml',
'res_config_view.xml',
],
"installable": True
}
|
agpl-3.0
|
Python
|
b20d07aa14d3c11d5509e96fd4911de5886afe28
|
fix order for AttributeViewSet
|
rdmorganiser/rdmo,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo,rdmorganiser/rdmo,DMPwerkzeug/DMPwerkzeug
|
apps/domain/views.py
|
apps/domain/views.py
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from rest_framework import viewsets, mixins, filters
from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated
from .models import *
from .serializers import *
@login_required()
def domain(request):
return render(request, 'domain/domain.html')
class AttributeEntityViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (DjangoModelPermissions, )
queryset = AttributeEntity.objects.filter(attribute__attributeset=None).order_by('tag')
serializer_class = AttributeEntitySerializer
class AttributeViewSet(viewsets.ModelViewSet):
permission_classes = (DjangoModelPermissions, )
queryset = Attribute.objects.order_by('attributeset__tag', 'tag')
serializer_class = AttributeSerializer
filter_backends = (filters.DjangoFilterBackend, )
filter_fields = ('tag', )
class AttributeSetViewSet(viewsets.ModelViewSet):
permission_classes = (DjangoModelPermissions, )
queryset = AttributeSet.objects.order_by('tag')
serializer_class = AttributeSetSerializer
filter_backends = (filters.DjangoFilterBackend, )
filter_fields = ('tag', )
class ValueTypeViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
permission_classes = (IsAuthenticated, )
serializer_class = ValueTypeSerializer
def get_queryset(self):
return Attribute.VALUE_TYPE_CHOICES
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from rest_framework import viewsets, mixins, filters
from rest_framework.permissions import DjangoModelPermissions, IsAuthenticated
from .models import *
from .serializers import *
@login_required()
def domain(request):
return render(request, 'domain/domain.html')
class AttributeEntityViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (DjangoModelPermissions, )
queryset = AttributeEntity.objects.filter(attribute__attributeset=None).order_by('tag')
serializer_class = AttributeEntitySerializer
class AttributeViewSet(viewsets.ModelViewSet):
permission_classes = (DjangoModelPermissions, )
queryset = Attribute.objects.order_by('tag')
serializer_class = AttributeSerializer
filter_backends = (filters.DjangoFilterBackend, )
filter_fields = ('tag', )
class AttributeSetViewSet(viewsets.ModelViewSet):
permission_classes = (DjangoModelPermissions, )
queryset = AttributeSet.objects.order_by('tag')
serializer_class = AttributeSetSerializer
filter_backends = (filters.DjangoFilterBackend, )
filter_fields = ('tag', )
class ValueTypeViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
permission_classes = (IsAuthenticated, )
serializer_class = ValueTypeSerializer
def get_queryset(self):
return Attribute.VALUE_TYPE_CHOICES
|
apache-2.0
|
Python
|
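The one-line fix above sorts Attribute by a column on the related AttributeSet via Django's double-underscore lookup, which makes the ORM emit a JOIN and order on the joined table. A sketch of the same idea with hypothetical models (this belongs in an app's models.py; it is not a standalone script):

from django.db import models

class Author(models.Model):
    name = models.CharField(max_length=100)

class Book(models.Model):
    title = models.CharField(max_length=100)
    author = models.ForeignKey(Author, on_delete=models.CASCADE)

# order by the related author's name first, then break ties by title --
# the same shape as Attribute.objects.order_by('attributeset__tag', 'tag')
books = Book.objects.order_by('author__name', 'title')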
34035c4b272e9271834c531990c404940eee8633
|
Add a link between vote and subproposal
|
yohanboniface/memopol-core,yohanboniface/memopol-core,yohanboniface/memopol-core
|
apps/votes/models.py
|
apps/votes/models.py
|
from django.db import models
from meps.models import MEP
class Proposal(models.Model):
id = models.CharField(max_length=63, primary_key=True)
title = models.CharField(max_length=255, unique=True)
class SubProposal(models.Model):
datetime = models.DateTimeField()
subject = models.CharField(max_length=255)
part = models.CharField(max_length=255)
description = models.CharField(max_length=511)
weight = models.IntegerField(null=True)
vote = models.ForeignKey(Proposal)
recommendation = models.CharField(max_length=15, choices=((u'against', u'against'), (u'for', u'for')), null=True)
class Vote(models.Model):
choice = models.CharField(max_length=15, choices=((u'for', u'for'), (u'against', u'against'), (u'abstention', u'abstention')))
name = models.CharField(max_length=127)
sub_proposal = models.ForeignKey(SubProposal)
mep = models.ForeignKey(MEP)
|
from django.db import models
from meps.models import MEP
class Proposal(models.Model):
id = models.CharField(max_length=63, primary_key=True)
title = models.CharField(max_length=255, unique=True)
class SubProposal(models.Model):
datetime = models.DateTimeField()
subject = models.CharField(max_length=255)
part = models.CharField(max_length=255)
description = models.CharField(max_length=511)
weight = models.IntegerField(null=True)
vote = models.ForeignKey(Proposal)
recommendation = models.CharField(max_length=15, choices=((u'against', u'against'), (u'for', u'for')), null=True)
class Vote(models.Model):
choice = models.CharField(max_length=15, choices=((u'for', u'for'), (u'against', u'against'), (u'abstention', u'abstention')))
name = models.CharField(max_length=127)
mep = models.ForeignKey(MEP)
|
agpl-3.0
|
Python
|
a2082e319854f88842e3acf8244d38a81f7046ae
|
Add secure/insecure reverse helpers.
|
messense/djsubdomains,ipsosante/django-subdomains,mysociety/django-subdomains,aajtodd/django-subdomains,jangeador/django-subdomains,adi-li/django-subdomains,nanopony/django-subdomains,tkaemming/django-subdomains,larikov/django-subdomains
|
subdomains/utils.py
|
subdomains/utils.py
|
import functools
from urlparse import urlunparse
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as simple_reverse
def urljoin(domain, path=None, scheme=None):
if path is None:
path = ''
if scheme is None:
scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
return urlunparse((scheme, domain, path, None, None, None))
def reverse(viewname, subdomain=None, scheme=None, urlconf=None,
*args, **kwargs):
# We imply the urlconf from the `subdomain` argument -- providing the
# urlconf is a violation of this logic.
if urlconf is not None:
raise ValueError('`subdomains.utils.reverse` does not accept the '
'`urlconf` argument.')
site = Site.objects.get_current()
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain)
if subdomain is not None:
domain = '%s.%s' % (subdomain, site.domain)
else:
domain = site.domain
path = simple_reverse(viewname, urlconf=urlconf, *args, **kwargs)
return urljoin(domain, path, scheme=scheme)
insecure_reverse = functools.partial(reverse, scheme='http')
secure_reverse = functools.partial(reverse, scheme='https')
relative_reverse = functools.partial(reverse, scheme='')
|
from urlparse import urlunparse
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse as simple_reverse
def urljoin(domain, path=None, scheme=None):
if path is None:
path = ''
if scheme is None:
scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
return urlunparse((scheme, domain, path, None, None, None))
def reverse(viewname, subdomain=None, scheme=None, urlconf=None,
*args, **kwargs):
# We imply the urlconf from the `subdomain` argument -- providing the
# urlconf is a violation of this logic.
if urlconf is not None:
raise ValueError('`subdomains.utils.reverse` does not accept the '
'`urlconf` argument.')
site = Site.objects.get_current()
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain)
if subdomain is not None:
domain = '%s.%s' % (subdomain, site.domain)
else:
domain = site.domain
path = simple_reverse(viewname, urlconf=urlconf, *args, **kwargs)
return urljoin(domain, path, scheme=scheme)
|
mit
|
Python
|
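The three helpers added above are plain functools.partial bindings: each one is reverse() with its scheme keyword pre-filled. A tiny runnable illustration of the idiom:

import functools

def url(host, scheme='http'):
    return '%s://%s' % (scheme, host)

secure_url = functools.partial(url, scheme='https')  # keyword argument pre-bound
print(secure_url('example.com'))  # -> https://example.com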
8315fb3fb1b7ef65b9c1ced4feaeb473863495f5
|
Remove current_site_domain
|
messense/djsubdomains
|
subdomains/utils.py
|
subdomains/utils.py
|
import functools
try:
from urlparse import urlunparse
except ImportError:
from urllib.parse import urlunparse
from django.conf import settings
from django.core.urlresolvers import reverse as simple_reverse
def get_domain():
domain = getattr(settings, 'BASE_DOMAIN', False)
prefix = 'www.'
if getattr(settings, 'REMOVE_WWW_FROM_DOMAIN', False) \
and domain.startswith(prefix):
domain = domain.replace(prefix, '', 1)
return domain
def urljoin(domain, path=None, scheme=None):
"""
Joins a domain, path and scheme part together, returning a full URL.
:param domain: the domain, e.g. ``example.com``
:param path: the path part of the URL, e.g. ``/example/``
:param scheme: the scheme part of the URL, e.g. ``http``, defaulting to the
value of ``settings.DEFAULT_URL_SCHEME``
:returns: a full URL
"""
if scheme is None:
scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
return urlunparse((scheme, domain, path or '', None, None, None))
def reverse(viewname, subdomain=None, scheme=None,
args=None, kwargs=None, current_app=None):
"""
Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application
"""
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
domain = get_domain()
if subdomain is not None:
domain = '%s.%s' % (subdomain, domain)
path = simple_reverse(
viewname,
urlconf=urlconf,
args=args,
kwargs=kwargs,
current_app=current_app
)
return urljoin(domain, path, scheme=scheme)
#: :func:`reverse` bound to insecure (non-HTTPS) URLs scheme
insecure_reverse = functools.partial(reverse, scheme='http')
#: :func:`reverse` bound to secure (HTTPS) URLs scheme
secure_reverse = functools.partial(reverse, scheme='https')
#: :func:`reverse` bound to be relative to the current scheme
relative_reverse = functools.partial(reverse, scheme='')
|
import functools
try:
from urlparse import urlunparse
except ImportError:
from urllib.parse import urlunparse
from django.conf import settings
from django.core.urlresolvers import reverse as simple_reverse
def current_site_domain():
domain = getattr(settings, 'BASE_DOMAIN', False)
prefix = 'www.'
if getattr(settings, 'REMOVE_WWW_FROM_DOMAIN', False) \
and domain.startswith(prefix):
domain = domain.replace(prefix, '', 1)
return domain
get_domain = current_site_domain
def urljoin(domain, path=None, scheme=None):
"""
Joins a domain, path and scheme part together, returning a full URL.
:param domain: the domain, e.g. ``example.com``
:param path: the path part of the URL, e.g. ``/example/``
:param scheme: the scheme part of the URL, e.g. ``http``, defaulting to the
value of ``settings.DEFAULT_URL_SCHEME``
:returns: a full URL
"""
if scheme is None:
scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
return urlunparse((scheme, domain, path or '', None, None, None))
def reverse(viewname, subdomain=None, scheme=None, args=None, kwargs=None, current_app=None):
"""
Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application
"""
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
domain = get_domain()
if subdomain is not None:
domain = '%s.%s' % (subdomain, domain)
path = simple_reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs, current_app=current_app)
return urljoin(domain, path, scheme=scheme)
#: :func:`reverse` bound to insecure (non-HTTPS) URLs scheme
insecure_reverse = functools.partial(reverse, scheme='http')
#: :func:`reverse` bound to secure (HTTPS) URLs scheme
secure_reverse = functools.partial(reverse, scheme='https')
#: :func:`reverse` bound to be relative to the current scheme
relative_reverse = functools.partial(reverse, scheme='')
|
mit
|
Python
|
496481e3bd6392a44788fadc7cf517fc36143e96
|
Change to cb_story, clean up TZ handling some more
|
willkg/douglas,daitangio/pyblosxom,willkg/douglas,daitangio/pyblosxom
|
contrib/plugins/w3cdate.py
|
contrib/plugins/w3cdate.py
|
"""
Add a 'w3cdate' key to every entry -- this contains the date in ISO8601 format
WARNING: you must have PyXML installed as part of your python installation
in order for this plugin to work
Place this plugin early in your load_plugins list, so that the w3cdate will
be available to subsequent plugins
"""
__author__ = "Ted Leung <[email protected]>"
__version__ = "$Id:"
__copyright__ = "Copyright (c) 2003 Ted Leung"
__license__ = "Python"
import xml.utils.iso8601
import time
from Pyblosxom import tools
def cb_story(args):
request = tools.get_registry()["request"]
data = request.getData()
entry_list = data['entry_list']
for i in range(len(entry_list)):
entry = entry_list[i]
t = entry['timetuple']
# adjust for daylight savings time
tzoffset = 0
if time.timezone != 0:
tzoffset = time.altzone
entry['w3cdate'] = xml.utils.iso8601.tostring(time.mktime(t),tzoffset)
|
"""
Add a 'w3cdate' key to every entry -- this contains the date in ISO8601 format
WARNING: you must have PyXML installed as part of your python installation
in order for this plugin to work
Place this plugin early in your load_plugins list, so that the w3cdate will
be available to subsequent plugins
"""
__author__ = "Ted Leung <[email protected]>"
__version__ = "$Id:"
__copyright__ = "Copyright (c) 2003 Ted Leung"
__license__ = "Python"
import xml.utils.iso8601
import time
def cb_prepare(args):
request = args["request"]
form = request.getHttp()['form']
config = request.getConfiguration()
data = request.getData()
entry_list = data['entry_list']
for i in range(len(entry_list)):
entry = entry_list[i]
t = entry['timetuple']
# adjust for daylight savings time
t = t[0],t[1],t[2],t[3]+time.localtime()[-1],t[4],t[5],t[6],t[7],t[8]
entry['w3cdate'] = xml.utils.iso8601.ctime(time.mktime(t))
|
mit
|
Python
|
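The plugin above still leans on the long-dead PyXML iso8601 helpers plus manual time.timezone/time.altzone arithmetic for daylight saving. On a modern Python 3 the equivalent "local time to ISO 8601 with UTC offset" step is built in; this is a hedged equivalent, not the plugin's actual code path:

import time
from datetime import datetime

t = time.localtime()
dt = datetime(*t[:6]).astimezone()  # attach the local, DST-aware UTC offset
print(dt.isoformat())               # e.g. 2003-08-01T12:34:56-07:00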
b43a70a045ae9efbc06d407dd17fe97804a238a1
|
Cover art discovery: handle badly encoded files
|
spl0k/supysonic,spl0k/supysonic,spl0k/supysonic
|
supysonic/covers.py
|
supysonic/covers.py
|
# coding: utf-8
#
# This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2018 Alban 'spl0k' Féron
#
# Distributed under terms of the GNU AGPLv3 license.
import os, os.path
import re
from PIL import Image
EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp')
NAMING_SCORE_RULES = (
('cover', 5),
('albumart', 5),
('folder', 5),
('front', 10),
('back', -10),
('large', 2),
('small', -2)
)
class CoverFile(object):
__clean_regex = re.compile(r'[^a-z]')
@staticmethod
def __clean_name(name):
return CoverFile.__clean_regex.sub('', name.lower())
def __init__(self, name, album_name = None):
self.name = name
self.score = 0
for part, score in NAMING_SCORE_RULES:
if part in name.lower():
self.score += score
if album_name:
basename, _ = os.path.splitext(name)
clean = CoverFile.__clean_name(basename)
album_name = CoverFile.__clean_name(album_name)
if clean in album_name or album_name in clean:
self.score += 20
def is_valid_cover(path):
if not os.path.isfile(path):
return False
_, ext = os.path.splitext(path)
if ext.lower() not in EXTENSIONS:
return False
try: # Ensure the image can be read
with Image.open(path):
return True
except IOError:
return False
def find_cover_in_folder(path, album_name = None):
if not os.path.isdir(path):
raise ValueError('Invalid path')
candidates = []
for f in os.listdir(path):
try:
file_path = os.path.join(path, f)
except UnicodeError:
continue
if not is_valid_cover(file_path):
continue
cover = CoverFile(f, album_name)
candidates.append(cover)
if not candidates:
return None
if len(candidates) == 1:
return candidates[0]
return sorted(candidates, key = lambda c: c.score, reverse = True)[0]
|
# coding: utf-8
#
# This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2018 Alban 'spl0k' Féron
#
# Distributed under terms of the GNU AGPLv3 license.
import os, os.path
import re
from PIL import Image
EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp')
NAMING_SCORE_RULES = (
('cover', 5),
('albumart', 5),
('folder', 5),
('front', 10),
('back', -10),
('large', 2),
('small', -2)
)
class CoverFile(object):
__clean_regex = re.compile(r'[^a-z]')
@staticmethod
def __clean_name(name):
return CoverFile.__clean_regex.sub('', name.lower())
def __init__(self, name, album_name = None):
self.name = name
self.score = 0
for part, score in NAMING_SCORE_RULES:
if part in name.lower():
self.score += score
if album_name:
basename, _ = os.path.splitext(name)
clean = CoverFile.__clean_name(basename)
album_name = CoverFile.__clean_name(album_name)
if clean in album_name or album_name in clean:
self.score += 20
def is_valid_cover(path):
if not os.path.isfile(path):
return False
_, ext = os.path.splitext(path)
if ext.lower() not in EXTENSIONS:
return False
try: # Ensure the image can be read
with Image.open(path):
return True
except IOError:
return False
def find_cover_in_folder(path, album_name = None):
if not os.path.isdir(path):
raise ValueError('Invalid path')
candidates = []
for f in os.listdir(path):
file_path = os.path.join(path, f)
if not is_valid_cover(file_path):
continue
cover = CoverFile(f, album_name)
candidates.append(cover)
if not candidates:
return None
if len(candidates) == 1:
return candidates[0]
return sorted(candidates, key = lambda c: c.score, reverse = True)[0]
|
agpl-3.0
|
Python
|
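The guard added above matters mostly on Python 2, where os.path.join on a unicode directory and an undecodable bytestring filename raises UnicodeDecodeError (a subclass of UnicodeError), killing the whole folder scan. A defensive loop in the same spirit (the directory argument is hypothetical):

import os

def candidate_paths(directory):
    for name in os.listdir(directory):
        try:
            yield os.path.join(directory, name)  # can raise UnicodeError on Python 2
        except UnicodeError:
            continue  # skip the badly encoded entry instead of aborting the scan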
e5e83b75e250ee3c6d8084e23ee777d519293cb6
|
Fix for keystone / swift 1.8.0
|
spilgames/swprobe,spilgames/swprobe
|
swprobe/__init__.py
|
swprobe/__init__.py
|
# Copyright (c) 2012 Spil Games
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version_info = (0 , 3, 1)
version = __version__ = ".".join(map(str, version_info))
|
# Copyright (c) 2012 Spil Games
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version_info = (0 , 3, 0)
version = __version__ = ".".join(map(str, version_info))
|
apache-2.0
|
Python
|
dde25723c845ad4f6df72729e76174210b16e71c
|
remove redundant code
|
IMAMBAKS/pyrelatics
|
test/test_client.py
|
test/test_client.py
|
import pytest
from pyrelatics.client import *
def test_relaticsapi_raise_exception_with_dummy_url():
with pytest.raises(URLError):
RelaticsAPI('dummy_company', 'dummy_env_id', 'dummy_wid')
def test_relaticsapi_initializes_properties():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
assert relaticsapi.environment_id == 'dummy_env_id'
assert relaticsapi.workspace_id == 'dummy_wid'
assert relaticsapi.__repr__() != ''
def test_relaticsapi_login_returns_token_or_falsy_message():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
assert type(relaticsapi.login('dummy_name', 'dummy_password')) != str
with pytest.raises(AttributeError):
relaticsapi.CreateInstancelement('asdas')
def test_relaticsapi_login_dummy_token_raises_exception():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
with pytest.raises(RelaticsException):
relaticsapi.CreateInstanceElement('asdas')
def test_get_result():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.GetResult('dummy_operation', 'dummy_entry_code'), object)
def test_invoke_method_string():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.CreateInstanceElement('dummyCOR'), object)
def test_invoke_method_tuple():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.CreateInstanceRelation(('dummyR1', 'dummyR2', 'dummyRR')), object)
def test_Import():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.Import('dummy_operation', 'dummy', data=[]), object)
|
import pytest
from pyrelatics.client import *
def test_relaticsapi_raise_exception_with_dummy_url():
with pytest.raises(URLError):
relaticsapi = RelaticsAPI('dummy_company', 'dummy_env_id', 'dummy_wid')
def test_relaticsapi_initializes_properties():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
assert relaticsapi.environment_id == 'dummy_env_id'
assert relaticsapi.workspace_id == 'dummy_wid'
assert relaticsapi.__repr__() != ''
def test_relaticsapi_login_returns_token_or_falsy_message():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
assert type(relaticsapi.login('dummy_name', 'dummy_password')) != str
with pytest.raises(AttributeError):
relaticsapi.CreateInstancelement('asdas')
def test_relaticsapi_login_dummy_token_raises_exception():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
with pytest.raises(RelaticsException):
relaticsapi.CreateInstanceElement('asdas')
def test_get_result():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.GetResult('dummy_operation', 'dummy_entry_code'), object)
def test_invoke_method_string():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.CreateInstanceElement('dummyCOR'), object)
def test_invoke_method_tuple():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.CreateInstanceRelation(('dummyR1', 'dummyR2', 'dummyRR')), object)
def test_Import():
relaticsapi = RelaticsAPI('kb', 'dummy_env_id', 'dummy_wid')
relaticsapi.token = '123123'
assert isinstance(relaticsapi.Import('dummy_operation', 'dummy', data=[]), object)
|
mit
|
Python
|
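The cleanup above drops a pointless assignment inside `with pytest.raises(...)`: the bound name can never be used, since the block only completes by raising. When the exception itself is needed, the context manager already exposes it; a minimal runnable example:

import pytest

def explode():
    raise ValueError('boom')

def test_explode():
    with pytest.raises(ValueError) as excinfo:
        explode()  # no reason to keep a return value here
    assert 'boom' in str(excinfo.value)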
9fa55bc43a3f83a57318799ba8b9f2769676bd44
|
Include the tags module tests in the full library testsuite.
|
wulczer/flvlib
|
test/test_flvlib.py
|
test/test_flvlib.py
|
import unittest
import test_primitives, test_astypes, test_helpers, test_tags
def get_suite():
modules = (test_primitives, test_astypes, test_helpers, test_tags)
suites = [unittest.TestLoader().loadTestsFromModule(module) for
module in modules]
return unittest.TestSuite(suites)
def main():
unittest.TextTestRunner(verbosity=2).run(get_suite())
if __name__ == "__main__":
main()
|
import unittest
import test_primitives, test_astypes, test_helpers
def get_suite():
modules = (test_primitives, test_astypes, test_helpers)
suites = [unittest.TestLoader().loadTestsFromModule(module) for
module in modules]
return unittest.TestSuite(suites)
def main():
unittest.TextTestRunner(verbosity=2).run(get_suite())
if __name__ == "__main__":
main()
|
mit
|
Python
|
1598a865094591cbfd1e4e37eddb905fffd1d9b0
|
improve and extend unit tests for Logfile
|
berquist/cclib,berquist/cclib,andersx/cclib,ben-albrecht/cclib,ben-albrecht/cclib,cclib/cclib,gaursagar/cclib,jchodera/cclib,ATenderholt/cclib,berquist/cclib,jchodera/cclib,ghutchis/cclib,cclib/cclib,langner/cclib,langner/cclib,andersx/cclib,cclib/cclib,ATenderholt/cclib,gaursagar/cclib,Schamnad/cclib,Schamnad/cclib,ghutchis/cclib,langner/cclib
|
test/test_parser.py
|
test/test_parser.py
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Run parser unit tests for cclib."""
from __future__ import print_function
import unittest
import numpy
import cclib
class LogfileTest(unittest.TestCase):
"""Unit tests for Logfile class."""
logfile = cclib.parser.logfileparser.Logfile('')
def test_float(self):
"""Are floats converted from strings correctly?"""
self.assertEqual(self.logfile.float("0.0"), 0.0)
self.assertEqual(self.logfile.float("1.0"), 1.0)
self.assertEqual(self.logfile.float("-1.0"), -1.0)
self.assertEqual(self.logfile.float("1.2345E+02"), 123.45)
self.assertEqual(self.logfile.float("1.2345D+02"), 123.45)
self.assertTrue(numpy.isnan(self.logfile.float("*")))
self.assertTrue(numpy.isnan(self.logfile.float("*****")))
def test_normalisesym(self):
"""Does this method return ERROR in base class?"""
self.assertTrue("ERROR" in self.logfile.normalisesym(""))
if __name__ == "__main__":
unittest.main()
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Run parser unit tests for cclib."""
from __future__ import print_function
import unittest
import numpy
import cclib
class LogfileTest(unittest.TestCase):
"""Unit tests for Logfile class."""
logfile = cclib.parser.logfileparser.Logfile('')
def test_float(self):
self.assertTrue(self.logfile.float("1.0"), 1.0)
self.assertTrue(self.logfile.float("1.2345E+02"), 123.45)
self.assertTrue(self.logfile.float("1.2345D+02"), 123.45)
self.assertTrue(self.logfile.float("*****"), numpy.nan)
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
Python
|
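Two pitfalls motivate the rewritten assertions above: unittest's assertTrue(a, b) never compares a to b (the second argument is only the failure message, so assertTrue(x, 1.0) passes for any truthy x), and IEEE 754 NaN compares unequal to everything including itself, so NaN results need numpy.isnan (or math.isnan) rather than equality. A runnable demonstration using only the standard library:

import math
import unittest

class AssertPitfalls(unittest.TestCase):
    def test_pitfalls(self):
        self.assertTrue(2.0, 1.0)         # passes: 1.0 is just the msg argument
        self.assertEqual(1.0, 1.0)        # the comparison actually intended
        nan = float('nan')
        self.assertNotEqual(nan, nan)     # NaN != NaN
        self.assertTrue(math.isnan(nan))  # the reliable NaN check

if __name__ == '__main__':
    unittest.main()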
2250fcaefc1b69116684c72c559a44ee1d6721b6
|
change component count back to 4 in dp 2-cluster test
|
whitews/dpconverge
|
test_dp_2cluster.py
|
test_dp_2cluster.py
|
from dpconverge.data_set import DataSet
from sklearn.datasets.samples_generator import make_blobs
n_features = 2
points_per_feature = 100
centers = [[2, 2], [4, 4]]
ds = DataSet(parameter_count=2)
for i, center in enumerate(centers):
X, y = make_blobs(
n_samples=points_per_feature,
n_features=n_features,
centers=center,
cluster_std=0.3,
random_state=5
)
ds.add_blob(i, X)
ds.plot(ds.classifications, x_lim=[0, 6], y_lim=[0, 6])
ds.cluster(
component_count=4,
burn_in=2,
iteration_count=50,
random_seed=123
)
ds.plot_iteration_traces(0)
ds.plot_iteration_traces(1)
ds.plot_iteration_traces(2)
ds.plot_iteration_traces(3)
|
from dpconverge.data_set import DataSet
from sklearn.datasets.samples_generator import make_blobs
n_features = 2
points_per_feature = 100
centers = [[2, 2], [4, 4]]
ds = DataSet(parameter_count=2)
for i, center in enumerate(centers):
X, y = make_blobs(
n_samples=points_per_feature,
n_features=n_features,
centers=center,
cluster_std=0.3,
random_state=5
)
ds.add_blob(i, X)
ds.plot(ds.classifications, x_lim=[0, 6], y_lim=[0, 6])
ds.cluster(
component_count=2,
burn_in=2,
iteration_count=50,
random_seed=123
)
ds.plot_iteration_traces(0)
ds.plot_iteration_traces(1)
ds.plot_iteration_traces(2)
ds.plot_iteration_traces(3)
|
bsd-3-clause
|
Python
|
2ee45754c73a344d2cdbc0007a5a7877ba45288e
|
improve output, calculate frequencies at non-singletons
|
mateidavid/nanopolish,mateidavid/nanopolish,jts/nanopolish,jts/nanopolish,jts/nanopolish,mateidavid/nanopolish,mateidavid/nanopolish,jts/nanopolish,jts/nanopolish,mateidavid/nanopolish
|
scripts/calculate_methylation_frequency.py
|
scripts/calculate_methylation_frequency.py
|
#! /usr/bin/env python
import math
import sys
import csv
import argparse
from collections import namedtuple
class SiteStats:
def __init__(self, g_size, g_seq):
self.num_reads = 0
self.posterior_methylated = 0
self.called_sites = 0
self.called_sites_methylated = 0
self.group_size = g_size
self.sequence = g_seq
parser = argparse.ArgumentParser( description='Calculate methylation frequency at genomic CpG sites')
parser.add_argument('-c', '--call-threshold', type=float, required=False, default=2.5)
parser.add_argument('-i', '--input', type=str, required=False)
args = parser.parse_args()
assert(args.call_threshold is not None)
sites = dict()
if args.input:
in_fh = open(args.input)
else:
in_fh = sys.stdin
csv_reader = csv.DictReader(in_fh, delimiter='\t')
for record in csv_reader:
num_sites = int(record['num_cpgs'])
key = record['chromosome'] + ":" + record['start'] + ":" + record['end']
if key not in sites:
sites[key] = SiteStats(num_sites, record['sequence'].rstrip())
llr = float(record['log_lik_ratio'])
# is the evidence strong enough at this site to make a call?
if abs(llr) >= args.call_threshold:
sites[key].num_reads += 1
sites[key].called_sites += num_sites
if llr > 0:
sites[key].called_sites_methylated += num_sites
# header
print "\t".join(["chromosome", "start", "end", "num_cpgs_in_group", "called_sites", "called_sites_methylated", "methylated_frequency", "group_sequence"])
for key in sites:
if sites[key].called_sites > 0:
(c, s, e) = key.split(":")
f = float(sites[key].called_sites_methylated) / sites[key].called_sites
print "\t".join([str(x) for x in [c, s, e, sites[key].group_size, sites[key].called_sites, sites[key].called_sites_methylated, f, sites[key].sequence]])
|
#! /usr/bin/env python
import math
import sys
import csv
import argparse
from collections import namedtuple
class SiteStats:
def __init__(self):
self.num_reads = 0
self.posterior_methylated = 0
self.called_sites = 0
self.called_sites_methylated = 0
parser = argparse.ArgumentParser( description='Calculate methylation frequency at genomic CpG sites')
parser.add_argument('-c', '--call-threshold', type=float, required=False, default=0)
parser.add_argument('-i', '--input', type=str, required=False)
args = parser.parse_args()
assert(args.call_threshold is not None)
sites = dict()
if args.input:
in_fh = open(args.input)
else:
in_fh = sys.stdin
csv_reader = csv.DictReader(in_fh, delimiter='\t')
for record in csv_reader:
num_sites = int(record['num_cpgs'])
# skip non-singletons for now
if num_sites > 1:
continue
key = record['chromosome'] + ":" + record['start'] + "-" + record['end']
if key not in sites:
sites[key] = SiteStats()
llr = float(record['log_lik_ratio'])
# is the evidence strong enough at this site to make a call?
if abs(llr) >= args.call_threshold:
sites[key].num_reads += 1
sites[key].called_sites += num_sites
if llr > 0:
sites[key].called_sites_methylated += num_sites
# header
print "\t".join(["key", "called_sites", "called_sites_methylated", "methylated_frequency"])
for key in sites:
if sites[key].called_sites > 0:
f = float(sites[key].called_sites_methylated) / sites[key].called_sites
print "\t".join([str(x) for x in [key, sites[key].called_sites, sites[key].called_sites_methylated, f]])
|
mit
|
Python
|
6a0c3d0dc5f0106fdc1f7682fa65eabfb5c9d250
|
Set version as 0.6.12
|
Alignak-monitoring-contrib/alignak-webui,Alignak-monitoring-contrib/alignak-webui,Alignak-monitoring-contrib/alignak-webui
|
alignak_webui/version.py
|
alignak_webui/version.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, [email protected]
#
"""
Alignak - Web User Interface
"""
# Package name
__pkg_name__ = u"alignak_webui"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"demo"
# Application manifest
__application__ = u"Alignak-WebUI"
VERSION = (0, 6, 12)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__author__ = u"Frédéric Mohier"
__author_email__ = u"[email protected]"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-webui"
__doc_url__ = "http://alignak-web-ui.readthedocs.io/?badge=latest"
__description__ = u"Alignak - Web User Interface"
__releasenotes__ = u"""Alignak monitoring framework Web User Interface"""
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
__manifest__ = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'url': __git_url__,
'doc': __doc_url__
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, [email protected]
#
"""
Alignak - Web User Interface
"""
# Package name
__pkg_name__ = u"alignak_webui"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"demo"
# Application manifest
__application__ = u"Alignak-WebUI"
VERSION = (0, 6, 11)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__author__ = u"Frédéric Mohier"
__author_email__ = u"[email protected]"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-webui"
__doc_url__ = "http://alignak-web-ui.readthedocs.io/?badge=latest"
__description__ = u"Alignak - Web User Interface"
__releasenotes__ = u"""Alignak monitoring framework Web User Interface"""
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
__manifest__ = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'url': __git_url__,
'doc': __doc_url__
}
|
agpl-3.0
|
Python
|
b4013acd97851b041a47afa87e0da137e556ca3f
|
sort the output
|
jmtd/freedoom,CWolfRU/freedoom,jmtd/freedoom,CWolfRU/freedoom,jmtd/freedoom
|
tools/gen_gallery.py
|
tools/gen_gallery.py
|
#!/usr/bin/python
import os,sys,re
# this sucks
patches = [ x for x in os.listdir('.') if re.match(r'.*\.gif$', x) ]
patches.sort()
print '''<style type="text/css">
div {
float: left;
width: 20%;
}
</style>
'''
print ''.join(['<div><img src="%s" /><br />%s</div>' % (x,x) for x in patches])
|
#!/usr/bin/python
import os,sys,re
# this sucks
patches = [ x for x in os.listdir('.') if re.match(r'.*\.gif$', x) ]
print '''<style type="text/css">
div {
float: left;
width: 20%;
}
</style>
'''
print ''.join(['<div><img src="%s" /><br />%s</div>' % (x,x) for x in patches])
|
bsd-3-clause
|
Python
|
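os.listdir returns entries in arbitrary, filesystem-dependent order, which is why the commit adds an explicit sort before emitting the gallery. The filter and the sort also compose into one expression:

import os, re

# sorted() pins the page order regardless of how the filesystem enumerates files
patches = sorted(x for x in os.listdir('.') if re.match(r'.*\.gif$', x))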
35f8ac20ec5ef830f264ba51bcb5df5af72b24d6
|
mask out HC3N features
|
adamginsburg/APEX_CMZ_H2CO,keflavich/APEX_CMZ_H2CO,keflavich/APEX_CMZ_H2CO,adamginsburg/APEX_CMZ_H2CO
|
analysis/masked_cubes.py
|
analysis/masked_cubes.py
|
import numpy as np
from spectral_cube import SpectralCube,BooleanArrayMask
from astropy import units as u
from paths import hpath
from astropy.io import fits
import time
from astropy import log
t0 = time.time()
hc3n_regions = [{'v':(-101,55),
'x':(500,533),
'y':(108,133),},
{'v':(-133,-70),
'x':(787,884),
'y':(87,120),}]
def mask_out_region(mask_array, cube, regions=hc3n_regions):
for region in regions:
z = [cube.closest_spectral_channel(v*u.km/u.s)
for v in region['v']]
view = [slice(*z),
slice(*region['y']),
slice(*region['x'])
]
mask_array[view] = False
return mask_array
cube303 = SpectralCube.read(hpath('APEX_H2CO_303_202_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
cube321 = SpectralCube.read(hpath('APEX_H2CO_321_220_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
maskarr = mask_out_region(fits.getdata(hpath('APEX_H2CO_303_202_bl_mask.fits')).astype('bool'), cube303)
mask = (maskarr &
cube303.mask.include(cube303, cube303.wcs) &
cube321.mask.include(cube321, cube321.wcs))
bmask = BooleanArrayMask(mask, cube303.wcs)
cube303m = cube303.with_mask(bmask)
cube321m = cube321.with_mask(bmask)
cube303sm = SpectralCube.read(hpath('APEX_H2CO_303_202_smooth_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
cube321sm = SpectralCube.read(hpath('APEX_H2CO_321_220_smooth_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
smmaskarr = mask_out_region(fits.getdata(hpath('APEX_H2CO_303_202_smooth_bl_mask.fits')).astype('bool'), cube303sm)
masksm = (smmaskarr &
cube303sm.mask.include(cube303sm, cube303sm.wcs) &
cube321sm.mask.include(cube321sm, cube321sm.wcs))
bmasksm = BooleanArrayMask(masksm, cube303sm.wcs)
cube303msm = cube303sm.with_mask(bmasksm)
cube321msm = cube321sm.with_mask(bmasksm)
# resample smoothed mask onto original grid
masksm_rs = np.zeros_like(mask, dtype='bool')
masksm_rs[::2,:,:] = masksm
masksm_rs[1::2,:,:] = masksm
bmasksm_rs = BooleanArrayMask(masksm_rs, cube303.wcs)
sncube = SpectralCube.read(hpath('APEX_H2CO_303_202_signal_to_noise_cube.fits'))
sncube._wcs = cube303._wcs
sncube.mask._wcs = cube303._wcs
sncubesm = SpectralCube.read(hpath('APEX_H2CO_303_202_smooth_signal_to_noise_cube.fits'))
sncubesm._wcs = cube303sm._wcs
sncubesm.mask._wcs = cube303sm._wcs
log.info("Masked cube creation took {0:0.1f} seconds".format(time.time()-t0))
|
import numpy as np
from spectral_cube import SpectralCube,BooleanArrayMask
from astropy import units as u
from paths import hpath
from astropy.io import fits
import time
from astropy import log
t0 = time.time()
cube303 = SpectralCube.read(hpath('APEX_H2CO_303_202_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
cube321 = SpectralCube.read(hpath('APEX_H2CO_321_220_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
mask = (fits.getdata(hpath('APEX_H2CO_303_202_bl_mask.fits')).astype('bool') &
cube303.mask.include(cube303._data, cube303.wcs) &
cube321.mask.include(cube321._data, cube321.wcs))
bmask = BooleanArrayMask(mask, cube303.wcs)
cube303m = cube303.with_mask(bmask)
cube321m = cube321.with_mask(bmask)
cube303sm = SpectralCube.read(hpath('APEX_H2CO_303_202_smooth_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
cube321sm = SpectralCube.read(hpath('APEX_H2CO_321_220_smooth_bl.fits')).with_spectral_unit(u.km/u.s, velocity_convention='radio')
masksm = (fits.getdata(hpath('APEX_H2CO_303_202_smooth_bl_mask.fits')).astype('bool') &
cube303sm.mask.include(cube303sm._data, cube303sm.wcs) &
cube321sm.mask.include(cube321sm._data, cube321sm.wcs))
bmasksm = BooleanArrayMask(masksm, cube303sm.wcs)
cube303msm = cube303sm.with_mask(bmasksm)
cube321msm = cube321sm.with_mask(bmasksm)
# resample smoothed mask onto original grid
masksm_rs = np.zeros_like(mask, dtype='bool')
masksm_rs[::2,:,:] = masksm
masksm_rs[1::2,:,:] = masksm
bmasksm_rs = BooleanArrayMask(masksm_rs, cube303.wcs)
sncube = SpectralCube.read(hpath('APEX_H2CO_303_202_signal_to_noise_cube.fits'))
sncube._wcs = cube303._wcs
sncube.mask._wcs = cube303._wcs
sncubesm = SpectralCube.read(hpath('APEX_H2CO_303_202_smooth_signal_to_noise_cube.fits'))
sncubesm._wcs = cube303sm._wcs
sncubesm.mask._wcs = cube303sm._wcs
log.info("Masked cube creation took {0:0.1f} seconds".format(time.time()-t0))
|
bsd-3-clause
|
Python
|
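The new mask_out_region helper above clears a rectangular (spectral, y, x) block of a boolean include-mask; underneath it is ordinary numpy slice assignment. A tiny standalone version with made-up coordinates:

import numpy as np

mask = np.ones((10, 20, 30), dtype=bool)            # (channels, y, x)
region = {'v': (2, 5), 'y': (4, 8), 'x': (10, 15)}  # hypothetical ranges

view = (slice(*region['v']), slice(*region['y']), slice(*region['x']))
mask[view] = False                 # exclude the contaminated block
print(mask.sum(), '/', mask.size)  # 5940 / 6000 voxels stay included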
79f8f6c922e6f0be3f6bf62c13cbe6dc9c50366a
|
Remove project_config dependency
|
hackoregon/team-budget,hackoregon/team-budget,hackoregon/team-budget
|
budget_proj/budget_proj/settings/production.py
|
budget_proj/budget_proj/settings/production.py
|
import requests
from .base import *
import os
# from .. import project_config
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# ALLOWED_HOSTS = project_config.ALLOWED_HOSTS
ALLOWED_HOSTS = ['*']
# Get the IPV4 address we're working with on AWS
# The Loadbalancer uses this ip address for healthchecks
# EC2_PRIVATE_IP = None
# try:
# EC2_PRIVATE_IP = requests.get('http://169.254.169.254/latest/meta-data/local-ipv4', timeout=0.01).text
# except requests.exceptions.RequestException:
# pass
# if EC2_PRIVATE_IP:
# ALLOWED_HOSTS.append(EC2_PRIVATE_IP)
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django_db_geventpool.backends.postgresql_psycopg2',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'NAME': os.environ.get('POSTGRES_NAME'),
'USER': os.environ.get('POSTGRES_USER'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': os.environ.get('POSTGRES_PORT'), }
}
|
import requests
from .base import *
from .. import project_config
SECRET_KEY = project_config.DJANGO_SECRET_KEY
ALLOWED_HOSTS = project_config.ALLOWED_HOSTS
# Get the IPV4 address we're working with on AWS
# The Loadbalancer uses this ip address for healthchecks
EC2_PRIVATE_IP = None
try:
EC2_PRIVATE_IP = requests.get('http://169.254.169.254/latest/meta-data/local-ipv4', timeout=0.01).text
except requests.exceptions.RequestException:
pass
if EC2_PRIVATE_IP:
ALLOWED_HOSTS.append(EC2_PRIVATE_IP)
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': project_config.AWS['ENGINE'],
'NAME': project_config.AWS['NAME'],
'HOST': project_config.AWS['HOST'],
'PORT': project_config.AWS['PORT'],
'USER': project_config.AWS['USER'],
'PASSWORD': project_config.AWS['PASSWORD'],
}
}
|
mit
|
Python
|
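Moving credentials into environment variables, as the rewritten settings do, keeps secrets out of the repository. A common refinement is to fail fast when a required variable is missing instead of handing None to the database driver; a small sketch (variable names are illustrative, not part of the project):

import os

def require_env(name):
    value = os.environ.get(name)
    if value is None:
        raise RuntimeError('missing required environment variable: %s' % name)
    return value

SECRET_KEY = require_env('DJANGO_SECRET_KEY')
DB_PORT = os.environ.get('POSTGRES_PORT', '5432')  # optional, with a sane default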
02360f5251ac308f45cb210a305fa225a056e1be
|
add travis config for keen public read key
|
rdhyee/osf.io,chrisseto/osf.io,caneruguz/osf.io,emetsger/osf.io,felliott/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,monikagrabowska/osf.io,binoculars/osf.io,mluo613/osf.io,caneruguz/osf.io,SSJohns/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,wearpants/osf.io,baylee-d/osf.io,wearpants/osf.io,hmoco/osf.io,Nesiehr/osf.io,mluo613/osf.io,chennan47/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,icereval/osf.io,cslzchen/osf.io,alexschiller/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,cslzchen/osf.io,felliott/osf.io,alexschiller/osf.io,leb2dg/osf.io,Nesiehr/osf.io,caseyrollins/osf.io,icereval/osf.io,samchrisinger/osf.io,acshi/osf.io,cslzchen/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,binoculars/osf.io,cslzchen/osf.io,aaxelb/osf.io,hmoco/osf.io,Johnetordoff/osf.io,wearpants/osf.io,TomBaxter/osf.io,TomBaxter/osf.io,adlius/osf.io,alexschiller/osf.io,binoculars/osf.io,samchrisinger/osf.io,erinspace/osf.io,mluo613/osf.io,mfraezz/osf.io,mluo613/osf.io,emetsger/osf.io,HalcyonChimera/osf.io,leb2dg/osf.io,adlius/osf.io,chrisseto/osf.io,chrisseto/osf.io,leb2dg/osf.io,rdhyee/osf.io,erinspace/osf.io,baylee-d/osf.io,emetsger/osf.io,Johnetordoff/osf.io,cwisecarver/osf.io,rdhyee/osf.io,rdhyee/osf.io,mattclark/osf.io,laurenrevere/osf.io,pattisdr/osf.io,acshi/osf.io,baylee-d/osf.io,chennan47/osf.io,adlius/osf.io,caseyrollins/osf.io,crcresearch/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,acshi/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,SSJohns/osf.io,caneruguz/osf.io,saradbowman/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,sloria/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,mattclark/osf.io,Johnetordoff/osf.io,monikagrabowska/osf.io,felliott/osf.io,SSJohns/osf.io,HalcyonChimera/osf.io,samchrisinger/osf.io,samchrisinger/osf.io,wearpants/osf.io,emetsger/osf.io,cwisecarver/osf.io,adlius/osf.io,mfraezz/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,sloria/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,sloria/osf.io,caneruguz/osf.io,acshi/osf.io,icereval/osf.io,mfraezz/osf.io,SSJohns/osf.io,leb2dg/osf.io,mluo613/osf.io,hmoco/osf.io,hmoco/osf.io,laurenrevere/osf.io,Nesiehr/osf.io,mfraezz/osf.io,chennan47/osf.io,alexschiller/osf.io,chrisseto/osf.io,laurenrevere/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io
|
website/settings/local-travis.py
|
website/settings/local-travis.py
|
# -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
import inspect
from . import defaults
import os
DB_PORT = 27017
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SECURE_MODE = not DEBUG_MODE # Disable osf secure cookie
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
API_DOMAIN = PROTOCOL + 'localhost:8000/'
SEARCH_ENGINE = 'elastic'
USE_EMAIL = False
USE_CELERY = False
USE_GNUPG = False
# Email
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Session
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
SESSION_COOKIE_SECURE = SECURE_MODE
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# In-memory result backend
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'memory'
USE_CDN_FOR_CLIENT_LIBS = False
SENTRY_DSN = None
TEST_DB_NAME = DB_NAME = 'osf_test'
VARNISH_SERVERS = ['http://localhost:8080']
# if ENABLE_VARNISH isn't set in python read it from the env var and set it
locals().setdefault('ENABLE_VARNISH', os.environ.get('ENABLE_VARNISH') == 'True')
KEEN = {
'public': {
'project_id': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'master_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'write_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'read_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
},
'private': {
'project_id': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'write_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'read_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
},
}
|
# -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
import inspect
from . import defaults
import os
DB_PORT = 27017
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SECURE_MODE = not DEBUG_MODE # Disable osf secure cookie
PROTOCOL = 'https://' if SECURE_MODE else 'http://'
DOMAIN = PROTOCOL + 'localhost:5000/'
API_DOMAIN = PROTOCOL + 'localhost:8000/'
SEARCH_ENGINE = 'elastic'
USE_EMAIL = False
USE_CELERY = False
USE_GNUPG = False
# Email
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Session
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
SESSION_COOKIE_SECURE = SECURE_MODE
OSF_SERVER_KEY = None
OSF_SERVER_CERT = None
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# In-memory result backend
CELERY_RESULT_BACKEND = 'cache'
CELERY_CACHE_BACKEND = 'memory'
USE_CDN_FOR_CLIENT_LIBS = False
SENTRY_DSN = None
TEST_DB_NAME = DB_NAME = 'osf_test'
VARNISH_SERVERS = ['http://localhost:8080']
# if ENABLE_VARNISH isn't set in python read it from the env var and set it
locals().setdefault('ENABLE_VARNISH', os.environ.get('ENABLE_VARNISH') == 'True')
KEEN = {
'public': {
'project_id': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'master_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'write_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
},
'private': {
'project_id': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'write_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
'read_key': '123456789abcdef101112131415161718191a1b1c1d1e1f20212223242526272',
},
}
|
apache-2.0
|
Python
|
47c2936e65d00a08896b4e60060ff737b7a2f675
|
Check that the permission migrations work
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
app/tests/workstations_tests/test_migrations.py
|
app/tests/workstations_tests/test_migrations.py
|
import pytest
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from guardian.shortcuts import get_perms
from grandchallenge.workstations.models import Workstation
from tests.factories import UserFactory
@pytest.mark.django_db(transaction=True)
def test_workstation_group_migration():
executor = MigrationExecutor(connection)
app = "workstations"
migrate_from = [(app, "0001_initial")]
migrate_to = [(app, "0004_auto_20190813_1302")]
executor.migrate(migrate_from)
old_apps = executor.loader.project_state(migrate_from).apps
user = UserFactory()
OldWorkstation = old_apps.get_model(app, "Workstation")
old_ws = OldWorkstation.objects.create(title="foo")
assert not hasattr(old_ws, "editors_group")
assert not hasattr(old_ws, "users_group")
# Reload
executor.loader.build_graph()
# Migrate forwards
executor.migrate(migrate_to)
new_ws = Workstation.objects.get(title="foo")
new_ws.add_user(user=user)
assert new_ws.editors_group
assert new_ws.users_group
assert new_ws.slug == old_ws.slug
assert new_ws.title == old_ws.title
assert "view_workstation" in get_perms(user, new_ws)
|
import pytest
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
@pytest.mark.django_db(transaction=True)
def test_workstation_group_migration():
executor = MigrationExecutor(connection)
app = "workstations"
migrate_from = [(app, "0001_initial")]
migrate_to = [(app, "0004_auto_20190813_1302")]
executor.migrate(migrate_from)
old_apps = executor.loader.project_state(migrate_from).apps
Workstation = old_apps.get_model(app, "Workstation")
old_ws = Workstation.objects.create(title="foo")
assert not hasattr(old_ws, "editors_group")
assert not hasattr(old_ws, "users_group")
# Reload
executor.loader.build_graph()
# Migrate forwards
executor.migrate(migrate_to)
new_apps = executor.loader.project_state(migrate_to).apps
Workstation = new_apps.get_model(app, "Workstation")
new_ws = Workstation.objects.get(title="foo")
assert new_ws.editors_group
assert new_ws.users_group
assert new_ws.slug == old_ws.slug
assert new_ws.title == old_ws.title
|
apache-2.0
|
Python
|
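The test above reads the pre-migration state through the executor loader's historical app registry and the post-migration state through the real model class, so the data migration is exercised end to end. The reusable core of that pattern, sketched with placeholder app and migration names:

from django.db import connection
from django.db.migrations.executor import MigrationExecutor

def migrate_to(targets):
    # migrate the test database to `targets` and return the historical apps
    executor = MigrationExecutor(connection)
    executor.loader.build_graph()  # refresh the graph after earlier migrations
    executor.migrate(targets)
    return executor.loader.project_state(targets).apps

old_apps = migrate_to([('myapp', '0001_initial')])
OldModel = old_apps.get_model('myapp', 'MyModel')  # fields as of 0001, not today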
f92b27c1ea241f381e41ef9b20bc6e75fc03c159
|
Add OCA as author
|
bluestar-solutions/account-invoicing,gurneyalex/account-invoicing,akretion/account-invoicing,brain-tec/account-invoicing,raycarnes/account-invoicing,BT-fgarbely/account-invoicing,Trust-Code/account-invoicing,open-synergy/account-invoicing,iDTLabssl/account-invoicing,scigghia/account-invoicing,Elneo-group/account-invoicing,BT-jmichaud/account-invoicing,kmee/account-invoicing,sergiocorato/account-invoicing,damdam-s/account-invoicing,EBII/account-invoicing,acsone/account-invoicing,kittiu/account-invoicing,brain-tec/account-invoicing,Noviat/account-invoicing,acsone/account-invoicing,BT-ojossen/account-invoicing,Antiun/account-invoicing,eezee-it/account-invoicing,abstract-open-solutions/account-invoicing,archetipo/account-invoicing,hbrunn/account-invoicing,sysadminmatmoz/account-invoicing,kmee/account-invoicing,Endika/account-invoicing
|
account_invoice_merge_payment/__openerp__.py
|
account_invoice_merge_payment/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of account_invoice_merge_payment,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_merge_payment is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# account_invoice_merge_payment is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with account_invoice_merge_payment.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "account_invoice_merge_payment",
'summary': """
Use invoice merge regarding fields on Account Payment Partner""",
'author': "ACSONE SA/NV,Odoo Community Association (OCA)",
'website': "http://acsone.eu",
'category': 'Invoicing & Payments',
'version': '0.1',
'license': 'AGPL-3',
'depends': [
'account_invoice_merge',
'account_payment_partner',
],
'data': [],
'auto_install': True
}
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of account_invoice_merge_payment,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_merge_payment is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# account_invoice_merge_payment is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with account_invoice_merge_payment.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "account_invoice_merge_payment",
'summary': """
Use invoice merge regarding fields on Account Payment Partner""",
'author': "ACSONE SA/NV",
'website': "http://acsone.eu",
'category': 'Invoicing & Payments',
'version': '0.1',
'license': 'AGPL-3',
'depends': [
'account_invoice_merge',
'account_payment_partner',
],
'data': [],
'auto_install': True
}
|
agpl-3.0
|
Python
|
ca27ff5efa987ce413d7e7f43c49fad189930aed
|
Fix missing migration dependency
|
stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten
|
entities/migrations/0045_auto_20160922_1330.py
|
entities/migrations/0045_auto_20160922_1330.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals

from django.db import migrations


def set_groupcontent_group(apps, schema_editor):
    Group1 = apps.get_model('entities.Group')
    Group2 = apps.get_model('groups.Group')
    GroupContent = apps.get_model('entities.GroupContent')
    for gc in GroupContent.objects.values('id', 'group_id'):
        g1 = Group1.objects.get(id=gc['group_id'])
        g2 = Group2.objects.get(slug=g1.slug)
        GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)


class Migration(migrations.Migration):

    dependencies = [
        ('groups', '0002_auto_20160922_1108'),
        ('entities', '0044_auto_20160922_1118'),
    ]

    operations = [
        migrations.RunPython(set_groupcontent_group)
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals

from django.db import migrations


def set_groupcontent_group(apps, schema_editor):
    Group1 = apps.get_model('entities.Group')
    Group2 = apps.get_model('groups.Group')
    GroupContent = apps.get_model('entities.GroupContent')
    for gc in GroupContent.objects.values('id', 'group_id'):
        g1 = Group1.objects.get(id=gc['group_id'])
        g2 = Group2.objects.get(slug=g1.slug)
        GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)


class Migration(migrations.Migration):

    dependencies = [
        ('entities', '0044_auto_20160922_1118'),
    ]

    operations = [
        migrations.RunPython(set_groupcontent_group)
    ]
|
agpl-3.0
|
Python
|
72ba3a0401ad08d4df2fdc03b326eab16af47832
|
Bump version to 0.4.2.dev1
|
team23/django_backend,team23/django_backend,team23/django_backend,team23/django_backend,team23/django_backend
|
django_backend/__init__.py
|
django_backend/__init__.py
|
from .backend.renderable import Renderable # noqa
from .group import Group # noqa
from .sitebackend import SiteBackend
__version__ = '0.4.2.dev1'
default_app_config = 'django_backend.apps.DjangoBackendConfig'
site = SiteBackend(id='backend')
|
from .backend.renderable import Renderable # noqa
from .group import Group # noqa
from .sitebackend import SiteBackend
__version__ = '0.4.1'
default_app_config = 'django_backend.apps.DjangoBackendConfig'
site = SiteBackend(id='backend')
|
bsd-3-clause
|
Python
|
6dd3b2f7844e670b8774aa0afd25b83f60753703
|
make sure django is setup before importing
|
fanout/leaderboard,fanout/leaderboard,fanout/leaderboard
|
dblistener.py
|
dblistener.py
|
import os, django
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'leaderboard.settings')
django.setup()

import time
import logging

from rethinkdb.errors import RqlDriverError

from leaderboardapp.models import Board, Player
from leaderboardapp.views import publish_board

logger = logging.getLogger('dblistener')

while True:
    try:
        for change in Player.get_all_changes():
            logger.debug('got change: %s' % change)
            try:
                row = change['new_val']
                board = Board.get(row['board'])
                publish_board(board)
            except Exception:
                logger.exception('failed to handle')
    except RqlDriverError:
        logger.exception('failed to connect')
        time.sleep(1)
|
import os
import time
import logging

import django

from rethinkdb.errors import RqlDriverError

from leaderboardapp.models import Board, Player
from leaderboardapp.views import publish_board

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'leaderboard.settings')
django.setup()

logger = logging.getLogger('dblistener')

while True:
    try:
        for change in Player.get_all_changes():
            logger.debug('got change: %s' % change)
            try:
                row = change['new_val']
                board = Board.get(row['board'])
                publish_board(board)
            except Exception:
                logger.exception('failed to handle')
    except RqlDriverError:
        logger.exception('failed to connect')
        time.sleep(1)
|
mit
|
Python
|
0eb08689906556951bacf82166d13cda7a8d720b
|
Update pylsy_test.py
|
muteness/Pylsy,bcho/Pylsy,gnithin/Pylsy,gnithin/Pylsy,muteness/Pylsy,huiyi1990/Pylsy,huiyi1990/Pylsy,bcho/Pylsy
|
tests/pylsy_test.py
|
tests/pylsy_test.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import unittest

from pylsy.pylsy import PylsyTable


class PylsyTableTests(unittest.TestCase):

    def setUp(self):
        attributes=["name","age"]
        self.table = PylsyTable(attributes)

    def tearDown(self):
        self.table = None

    def testCreateTable(self):
        name=["a", "b"]
        self.table.add_data("name",name)
        age=[1, 2]
        self.table.add_data("age",age)
        correct_file = open('correct.out', 'r')
        correctPrint = correct_file.read()
        try:
            import io
            from contextlib import redirect_stdout
            with io.StringIO() as buf, redirect_stdout(buf):
                print('redirected')
                output = buf.getvalue()
            self.assertEqual(output, correctPrint)
        except ImportError:
            import sys
            f_handler = open('test.out', 'w')
            sys.stdout=f_handler
            self.table.create_table()
            f_handler.close()
            f_handler = open('test.out', 'r')
            self.assertEqual(f_handler.read(), correctPrint)


if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import

__author__ = 'choleraehyq'

import unittest

from pylsy.pylsy import PylsyTable


class PylsyTableTests(unittest.TestCase):

    def setUp(self):
        attributes=["name","age"]
        self.table = PylsyTable(attributes)

    def tearDown(self):
        self.table = None

    def testCreateTable(self):
        name=["a", "b"]
        self.table.add_data("name",name)
        age=[1, 2]
        self.table.add_data("age",age)
        correct_file = open('correct.out', 'r')
        correctPrint = correct_file.read()
        try:
            import io
            from contextlib import redirect_stdout
            with io.StringIO() as buf, redirect_stdout(buf):
                print('redirected')
                output = buf.getvalue()
            self.assertEqual(output, correctPrint)
        except ImportError:
            import sys
            f_handler = open('test.out', 'w')
            sys.stdout=f_handler
            self.table.create_table()
            f_handler.close()
            f_handler = open('test.out', 'r')
            self.assertEqual(f_handler.read(), correctPrint)


if __name__ == '__main__':
    unittest.main()
|
mit
|
Python
|
c4d784f1b478ca80697e9bbe843ebf84fe124f2b
|
update legacy scripted test to use new syntax
|
eHealthAfrica/rapidsms,catalpainternational/rapidsms,lsgunth/rapidsms,lsgunth/rapidsms,eHealthAfrica/rapidsms,catalpainternational/rapidsms,peterayeni/rapidsms,ehealthafrica-ci/rapidsms,caktus/rapidsms,eHealthAfrica/rapidsms,catalpainternational/rapidsms,caktus/rapidsms,catalpainternational/rapidsms,peterayeni/rapidsms,peterayeni/rapidsms,peterayeni/rapidsms,lsgunth/rapidsms,caktus/rapidsms,lsgunth/rapidsms,ehealthafrica-ci/rapidsms,ehealthafrica-ci/rapidsms
|
lib/rapidsms/tests/scripted.py
|
lib/rapidsms/tests/scripted.py
|
import warnings

from rapidsms.tests.harness import TestScript as TestScriptMixin


class TestScript(TestScriptMixin):

    def startRouter(self):
        warnings.warn("startRouter is deprecated and will be removed in a future "
                      "release. Please, see the release notes.", DeprecationWarning, stacklevel=2)
        self.clear() # make sure the outbox is clean

    def stopRouter(self):
        warnings.warn("stopRouter is deprecated and will be removed in a future "
                      "release. Please, see the release notes.", DeprecationWarning, stacklevel=2)
|
import warnings

from django.test import TestCase

from rapidsms.tests.harness import TestScript as TestScriptMixin


class TestScript(TestScriptMixin, TestCase):

    def startRouter(self):
        warnings.warn("startRouter is deprecated and will be removed in a future "
                      "release. Please, see the release notes.", DeprecationWarning, stacklevel=2)
        self.clear() # make sure the outbox is clean

    def stopRouter(self):
        warnings.warn("stopRouter is deprecated and will be removed in a future "
                      "release. Please, see the release notes.", DeprecationWarning, stacklevel=2)
|
bsd-3-clause
|
Python
|
8067af0c58ad3815fb15b530708bcb96a1874f3c
|
Add unit test for removing an element from an Ordering
|
madman-bob/python-order-maintenance
|
tests/test_basic.py
|
tests/test_basic.py
|
from unittest import TestCase

from ordering import Ordering


class TestOrderingBasic(TestCase):
    def test_empty_insert_start(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        self.assertIn(0, ordering)
        self.assertNotIn(1, ordering)

    def test_empty_insert_end(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_end(0)
        self.assertIn(0, ordering)
        self.assertNotIn(1, ordering)

    def test_remove(self) -> None:
        ordering = Ordering[int]()
        self.assertNotIn(0, ordering)
        ordering.insert_start(0)
        self.assertIn(0, ordering)
        ordering.remove(0)
        self.assertNotIn(0, ordering)

    def test_basic_insert_after(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        ordering.insert_after(0, 1)
        self.assertIn(0, ordering)
        self.assertIn(1, ordering)
        self.assertNotIn(2, ordering)

    def test_basic_insert_before(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        ordering.insert_before(0, 1)
        self.assertIn(0, ordering)
        self.assertIn(1, ordering)
        self.assertNotIn(2, ordering)

    def test_basic_compare(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        ordering.insert_after(0, 1)
        ordering.insert_before(0, 2)
        self.assertTrue(ordering.compare(0, 1))
        self.assertFalse(ordering.compare(1, 0))
        self.assertTrue(ordering.compare(2, 0))
        self.assertFalse(ordering.compare(0, 2))
|
from unittest import TestCase

from ordering import Ordering


class TestOrderingBasic(TestCase):
    def test_empty_insert_start(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        self.assertIn(0, ordering)
        self.assertNotIn(1, ordering)

    def test_empty_insert_end(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_end(0)
        self.assertIn(0, ordering)
        self.assertNotIn(1, ordering)

    def test_basic_insert_after(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        ordering.insert_after(0, 1)
        self.assertIn(0, ordering)
        self.assertIn(1, ordering)
        self.assertNotIn(2, ordering)

    def test_basic_insert_before(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        ordering.insert_before(0, 1)
        self.assertIn(0, ordering)
        self.assertIn(1, ordering)
        self.assertNotIn(2, ordering)

    def test_basic_compare(self) -> None:
        ordering = Ordering[int]()
        ordering.insert_start(0)
        ordering.insert_after(0, 1)
        ordering.insert_before(0, 2)
        self.assertTrue(ordering.compare(0, 1))
        self.assertFalse(ordering.compare(1, 0))
        self.assertTrue(ordering.compare(2, 0))
        self.assertFalse(ordering.compare(0, 2))
|
mit
|
Python
|
014184197b6eeede4a8681a446aa1a8e7bdce9fa
|
Update views.py
|
agarone-mm/scholastic-demo
|
demo/views.py
|
demo/views.py
|
import flask
import sys

from flask import request

app = flask.Flask(__name__)


@app.route('/')
def hello_world():
    return flask.jsonify({
        'message': 'Hello World!',
        'python': sys.version,
        'headers': str(request.headers)
    })
|
import flask
import sys

from flask import request

app = flask.Flask(__name__)


@app.route('/')
def hello_world():
    return flask.jsonify({
        'message': 'Hello Worlb!',
        'python': sys.version,
        'headers': str(request.headers)
    })
|
mit
|
Python
|
2e18e05659e9ba88f2fcce77259792f84b25e5fa
|
Add ability to disable frame evaluation
|
Elizaveta239/PyDev.Debugger,Elizaveta239/PyDev.Debugger,fabioz/PyDev.Debugger,fabioz/PyDev.Debugger,fabioz/PyDev.Debugger,Elizaveta239/PyDev.Debugger,Elizaveta239/PyDev.Debugger,fabioz/PyDev.Debugger,Elizaveta239/PyDev.Debugger,fabioz/PyDev.Debugger
|
_pydevd_frame_eval/pydevd_frame_eval_main.py
|
_pydevd_frame_eval/pydevd_frame_eval_main.py
|
import os
import sys

IS_PY36_OR_OLDER = False

if (sys.version_info[0] == 3 and sys.version_info[1] >= 6) or sys.version_info[0] > 3:
    IS_PY36_OR_OLDER = True

set_frame_eval = None
stop_frame_eval = None

use_frame_eval = os.environ.get('PYDEVD_USE_FRAME_EVAL', None)

if use_frame_eval == 'NO':
    frame_eval_func, stop_frame_eval = None, None
else:
    if IS_PY36_OR_OLDER:
        try:
            from _pydevd_frame_eval.pydevd_frame_evaluator import frame_eval_func, stop_frame_eval
        except ImportError:
            from _pydev_bundle.pydev_monkey import log_error_once

            dirname = os.path.dirname(__file__)
            log_error_once("warning: Debugger speedups for Python 3.6 not found. Run '\"%s\" \"%s\" build_ext --inplace' to build." % (
                sys.executable, os.path.join(dirname, 'setup.py')))
|
import os
import sys

IS_PY36_OR_OLDER = False

if (sys.version_info[0] == 3 and sys.version_info[1] >= 6) or sys.version_info[0] > 3:
    IS_PY36_OR_OLDER = True

set_frame_eval = None
stop_frame_eval = None

if IS_PY36_OR_OLDER:
    try:
        from _pydevd_frame_eval.pydevd_frame_evaluator import frame_eval_func, stop_frame_eval
    except ImportError:
        from _pydev_bundle.pydev_monkey import log_error_once

        dirname = os.path.dirname(__file__)
        log_error_once("warning: Debugger speedups for Python 3.6 not found. Run '\"%s\" \"%s\" build_ext --inplace' to build." % (
            sys.executable, os.path.join(dirname, 'setup.py')))
|
epl-1.0
|
Python
|
56675ad39c734993561b47373fa9db39e7f36323
|
Remove spec_set from mock.patch('os.chdir').
|
s3rvac/git-branch-viewer,s3rvac/git-branch-viewer
|
tests/test_utils.py
|
tests/test_utils.py
|
#
# Unit tests for the viewer.utils module.
#
# Copyright: (c) 2014 by Petr Zemek <[email protected]> and contributors
# License: BSD, see LICENSE for more details
#

import os
import unittest
from unittest import mock

from viewer.utils import chdir


@mock.patch('os.chdir')
class ChdirTests(unittest.TestCase):
    def setUp(self):
        self.orig_cwd = os.getcwd()
        self.dst_dir = 'test'

    def test_os_chdir_is_called_with_dst_dir_in_entry(self, mock_chdir):
        with chdir(self.dst_dir):
            mock_chdir.assert_called_once_with(self.dst_dir)

    def test_os_chdir_is_called_with_orig_cwd_in_exit(self, mock_chdir):
        with chdir(self.dst_dir):
            mock_chdir.reset_mock()
        mock_chdir.assert_called_once_with(self.orig_cwd)

    def test_os_chdir_is_called_with_orig_cwd_in_exit_even_if_exception_occurs(
            self, mock_chdir):
        try:
            with chdir(self.dst_dir):
                mock_chdir.reset_mock()
                raise RuntimeError
        except RuntimeError:
            mock_chdir.assert_called_once_with(self.orig_cwd)
|
#
# Unit tests for the viewer.utils module.
#
# Copyright: (c) 2014 by Petr Zemek <[email protected]> and contributors
# License: BSD, see LICENSE for more details
#

import os
import unittest
from unittest import mock

from viewer.utils import chdir


@mock.patch('os.chdir', spec_set=os.chdir)
class ChdirTests(unittest.TestCase):
    def setUp(self):
        self.orig_cwd = os.getcwd()
        self.dst_dir = 'test'

    def test_os_chdir_is_called_with_dst_dir_in_entry(self, mock_chdir):
        with chdir(self.dst_dir):
            mock_chdir.assert_called_once_with(self.dst_dir)

    def test_os_chdir_is_called_with_orig_cwd_in_exit(self, mock_chdir):
        with chdir(self.dst_dir):
            mock_chdir.reset_mock()
        mock_chdir.assert_called_once_with(self.orig_cwd)

    def test_os_chdir_is_called_with_orig_cwd_in_exit_even_if_exception_occurs(
            self, mock_chdir):
        try:
            with chdir(self.dst_dir):
                mock_chdir.reset_mock()
                raise RuntimeError
        except RuntimeError:
            mock_chdir.assert_called_once_with(self.orig_cwd)
|
bsd-3-clause
|
Python
|
ea027e70f94d351fade02a3110135e031b9f52c5
|
Update botcmd import.
|
krismolendyke/err-diehardbot
|
dieHardBot.py
|
dieHardBot.py
|
#!/usr/bin/env python

"""A bot which will respond to various Die Hard character name commands and
mentions and respond with a random line spoken by that character in the film.
"""

from errbot.botplugin import BotPlugin
from errbot import botcmd

from dieHard import DieHard


def generate(character):
    f = lambda self, mess, args: "(%s) %s" % (character,
                                              self.diehard.get_random(character))
    f.__name__ = character
    f.__doc__ = "Get a random quote from %s." % character.title()
    return f


class DieHardBotBuilder(type):
    def __new__(mcs, name, bases, classDict):
        newClassDict = dict(classDict.items() +
                            [(character, botcmd(generate(character)))
                             for character in DieHard.CHARACTERS])
        return super(DieHardBotBuilder, mcs).__new__(mcs, name, bases,
                                                     newClassDict)


class DieHardBot(BotPlugin):
    __metaclass__ = DieHardBotBuilder

    def __init__(self):
        super(BotPlugin, self).__init__()
        self.diehard = DieHard()

    def callback_message(self, conn, mess):
        """Listen for Die Hard mentions and interject random lines from those
        characters who were mentioned.
        """
        message = ""
        for character in DieHard.CHARACTERS:
            if mess.getBody().find("(%s)" % character) != -1:
                message = "(%s) %s" % (character,
                                       self.diehard.get_random(character))
                break
        if message:
            self.send(mess.getFrom(), message, message_type=mess.getType())
|
#!/usr/bin/env python

"""A bot which will respond to various Die Hard character name commands and
mentions and respond with a random line spoken by that character in the film.
"""

from errbot.botplugin import BotPlugin
from errbot.jabberbot import botcmd

from dieHard import DieHard


def generate(character):
    f = lambda self, mess, args: "(%s) %s" % (character,
                                              self.diehard.get_random(character))
    f.__name__ = character
    f.__doc__ = "Get a random quote from %s." % character.title()
    return f


class DieHardBotBuilder(type):
    def __new__(mcs, name, bases, classDict):
        newClassDict = dict(classDict.items() +
                            [(character, botcmd(generate(character)))
                             for character in DieHard.CHARACTERS])
        return super(DieHardBotBuilder, mcs).__new__(mcs, name, bases,
                                                     newClassDict)


class DieHardBot(BotPlugin):
    __metaclass__ = DieHardBotBuilder

    def __init__(self):
        super(BotPlugin, self).__init__()
        self.diehard = DieHard()

    def callback_message(self, conn, mess):
        """Listen for Die Hard mentions and interject random lines from those
        characters who were mentioned.
        """
        message = ""
        for character in DieHard.CHARACTERS:
            if mess.getBody().find("(%s)" % character) != -1:
                message = "(%s) %s" % (character,
                                       self.diehard.get_random(character))
                break
        if message:
            self.send(mess.getFrom(), message, message_type=mess.getType())
|
mit
|
Python
|
74286f4d631b09d46a0c9df995aa21e608b99dc2
|
Update regex_utils
|
interhui/py-text,PinaeOS/py-text
|
text/regex_utils.py
|
text/regex_utils.py
|
# coding=utf-8

import re


def parse_line(regex , line):
    if line == None:
        return None
    if regex == None:
        return line
    items = []
    pattern = re.compile(regex)
    match = pattern.match(line)
    if match:
        items = match.groups()
    return items


def check_line(regex, line):
    if line == None:
        return False
    if regex == None:
        return False
    pattern = re.compile(regex)
    match = pattern.match(line)
    if match:
        return True
    else:
        return False


def match(regex, line):
    return check_line(regex, line)


def group(regex, line):
    return parse_line(regex, line)


def sub(regex, repl, line, count = 0):
    if line == None:
        return None
    if regex == None or repl == None:
        return line
    return re.sub(regex, repl, line, count)


def split(regex, line):
    if line == None:
        return None
    if regex == None:
        return line
    return re.split(regex, line)
|
# coding=utf-8

import re


def parse_line(regex , line):
    if line == None:
        return None
    if regex == None:
        return line
    items = []
    pattern = re.compile(regex)
    match = pattern.match(line)
    if match:
        items = match.groups()
    return items


def check_line(regex, line):
    if line == None:
        return False
    if regex == None:
        return False
    pattern = re.compile(regex)
    match = pattern.match(line)
    if match:
        return True
    else:
        return False


def sub(regex, repl, line, count = 0):
    if line == None:
        return None
    if regex == None or repl == None:
        return line
    return re.sub(regex, repl, line, count)


def split(regex, line):
    if line == None:
        return None
    if regex == None:
        return line
    return re.split(regex, line)
|
apache-2.0
|
Python
|
f49fc187bc397a56f03217c88fa06b7ef1704b41
|
Add docstring for `is_landscape()`
|
shunghsiyu/pdf-processor
|
Util.py
|
Util.py
|
"""Collection of Helper Functions"""
import os
from fnmatch import fnmatch
from PyPDF2 import PdfFileReader
def pdf_file(filename):
"""Test whether or the the filename ends with '.pdf'."""
return fnmatch(filename, '*.pdf')
def all_pdf_files_in_directory(path):
"""Return a list of of PDF files in a directory."""
return [filename for filename in os.listdir(path) if pdf_file(filename)]
def concat_pdf_pages(files):
"""A generator that yields one PDF page a time for all pages in the PDF files."""
for input_file in files:
for page in PdfFileReader(input_file).pages:
yield page
def split_on_condition(iterable, predicate):
"""Split a iterable into chunks, where the first item in the chunk will be the
evaluate to True with predicate function, and the rest of the items in the chunk
evaluates to False."""
it = iter(iterable)
# Initialize the chunk list with an item
# StopIteration will be thrown if there are no further items in the iterator
chunk = [it.next()]
while True:
try:
item = it.next()
if predicate(item):
# If the next item should be in a new chunk then return the current chunk
yield chunk
# Then rest the chunk list
chunk = [item]
else:
# Simply append the item to current chunk if it doesn't match the predicate
chunk.append(item)
except StopIteration:
# If the end of the iterator is reached then simply return the current chunk
yield chunk
break
def is_landscape(page):
"""Check whether or not a page is in landscape orientation."""
box = page.mediaBox
return box.getWidth() > box.getHeight()
|
"""Collection of Helper Functions"""
import os
from fnmatch import fnmatch
from PyPDF2 import PdfFileReader
def pdf_file(filename):
"""Test whether or the the filename ends with '.pdf'."""
return fnmatch(filename, '*.pdf')
def all_pdf_files_in_directory(path):
"""Return a list of of PDF files in a directory."""
return [filename for filename in os.listdir(path) if pdf_file(filename)]
def concat_pdf_pages(files):
"""A generator that yields one PDF page a time for all pages in the PDF files."""
for input_file in files:
for page in PdfFileReader(input_file).pages:
yield page
def split_on_condition(iterable, predicate):
"""Split a iterable into chunks, where the first item in the chunk will be the
evaluate to True with predicate function, and the rest of the items in the chunk
evaluates to False."""
it = iter(iterable)
# Initialize the chunk list with an item
# StopIteration will be thrown if there are no further items in the iterator
chunk = [it.next()]
while True:
try:
item = it.next()
if predicate(item):
# If the next item should be in a new chunk then return the current chunk
yield chunk
# Then rest the chunk list
chunk = [item]
else:
# Simply append the item to current chunk if it doesn't match the predicate
chunk.append(item)
except StopIteration:
# If the end of the iterator is reached then simply return the current chunk
yield chunk
break
def is_landscape(page):
box = page.mediaBox
return box.getWidth() > box.getHeight()
|
mit
|
Python
|
92a57e512e4437b781d7db76587d27092033a49a
|
remove dead code
|
rlowrance/re-local-linear,rlowrance/re-local-linear,rlowrance/re-local-linear
|
chart-02-ols-median-of-root-median-squared-errors.py
|
chart-02-ols-median-of-root-median-squared-errors.py
|
# create files for chart-02-ols-median-of-root-mdian-squared-errors
# with these choices
# metric in median-root-median-squared-errors
# model in ols
# ndays in 30 60 ... 360
# predictors in act actlog ct ctlog
# responses in price logprice
# usetax in yes no
# year in 2008
# invocations and files created
# python chart-02X.py makefile -> src/chart-02X.makefile
# python chart-02X.py data -> data/working/chart-02X.data
# python chart-02X.py txt -> data/working/chart-02X.txt
# python chart-02X.py txtY -> data/working/chart-02X-Y.txt

import sys

from Bunch import Bunch
from chart_02_template import chart


def main():
    specs = Bunch(metric='median-of-root-median-squared-errors',
                  title='Median of Root Median Squared Errors',
                  model='ols',
                  training_periods=['30', '60', '90', '120', '150', '180',
                                    '210', '240', '270', '300', '330', '360'],
                  feature_sets=['act', 'actlog', 'ct', 'ctlog'],
                  responses=['price', 'logprice'],
                  year='2008')
    chart(specs=specs,
          argv=sys.argv)


if __name__ == '__main__':
    main()
|
# create files for chart-02-ols-median-of-root-mdian-squared-errors
# with these choices
# metric in median-root-median-squared-errors
# model in ols
# ndays in 30 60 ... 360
# predictors in act actlog ct ctlog
# responses in price logprice
# usetax in yes no
# year in 2008
# invocations and files created
# python chart-02X.py makefile -> src/chart-02X.makefile
# python chart-02X.py data -> data/working/chart-02X.data
# python chart-02X.py txt -> data/working/chart-02X.txt
# python chart-02X.py txtY -> data/working/chart-02X-Y.txt

import sys

from Bunch import Bunch
from chart_02_template import chart


def main():
    def median_value(x):
        cvresult = CvResult(x)

    specs = Bunch(metric='median-of-root-median-squared-errors',
                  title='Median of Root Median Squared Errors',
                  model='ols',
                  training_periods=['30', '60', '90', '120', '150', '180',
                                    '210', '240', '270', '300', '330', '360'],
                  feature_sets=['act', 'actlog', 'ct', 'ctlog'],
                  responses=['price', 'logprice'],
                  usetax=['yes', 'no'],
                  year='2008')
    chart(specs=specs,
          argv=sys.argv)


if __name__ == '__main__':
    main()
|
mit
|
Python
|
3809d9a277412ef7c53905ecdcae55d537e08c95
|
Fix whitespace in tests file
|
jstasiak/travis-solo
|
travis_solo_tests.py
|
travis_solo_tests.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

from nose.tools import eq_, ok_

from travis_solo import Configuration, Loader, Step


class TestLoader(object):
    def setup(self):
        self.loader = Loader()

    def test_loading_steps(self):
        settings = dict(
            before_install=['do before install',],
            install='pip install .',
            script='nosetests',
            after_script=['a', 'b'],
        )
        steps = self.loader.load_steps(settings)
        eq_(steps, (
            Step('before_install', ('do before install',)),
            Step('install', ('pip install .',)),
            Step('script', ('nosetests',)),
            Step('after_script', ('a', 'b'), can_fail=True),
        ))

    def test_loading_configurations(self):
        settings = dict(
            language='python',
            python=['2.7', '3.3'],
            env=['A=a', 'A=b'],
            matrix=dict(
                include=[
                    dict(
                        python='2.7',
                        env='A=c',
                    ),
                ],
                exclude=[
                    dict(
                        python='3.3',
                        env='A=a',
                    ),
                ],
            )
        )
        configurations = self.loader.load_configurations(settings)
        eq_(configurations, (
            Configuration(python='2.7', variables={'A': 'a'}),
            Configuration(python='2.7', variables={'A': 'b'}),
            Configuration(python='3.3', variables={'A': 'b'}),
            Configuration(python='2.7', variables={'A': 'c'}),
        ))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

from nose.tools import eq_, ok_

from travis_solo import Configuration, Loader, Step


class TestLoader(object):
    def setup(self):
        self.loader = Loader()

    def test_loading_steps(self):
        settings = dict(
            before_install=['do before install',],
            install='pip install .',
            script='nosetests',
            after_script=['a', 'b'],
        )
        steps = self.loader.load_steps(settings)
        eq_(steps, (
            Step('before_install', ('do before install',)),
            Step('install', ('pip install .',)),
            Step('script', ('nosetests',)),
            Step('after_script', ('a', 'b'), can_fail=True),
        ))

    def test_loading_configurations(self):
        settings = dict(
            language='python',
            python=['2.7', '3.3'],
            env=['A=a', 'A=b'],
            matrix=dict(
                include=[
                    dict(
                        python='2.7',
                        env='A=c',
                    ),
                ],
                exclude=[
                    dict(
                        python='3.3',
                        env='A=a',
                    ),
                ],
            )
        )
        configurations = self.loader.load_configurations(settings)
        eq_(configurations, (
            Configuration(python='2.7', variables={'A': 'a'}),
            Configuration(python='2.7', variables={'A': 'b'}),
            Configuration(python='3.3', variables={'A': 'b'}),
            Configuration(python='2.7', variables={'A': 'c'}),
        ))
|
mit
|
Python
|
1e222b72e632e5649d26dc71ab44ef31af7459fe
|
Fix rendering of groups in sidebar that didn't get all the template context passed into it.
|
team23/django_backend,team23/django_backend,team23/django_backend,team23/django_backend,team23/django_backend
|
django_backend/group.py
|
django_backend/group.py
|
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string

from .compat import context_flatten


class Group(list):
    """
    A simplistic representation of backends that are related and should be
    displayed as one "group" in the backend (e.g. as one box in the sidebar).
    """

    template_name = 'django_backend/_group.html'

    def __init__(self, id, name=None, position=0, template_name=None):
        self.id = id
        if name is None:
            name = pretty_name(id)
        self.template_name = template_name or self.template_name
        self.name = name
        self.position = position
        super(Group, self).__init__()

    @property
    def backends(self):
        return list(self)

    def get_context_data(self, context, **kwargs):
        data = {
            'group': self,
        }
        data.update(kwargs)
        return data

    def get_template_name(self):
        return self.template_name

    def render(self, context):
        context_data = {}
        if isinstance(context, Context):
            context_data.update(context_flatten(context))
        context_data = self.get_context_data(context, **context_data)
        return render_to_string(self.get_template_name(), context_data)
|
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string

from .compat import context_flatten


class Group(list):
    """
    A simplistic representation of backends that are related and should be
    displayed as one "group" in the backend (e.g. as one box in the sidebar).
    """

    template_name = 'django_backend/_group.html'

    def __init__(self, id, name=None, position=0, template_name=None):
        self.id = id
        if name is None:
            name = pretty_name(id)
        self.template_name = template_name or self.template_name
        self.name = name
        self.position = position
        super(Group, self).__init__()

    @property
    def backends(self):
        return list(self)

    def get_context_data(self, context, **kwargs):
        data = {
            'group': self,
        }
        data.update(kwargs)
        return data

    def get_template_name(self):
        return self.template_name

    def render(self, context):
        context_data = {}
        if isinstance(context, Context):
            context_data.update(context_flatten(context))
        context_data.update(self.get_context_data(context))
        return render_to_string(
            self.get_template_name(),
            self.get_context_data(context))
|
bsd-3-clause
|
Python
|
bda88dfe6e0a2f16f0c3be74a42cf8783aae1d9e
|
Fix to support django v1.7
|
leifdenby/django_enum_js
|
django_enum_js/views.py
|
django_enum_js/views.py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.safestring import mark_safe

from django_enum_js import enum_wrapper


def enums_js(request):
    enums = enum_wrapper.get_json_formatted_enums()
    return render_to_response('django_enum_js/enums_js.tpl', { 'enums': mark_safe(enums), }, context_instance=RequestContext(request), content_type='application/javascript')
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.safestring import mark_safe

from django_enum_js import enum_wrapper


def enums_js(request):
    enums = enum_wrapper.get_json_formatted_enums()
    return render_to_response('django_enum_js/enums_js.tpl', { 'enums': mark_safe(enums), }, context_instance=RequestContext(request), mimetype='application/javascript')
|
mit
|
Python
|
6a9ed8867ccaab1284ae999d752de92174de399e
|
fix error message rendering failing due to message being a dict
|
pmclanahan/django-celery-email,pmclanahan/django-celery-email,andresriancho/django-celery-email
|
djcelery_email/tasks.py
|
djcelery_email/tasks.py
|
from django.conf import settings
from django.core.mail import get_connection, EmailMessage

from celery.task import task

CONFIG = getattr(settings, 'CELERY_EMAIL_TASK_CONFIG', {})
BACKEND = getattr(settings, 'CELERY_EMAIL_BACKEND',
                  'django.core.mail.backends.smtp.EmailBackend')

TASK_CONFIG = {
    'name': 'djcelery_email_send',
    'ignore_result': True,
}
TASK_CONFIG.update(CONFIG)


def from_dict(messagedict):
    return EmailMessage(**messagedict)


@task(**TASK_CONFIG)
def send_email(message, **kwargs):
    logger = send_email.get_logger()
    conn = get_connection(backend=BACKEND,
                          **kwargs.pop('_backend_init_kwargs', {}))
    try:
        result = conn.send_messages([from_dict(message)])
        logger.debug("Successfully sent email message to %r.", message['to'])
        return result
    except Exception as e:
        # catching all exceptions b/c it could be any number of things
        # depending on the backend
        logger.warning("Failed to send email message to %r, retrying.",
                       message['to'])
        send_email.retry(exc=e)


# backwards compat
SendEmailTask = send_email
|
from django.conf import settings
from django.core.mail import get_connection, EmailMessage

from celery.task import task

CONFIG = getattr(settings, 'CELERY_EMAIL_TASK_CONFIG', {})
BACKEND = getattr(settings, 'CELERY_EMAIL_BACKEND',
                  'django.core.mail.backends.smtp.EmailBackend')

TASK_CONFIG = {
    'name': 'djcelery_email_send',
    'ignore_result': True,
}
TASK_CONFIG.update(CONFIG)


def from_dict(messagedict):
    return EmailMessage(**messagedict)


@task(**TASK_CONFIG)
def send_email(message, **kwargs):
    logger = send_email.get_logger()
    conn = get_connection(backend=BACKEND,
                          **kwargs.pop('_backend_init_kwargs', {}))
    try:
        result = conn.send_messages([from_dict(message)])
        logger.debug("Successfully sent email message to %r.", message['to'])
        return result
    except Exception as e:
        # catching all exceptions b/c it could be any number of things
        # depending on the backend
        logger.warning("Failed to send email message to %r, retrying.",
                       message.to)
        send_email.retry(exc=e)


# backwards compat
SendEmailTask = send_email
|
bsd-3-clause
|
Python
|
81ff4ede4ea6397e6d54020c56cdf8dddcda1485
|
add dg sub-package to sfepy/discrete/setup.py
|
vlukes/sfepy,rc/sfepy,BubuLK/sfepy,rc/sfepy,BubuLK/sfepy,sfepy/sfepy,rc/sfepy,sfepy/sfepy,sfepy/sfepy,BubuLK/sfepy,vlukes/sfepy,vlukes/sfepy
|
sfepy/discrete/setup.py
|
sfepy/discrete/setup.py
|
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    import os.path as op

    auto_name = op.split(op.dirname(__file__))[-1]
    config = Configuration(auto_name, parent_package, top_path)

    subdirs = [
        'common',
        'dg',
        'fem',
        'iga',
        'structural',
    ]
    for subdir in subdirs:
        config.add_subpackage(subdir)

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    import os.path as op

    auto_name = op.split(op.dirname(__file__))[-1]
    config = Configuration(auto_name, parent_package, top_path)

    subdirs = [
        'common',
        'fem',
        'iga',
        'structural',
    ]
    for subdir in subdirs:
        config.add_subpackage(subdir)

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
Python
|