commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
8cce0f9789673c9b4033fa2685b6da24d8d53147
|
Bump vers for OE POS taggers
|
LBenzahia/cltk,TylerKirby/cltk,TylerKirby/cltk,LBenzahia/cltk,kylepjohnson/cltk,diyclassics/cltk,D-K-E/cltk,cltk/cltk
|
setup.py
|
setup.py
|
"""Config for PyPI."""
from setuptools import find_packages
from setuptools import setup
setup(
author='Kyle P. Johnson',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: Chinese (Traditional)',
'Natural Language :: English',
'Natural Language :: Greek',
'Natural Language :: Latin',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Linguistic',
],
description='NLP for the ancient world',
install_requires=['gitpython',
'nltk',
'python-crfsuite',
'pyuca',
'pyyaml',
'regex',
'whoosh'],
keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan', 'arabic', "germanic"],
license='MIT',
long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301,
name='cltk',
packages=find_packages(),
url='https://github.com/cltk/cltk',
version='0.1.94',
zip_safe=True,
test_suite='cltk.tests.test_cltk',
)
|
"""Config for PyPI."""
from setuptools import find_packages
from setuptools import setup
setup(
author='Kyle P. Johnson',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: Chinese (Traditional)',
'Natural Language :: English',
'Natural Language :: Greek',
'Natural Language :: Latin',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Text Processing',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Linguistic',
],
description='NLP for the ancient world',
install_requires=['gitpython',
'nltk',
'python-crfsuite',
'pyuca',
'pyyaml',
'regex',
'whoosh'],
keywords=['nlp', 'nltk', 'greek', 'latin', 'chinese', 'sanskrit', 'pali', 'tibetan', 'arabic', "germanic"],
license='MIT',
long_description='The Classical Language Toolkit (CLTK) is a framework for natural language processing for Classical languages.', # pylint: disable=C0301,
name='cltk',
packages=find_packages(),
url='https://github.com/cltk/cltk',
version='0.1.93',
zip_safe=True,
test_suite='cltk.tests.test_cltk',
)
|
mit
|
Python
|
b8fb03a8945f1a895c8f5e55f01a6d4219aa745a
|
Add metadata
|
nicholasbishop/pyglsl_parser,nicholasbishop/pyglsl_parser
|
setup.py
|
setup.py
|
#! /usr/bin/env python
# pylint: disable=invalid-name,missing-docstring
from setuptools import setup
from setuptools.extension import Extension
from Cython.Build import cythonize
extensions = [
Extension(
name='pyglsl_parser.parser',
sources=['pyglsl_parser/parser.pyx',
'glsl-parser/ast.cpp',
'glsl-parser/lexer.cpp',
'glsl-parser/parser.cpp',
'glsl-parser/util.cpp'],
language='c++',
)
]
setup(name='pyglsl_parser',
version='0.5.0',
test_suite='test',
url='https://github.com/nicholasbishop/pyglsl_parser',
author='Nicholas Bishop',
author_email='[email protected]',
ext_modules=cythonize(extensions))
|
#! /usr/bin/env python
# pylint: disable=invalid-name,missing-docstring
from setuptools import setup
from setuptools.extension import Extension
from Cython.Build import cythonize
extensions = [
Extension(
name='pyglsl_parser.parser',
sources=['pyglsl_parser/parser.pyx',
'glsl-parser/ast.cpp',
'glsl-parser/lexer.cpp',
'glsl-parser/parser.cpp',
'glsl-parser/util.cpp'],
language='c++',
)
]
setup(name='pyglsl_parser',
version='0.5.0',
test_suite='test',
ext_modules=cythonize(extensions))
|
mit
|
Python
|
a3a51b905ec92b5f11c5dec5d9eed455c992fb86
|
Update version of cluster package pulled in.
|
getwarped/powershift-cli,getwarped/powershift-cli
|
setup.py
|
setup.py
|
import sys
import os
from setuptools import setup
long_description = open('README.rst').read()
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
setup_kwargs = dict(
name='powershift-cli',
version='1.1.8',
description='Pluggable command line client for OpenShift.',
long_description=long_description,
url='https://github.com/getwarped/powershift-cli',
author='Graham Dumpleton',
author_email='[email protected]',
license='BSD',
classifiers=classifiers,
keywords='openshift kubernetes',
packages=['powershift', 'powershift.cli'],
package_dir={'powershift': 'src/powershift'},
package_data={'powershift.cli': ['completion-bash.sh']},
entry_points = {'console_scripts':['powershift = powershift.cli:main']},
install_requires=['click'],
extras_require={'all':['powershift-cluster>=1.1.5'],
'cluster':['powershift-cluster>=1.1.5']},
)
setup(**setup_kwargs)
|
import sys
import os
from setuptools import setup
long_description = open('README.rst').read()
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
setup_kwargs = dict(
name='powershift-cli',
version='1.1.8',
description='Pluggable command line client for OpenShift.',
long_description=long_description,
url='https://github.com/getwarped/powershift-cli',
author='Graham Dumpleton',
author_email='[email protected]',
license='BSD',
classifiers=classifiers,
keywords='openshift kubernetes',
packages=['powershift', 'powershift.cli'],
package_dir={'powershift': 'src/powershift'},
package_data={'powershift.cli': ['completion-bash.sh']},
entry_points = {'console_scripts':['powershift = powershift.cli:main']},
install_requires=['click'],
extras_require={'all':['powershift-cluster>=1.1.1'],
'cluster':['powershift-cluster>=1.1.1']},
)
setup(**setup_kwargs)
|
bsd-2-clause
|
Python
|
27e2b252db2e2372d98996b3768683cfcd5b8bc2
|
bump version number
|
codeforamerica/w-drive-extractor
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# convert .md to .rst; from here: http://stackoverflow.com/questions/10718767/have-the-same-readme-both-in-markdown-and-restructuredtext
try:
    from pypandoc import convert
    long_description = convert(path.join(here, 'README.md'), 'rst')
except ImportError:
    print("warning: pypandoc module not found, could not convert Markdown to RST")
    long_description = ''
with open('requirements/pkg.txt') as f:
    required = f.read().splitlines()
setup(
name='wextractor',
url='https://github.com/codeforamerica/w-drive-extractor',
license='MIT',
version='0.1.dev3',
author='Ben Smithgall',
author_email='[email protected]',
description='Extract flat data and load it as relational data',
long_description=long_description,
packages=find_packages(),
install_requires=required,
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# convert .md to .rst; from here: http://stackoverflow.com/questions/10718767/have-the-same-readme-both-in-markdown-and-restructuredtext
try:
    from pypandoc import convert
    long_description = convert(path.join(here, 'README.md'), 'rst')
except ImportError:
    print("warning: pypandoc module not found, could not convert Markdown to RST")
    long_description = ''
with open('requirements/pkg.txt') as f:
    required = f.read().splitlines()
setup(
name='wextractor',
url='https://github.com/codeforamerica/w-drive-extractor',
license='MIT',
version='0.1.dev2',
author='Ben Smithgall',
author_email='[email protected]',
description='Extract flat data and load it as relational data',
long_description=long_description,
packages=find_packages(),
install_requires=required,
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
mit
|
Python
|
5550b2feb24ee4f8f527182c115018b170f0d2cc
|
update version in setup file
|
fritzprix/jconfigpy,fritzprix/jconfigpy
|
setup.py
|
setup.py
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "jconfigpy",
version = "0.0.5",
author = "fritzprix",
author_email = "[email protected]",
description = ("configuration utility which easily integrated into project using gnu make as build system"),
license = "BSD",
keywords = "configuration utility make",
url = "http://github.com/fritzprix/jconfigpy",
download_url = "http://github.com/fritzprix/jconfigpy/archive/0.0.5.tar.gz",
packages=['jconfigpy'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "jconfigpy",
version = "0.0.4",
author = "fritzprix",
author_email = "[email protected]",
description = ("configuration utility which easily integrated into project using gnu make as build system"),
license = "BSD",
keywords = "configuration utility make",
url = "http://github.com/fritzprix/jconfigpy",
download_url = "http://github.com/fritzprix/jconfigpy/archive/0.0.4.tar.gz",
packages=['jconfigpy'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
bsd-2-clause
|
Python
|
f8babfd27fdc8c618fe8827d8a2cc4cf3153a1ad
|
support form Flask-WTF bigger then 0.8, aim for python 3
|
qpxu007/Flask-AppBuilder,rpiotti/Flask-AppBuilder,qpxu007/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,dpgaspar/Flask-AppBuilder,dpgaspar/Flask-AppBuilder,rpiotti/Flask-AppBuilder,qpxu007/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,qpxu007/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,dpgaspar/Flask-AppBuilder,rpiotti/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,rpiotti/Flask-AppBuilder,dpgaspar/Flask-AppBuilder
|
setup.py
|
setup.py
|
import os
import sys
from setuptools import setup, find_packages
def fpath(name):
    return os.path.join(os.path.dirname(__file__), name)
def read(fname):
    return open(fpath(fname)).read()
def desc():
    return read('README.rst')
setup(
name='Flask-AppBuilder',
version='0.8.1',
url='https://github.com/dpgaspar/flask-appbuilder/',
license='BSD',
author='Daniel Vaz Gaspar',
author_email='[email protected]',
description='Simple and rapid Application builder, includes detailed security, auto form generation, google charts and much more.',
long_description=desc(),
packages=find_packages(),
package_data={'': ['LICENSE']},
scripts=['bin/hash_db_password.py'],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.10',
'Flask-BabelPkg>=0.9.4',
'Flask-Login>=0.2.0',
'Flask-OpenID>=1.1.0',
'Flask-SQLAlchemy>=0.16',
'Flask-WTF>=0.9.1',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite=''
)
|
import os
import sys
from setuptools import setup, find_packages
def fpath(name):
    return os.path.join(os.path.dirname(__file__), name)
def read(fname):
    return open(fpath(fname)).read()
def desc():
    return read('README.rst')
setup(
name='Flask-AppBuilder',
version='0.8.1',
url='https://github.com/dpgaspar/flask-appbuilder/',
license='BSD',
author='Daniel Vaz Gaspar',
author_email='[email protected]',
description='Simple and rapid Application builder, includes detailed security, auto form generation, google charts and much more.',
long_description=desc(),
packages=find_packages(),
package_data={'': ['LICENSE']},
scripts=['bin/hash_db_password.py'],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.10',
'Flask-BabelPkg>=0.9.4',
'Flask-Login>=0.2.0',
'Flask-OpenID>=1.1.0',
'Flask-SQLAlchemy>=0.16',
'Flask-WTF=>0.9.1',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite=''
)
|
bsd-3-clause
|
Python
|
b0ec026a6f75f03073de6de3fb2f7bfaff7b6d70
|
Update version number to 0.11
|
google/empirical_calibration
|
setup.py
|
setup.py
|
# Copyright 2019 The Empirical Calibration Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for empirical calibration package."""
from setuptools import find_packages
from setuptools import setup
setup(
name='empirical_calibration',
version='0.11',
description='Package for empirical calibration',
author='Google LLC',
author_email='[email protected]',
url='https://github.com/google/empirical_calibration',
license='Apache 2.0',
packages=find_packages(),
install_requires=[
'absl-py',
'numpy >= 1.11.1',
'pandas',
'patsy',
'scipy',
'six',
'sklearn',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Mathematics',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
# Copyright 2019 The Empirical Calibration Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for empirical calibration package."""
from setuptools import find_packages
from setuptools import setup
setup(
name='empirical_calibration',
version='0.1',
description='Package for empirical calibration',
author='Google LLC',
author_email='[email protected]',
url='https://github.com/google/empirical_calibration',
license='Apache 2.0',
packages=find_packages(),
install_requires=[
'absl-py',
'numpy >= 1.11.1',
'pandas',
'patsy',
'scipy',
'six',
'sklearn',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Mathematics',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
apache-2.0
|
Python
|
327b4a656347951b1b6bed1f04465dffc5011ec1
|
Update setup.py
|
rfverbruggen/rachiopy
|
setup.py
|
setup.py
|
"""Rachiopy setup script."""
from setuptools import find_packages, setup
VERSION = "1.0.0"
GITHUB_USERNAME = "rfverbruggen"
GITHUB_REPOSITORY = "rachiopy"
GITHUB_PATH = f"{GITHUB_USERNAME}/{GITHUB_REPOSITORY}"
GITHUB_URL = f"https://github.com/{GITHUB_PATH}"
DOWNLOAD_URL = f"{GITHUB_URL}/archive/{VERSION}.tar.gz"
PROJECT_URLS = {"Bug Reports": f"{GITHUB_URL}/issues"}
PACKAGES = find_packages(exclude=["tests", "tests.*"])
setup(
name="RachioPy",
version=VERSION,
author="Robbert Verbruggen",
author_email="[email protected]",
packages=PACKAGES,
install_requires=["requests"],
url=GITHUB_URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
license="MIT",
description="A Python module for the Rachio API.",
platforms="Cross Platform",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Software Development",
],
)
|
"""Rachiopy setup script."""
from setuptools import find_packages, setup
from datetime import datetime
VERSION = "1.0.0"
GITHUB_USERNAME = "rfverbruggen"
GITHUB_REPOSITORY = "rachiopy"
GITHUB_PATH = f"{GITHUB_USERNAME}/{GITHUB_REPOSITORY}"
GITHUB_URL = f"https://github.com/{GITHUB_PATH}"
DOWNLOAD_URL = f"{GITHUB_URL}/archive/{VERSION}.tar.gz"
PROJECT_URLS = {"Bug Reports": f"{GITHUB_URL}/issues"}
PACKAGES = find_packages(exclude=["tests", "tests.*"])
setup(
name="RachioPy",
version=VERSION,
author="Robbert Verbruggen",
author_email="[email protected]",
packages=PACKAGES,
install_requires=["requests"],
url=GITHUB_URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
license="MIT",
description="A Python module for the Rachio API.",
platforms="Cross Platform",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Software Development",
],
)
|
mit
|
Python
|
f91007560497d370b7451d212ad91dd23b47c892
|
Create version 0.3.22
|
Duke-GCB/DukeDSClient,Duke-GCB/DukeDSClient
|
setup.py
|
setup.py
|
from setuptools import setup
setup(name='DukeDSClient',
version='0.3.22',
description='Command line tool(ddsclient) to upload/manage projects on the duke-data-service.',
url='https://github.com/Duke-GCB/DukeDSClient',
keywords='duke dds dukedataservice',
author='John Bradley',
license='MIT',
packages=['ddsc','ddsc.core'],
install_requires=[
'requests',
'PyYAML',
'pytz',
'future',
'six',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
entry_points={
'console_scripts': [
'ddsclient = ddsc.__main__:main'
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
from setuptools import setup
setup(name='DukeDSClient',
version='0.3.21',
description='Command line tool(ddsclient) to upload/manage projects on the duke-data-service.',
url='https://github.com/Duke-GCB/DukeDSClient',
keywords='duke dds dukedataservice',
author='John Bradley',
license='MIT',
packages=['ddsc','ddsc.core'],
install_requires=[
'requests',
'PyYAML',
'pytz',
'future',
'six',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
entry_points={
'console_scripts': [
'ddsclient = ddsc.__main__:main'
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
mit
|
Python
|
4c15430e02c12f190b95791681f20f1a36a016d9
|
Add Python 3.4 classifier
|
mnaberez/py65,mkeller0815/py65
|
setup.py
|
setup.py
|
__version__ = '0.21-dev'
import os
import sys
py_version = sys.version_info[:2]
PY3 = py_version[0] == 3
if PY3:
    if py_version < (3, 2):
        raise RuntimeError('On Python 3, Py65 requires Python 3.2 or later')
else:
    if py_version < (2, 6):
        raise RuntimeError('On Python 2, Py65 requires Python 2.6 or later')
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
DESC = """\
Simulate 6502-based microcomputer systems in Python."""
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Assembly',
'Topic :: Software Development :: Assemblers',
'Topic :: Software Development :: Disassemblers',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Embedded Systems',
'Topic :: Software Development :: Interpreters',
'Topic :: System :: Emulators',
'Topic :: System :: Hardware'
]
setup(
name='py65',
version=__version__,
license='License :: OSI Approved :: BSD License',
url='https://github.com/mnaberez/py65',
description='6502 microprocessor simulation package',
long_description=DESC,
classifiers=CLASSIFIERS,
author="Mike Naberezny",
author_email="[email protected]",
maintainer="Mike Naberezny",
maintainer_email="[email protected]",
packages=find_packages(),
install_requires=[],
extras_require={},
tests_require=[],
include_package_data=True,
zip_safe=False,
namespace_packages=['py65'],
test_suite="py65.tests",
entry_points={
'console_scripts': [
'py65mon = py65.monitor:main',
],
},
)
|
__version__ = '0.21-dev'
import os
import sys
py_version = sys.version_info[:2]
PY3 = py_version[0] == 3
if PY3:
    if py_version < (3, 2):
        raise RuntimeError('On Python 3, Py65 requires Python 3.2 or later')
else:
    if py_version < (2, 6):
        raise RuntimeError('On Python 2, Py65 requires Python 2.6 or later')
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
DESC = """\
Simulate 6502-based microcomputer systems in Python."""
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: POSIX',
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
'Programming Language :: Assembly',
'Topic :: Software Development :: Assemblers',
'Topic :: Software Development :: Disassemblers',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Embedded Systems',
'Topic :: Software Development :: Interpreters',
'Topic :: System :: Emulators',
'Topic :: System :: Hardware'
]
setup(
name='py65',
version=__version__,
license='License :: OSI Approved :: BSD License',
url='https://github.com/mnaberez/py65',
description='6502 microprocessor simulation package',
long_description=DESC,
classifiers=CLASSIFIERS,
author="Mike Naberezny",
author_email="[email protected]",
maintainer="Mike Naberezny",
maintainer_email="[email protected]",
packages=find_packages(),
install_requires=[],
extras_require={},
tests_require=[],
include_package_data=True,
zip_safe=False,
namespace_packages=['py65'],
test_suite="py65.tests",
entry_points={
'console_scripts': [
'py65mon = py65.monitor:main',
],
},
)
|
bsd-3-clause
|
Python
|
f31d7a224d0c5a3c2b1d2b16fb4bc74edefacf24
|
Bump version string
|
madisongh/autobuilder
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name='autobuilder',
version='0.6.7',
packages=find_packages(),
license='MIT',
author='Matt Madison',
author_email='[email protected]',
install_requires=['buildbot>=0.8.12m1']
)
|
from setuptools import setup, find_packages
setup(
name='autobuilder',
version='0.6.6',
packages=find_packages(),
license='MIT',
author='Matt Madison',
author_email='[email protected]',
install_requires=['buildbot>=0.8.12m1']
)
|
mit
|
Python
|
014acee66b9cff26cb8f02f65c071f78a0a77e19
|
Bump version to 0.9.4pbs22
|
pbs/cmsplugin-filer,pbs/cmsplugin-filer,pbs/cmsplugin-filer,pbs/cmsplugin-filer
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
dependency_links = [
'http://github.com/pbs/django-cms/tarball/support/2.3.x#egg=django-cms-2.3.5pbs.X.dev',
'http://github.com/pbs/django-filer/tarball/master_pbs#egg=django-filer-0.9pbs.X.dev',
]
setup(
name = "cmsplugin-filer",
version = "0.9.4pbs22",
url = 'http://github.com/stefanfoulis/cmsplugin-filer',
license = 'BSD',
description = "django-cms plugins for django-filer",
long_description = read('README.rst'),
author = 'Stefan Foulis',
author_email = '[email protected]',
packages = find_packages(),
#package_dir = {'':'src'},
dependency_links=dependency_links,
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
install_requires=[
"django-cms>=2.3.5pbs, <2.3.6",
"django-filer >= 0.9pbs, <0.9.1"
],
include_package_data=True,
zip_safe = False,
)
|
import os
from setuptools import setup, find_packages
def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
dependency_links = [
'http://github.com/pbs/django-cms/tarball/support/2.3.x#egg=django-cms-2.3.5pbs.X.dev',
'http://github.com/pbs/django-filer/tarball/master_pbs#egg=django-filer-0.9pbs.X.dev',
]
setup(
name = "cmsplugin-filer",
version = "0.9.4pbs21",
url = 'http://github.com/stefanfoulis/cmsplugin-filer',
license = 'BSD',
description = "django-cms plugins for django-filer",
long_description = read('README.rst'),
author = 'Stefan Foulis',
author_email = '[email protected]',
packages = find_packages(),
#package_dir = {'':'src'},
dependency_links=dependency_links,
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
install_requires=[
"django-cms>=2.3.5pbs, <2.3.6",
"django-filer >= 0.9pbs, <0.9.1"
],
include_package_data=True,
zip_safe = False,
)
|
bsd-3-clause
|
Python
|
c53b8a0a06596d9803a8df92e909d4ad28169dda
|
Update the cookiedomain requirement to 0.6, which has httponly.
|
FND/tiddlyspace,TiddlySpace/tiddlyspace,TiddlySpace/tiddlyspace,FND/tiddlyspace,FND/tiddlyspace,TiddlySpace/tiddlyspace
|
setup.py
|
setup.py
|
AUTHOR = 'Osmosoft'
AUTHOR_EMAIL = '[email protected]'
NAME = 'tiddlywebplugins.tiddlyspace'
DESCRIPTION = 'A discoursive social model for TiddlyWiki'
VERSION = '0.2.2' # N.B.: duplicate of tiddlywebplugins.tiddlyspace.__init__
import os
from setuptools import setup, find_packages
setup(
namespace_packages = ['tiddlywebplugins'],
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = 'http://pypi.python.org/pypi/%s' % NAME,
platforms = 'Posix; MacOS X; Windows',
packages = find_packages(exclude=['test']),
scripts = ['tiddlyspace'],
install_requires = [
'setuptools',
'tiddlyweb>=1.1.dev5',
'tiddlywebwiki>=0.32',
'tiddlywebplugins.utils>=1.0',
'tiddlywebplugins.logout>=0.6',
'tiddlywebplugins.virtualhosting',
'tiddlywebplugins.socialusers>=0.3',
'tiddlywebplugins.magicuser>=0.3',
'tiddlywebplugins.openid2>=0.5',
'tiddlywebplugins.cookiedomain>=0.6',
'tiddlywebplugins.mselect',
'tiddlywebplugins.prettyerror>=0.8',
'tiddlywebplugins.pathinfohack>=0.8',
'tiddlywebplugins.form==dev',
'tiddlywebplugins.reflector>=0.2',
'tiddlywebplugins.whoosher>=0.9.10',
'tiddlywebplugins.atom>=1.2.0',
],
include_package_data = True,
zip_safe = False
)
|
AUTHOR = 'Osmosoft'
AUTHOR_EMAIL = '[email protected]'
NAME = 'tiddlywebplugins.tiddlyspace'
DESCRIPTION = 'A discoursive social model for TiddlyWiki'
VERSION = '0.2.2' # N.B.: duplicate of tiddlywebplugins.tiddlyspace.__init__
import os
from setuptools import setup, find_packages
setup(
namespace_packages = ['tiddlywebplugins'],
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = 'http://pypi.python.org/pypi/%s' % NAME,
platforms = 'Posix; MacOS X; Windows',
packages = find_packages(exclude=['test']),
scripts = ['tiddlyspace'],
install_requires = [
'setuptools',
'tiddlyweb>=1.1.dev5',
'tiddlywebwiki>=0.32',
'tiddlywebplugins.utils>=1.0',
'tiddlywebplugins.logout>=0.6',
'tiddlywebplugins.virtualhosting',
'tiddlywebplugins.socialusers>=0.3',
'tiddlywebplugins.magicuser>=0.3',
'tiddlywebplugins.openid2>=0.5',
'tiddlywebplugins.cookiedomain>=0.3',
'tiddlywebplugins.mselect',
'tiddlywebplugins.prettyerror>=0.8',
'tiddlywebplugins.pathinfohack>=0.8',
'tiddlywebplugins.form==dev',
'tiddlywebplugins.reflector>=0.2',
'tiddlywebplugins.whoosher>=0.9.10',
'tiddlywebplugins.atom>=1.2.0',
],
include_package_data = True,
zip_safe = False
)
|
bsd-3-clause
|
Python
|
07656b928c506c0f9d602a5c4195793b5e449693
|
test fix
|
GiovanniMCMXCIX/async-connect.py
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import sys
import re
with open('requirements.txt') as f:
    requirements = f.readlines()
with open('async_connect/__init__.py') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
with open('README.rst') as f:
    readme = f.read()
if sys.version_info[1] == 6:
    test_require = ['uvloop>=0.8.0']
else:
    test_require = []
setup(name='async-connect.py',
author='GiovanniMCMXCIX',
author_email='[email protected]',
url='https://github.com/GiovanniMCMXCIX/async-connect.py',
version=version,
packages=find_packages(),
license='MIT',
description='Asynchronous version of connect.py',
long_description=readme,
include_package_data=True,
install_requires=requirements,
extras_require={'performance': ['uvloop>=0.8.0']},
test_suite='tests',
tests_require=test_require,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
|
from setuptools import setup, find_packages
import re
with open('requirements.txt') as f:
    requirements = f.readlines()
with open('async_connect/__init__.py') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
with open('README.rst') as f:
    readme = f.read()
setup(name='async-connect.py',
author='GiovanniMCMXCIX',
author_email='[email protected]',
url='https://github.com/GiovanniMCMXCIX/async-connect.py',
version=version,
packages=find_packages(),
license='MIT',
description='Asynchronous version of connect.py',
long_description=readme,
include_package_data=True,
install_requires=requirements,
extras_require={'performance': ['uvloop>=0.8.0']},
test_suite='tests',
tests_require=['uvloop>=0.8.0'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
]
)
|
mit
|
Python
|
ca62972886bcaef6375e78e32514883183f50929
|
bump version UP! Align to pypi
|
b3nab/instapy-cli,b3nab/instapy-cli
|
setup.py
|
setup.py
|
import os
import codecs
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """
    Build an absolute path from *parts* and and return the contents of the
    resulting file. Assume UTF-8 encoding.
    """
    with codecs.open(os.path.join(here, *parts), "rb", "utf-8") as f:
        return f.read()
setup(
name='instapy-cli',
version='0.0.2',
description='Python library and cli used to upload photo on Instagram. W/o a phone!',
long_description=read('README.rst'),
classifiers=[
# How mature is this project?
'Development Status :: 5 - Production/Stable',
# For who your project is intended for and its usage
'Intended Audience :: Developers',
'Environment :: Console',
# Project's License
'License :: OSI Approved :: MIT License',
# Python versions instapy-cli support here
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='instagram private upload api instapy instapy-cli instapycli',
author='Benedetto Abbenanti',
author_email='[email protected]',
url='https://github.com/b3nab/instapy-cli',
license='MIT',
packages=['instapy_cli'],
install_requires=[ # external packages as dependencies
'requests>=2',
'emoji'
],
entry_points={
'console_scripts': [
'instapy=instapy_cli.__main__:main'
]
},
# python_requires='>=2.7'
)
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
setup(
name='instapy-cli',
version='0.0.1',
description='Python library and cli used to upload photo on Instagram. W/o a phone!',
long_description=open('README.md.rst').read(),
classifiers=[
# How mature is this project?
'Development Status :: 5 - Production/Stable',
# For who your project is intended for and its usage
'Intended Audience :: Developers',
'Environment :: Console',
# Project's License
'License :: OSI Approved :: MIT License',
# Python versions instapy-cli support here
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='instagram private upload api instapy instapy-cli instapycli',
author='Benedetto Abbenanti',
author_email='[email protected]',
url='https://github.com/b3nab/instapy-cli',
license='MIT',
packages=['instapy_cli'],
install_requires=[ # external packages as dependencies
'requests>=2',
'emoji'
],
entry_points={
'console_scripts': [
'instapy=instapy_cli.__main__:main'
]
},
# python_requires='>=2.7'
)
|
mit
|
Python
|
47dd0d3f9d5d150adad7097bb2a24964f3c732b2
|
add pytest dep
|
williballenthin/python-evtx
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import setuptools
long_description = """python-evtx is a pure Python parser for \
Windows Event Log files (those with the file extension ".evtx"). \
The module provides programmatic access to the File and Chunk headers, \
record templates, and event entries. For example, you can use \
python-evtx to review the event logs of Windows 7 systems from \
a Mac or Linux workstation. The structure definitions and parsing \
strategies were heavily inspired by the work of Andreas Schuster \
and his Perl implementation "Parse-Evtx"."""
setuptools.setup(
name="python-evtx",
version="0.5.3",
description="Pure Python parser for recent Windows event log files (.evtx).",
long_description=long_description,
author="Willi Ballenthin",
author_email="[email protected]",
url="https://github.com/williballenthin/python-evtx",
license="Apache 2.0 License",
packages=setuptools.find_packages(),
install_requires=[
'six',
'pytest',
'hexdump',
],
scripts=['scripts/evtx_dump.py',
'scripts/evtx_dump_chunk_slack.py',
'scripts/evtx_eid_record_numbers.py',
'scripts/evtx_extract_record.py',
'scripts/evtx_filter_records.py',
'scripts/evtx_find_bugs.py',
'scripts/evtx_get_pretty_record.py',
'scripts/evtx_info.py',
'scripts/evtx_record_structure.py',
'scripts/evtx_structure.py',
'scripts/evtx_templates.py',
],
)
|
#!/usr/bin/env python
import setuptools
long_description = """python-evtx is a pure Python parser for \
Windows Event Log files (those with the file extension ".evtx"). \
The module provides programmatic access to the File and Chunk headers, \
record templates, and event entries. For example, you can use \
python-evtx to review the event logs of Windows 7 systems from \
a Mac or Linux workstation. The structure definitions and parsing \
strategies were heavily inspired by the work of Andreas Schuster \
and his Perl implementation "Parse-Evtx"."""
setuptools.setup(
name="python-evtx",
version="0.5.3",
description="Pure Python parser for recent Windows event log files (.evtx).",
long_description=long_description,
author="Willi Ballenthin",
author_email="[email protected]",
url="https://github.com/williballenthin/python-evtx",
license="Apache 2.0 License",
packages=setuptools.find_packages(),
install_requires=['hexdump', 'six'],
scripts=['scripts/evtx_dump.py',
'scripts/evtx_dump_chunk_slack.py',
'scripts/evtx_eid_record_numbers.py',
'scripts/evtx_extract_record.py',
'scripts/evtx_filter_records.py',
'scripts/evtx_find_bugs.py',
'scripts/evtx_get_pretty_record.py',
'scripts/evtx_info.py',
'scripts/evtx_record_structure.py',
'scripts/evtx_structure.py',
'scripts/evtx_templates.py',
],
)
|
apache-2.0
|
Python
|
f3cc4d84fbfcb430b004d730e44653079b3b3490
|
Update version to 1.6
|
zillolo/vsut-python
|
setup.py
|
setup.py
|
import os
from setuptools import setup
def read(file):
    return open(os.path.join(os.path.dirname(__file__), file)).read()
setup(
name="vsut",
version="1.6",
author="Alex Egger",
author_email="[email protected]",
description="A simple unit testing framework for Python 3.4",
license="MIT",
keywords="unit unittest test testing",
url="http://github.com/zillolo/vsut-python",
packages=["vsut"],
scripts=["runner.py"],
entry_points = {"console_scripts" : ["vrun = runner:main"]},
long_description="""For usage information visit:
http://github.com/zillolo/vsut-python
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Testing"]
)
|
import os
from setuptools import setup
def read(file):
    return open(os.path.join(os.path.dirname(__file__), file)).read()
setup(
name="vsut",
version="1.5.5",
author="Alex Egger",
author_email="[email protected]",
description="A simple unit testing framework for Python 3.4",
license="MIT",
keywords="unit unittest test testing",
url="http://github.com/zillolo/vsut-python",
packages=["vsut"],
scripts=["runner.py"],
entry_points = {"console_scripts" : ["vrun = runner:main"]},
long_description="""For usage information visit:
http://github.com/zillolo/vsut-python
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Testing"]
)
|
mit
|
Python
|
5ea089d150a7e6132670ab92521bf763b1e719bf
|
bump version to 0.2
|
srittau/python-htmlgen
|
setup.py
|
setup.py
|
#!/usr/bin/python
from setuptools import setup
setup(
name="htmlgen",
version="0.2",
description="HTML 5 Generator",
author="Sebastian Rittau",
author_email="[email protected]",
url="https://github.com/srittau/python-htmlgen",
packages=["htmlgen", "test_htmlgen"],
depends=["asserts"],
license="MIT",
)
|
#!/usr/bin/python
from setuptools import setup
setup(
name="htmlgen",
version="0.1",
description="HTML 5 Generator",
author="Sebastian Rittau",
author_email="[email protected]",
url="https://github.com/srittau/python-htmlgen",
packages=["htmlgen", "test_htmlgen"],
depends=["asserts"],
license="MIT",
)
|
mit
|
Python
|
7cfe670f621ac8ee7723e80c279a9794dc0a1bb9
|
update docker version
|
Jeff-Wang93/vent,cglewis/vent,CyberReboot/vent,CyberReboot/vent,Jeff-Wang93/vent,CyberReboot/vent,cglewis/vent,Jeff-Wang93/vent,cglewis/vent
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='vent',
version='v0.6.9.dev',
packages=['vent', 'vent.core', 'vent.core.file_drop',
'vent.core.rq_worker', 'vent.core.rq_dashboard', 'vent.menus',
'vent.core.network_tap', 'vent.core.network_tap.ncontrol',
'vent.core.rmq_es_connector', 'vent.helpers', 'vent.api'],
install_requires=['docker>=3.5.1', 'npyscreen>=4.10.5', 'pyyaml>=3.13'],
scripts=['bin/vent'],
license='Apache License 2.0',
author='arpit',
author_email='',
maintainer='Charlie Lewis',
maintainer_email='[email protected]',
description=('A library that includes a CLI designed to serve as a'
' platform to collect and analyze data across a flexible set'
' of tools and technologies.'),
keywords='docker containers platform collection analysis tools devops',
url='https://github.com/CyberReboot/vent',
)
|
from setuptools import setup
setup(
name='vent',
version='v0.6.9.dev',
packages=['vent', 'vent.core', 'vent.core.file_drop',
'vent.core.rq_worker', 'vent.core.rq_dashboard', 'vent.menus',
'vent.core.network_tap', 'vent.core.network_tap.ncontrol',
'vent.core.rmq_es_connector', 'vent.helpers', 'vent.api'],
install_requires=['docker>=3.5.0', 'npyscreen>=4.10.5', 'pyyaml>=3.13'],
scripts=['bin/vent'],
license='Apache License 2.0',
author='arpit',
author_email='',
maintainer='Charlie Lewis',
maintainer_email='[email protected]',
description=('A library that includes a CLI designed to serve as a'
' platform to collect and analyze data across a flexible set'
' of tools and technologies.'),
keywords='docker containers platform collection analysis tools devops',
url='https://github.com/CyberReboot/vent',
)
|
apache-2.0
|
Python
|
046f9e35568c4f647780e504e2000fabf0f9937b
|
use __version__ from dynash.py
|
raff/dynash
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from dynash import __version__
SETUP_OPTIONS = dict(
name='dynash',
version=__version__,
description='Command line client for DynamoDB',
long_description = open("README.md").read(),
author='Raffaele Sena',
author_email='[email protected]',
url='https://github.com/raff/dynash',
license = "MIT",
platforms = "Posix; MacOS X; Windows",
py_modules=['dynash'
],
data_files=[('.', ['README.md'])
],
install_requires=['distribute',
'setuptools >= 0.6c11',
'boto >= 2.5.1'
],
entry_points="""
[console_scripts]
dynash=dynash:run_command
"""
)
def do_setup():
    setup(**SETUP_OPTIONS)
if __name__ == '__main__':
    do_setup()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
SETUP_OPTIONS = dict(
name='dynash',
version='0.5.1',
description='Command line client for DynamoDB',
long_description = open("README.md").read(),
author='Raffaele Sena',
author_email='[email protected]',
url='https://github.com/raff/dynash',
license = "MIT",
platforms = "Posix; MacOS X; Windows",
py_modules=['dynash'
],
data_files=[('.', ['README.md'])
],
install_requires=['distribute',
'setuptools >= 0.6c11',
'boto >= 2.5.1'
],
entry_points="""
[console_scripts]
dynash=dynash:run_command
"""
)
def do_setup():
    setup(**SETUP_OPTIONS)
if __name__ == '__main__':
    do_setup()
|
mit
|
Python
|
1f9b2c7836d6738d5a86d93644a04da7729b3688
|
Prepare openprocurement.api 2.4.5.
|
openprocurement/openprocurement.api
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()
requires = [
'barbecue',
'chaussette',
'cornice',
'couchdb-schematics',
'gevent',
'iso8601',
'jsonpatch',
'libnacl',
'pbkdf2',
'pycrypto',
'pyramid_exclog',
'requests',
'rfc6266',
'setuptools',
'tzlocal',
]
test_requires = requires + [
'webtest',
'python-coveralls',
'mock'
]
docs_requires = requires + [
'sphinxcontrib-httpdomain',
]
entry_points = {
'paste.app_factory': [
'main = openprocurement.api.app:main'
],
'openprocurement.api.plugins': [
'api = openprocurement.api.includeme:includeme'
],
'openprocurement.api.migrations': [
'tenders = openprocurement.api.migration:migrate_data'
],
'console_scripts': [
'bootstrap_api_security = openprocurement.api.database:bootstrap_api_security'
]
}
setup(name='openprocurement.api',
version='2.4.5',
description='openprocurement.api',
long_description=README,
classifiers=[
"Framework :: Pylons",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application"
],
keywords="web services",
author='Quintagroup, Ltd.',
author_email='[email protected]',
license='Apache License 2.0',
url='https://github.com/openprocurement/openprocurement.api',
package_dir={'': 'src'},
py_modules=['cgi'],
packages=find_packages('src'),
namespace_packages=['openprocurement'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=test_requires,
extras_require={'test': test_requires, 'docs': docs_requires},
test_suite="openprocurement.api.tests.main.suite",
entry_points=entry_points)
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()
requires = [
'barbecue',
'chaussette',
'cornice',
'couchdb-schematics',
'gevent',
'iso8601',
'jsonpatch',
'libnacl',
'pbkdf2',
'pycrypto',
'pyramid_exclog',
'requests',
'rfc6266',
'setuptools',
'tzlocal',
]
test_requires = requires + [
'webtest',
'python-coveralls',
'mock'
]
docs_requires = requires + [
'sphinxcontrib-httpdomain',
]
entry_points = {
'paste.app_factory': [
'main = openprocurement.api.app:main'
],
'openprocurement.api.plugins': [
'api = openprocurement.api.includeme:includeme'
],
'openprocurement.api.migrations': [
'tenders = openprocurement.api.migration:migrate_data'
],
'console_scripts': [
'bootstrap_api_security = openprocurement.api.database:bootstrap_api_security'
]
}
setup(name='openprocurement.api',
version='2.4.4',
description='openprocurement.api',
long_description=README,
classifiers=[
"Framework :: Pylons",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application"
],
keywords="web services",
author='Quintagroup, Ltd.',
author_email='[email protected]',
license='Apache License 2.0',
url='https://github.com/openprocurement/openprocurement.api',
package_dir={'': 'src'},
py_modules=['cgi'],
packages=find_packages('src'),
namespace_packages=['openprocurement'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=test_requires,
extras_require={'test': test_requires, 'docs': docs_requires},
test_suite="openprocurement.api.tests.main.suite",
entry_points=entry_points)
|
apache-2.0
|
Python
|
cabfe1a7586e5ce313105cab08810f51dae6b479
|
Update setup.py
|
mattharrison/rst2odp,mattharrison/rst2odp
|
setup.py
|
setup.py
|
#from distutils.core import setup
from setuptools import setup
from odplib import meta
setup(name="rst2odp",
version=meta.__version__,
author=meta.__author__,
author_email=meta.__email__,
description="Converter for rst to OpenOffice Impress",
long_description='''Packacking of rst2odp and opdlib from docutils sandbox. odplib is a standalone library for creating odp output from python. rst2odp wraps it for rst users. Now supports Python 3''',
license='MIT',
url='https://github.com/mattharrison/rst2odp',
scripts=["bin/rst2odp", "bin/otptweak", "bin/odpstyles"],
#package_dir={"odplib":"odplib"},
install_requires=['docutils >= 0.10', 'pygments >= 1.6',
'pillow >= 1.7.8', 'lxml >= 3.6.4'],
package_data={'odplib':['data/*.xml']},
packages=['odplib'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Office/Business'
]
)
|
#from distutils.core import setup
from setuptools import setup
from odplib import meta
setup(name="rst2odp",
version=meta.__version__,
author=meta.__author__,
author_email=meta.__email__,
description="Converter for rst to OpenOffice Impress",
long_description='''Packacking of rst2odp and opdlib from docutils sandbox. odplib is a standalone library for creating odp output from python. rst2odp wraps it for rst users. Now supports Python 3''',
license='Apache',
url='https://github.com/mattharrison/rst2odp',
scripts=["bin/rst2odp", "bin/otptweak", "bin/odpstyles"],
#package_dir={"odplib":"odplib"},
install_requires=['docutils >= 0.10', 'pygments >= 1.6',
'pillow >= 1.7.8', 'lxml >= 3.6.4'],
package_data={'odplib':['data/*.xml']},
packages=['odplib'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Office/Business'
]
)
|
mit
|
Python
|
1a7eb2627fbf3bc0ddb5d8fd5b051f3139275590
|
Fix moderation rule
|
hackEns/Jarvis,hackEns/Jarvis
|
Rules/Moderation.py
|
Rules/Moderation.py
|
from ._shared import *
class Moderation(Rule):
    """Handles message to moderate listing"""
    def __init__(self, bot):
        self.bot = bot
    def __call__(self, serv, author, args):
        """Handles message to moderate listing"""
        if not self.bot.has_admin_rights(serv, author):
            return
        if len(args) > 1:
            liste = args[1].split("@")[0]
            query = ("SELECT id, subject, author, liste FROM moderation " +
                     "WHERE liste=%s AND moderated=0 ORDER BY date DESC")
            values = (liste,)
            message = ("Messages en attente de modération " +
                       "pour la liste " + liste + " :")
        else:
            query = ("SELECT id, subject, author, liste FROM moderation " +
                     "WHERE moderated=0 ORDER BY date DESC")
            values = ()
            message = "Messages en attente de modération :"
        try:
            bdd = self.bot.mysql_connect(serv)
            assert(bdd is not None)
        except AssertionError:
            return
        bdd_cursor = bdd.cursor()
        bdd_cursor.execute(query, values)
        if bdd_cursor.rowcount <= 0:
            self.bot.ans(serv,
                         author,
                         "Aucun message en attente de modération.")
            return
        self.bot.ans(serv, author, message)
        for (ident, subject, author, liste) in bdd_cursor:
            self.bot.say(serv, "["+liste+"] : « "+subject+" » par "+author)
        bdd_cursor.close()
        bdd.close()
    def close(self):
        pass
|
from ._shared import *
class Moderation(Rule):
    """Handles message to moderate listing"""
    def __init__(self, bot):
        self.bot = bot
    def __call__(self, serv, author, args):
        """Handles message to moderate listing"""
        if not self.bot.has_admin_rights(serv, author):
            return
        if len(args) > 1:
            liste = args[1].split("@")[0]
            query = ("SELECT id, subject, author, liste FROM moderation " +
                     "WHERE liste=%s AND moderated=0 ORDER BY date DESC")
            values = (liste,)
            message = ("Messages en attente de modération " +
                       "pour la liste " + liste + " :")
        else:
            query = ("SELECT id, subject, author, liste FROM moderation " +
                     "WHERE moderated=0 ORDER BY date DESC")
            values = ()
            message = "Messages en attente de modération :"
        try:
            bdd = self.bot.mysql_connect(serv)
            assert(bdd is not None)
        except AssertionError:
            return
        bdd_cursor = bdd.cursor()
        bdd_cursor.execute(query, values)
        if bdd_cursor.rowcount <= 0:
            self.ans(serv,
                     author,
                     "Aucun message en attente de modération.")
            return
        self.ans(serv, author, message)
        for (ident, subject, author, liste) in bdd_cursor:
            self.say(serv, "["+liste+"] : « "+subject+" » par "+author)
        bdd_cursor.close()
        bdd.close()
    def close(self):
        pass
|
mit
|
Python
|
90c82f0936addeb4469db2c42c1cd48713e7f3cf
|
Switch from bold to red highlighting.
|
adlr/wash-sale-calculator
|
progress_logger.py
|
progress_logger.py
|
# Copyright Google
# BSD License
import copy
import wash
# from http://stackoverflow.com/questions/8924173/how-do-i-print-bold-text-in-python
class color:
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
class TermLogger(object):
    def print_progress(self, lots, text, red_lots):
        lots = copy.copy(lots) # so I can re-sort non-destructively
        print text
        lots.sort(cmp=wash.cmp_by_buy_date)
        red_ids = [id(lot) for lot in red_lots]
        for lot in lots:
            header = ''
            footer = ''
            if id(lot) in red_ids:
                header = color.RED
                footer = color.END
            print header + str(lot) + footer
        raw_input('hit enter>')
class NullLogger(object):
    def print_progress(self, lots, text, red_lots):
        pass
|
# Copyright Google
# BSD License
import copy
import wash
# from http://stackoverflow.com/questions/8924173/how-do-i-print-bold-text-in-python
class color:
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
class TermLogger(object):
    def print_progress(self, lots, text, bold_lots):
        lots = copy.copy(lots) # so I can re-sort non-destructively
        print text
        lots.sort(cmp=wash.cmp_by_buy_date)
        bold_ids = [id(lot) for lot in bold_lots]
        for lot in lots:
            header = ''
            footer = ''
            if id(lot) in bold_ids:
                header = color.BOLD
                footer = color.END
            print header + str(lot) + footer
        raw_input('hit enter>')
class NullLogger(object):
    def print_progress(self, lots, text, bold_lots):
        pass
|
bsd-2-clause
|
Python
|
a25558ef0f1a902e948424bc0a3e137cc4d0f569
|
implement programming via remote gdb session
|
chrism333/xpcc,chrism333/xpcc,dergraaf/xpcc,dergraaf/xpcc,dergraaf/xpcc,dergraaf/xpcc,chrism333/xpcc,chrism333/xpcc
|
scons/site_tools/openocd_remote.py
|
scons/site_tools/openocd_remote.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014, Roboterclub Aachen e.V.
# All Rights Reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
import platform
from SCons.Script import *
# -----------------------------------------------------------------------------
# Copy the hex file to the remote target as /tmp/openocd.hex
# Then use telnet interface of openocd to remotely control openocd to flash
# hex file to connected target.
# openocd must be running on target at port 4444
def openocd_remote_run(env, source, alias='openocd_remote_run'):
    if platform.system() == "Windows":
        print "Not supported under windows"
        exit(1)
    else:
        commands = ["init", "reset halt", "flash write_image erase /tmp/openocd.hex", "reset run"]
        action = Action("scp $SOURCE $OPENOCD_REMOTE_USER@$OPENOCD_REMOTE_HOST:/tmp/openocd.hex; echo %s | nc $OPENOCD_REMOTE_HOST 4444" % ' '.join(['"%s;"' % c for c in commands]),
                        cmdstr="$OPENOCD_COMSTR")
        return env.AlwaysBuild(env.Alias(alias, source, action))
# -----------------------------------------------------------------------------
# Program elf file via a remote gdb session
def gdb_remote_program(env, source, alias='gdb_remote_program'):
    if platform.system() == "Windows":
        print "Not supported under windows"
        exit(1)
    else:
        gdb = "arm-none-eabi-gdb"
        cmd = [gdb, '-q',
               '-ex "target remote $OPENOCD_REMOTE_HOST:3333"',
               '-ex "load"',
               '-ex "monitor reset"',
               '-ex "disconnect"',
               '-ex "quit"',
               '$SOURCE']
        action = Action(' '.join(cmd))
        return env.AlwaysBuild(env.Alias(alias, source, action))
# -----------------------------------------------------------------------------
def generate(env, **kw):
    # build messages
    if not ARGUMENTS.get('verbose'):
        env['OPENOCD_COMSTR'] = "OpenOCD remote: program $SOURCE"
    env['OPENOCD'] = 'openocd'
    env.AddMethod(openocd_remote_run, 'OpenOcdRemote')
    env.AddMethod(gdb_remote_program, 'GdbRemoteProgram')
def exists(env):
    return env.Detect('openocd_remote')
|
#!/usr/bin/env python
#
# Copyright (c) 2014, Roboterclub Aachen e.V.
# All Rights Reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
import platform
from SCons.Script import *
# -----------------------------------------------------------------------------
# Copy the hex file to the remote target as /tmp/openocd.hex
# Then use telnet interface of openocd to remotely control openocd to flash
# hex file to connected target.
# openocd must be running on target at port 4444
def openocd_remote_run(env, source, alias='openocd_remote_run'):
if platform.system() == "Windows":
print "Not supported under windows"
exit(1)
else:
commands = ["init", "reset halt", "flash write_image erase /tmp/openocd.hex", "reset run"]
action = Action("scp $SOURCE $OPENOCD_REMOTE_USER@$OPENOCD_REMOTE_HOST:/tmp/openocd.hex; echo %s | nc $OPENOCD_REMOTE_HOST 4444" % ' '.join(['"%s;"' % c for c in commands]),
cmdstr="$OPENOCD_COMSTR")
return env.AlwaysBuild(env.Alias(alias, source, action))
# -----------------------------------------------------------------------------
def generate(env, **kw):
# build messages
if not ARGUMENTS.get('verbose'):
env['OPENOCD_COMSTR'] = "OpenOCD remote: program $SOURCE"
env['OPENOCD'] = 'openocd'
env.AddMethod(openocd_remote_run, 'OpenOcdRemote')
def exists(env):
return env.Detect('openocd_remote')
|
bsd-3-clause
|
Python
|
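For reference, a hedged standalone sketch of the same remote-gdb programming sequence driven with plain subprocess instead of SCons; the host and ELF path are placeholders:
import subprocess
def gdb_remote_program(elf_path, host='192.168.0.10', port=3333):
    # mirrors the -ex command sequence built by the SCons tool above
    cmd = ['arm-none-eabi-gdb', '-q',
           '-ex', 'target remote {}:{}'.format(host, port),
           '-ex', 'load',
           '-ex', 'monitor reset',
           '-ex', 'disconnect',
           '-ex', 'quit',
           elf_path]
    subprocess.check_call(cmd)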
c3f844cfe03a23a5d6207ba99b5dc6abac9b94a1
|
Add unit test.
|
wwitzel3/awx,snahelou/awx,snahelou/awx,wwitzel3/awx,wwitzel3/awx,snahelou/awx,wwitzel3/awx,snahelou/awx
|
awx/main/tests/unit/models/test_job_unit.py
|
awx/main/tests/unit/models/test_job_unit.py
|
import pytest
import json
from awx.main.tasks import RunJob
from awx.main.models import Job
@pytest.fixture
def job(mocker):
return mocker.MagicMock(**{
'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}',
'extra_vars_dict': {"secret_key": "my_password"},
'pk': 1, 'job_template.pk': 1, 'job_template.name': '',
'created_by.pk': 1, 'created_by.username': 'admin',
'launch_type': 'manual'})
@pytest.mark.survey
def test_job_survey_password_redaction():
"""Tests the Job model's funciton to redact passwords from
extra_vars - used when displaying job information"""
job = Job(
name="test-job-with-passwords",
extra_vars=json.dumps({
'submitter_email': '[email protected]',
'secret_key': '6kQngg3h8lgiSTvIEb21',
'SSN': '123-45-6789'}),
survey_passwords={
'secret_key': '$encrypted$',
'SSN': '$encrypted$'})
assert json.loads(job.display_extra_vars()) == {
'submitter_email': '[email protected]',
'secret_key': '$encrypted$',
'SSN': '$encrypted$'}
@pytest.mark.survey
def test_survey_passwords_not_in_extra_vars():
"""Tests that survey passwords not included in extra_vars are
not included when displaying job information"""
job = Job(
name="test-survey-not-in",
extra_vars=json.dumps({
'submitter_email': '[email protected]'}),
survey_passwords={
'secret_key': '$encrypted$',
'SSN': '$encrypted$'})
assert json.loads(job.display_extra_vars()) == {
'submitter_email': '[email protected]',
}
def test_job_safe_args_redacted_passwords(job):
"""Verify that safe_args hides passwords in the job extra_vars"""
kwargs = {'ansible_version': '2.1'}
run_job = RunJob()
safe_args = run_job.build_safe_args(job, **kwargs)
ev_index = safe_args.index('-e') + 1
extra_vars = json.loads(safe_args[ev_index])
assert extra_vars['secret_key'] == '$encrypted$'
def test_job_args_unredacted_passwords(job):
kwargs = {'ansible_version': '2.1'}
run_job = RunJob()
args = run_job.build_args(job, **kwargs)
ev_index = args.index('-e') + 1
extra_vars = json.loads(args[ev_index])
assert extra_vars['secret_key'] == 'my_password'
|
import pytest
import json
from awx.main.tasks import RunJob
from awx.main.models import Job
@pytest.fixture
def job(mocker):
return mocker.MagicMock(**{
'display_extra_vars.return_value': '{\"secret_key\": \"$encrypted$\"}',
'extra_vars_dict': {"secret_key": "my_password"},
'pk': 1, 'job_template.pk': 1, 'job_template.name': '',
'created_by.pk': 1, 'created_by.username': 'admin',
'launch_type': 'manual'})
@pytest.mark.survey
def test_job_survey_password_redaction():
"""Tests the Job model's funciton to redact passwords from
extra_vars - used when displaying job information"""
job = Job(
name="test-job-with-passwords",
extra_vars=json.dumps({
'submitter_email': '[email protected]',
'secret_key': '6kQngg3h8lgiSTvIEb21',
'SSN': '123-45-6789'}),
survey_passwords={
'secret_key': '$encrypted$',
'SSN': '$encrypted$'})
assert json.loads(job.display_extra_vars()) == {
'submitter_email': '[email protected]',
'secret_key': '$encrypted$',
'SSN': '$encrypted$'}
def test_job_safe_args_redacted_passwords(job):
"""Verify that safe_args hides passwords in the job extra_vars"""
kwargs = {'ansible_version': '2.1'}
run_job = RunJob()
safe_args = run_job.build_safe_args(job, **kwargs)
ev_index = safe_args.index('-e') + 1
extra_vars = json.loads(safe_args[ev_index])
assert extra_vars['secret_key'] == '$encrypted$'
def test_job_args_unredacted_passwords(job):
kwargs = {'ansible_version': '2.1'}
run_job = RunJob()
args = run_job.build_args(job, **kwargs)
ev_index = args.index('-e') + 1
extra_vars = json.loads(args[ev_index])
assert extra_vars['secret_key'] == 'my_password'
|
apache-2.0
|
Python
|
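To illustrate what the new tests exercise, a minimal sketch (not AWX's actual implementation) of redacting surveyed passwords out of extra_vars:
import json
def display_extra_vars(extra_vars, survey_passwords):
    # replace any surveyed key that actually appears in extra_vars;
    # surveyed keys absent from extra_vars are not added
    data = json.loads(extra_vars)
    return json.dumps({k: survey_passwords.get(k, v) for k, v in data.items()})
print(display_extra_vars(
    json.dumps({'submitter_email': '[email protected]', 'SSN': '123-45-6789'}),
    {'SSN': '$encrypted$'}))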
3b0760ce604ab5e627f8459fc83f4f7b638c224e
|
Fix flake8 warning
|
nimbis/cmsplugin-forms-builder
|
cmsplugin_forms_builder/cms_plugins.py
|
cmsplugin_forms_builder/cms_plugins.py
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_forms_builder.models import PluginForm
from django.utils.translation import ugettext_lazy as _
class FormBuilderPlugin(CMSPluginBase):
"""
Plugin class for form-builder forms.
"""
model = PluginForm
name = _("Form")
render_template = "forms/form_detail.html"
cache = False
def render(self, context, instance, placeholder):
context['form'] = instance.form
return context
plugin_pool.register_plugin(FormBuilderPlugin)
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_forms_builder.models import PluginForm
from django.utils.translation import ugettext_lazy as _
class FormBuilderPlugin(CMSPluginBase):
"""
Plugin class for form-builder forms.
"""
model = PluginForm
name = _("Form")
render_template = "forms/form_detail.html"
cache = False
def render(self, context, instance, placeholder):
context['form'] = instance.form
return context
plugin_pool.register_plugin(FormBuilderPlugin)
|
bsd-3-clause
|
Python
|
749b05712474ea8695fb6f2d1662399da42b1d32
|
Update error with user input
|
ZEUSOFCS/Python
|
SimpleCalculator.py
|
SimpleCalculator.py
|
'''
Author : DORIAN JAVA BROWN
Version : N/A
Copyright : All Rights Reserve; You may use, distribute and modify this code.
Description : This is a simple calculator that can add, subtract, multiply and divide using functions
'''
'''function definitions'''
def userData():
    # receive two numbers from the user
    num1 = int(raw_input('\nEnter first number : '))
    num2 = int(raw_input('\nEnter second number : '))
    return num1, num2
def calculate():
    # operation selection from the user
    print('\n\n')
    print('\t\t\tOperation Selection')
    print('-------------------------------------------------------------')
    print('+ for addition')
    print('- for subtraction')
    print('* for multiplication')
    print('/ for division')
    print('% for modulus')
    print('\n\n')
    operation = raw_input('Enter Operation symbol: ')
    # check that the operation symbol is valid
    if operation not in ('+', '-', '*', '/', '%'):
        print('Invalid character, please run the program again.')
        return
    num1, num2 = userData()
    # addition
    if operation == '+':
        print('\n\n')
        print('ANSWER: {} + {} = '.format(num1, num2) + str(num1 + num2))
        print('\n\n')
    # subtraction
    elif operation == '-':
        print('\n\n')
        print('ANSWER: {} - {} = '.format(num1, num2) + str(num1 - num2))
        print('\n\n')
    # multiplication
    elif operation == '*':
        print('\n\n')
        print('ANSWER: {} * {} = '.format(num1, num2) + str(num1 * num2))
        print('\n\n')
    # division
    elif operation == '/':
        print('\n\n')
        print('ANSWER: {} / {} = '.format(num1, num2) + str(num1 / num2))
        print('\n\n')
    # modulus
    else:
        print('\n\n')
        print('ANSWER: {} % {} = '.format(num1, num2) + str(num1 % num2))
        print('\n\n')
'''function call'''
calculate()
|
'''
Author : DORIAN JAVA BROWN
Version : N/A
Copyright : All Rights Reserve; You may use, distribute and modify this code.
Description : This is a simple calculator that can add, subtract, multiply and divide using functions
'''
'''function definitions'''
def calculate():
# operation selection from the user
print('\n\n')
print('\t\t\tOperation Selection')
print('-------------------------------------------------------------')
print('+ for addition')
print('- for subtraction')
print('* for multiplication')
print('/ for division')
print('\ for modulus')
print('\n\n')
operation = raw_input('Enter Operation symbol: ')
# check if operation symbol is valid
operator(operation)
# recieving two numbers from user
num1 = int(raw_input('\nEnter first number : '))
num2 = int(raw_input('\nEnter second number : '))
# addition
if operation == '+':
print('\n\n')
print('ANSWER: {} + {} = '.format(num1,num2) + str(num1 + num2))
print('\n\n')
# subtraction
elif operation == '-':
print('\n\n')
print('ANSWER: {} - {} = '.format(num1,num2) + str(num1 - num2))
print('\n\n')
# multiplication
elif operation == '*':
print('\n\n')
print('ANSWER: {} * {} = '.format(num1,num2) + str(num1 * num2))
print('\n\n')
# divison
elif operation == '/':
print('\n\n')
print('ANSWER: {} / {} = '.format(num1,num2) + str(num1 / num2))
print('\n\n')
# modulus
elif operation == '/':
print('\n\n')
print('ANSWER: {} \ {} = '.format(num1,num2) + str(num1 % num2))
print('\n\n')
else: print('Invalid character, please run the program again.')
def operator(operation):
if operation != '+':
if operation == 3 :
print('Invalid operator, please run the program again.')
calculate()
'''function call'''
calculate()
|
mit
|
Python
|
8f815c41b505c01cbc1c57088ddc3a465f1ac07c
|
Add a configuration key for the URL of the Fedora OpenID server
|
jeremycline/fmn,jeremycline/fmn,jeremycline/fmn
|
fmn/web/default_config.py
|
fmn/web/default_config.py
|
SECRET_KEY = 'changeme please'
# TODO -- May I set this to true?
FAS_OPENID_CHECK_CERT = False
#ADMIN_GROUPS = ['sysadmin-web']
FMN_FEDORA_OPENID = 'https://id.fedoraproject.org'
|
SECRET_KEY = 'changeme please'
# TODO -- May I set this to true?
FAS_OPENID_CHECK_CERT = False
#ADMIN_GROUPS = ['sysadmin-web']
|
lgpl-2.1
|
Python
|
87b6166bb2f88b54c78569d6b566c4d557733c57
|
Modify proxy to use inlineCallbacks
|
cataliniacob/ep2012-tutorial-twisted
|
proxy.py
|
proxy.py
|
from twisted.internet import defer, endpoints, protocol, reactor
from twisted.protocols import basic
from twisted.web.client import getPage
import time
class ProxyProtocol(basic.LineReceiver):
def lineReceived(self, line):
if not line.startswith('http://'):
return
self.getPage(line)
@defer.inlineCallbacks
def getPage(self, line):
start = time.time()
print 'Fetching {}'.format(line)
try:
data = yield getPage(line)
except Exception as e:
print 'Error while fetching {}: {}'.format(line, e)
else:
print 'Fetched {} in {} sec'.format(line, time.time() - start)
self.transport.write(data)
if __name__ == '__main__':
factory = protocol.ServerFactory()
factory.protocol = ProxyProtocol
endpoints.serverFromString(reactor, 'tcp:8000').listen(factory)
reactor.run()
|
from twisted.internet import endpoints, protocol, reactor
from twisted.protocols import basic
from twisted.web.client import getPage
import time
class ProxyProtocol(basic.LineReceiver):
def gotPage(self, data, line, start):
print 'Fetched {} in {} sec'.format(line, time.time() - start)
self.transport.write(data)
def errGettingPage(self, reason, line):
print 'Error while fetching {}: {}'.format(line, reason.getErrorMessage())
def lineReceived(self, line):
if not line.startswith('http://'):
return
start = time.time()
print 'Fetching {}'.format(line)
d = getPage(line)
d.addCallback(self.gotPage, line, start)
d.addErrback(self.errGettingPage, line)
if __name__ == '__main__':
factory = protocol.ServerFactory()
factory.protocol = ProxyProtocol
endpoints.serverFromString(reactor, 'tcp:8000').listen(factory)
reactor.run()
|
mit
|
Python
|
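A minimal self-contained sketch of the inlineCallbacks pattern adopted here, with task.deferLater standing in for getPage:
from twisted.internet import defer, reactor, task
@defer.inlineCallbacks
def demo():
    # yield suspends the generator until the Deferred fires
    result = yield task.deferLater(reactor, 0.5, lambda: 'fetched')
    print(result)
    reactor.stop()
if __name__ == '__main__':
    demo()
    reactor.run()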
d75e1309bf41f05489ffcf8502309b765bc8c359
|
handle non dev/rc last release (#5133)
|
draperjames/bokeh,DuCorey/bokeh,rs2/bokeh,stonebig/bokeh,dennisobrien/bokeh,philippjfr/bokeh,jakirkham/bokeh,jakirkham/bokeh,azjps/bokeh,ericmjl/bokeh,draperjames/bokeh,philippjfr/bokeh,schoolie/bokeh,azjps/bokeh,percyfal/bokeh,mindriot101/bokeh,aavanian/bokeh,percyfal/bokeh,timsnyder/bokeh,aavanian/bokeh,timsnyder/bokeh,bokeh/bokeh,bokeh/bokeh,dennisobrien/bokeh,schoolie/bokeh,draperjames/bokeh,timsnyder/bokeh,dennisobrien/bokeh,aiguofer/bokeh,rs2/bokeh,aavanian/bokeh,draperjames/bokeh,rs2/bokeh,schoolie/bokeh,rs2/bokeh,aiguofer/bokeh,timsnyder/bokeh,aiguofer/bokeh,ericmjl/bokeh,ericmjl/bokeh,schoolie/bokeh,ericmjl/bokeh,bokeh/bokeh,aavanian/bokeh,percyfal/bokeh,mindriot101/bokeh,jakirkham/bokeh,mindriot101/bokeh,stonebig/bokeh,stonebig/bokeh,Karel-van-de-Plassche/bokeh,DuCorey/bokeh,azjps/bokeh,rs2/bokeh,mindriot101/bokeh,draperjames/bokeh,percyfal/bokeh,philippjfr/bokeh,bokeh/bokeh,schoolie/bokeh,timsnyder/bokeh,DuCorey/bokeh,philippjfr/bokeh,Karel-van-de-Plassche/bokeh,bokeh/bokeh,aiguofer/bokeh,ericmjl/bokeh,aavanian/bokeh,stonebig/bokeh,azjps/bokeh,DuCorey/bokeh,jakirkham/bokeh,aiguofer/bokeh,azjps/bokeh,Karel-van-de-Plassche/bokeh,Karel-van-de-Plassche/bokeh,DuCorey/bokeh,philippjfr/bokeh,percyfal/bokeh,dennisobrien/bokeh,dennisobrien/bokeh,jakirkham/bokeh,Karel-van-de-Plassche/bokeh
|
scripts/update_bokehjs_versions.py
|
scripts/update_bokehjs_versions.py
|
import os
import re
import sys
def check_input(version):
pat = r'^(\d+.\d+.\d+)((dev|rc)\d+)?$'
if not re.match(pat, version):
print("The new version must be in the format X.X.X([dev|rc]X) (ex. '0.12.0')")
return False
return True
def version_update(version, filename):
pat = r"""(release|version)([\" ][:=] [\"\'])(\d+.\d+.\d+)((dev|rc)\d+)?([\"\'])"""
with open(filename) as f:
text = f.read()
match = re.search(pat, text)
if not match:
print("ERROR: Unable to find version string to replace in %s" % filename)
sys.exit(1)
old_version = match.group(3)
if match.group(4) is not None:
old_version += match.group(4)
text = re.sub(pat, r'\g<1>\g<2>%s\g<6>' % version, text)
with open(filename, 'w') as f:
f.write(text)
print("Edited {filename}: Updated version string '{old_version}' to '{version}'".format(filename=filename, version=version, old_version=old_version))
if __name__ == '__main__':
if not len(sys.argv) == 2:
print("Please provide the new version number to update.")
sys.exit(1)
version = sys.argv[1]
if not check_input(version):
sys.exit(1)
os.chdir('../')
filenames = [
'bokehjs/src/coffee/version.coffee',
'bokehjs/package.json',
]
for filename in filenames:
version_update(version, filename)
|
import os
import re
import sys
def check_input(version):
pat = r'^(\d+.\d+.\d+)((dev|rc)\d+)?$'
if not re.match(pat, version):
print("The new version must be in the format X.X.X([dev|rc]X) (ex. '0.12.0')")
return False
return True
def version_update(version, filename):
pat = r"""(release|version)([\" ][:=] [\"\'])(\d+.\d+.\d+)((dev|rc)\d+)?([\"\'])"""
with open(filename) as f:
text = f.read()
match = re.search(pat, text)
if not match:
print("ERROR: Unable to find version string to replace in %s" % filename)
sys.exit(1)
old_version = match.group(3) + match.group(4)
text = re.sub(pat, r'\g<1>\g<2>%s\g<6>' % version, text)
with open(filename, 'w') as f:
f.write(text)
print("Edited {filename}: Updated version string '{old_version}' to '{version}'".format(filename=filename, version=version, old_version=old_version))
if __name__ == '__main__':
if not len(sys.argv) == 2:
print("Please provide the new version number to update.")
sys.exit(1)
version = sys.argv[1]
if not check_input(version):
sys.exit(1)
os.chdir('../')
filenames = [
'bokehjs/src/coffee/version.coffee',
'bokehjs/package.json',
]
for filename in filenames:
version_update(version, filename)
|
bsd-3-clause
|
Python
|
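The underlying fix: an optional regex group returns None when it does not participate in the match, so it cannot be concatenated unconditionally. A quick demonstration:
import re
pat = r'^(\d+\.\d+\.\d+)((dev|rc)\d+)?$'
for version in ('0.12.2', '0.12.3dev4'):
    m = re.match(pat, version)
    old_version = m.group(1)
    if m.group(2) is not None:  # suffix group is absent for plain releases
        old_version += m.group(2)
    print(old_version)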
001924264830761cfa5938ca542201f2e8bdaf66
|
Fix haystack index
|
kelvan/freieit,kelvan/freieit
|
freieit/search_indexes.py
|
freieit/search_indexes.py
|
from haystack import indexes
from .models import ExpertProfile
class ExpertProfileIndex(indexes.SearchIndex, indexes.Indexable):
#text = indexes.CharField(document=True, use_template=True)
text = indexes.EdgeNgramField(document=True, use_template=True)
#name = indexes.CharField(model_attr='name')
#location = indexes.CharField(model_attr='location')
#services = indexes.CharField(model_attr='services')
def get_model(self):
return ExpertProfile
def index_queryset(self, using=None):
"""Used when the entire index for model is updated."""
return self.get_model().objects.filter(available=True)
|
from haystack import indexes, site
from .models import ExpertProfile
class ExpertProfileIndex(indexes.SearchIndex):
#text = indexes.CharField(document=True, use_template=True)
text = indexes.EdgeNgramField(document=True, use_template=True)
#name = indexes.CharField(model_attr='name')
#location = indexes.CharField(model_attr='location')
#services = indexes.CharField(model_attr='services')
def get_model(self):
return ExpertProfile
def index_queryset(self):
"""Used when the entire index for model is updated."""
return self.get_model().objects.filter(available=True)
site.register(ExpertProfile, ExpertProfileIndex)
|
agpl-3.0
|
Python
|
270df828dfc76f993a999daeeb8fa8f6c82b1e62
|
Fix typo in documentation
|
leshchevds/ganeti,mbakke/ganeti,leshchevds/ganeti,dimara/ganeti,ganeti/ganeti,bitemyapp/ganeti,ganeti/ganeti,ganeti-github-testing/ganeti-test-1,mbakke/ganeti,ganeti-github-testing/ganeti-test-1,yiannist/ganeti,yiannist/ganeti,onponomarev/ganeti,bitemyapp/ganeti,onponomarev/ganeti,andir/ganeti,andir/ganeti,yiannist/ganeti,apyrgio/ganeti,mbakke/ganeti,grnet/snf-ganeti,dimara/ganeti,grnet/snf-ganeti,leshchevds/ganeti,andir/ganeti,apyrgio/ganeti,ganeti/ganeti
|
lib/cmdlib/query.py
|
lib/cmdlib/query.py
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units for queries."""
from ganeti import constants
from ganeti import errors
from ganeti import query
from ganeti.cmdlib.backup import ExportQuery
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.cluster import ClusterQuery
from ganeti.cmdlib.group import GroupQuery
from ganeti.cmdlib.instance_query import InstanceQuery
from ganeti.cmdlib.misc import ExtStorageQuery
from ganeti.cmdlib.network import NetworkQuery
from ganeti.cmdlib.node import NodeQuery
from ganeti.cmdlib.operating_system import OsQuery
#: Query type implementations
_QUERY_IMPL = {
constants.QR_CLUSTER: ClusterQuery,
constants.QR_INSTANCE: InstanceQuery,
constants.QR_NODE: NodeQuery,
constants.QR_GROUP: GroupQuery,
constants.QR_NETWORK: NetworkQuery,
constants.QR_OS: OsQuery,
constants.QR_EXTSTORAGE: ExtStorageQuery,
constants.QR_EXPORT: ExportQuery,
}
assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
def _GetQueryImplementation(name):
"""Returns the implementation for a query type.
@param name: Query type, must be one of L{constants.QR_VIA_OP}
"""
try:
return _QUERY_IMPL[name]
except KeyError:
raise errors.OpPrereqError("Unknown query resource '%s'" % name,
errors.ECODE_INVAL)
class LUQuery(NoHooksLU):
"""Query for resources/items of a certain kind.
"""
# pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
qcls = _GetQueryImplementation(self.op.what)
self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
def ExpandNames(self):
self.impl.ExpandNames(self)
def DeclareLocks(self, level):
self.impl.DeclareLocks(self, level)
def Exec(self, feedback_fn):
return self.impl.NewStyleQuery(self)
class LUQueryFields(NoHooksLU):
"""Query for resources/items of a certain kind.
"""
# pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
self.qcls = _GetQueryImplementation(self.op.what)
def ExpandNames(self):
self.needed_locks = {}
def Exec(self, feedback_fn):
return query.QueryFields(self.qcls.FIELDS, self.op.fields)
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Logical units for queries."""
from ganeti import constants
from ganeti import errors
from ganeti import query
from ganeti.cmdlib.backup import ExportQuery
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.cluster import ClusterQuery
from ganeti.cmdlib.group import GroupQuery
from ganeti.cmdlib.instance_query import InstanceQuery
from ganeti.cmdlib.misc import ExtStorageQuery
from ganeti.cmdlib.network import NetworkQuery
from ganeti.cmdlib.node import NodeQuery
from ganeti.cmdlib.operating_system import OsQuery
#: Query type implementations
_QUERY_IMPL = {
constants.QR_CLUSTER: ClusterQuery,
constants.QR_INSTANCE: InstanceQuery,
constants.QR_NODE: NodeQuery,
constants.QR_GROUP: GroupQuery,
constants.QR_NETWORK: NetworkQuery,
constants.QR_OS: OsQuery,
constants.QR_EXTSTORAGE: ExtStorageQuery,
constants.QR_EXPORT: ExportQuery,
}
assert set(_QUERY_IMPL.keys()) == constants.QR_VIA_OP
def _GetQueryImplementation(name):
"""Returns the implemtnation for a query type.
@param name: Query type, must be one of L{constants.QR_VIA_OP}
"""
try:
return _QUERY_IMPL[name]
except KeyError:
raise errors.OpPrereqError("Unknown query resource '%s'" % name,
errors.ECODE_INVAL)
class LUQuery(NoHooksLU):
"""Query for resources/items of a certain kind.
"""
# pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
qcls = _GetQueryImplementation(self.op.what)
self.impl = qcls(self.op.qfilter, self.op.fields, self.op.use_locking)
def ExpandNames(self):
self.impl.ExpandNames(self)
def DeclareLocks(self, level):
self.impl.DeclareLocks(self, level)
def Exec(self, feedback_fn):
return self.impl.NewStyleQuery(self)
class LUQueryFields(NoHooksLU):
"""Query for resources/items of a certain kind.
"""
# pylint: disable=W0142
REQ_BGL = False
def CheckArguments(self):
self.qcls = _GetQueryImplementation(self.op.what)
def ExpandNames(self):
self.needed_locks = {}
def Exec(self, feedback_fn):
return query.QueryFields(self.qcls.FIELDS, self.op.fields)
|
bsd-2-clause
|
Python
|
afc47fd469d7836dcda0eb8bb027cce2a0ed2121
|
Update MergeSort.py
|
maneeshd/Algorithms-and-Data-Structures
|
fundamentals/MergeSort.py
|
fundamentals/MergeSort.py
|
"""
@author: Maneesh D
@email: [email protected]
@date: 24/6/17
Worst Case Analysis: Merge Sort -> O(nlog n)
"""
from timeit import Timer, default_timer
def merge_sort(data):
if len(data) == 1:
return data
n = len(data)
mid = n // 2
# Divide and sort the sub lists
left = merge_sort(data[:mid])
right = merge_sort(data[mid:])
# Merge
merged = []
left_len = len(left)
right_len = len(right)
i = 0
j = 0
while i < left_len and j < right_len:
if left[i] <= right[j]: # Insert left list element if smaller.
merged.append(left[i])
i += 1
else:
merged.append(right[j]) # Insert right list element if smaller.
j += 1
while i < left_len: # Insert the remaining elements in left if any.
merged.append(left[i])
i += 1
while j < right_len: # Insert the remaining elements in right if any.
merged.append(right[j])
j += 1
return merged
def main():
start = default_timer()
data = [i for i in range(100000, 0, -1)] # Worst Case Input (Reverse Sorted)
merge_sort(data)
print("Sort Time = %f Seconds" % (default_timer() - start))
if __name__ == '__main__':
print("Merge Sort")
print("-" * len("Merge Sort"))
t = Timer(main)
print("\nAverage sorting time for 100000 elements in 10 runs = %f Seconds" % (t.timeit(10) / 10))
|
"""
@author: Maneesh D
@email: [email protected]
@date: 24/6/17
Merge Sort -> O(nlog n)
"""
from random import randint
from timeit import Timer, default_timer
def merge_sort(data):
if len(data) == 1:
return data
n = len(data)
mid = n // 2
# Divide and sort the sub lists
left = merge_sort(data[:mid])
right = merge_sort(data[mid:])
# Merge
merged = []
left_len = len(left)
right_len = len(right)
i = 0
j = 0
while i < left_len and j < right_len:
if left[i] <= right[j]: # Insert left list element if smaller.
merged.append(left[i])
i += 1
else:
merged.append(right[j]) # Insert right list element if smaller.
j += 1
while i < left_len: # Insert the remaining elements in left if any.
merged.append(left[i])
i += 1
while j < right_len: # Insert the remaining elements in right if any.
merged.append(right[j])
j += 1
return merged
def main():
start = default_timer()
data = [randint(1, 10) + i for i in range(100001)]
merge_sort(data)
print("Sort Time = %f Seconds" % (default_timer() - start))
if __name__ == '__main__':
print("Merge Sort")
print("-" * len("Merge Sort"))
t = Timer(main)
print("\nAverage sorting time for 100000 elements in 10 runs = %f Seconds" % (t.timeit(10) / 10))
|
mit
|
Python
|
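A quick usage check, assuming merge_sort from the file above is in scope:
data = [5, 1, 4, 1, 5, 9, 2, 6]
assert merge_sort(data) == sorted(data)
print(merge_sort(data))  # [1, 1, 2, 4, 5, 5, 6, 9]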
bebbbed6dba82329fcaab32fdb0109ec78cd7ad9
|
Use correct subprotocol
|
zmap/ztag
|
ztag/transforms/s7.py
|
ztag/transforms/s7.py
|
from ztag.transform import *
from ztag import protocols, errors
class S7Transform(ZGrabTransform):
name = "s7/status"
port = 102
protocol = protocols.S7
subprotocol = protocols.S7.SZL
def _transform_object(self, obj):
zout = ZMapTransformOutput()
wrapped = Transformable(obj)
s = wrapped['data']['s7']
if not s['is_s7'].resolve() or not s.resolve():
raise errors.IgnoreObject()
out = s.resolve()
out['support'] = True
del out['is_s7']
zout.transformed = out
return zout
|
from ztag.transform import *
from ztag import protocols, errors
class S7Transform(ZGrabTransform):
name = "s7/status"
port = 102
protocol = protocols.S7
subprotocol = protocols.S7.STATUS
def _transform_object(self, obj):
zout = ZMapTransformOutput()
wrapped = Transformable(obj)
s = wrapped['data']['s7']
if not s['is_s7'].resolve() or not s.resolve():
raise errors.IgnoreObject()
out = s.resolve()
out['support'] = True
del out['is_s7']
zout.transformed = out
return zout
|
apache-2.0
|
Python
|
0cd617ddc7e6303139583b6a92e90cfac1e91b31
|
set AUTOSYNTH_MULTIPLE_COMMITS=true for context aware commits (#938)
|
googleapis/nodejs-pubsub,googleapis/nodejs-pubsub,googleapis/nodejs-pubsub
|
synth.py
|
synth.py
|
import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
import os
logging.basicConfig(level=logging.DEBUG)
AUTOSYNTH_MULTIPLE_COMMITS = True
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
# tasks has two product names, and a poorly named artman yaml
version = 'v1'
library = gapic.node_library(
'pubsub', version, config_path="/google/pubsub/artman_pubsub.yaml")
# skip index, protos, package.json, and README.md
s.copy(
library,
excludes=['package.json', 'README.md', 'src/index.js'])
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
# https://github.com/googleapis/gapic-generator/issues/2127
s.replace("src/v1/subscriber_client.js",
" }\n\s*/\*\*\n\s+\* The DNS address for this API service\.",
"\n // note: editing generated code\n"
" this.waitForReady = function(deadline, callback) {\n"
" return subscriberStub.then(\n"
" stub => stub.waitForReady(deadline, callback),\n"
" callback\n"
" );\n"
" };\n"
" this.getSubscriberStub = function() {\n"
" return subscriberStub;\n"
" };\n"
"\g<0>")
# Update path discovery due to build/ dir and TypeScript conversion.
s.replace("src/v1/publisher_client.js", "../../package.json", "../../../package.json")
s.replace("src/v1/subscriber_client.js", "../../package.json", "../../../package.json")
# [START fix-dead-link]
s.replace('src/**/doc/google/protobuf/doc_timestamp.js',
'https:\/\/cloud\.google\.com[\s\*]*http:\/\/(.*)[\s\*]*\)',
r"https://\1)")
s.replace('src/**/doc/google/protobuf/doc_timestamp.js',
'toISOString\]',
'toISOString)')
# [END fix-dead-link]
# No browser support for TypeScript libraries yet
os.unlink('webpack.config.js')
os.unlink('src/browser.js')
# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
subprocess.run(['npx', 'compileProtos', 'src'])
|
import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
import os
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
# tasks has two product names, and a poorly named artman yaml
version = 'v1'
library = gapic.node_library(
'pubsub', version, config_path="/google/pubsub/artman_pubsub.yaml")
# skip index, protos, package.json, and README.md
s.copy(
library,
excludes=['package.json', 'README.md', 'src/index.js'])
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
# https://github.com/googleapis/gapic-generator/issues/2127
s.replace("src/v1/subscriber_client.js",
" }\n\s*/\*\*\n\s+\* The DNS address for this API service\.",
"\n // note: editing generated code\n"
" this.waitForReady = function(deadline, callback) {\n"
" return subscriberStub.then(\n"
" stub => stub.waitForReady(deadline, callback),\n"
" callback\n"
" );\n"
" };\n"
" this.getSubscriberStub = function() {\n"
" return subscriberStub;\n"
" };\n"
"\g<0>")
# Update path discovery due to build/ dir and TypeScript conversion.
s.replace("src/v1/publisher_client.js", "../../package.json", "../../../package.json")
s.replace("src/v1/subscriber_client.js", "../../package.json", "../../../package.json")
# [START fix-dead-link]
s.replace('src/**/doc/google/protobuf/doc_timestamp.js',
'https:\/\/cloud\.google\.com[\s\*]*http:\/\/(.*)[\s\*]*\)',
r"https://\1)")
s.replace('src/**/doc/google/protobuf/doc_timestamp.js',
'toISOString\]',
'toISOString)')
# [END fix-dead-link]
# No browser support for TypeScript libraries yet
os.unlink('webpack.config.js')
os.unlink('src/browser.js')
# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
subprocess.run(['npx', 'compileProtos', 'src'])
|
apache-2.0
|
Python
|
59860336a32b4cb438d275ced06a3100a2dd78b6
|
Update synth.py
|
googleapis/java-pubsublite-spark,googleapis/java-pubsublite-spark,googleapis/java-pubsublite-spark
|
synth.py
|
synth.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool.languages.java as java
java.common_templates()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool.languages.java as java
java.common_templates()
|
apache-2.0
|
Python
|
0e8994d1dbeb6266156fa9615fc003aa0d0a4822
|
remove unused import, flake8
|
chrisseto/waterbutler,RCOSDP/waterbutler,rdhyee/waterbutler,CenterForOpenScience/waterbutler,TomBaxter/waterbutler,hmoco/waterbutler,icereval/waterbutler,kwierman/waterbutler,cosenal/waterbutler,Johnetordoff/waterbutler,Ghalko/waterbutler,rafaeldelucena/waterbutler,felliott/waterbutler
|
tasks.py
|
tasks.py
|
# -*- coding: utf-8 -*-
import sys
from invoke import task, run
from waterbutler import settings
@task
def install(upgrade=False, pip_cache=None, wheel_repo=None):
cmd = 'pip install -r dev-requirements.txt'
if upgrade:
cmd += ' --upgrade'
if pip_cache:
cmd += ' --download-cache={}'.format(pip_cache)
if wheel_repo:
run('pip install wheel', pty=True)
# get the current python version, expected git branch name
ver = '.'.join([str(i) for i in sys.version_info[0:2]])
name = 'wheelhouse-{}'.format(ver)
ext = '.zip'
url = '{}/archive/{}{}'.format(wheel_repo, ver, ext)
# download and extract the wheelhouse github repository archive
run('curl -o {}{} -L {}'.format(name, ext, url), pty=True)
run('unzip {}{}'.format(name, ext), pty=True)
# run pip install w/ the wheelhouse dependencies available
run(cmd + ' --use-wheel --find-links={}'.format(name), pty=True)
# cleanup wheelhouse-{ver} folder and wheelhouse-{ver}{ext} file
run('rm -rf {}'.format(name), pty=True)
run('rm -f {}{}'.format(name, ext), pty=True)
else:
run(cmd, pty=True)
@task
def flake():
run('flake8 .')
@task
def test():
cmd = 'py.test --cov-report term-missing --cov waterbutler tests'
run(cmd, pty=True)
@task
def tornado(port=settings.PORT, address=settings.ADDRESS, debug=settings.DEBUG):
from waterbutler.server import serve
serve(port, address, debug)
|
# -*- coding: utf-8 -*-
import os
import sys
from invoke import task, run
from waterbutler import settings
@task
def install(upgrade=False, pip_cache=None, wheel_repo=None):
cmd = 'pip install -r dev-requirements.txt'
if upgrade:
cmd += ' --upgrade'
if pip_cache:
cmd += ' --download-cache={}'.format(pip_cache)
if wheel_repo:
run('pip install wheel', pty=True)
# get the current python version, expected git branch name
ver = '.'.join([str(i) for i in sys.version_info[0:2]])
name = 'wheelhouse-{}'.format(ver)
ext = '.zip'
url = '{}/archive/{}{}'.format(wheel_repo, ver, ext)
# download and extract the wheelhouse github repository archive
run('curl -o {}{} -L {}'.format(name, ext, url), pty=True)
run('unzip {}{}'.format(name, ext), pty=True)
# run pip install w/ the wheelhouse dependencies available
run(cmd + ' --use-wheel --find-links={}'.format(name), pty=True)
# cleanup wheelhouse-{ver} folder and wheelhouse-{ver}{ext} file
run('rm -rf {}'.format(name), pty=True)
run('rm -f {}{}'.format(name, ext), pty=True)
else:
run(cmd, pty=True)
@task
def flake():
run('flake8 .')
@task
def test():
cmd = 'py.test --cov-report term-missing --cov waterbutler tests'
run(cmd, pty=True)
@task
def tornado(port=settings.PORT, address=settings.ADDRESS, debug=settings.DEBUG):
from waterbutler.server import serve
serve(port, address, debug)
|
apache-2.0
|
Python
|
fd9ef4b30ef09d52c120e9e8ab2461cbce939dfd
|
Add --show-diff-on-failure
|
marshmallow-code/apispec,marshmallow-code/smore
|
tasks.py
|
tasks.py
|
# -*- coding: utf-8 -*-
import os
import sys
import webbrowser
from invoke import task
docs_dir = 'docs'
build_dir = os.path.join(docs_dir, '_build')
@task
def test(ctx, watch=False, last_failing=False):
"""Run the tests.
Note: --watch requires pytest-xdist to be installed.
"""
import pytest
syntax(ctx)
args = []
if watch:
args.append('-f')
if last_failing:
args.append('--lf')
args.append('tests')
retcode = pytest.main(args)
sys.exit(retcode)
@task
def syntax(ctx):
"""Run flake8 on codebase."""
ctx.run('pre-commit run --all-files --show-diff-on-failure', echo=True)
@task
def watch(ctx):
"""Run tests when a file changes. Requires pytest-xdist."""
import pytest
errcode = pytest.main(['-f'])
sys.exit(errcode)
@task
def clean(ctx):
ctx.run('rm -rf build')
ctx.run('rm -rf dist')
ctx.run('rm -rf apispec.egg-info')
clean_docs(ctx)
print('Cleaned up.')
@task
def clean_docs(ctx):
ctx.run('rm -rf %s' % build_dir)
@task
def browse_docs(ctx):
path = os.path.join(build_dir, 'index.html')
webbrowser.open_new_tab(path)
def build_docs(ctx, browse):
ctx.run('sphinx-build %s %s' % (docs_dir, build_dir), echo=True)
if browse:
browse_docs(ctx)
@task
def docs(ctx, clean=False, browse=False, watch=False):
"""Build the docs."""
if clean:
clean_docs(ctx)
if watch:
watch_docs(ctx, browse=browse)
else:
build_docs(ctx, browse=browse)
@task
def watch_docs(ctx, browse=False):
"""Run build the docs when a file changes."""
try:
import sphinx_autobuild # noqa
except ImportError:
print('ERROR: watch task requires the sphinx_autobuild package.')
print('Install it with:')
print(' pip install sphinx-autobuild')
sys.exit(1)
ctx.run(
'sphinx-autobuild {0} {1} {2} -z apispec'.format(
'--open-browser' if browse else '', docs_dir, build_dir,
), echo=True, pty=True,
)
@task
def readme(ctx, browse=False):
ctx.run('rst2html.py README.rst > README.html')
if browse:
webbrowser.open_new_tab('README.html')
|
# -*- coding: utf-8 -*-
import os
import sys
import webbrowser
from invoke import task
docs_dir = 'docs'
build_dir = os.path.join(docs_dir, '_build')
@task
def test(ctx, watch=False, last_failing=False):
"""Run the tests.
Note: --watch requires pytest-xdist to be installed.
"""
import pytest
syntax(ctx)
args = []
if watch:
args.append('-f')
if last_failing:
args.append('--lf')
args.append('tests')
retcode = pytest.main(args)
sys.exit(retcode)
@task
def syntax(ctx):
"""Run flake8 on codebase."""
ctx.run('pre-commit run --all-files', echo=True)
@task
def watch(ctx):
"""Run tests when a file changes. Requires pytest-xdist."""
import pytest
errcode = pytest.main(['-f'])
sys.exit(errcode)
@task
def clean(ctx):
ctx.run('rm -rf build')
ctx.run('rm -rf dist')
ctx.run('rm -rf apispec.egg-info')
clean_docs(ctx)
print('Cleaned up.')
@task
def clean_docs(ctx):
ctx.run('rm -rf %s' % build_dir)
@task
def browse_docs(ctx):
path = os.path.join(build_dir, 'index.html')
webbrowser.open_new_tab(path)
def build_docs(ctx, browse):
ctx.run('sphinx-build %s %s' % (docs_dir, build_dir), echo=True)
if browse:
browse_docs(ctx)
@task
def docs(ctx, clean=False, browse=False, watch=False):
"""Build the docs."""
if clean:
clean_docs(ctx)
if watch:
watch_docs(ctx, browse=browse)
else:
build_docs(ctx, browse=browse)
@task
def watch_docs(ctx, browse=False):
"""Run build the docs when a file changes."""
try:
import sphinx_autobuild # noqa
except ImportError:
print('ERROR: watch task requires the sphinx_autobuild package.')
print('Install it with:')
print(' pip install sphinx-autobuild')
sys.exit(1)
ctx.run(
'sphinx-autobuild {0} {1} {2} -z apispec'.format(
'--open-browser' if browse else '', docs_dir, build_dir,
), echo=True, pty=True,
)
@task
def readme(ctx, browse=False):
ctx.run('rst2html.py README.rst > README.html')
if browse:
webbrowser.open_new_tab('README.html')
|
mit
|
Python
|
59c9e30df3397a9114a59cc8a318cf011ba04085
|
fix terminal size formatting
|
GoogleCloudPlatform/django-demo-app-unicodex,GoogleCloudPlatform/django-demo-app-unicodex,GoogleCloudPlatform/django-demo-app-unicodex
|
.util/cliformatting.py
|
.util/cliformatting.py
|
import os
import sys
import click
from math import ceil
import shutil
columns, _ = shutil.get_terminal_size()
RESULTS = {"success": 0, "failure": 0}
def header(msg):
click.secho(f"\n# {msg}", bold=True)
def s(n):
if n == 1:
return ""
return "s"
def error(s, details=None):
lineart = "********************************"
click.secho(f"{lineart}\nError {s}", bold=True, fg="red")
if details:
click.echo(details)
click.secho(f"{lineart}", bold=True, fg="red")
def echo(msg, indent=""):
click.echo(f"{indent}{msg}")
def summary():
total = RESULTS["success"] + RESULTS["failure"]
fails = RESULTS["failure"]
if fails != 0:
failcol = {"bold": True, "fg": "red"}
else:
failcol = {}
click.echo(
(
click.style(
f"\nResults: {total} check{s(total)}, ",
bold=True,
)
+ click.style(f"{fails} failure{s(fails)}", **failcol)
+ click.style(".", bold=True)
)
)
if fails == 0:
sys.exit(0)
else:
sys.exit(1)
def result(msg, success=True, details=None):
if success:
success_message = "PASS"
fg = "green"
RESULTS["success"] += 1
else:
success_message = "FAIL"
fg = "red"
RESULTS["failure"] += 1
# overflow math. 7 is the width of the result tag ("[PASS] " / "[FAIL] ")
amsg = msg.ljust(ceil((len(msg) + 7) / columns) * columns - 7)
click.echo(amsg + click.style(f"[{success_message}]", fg=fg, bold=True))
if details and not success:
click.echo(details)
"""
Usage:
header("Testing the things")
result("I did a thing")
result("I failed a thing", success=False, details="how to fix the issue")
summary()
"""
|
import os
import sys
import click
from math import ceil
import shutil
rows, columns = shutil.get_terminal_size()
RESULTS = {"success": 0, "failure": 0}
def header(msg):
click.secho(f"\n# {msg}", bold=True)
def s(n):
if n == 1:
return ""
return "s"
def error(s, details=None):
lineart = "********************************"
click.secho(f"{lineart}\nError {s}", bold=True, fg="red")
if details:
click.echo(details)
click.secho(f"{lineart}", bold=True, fg="red")
def echo(msg, indent=""):
click.echo(f"{indent}{msg}")
def summary():
total = RESULTS["success"] + RESULTS["failure"]
fails = RESULTS["failure"]
if fails != 0:
failcol = {"bold": True, "fg": "red"}
else:
failcol = {}
click.echo(
(
click.style(
f"\nResults: {total} check{s(total)}, ",
bold=True,
)
+ click.style(f"{fails} failure{s(fails)}", **failcol)
+ click.style(".", bold=True)
)
)
if fails == 0:
sys.exit(0)
else:
sys.exit(1)
def result(msg, success=True, details=None):
if success:
success_message = "PASS"
fg = "green"
RESULTS["success"] += 1
else:
success_message = "FAIL"
fg = "red"
RESULTS["failure"] += 1
# overflow math. 7 is the width of the result tag ("[PASS] " / "[FAIL] ")
amsg = msg.ljust(ceil((len(msg) + 7) / columns) * columns - 7)
click.echo(amsg + click.style(f"[{success_message}]", fg=fg, bold=True))
if details and not success:
click.echo(details)
"""
Usage:
header("Testing the things")
result("I did a thing")
result("I failed a thing", success=False, details="how to fix the issue")
summary()
"""
|
apache-2.0
|
Python
|
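The underlying gotcha: shutil.get_terminal_size() returns (columns, lines), not (rows, columns), which the old unpacking got backwards. A quick check:
import shutil
size = shutil.get_terminal_size(fallback=(80, 24))
# os.terminal_size is a named tuple: columns first, lines second
print(size.columns, size.lines)
columns, _ = size  # the corrected unpacking used above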
3b5473048d40feee7807c5ad50f4521113d8216f
|
copy packages instead of symlinking
|
scalableminds/webknossos,scalableminds/webknossos,scalableminds/webknossos,scalableminds/webknossos,scalableminds/webknossos,scalableminds/webknossos
|
buildtools/publish_deb.py
|
buildtools/publish_deb.py
|
#!/usr/bin/python2
import os
import sys
import subprocess
import shutil
import gzip
archive_dir = "%s/packages" % (os.environ["WORKSPACE"])
if not os.path.isdir(archive_dir):
print "no artifacts archived. Either this is a failed build or a job that does not archive artifacts"
sys.exit(0)
deb_packages = filter(lambda f: f.endswith(".deb"), os.listdir(archive_dir))
prod_deb_packages = [deb for deb in deb_packages if not deb.endswith("-dev.deb")]
dev_deb_packages = [deb for deb in deb_packages if not deb.endswith("-prod.deb")]
def publish_deb_packages(mode, packages):
prefix_path = "dists/stable/%s/binary-amd64" % os.environ["JOB_NAME"]
def create_package_info(mode, packages):
os.chdir(archive_dir)
if os.path.isdir(mode):
shutil.rmtree(mode)
os.mkdir(mode)
os.chdir(mode)
for pkg in packages:
os.symlink("../%s" % pkg, pkg)
package_info = subprocess.check_output(["dpkg-scanpackages","-m", "./", "/dev/null", prefix_path])
package_info = package_info.replace(prefix_path+"./", prefix_path)
os.chdir("..")
shutil.rmtree(mode)
return package_info
def extend_repo(mode, packages_info, packages):
repo_dir = "/srv/scmrepo/%s/%s" % (mode, prefix_path)
os.chdir(repo_dir)
symlinks_new = True
for pkg in packages:
target = "%s/%s" % (archive_dir, pkg)
if os.path.exists(pkg) or os.path.lexists(pkg):
symlinks_new = False
else:
shutil.copyfile(target, pkg)
if symlinks_new:
packages_file = gzip.open("Packages.gz", "a")
packages_file.write(packages_info)
packages_file.close()
packages_info = create_package_info(mode, packages)
extend_repo(mode, packages_info, packages)
publish_deb_packages("dev", dev_deb_packages)
publish_deb_packages("prod", prod_deb_packages)
|
#!/usr/bin/python2
import os
import sys
import subprocess
import shutil
import gzip
archive_dir = "%s/packages" % (os.environ["WORKSPACE"])
if not os.path.isdir(archive_dir):
print "no artifacts archived. Either this is a failed build or a job that does not archive artifacts"
sys.exit(0)
deb_packages = filter(lambda f: f.endswith(".deb"), os.listdir(archive_dir))
prod_deb_packages = [deb for deb in deb_packages if not deb.endswith("-dev.deb")]
dev_deb_packages = [deb for deb in deb_packages if not deb.endswith("-prod.deb")]
def publish_deb_packages(mode, packages):
prefix_path = "dists/stable/%s/binary-amd64" % os.environ["JOB_NAME"]
def create_package_info(mode, packages):
os.chdir(archive_dir)
if os.path.isdir(mode):
shutil.rmtree(mode)
os.mkdir(mode)
os.chdir(mode)
for pkg in packages:
os.symlink("../%s" % pkg, pkg)
package_info = subprocess.check_output(["dpkg-scanpackages","-m", "./", "/dev/null", prefix_path])
package_info = package_info.replace(prefix_path+"./", prefix_path)
os.chdir("..")
shutil.rmtree(mode)
return package_info
def extend_repo(mode, packages_info, packages):
repo_dir = "/srv/scmrepo/%s/%s" % (mode, prefix_path)
os.chdir(repo_dir)
symlinks_new = True
for pkg in packages:
target = "%s/%s" % (archive_dir, pkg)
if os.path.exists(pkg) or os.path.lexists(pkg):
symlinks_new = False
else:
os.symlink(target, pkg)
if symlinks_new:
packages_file = gzip.open("Packages.gz", "a")
packages_file.write(packages_info)
packages_file.close()
packages_info = create_package_info(mode, packages)
extend_repo(mode, packages_info, packages)
publish_deb_packages("dev", dev_deb_packages)
publish_deb_packages("prod", prod_deb_packages)
|
agpl-3.0
|
Python
|
2f0dc639a4448e0762843fe3c9a4239eb7fac8c6
|
Fix bug- socks_http does not resolve DNS remotely via Tor
|
metamarcdw/nowallet
|
nowallet/socks_http.py
|
nowallet/socks_http.py
|
import asyncio
import aiohttp
import aiosocks
from aiosocks.connector import ProxyConnector, ProxyClientRequest
class SocksHTTPError(Exception):
pass
async def urlopen(url: str) -> str:
auth5 = aiosocks.Socks5Auth(
'proxyuser1', password='pwd') # type: aiosocks.Socks5Auth
conn = ProxyConnector(remote_resolve=True) # type: ProxyConnector
try:
with aiohttp.ClientSession(connector=conn,
request_class=ProxyClientRequest) as session:
async with session.get(url, # Always connects through Tor.
proxy='socks5://127.0.0.1:9050',
proxy_auth=auth5) as resp:
if resp.status == 200:
return await resp.text()
else:
raise SocksHTTPError("HTTP response not OK")
except aiohttp.ClientProxyConnectionError:
# connection problem
pass
except aiosocks.SocksError:
# communication problem
pass
return "" # Should never happen
def main():
loop = asyncio.get_event_loop() # type: asyncio.AbstractEventLoop
html = loop.run_until_complete(urlopen("https://github.com/")) # type: str
print(html)
loop.close()
if __name__ == '__main__':
main()
|
import asyncio
import aiohttp
import aiosocks
from aiosocks.connector import ProxyConnector, ProxyClientRequest
class SocksHTTPError(Exception):
pass
async def urlopen(url: str) -> str:
auth5 = aiosocks.Socks5Auth(
'proxyuser1', password='pwd') # type: aiosocks.Socks5Auth
conn = ProxyConnector(remote_resolve=False) # type: ProxyConnector
try:
with aiohttp.ClientSession(connector=conn,
request_class=ProxyClientRequest) as session:
async with session.get(url, # Always connects through Tor.
proxy='socks5://127.0.0.1:9050',
proxy_auth=auth5) as resp:
if resp.status == 200:
return await resp.text()
else:
raise SocksHTTPError("HTTP response not OK")
except aiohttp.ClientProxyConnectionError:
# connection problem
pass
except aiosocks.SocksError:
# communication problem
pass
return "" # Should never happen
def main():
loop = asyncio.get_event_loop() # type: asyncio.AbstractEventLoop
html = loop.run_until_complete(urlopen("https://github.com/")) # type: str
print(html)
loop.close()
if __name__ == '__main__':
main()
|
mit
|
Python
|
a68193102354af807285d0238354314c9aa67a25
|
Move victim selection to enforce
|
tengqm/senlin,tengqm/senlin,tengqm/senlin-container,openstack/senlin,stackforge/senlin,Alzon/senlin,openstack/senlin,openstack/senlin,Alzon/senlin,tengqm/senlin-container,stackforge/senlin
|
senlin/policies/deletion_policy.py
|
senlin/policies/deletion_policy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from senlin.db import api as db_api
from senlin.policies import base
class DeletionPolicy(base.Policy):
'''
Policy for deleting member(s) from a cluster.
'''
CRITERIA = (
OLDEST_FIRST, YOUNGEST_FIRST, RANDOM,
) = (
'oldest_first',
'youngest_first',
'random',
)
TARGET = [
('WHEN', 'CLUSTER_SCALE_DOWN'),
('AFTER', 'CLUSTER_DEL_NODES'),
('AFTER', 'CLUSTER_SCALE_DOWN'),
]
PROFILE_TYPE = [
'ANY'
]
def __init__(self, type_name, name, **kwargs):
super(DeletionPolicy, self).__init__(type_name, name, **kwargs)
self.criteria = kwargs.get('criteria', '')
self.grace_period = kwargs.get('grace_period', 0)
self.reduce_desired_capacity = kwargs.get('reduce_desired_capacity',
False)
random.seed()
def pre_op(self, cluster_id, action, **args):
'''
We don't block the deletion anyhow.
'''
return True
def enforce(self, cluster_id, action, **args):
'''
The enforcement of a deletion policy returns the chosen victims
that will be deleted.
'''
nodes = db_api.node_get_all_by_cluster_id(cluster_id)
if self.criteria == self.RANDOM:
rand = random.randrange(len(nodes))
return nodes[rand]
sorted_list = sorted(nodes, key=lambda r: (r.created_time, r.name))
if self.criteria == self.OLDEST_FIRST:
victim = sorted_list[0]
else: # self.criteria == self.YOUNGEST_FIRST:
victim = sorted_list[-1]
return victim
def post_op(self, cluster_id, action, **args):
# TODO(Qiming): process grace period here if needed
pass
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.db import api as db_api
from senlin.policies import base
class DeletionPolicy(base.PolicyBase):
'''
Policy for deleting member(s) from a cluster.
'''
CRITERIA = (
OLDEST_FIRST, YOUNGEST_FIRST, RANDOM,
) = (
'oldest_first',
'youngest_first',
'random',
)
TARGET = [
('BEFORE', 'CLUSTER', 'DELETE_MEMBER'),
('AFTER', 'CLUSTER', 'DELETE_MEMBER'),
]
PROFILE_TYPE = [
'ANY'
]
def __init__(self, name, type_name, **kwargs):
super(DeletionPolicy, self).__init__(name, type_name, kwargs)
self.criteria = kwargs.get('criteria')
self.grace_period = kwargs.get('grace_period')
self.delete_desired_capacity = kwargs.get('reduce_desired_capacity')
def _sort_members_by_creation_time(members):
# TODO: do sorting
return members
def pre_op(self, cluster_id, action, **args):
# :cluster_id the cluster
# :action 'DEL_MEMBER'
# :args a list of candidate members
# TODO: choose victims from the given cluster
members = db_api.get_members(cluster_id)
sorted = self._sort_members_by_creation_time(members)
if self.criteria == self.OLDEST_FIRST:
victim = sorted[0]
elif self.criteria ==self.YOUNGEST_FIRST:
victim = sorted[-1]
else:
rand = random(len(sorted))
victim = sorted[rand]
# TODO: return True/False
return victim
def enforce(self, cluster_id, action, **args):
pass
def post_op(self, cluster_id, action, **args):
pass
|
apache-2.0
|
Python
|
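For context, a minimal sketch of the oldest-first/youngest-first selection that enforce() now performs, using a hypothetical Node namedtuple:
from collections import namedtuple
Node = namedtuple('Node', ['name', 'created_time'])
def pick_victim(nodes, criteria='oldest_first'):
    # sort by creation time, then name, mirroring the policy's sort key
    ordered = sorted(nodes, key=lambda r: (r.created_time, r.name))
    return ordered[0] if criteria == 'oldest_first' else ordered[-1]
nodes = [Node('n1', 100), Node('n2', 50), Node('n3', 200)]
print(pick_victim(nodes).name)                    # n2 (oldest)
print(pick_victim(nodes, 'youngest_first').name)  # n3 (newest)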
b3213feae032032018354548f244a3204f04707f
|
Change compressor.
|
alephmelo/pyncd
|
pyncd.py
|
pyncd.py
|
import lzma
x = open('examples/imgs/square.png', 'rb').read() # file 1 of any type
y = open('examples/imgs/circle.png', 'rb').read() # file 2 of the same type as file 1
x_y = x + y # the concatenation of files
x_comp = lzma.compress(x) # compress file 1
y_comp = lzma.compress(y) # compress file 2
x_y_comp = lzma.compress(x_y) # compress file concatenated
# print len() of each file
print(len(x_comp), len(y_comp), len(x_y_comp), sep=' ', end='\n')
# magic happens here
ncd = (len(x_y_comp) - min(len(x_comp), len(y_comp))) / max(len(x_comp), len(y_comp))
print(ncd)
|
import gzip
x = open('1.png', 'rb').read() # file 1 of any type
y = open('2.png', 'rb').read() # file 2 of the same type as file 1
x_y = x + y # the concatenation of files
x_comp = gzip.compress(x) # compress file 1
y_comp = gzip.compress(y) # compress file 2
x_y_comp = gzip.compress(x_y) # compress file concatenated
# print len() of each file
print(len(x_comp), len(y_comp), len(x_y_comp), sep=' ', end='\n')
# magic happens here
ncd = (len(x_y_comp) - min(len(x_comp), len(y_comp))) / max(len(x_comp), len(y_comp))
print(ncd)
|
mit
|
Python
|
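The same normalized compression distance, wrapped as a reusable helper; a hedged sketch parameterized over file paths:
import lzma
def ncd(path_x, path_y):
    x = open(path_x, 'rb').read()
    y = open(path_y, 'rb').read()
    cx, cy, cxy = (len(lzma.compress(d)) for d in (x, y, x + y))
    # NCD = (C(xy) - min(C(x), C(y))) / max(C(x), C(y))
    return (cxy - min(cx, cy)) / max(cx, cy)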
111e1d1b92860a4342c3499ca9eb0e5623b4974c
|
Increase RQ default_timeout to one hour (in case we have very very long running jobs)
|
OpenSourceActivismTech/call-power,OpenSourceActivismTech/call-power,spacedogXYZ/call-power,OpenSourceActivismTech/call-power,OpenSourceActivismTech/call-power,spacedogXYZ/call-power,spacedogXYZ/call-power,spacedogXYZ/call-power
|
call_server/extensions.py
|
call_server/extensions.py
|
# define flask extensions in separate file, to resolve import dependencies
from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
# workaround to enable pool_pre_ping
# per https://github.com/pallets/flask-sqlalchemy/issues/589#issuecomment-361075700
class SQLAlchemy(_BaseSQLAlchemy):
def apply_pool_defaults(self, app, options):
options["pool_pre_ping"] = True
super(SQLAlchemy, self).apply_pool_defaults(app, options)
db = SQLAlchemy()
from flask_caching import Cache
cache = Cache()
from flask_assets import Environment
assets = Environment()
from flask_babel import Babel
babel = Babel()
from flask_mail import Mail
mail = Mail()
from flask_login import LoginManager
login_manager = LoginManager()
from flask_restless import APIManager
rest = APIManager()
from flask_wtf.csrf import CSRFProtect
csrf = CSRFProtect()
from flask_cors import CORS as cors
from flask_store import Store
store = Store()
from flask_rq2 import RQ
rq = RQ(default_timeout=60*60)
from flask_talisman import Talisman
CALLPOWER_CSP = {
'default-src':'\'self\'',
'script-src':['\'self\'', '\'unsafe-inline\'', '\'unsafe-eval\'', # for local scripts
'cdnjs.cloudflare.com', 'ajax.cloudflare.com', 'media.twiliocdn.com', # required for jquery, twilio
'js-agent.newrelic.com', '*.nr-data.net'], # additional analytics platforms
'style-src': ['\'self\'', '\'unsafe-inline\'', 'fonts.googleapis.com'],
'font-src': ['\'self\'', 'data:', 'fonts.gstatic.com'],
'media-src': ['\'self\'', 'blob:', 'media.twiliocdn.com'],
'connect-src': ['\'self\'', 'https://*.twilio.com', 'wss://*.twilio.com', 'media.twiliocdn.com', 'openstates.org'],
'object-src': ['\'self\'', 'blob:'],
'img-src': ['\'self\'', 'data:']
}
# unsafe-inline needed to render <script> tags without nonce
# unsafe-eval needed to run bootstrap templates
talisman = Talisman()
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
limiter = Limiter(key_func=get_remote_address)
|
# define flask extensions in separate file, to resolve import dependencies
from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
# workaround to enable pool_pre_ping
# per https://github.com/pallets/flask-sqlalchemy/issues/589#issuecomment-361075700
class SQLAlchemy(_BaseSQLAlchemy):
def apply_pool_defaults(self, app, options):
options["pool_pre_ping"] = True
super(SQLAlchemy, self).apply_pool_defaults(app, options)
db = SQLAlchemy()
from flask_caching import Cache
cache = Cache()
from flask_assets import Environment
assets = Environment()
from flask_babel import Babel
babel = Babel()
from flask_mail import Mail
mail = Mail()
from flask_login import LoginManager
login_manager = LoginManager()
from flask_restless import APIManager
rest = APIManager()
from flask_wtf.csrf import CSRFProtect
csrf = CSRFProtect()
from flask_cors import CORS as cors
from flask_store import Store
store = Store()
from flask_rq2 import RQ
rq = RQ()
from flask_talisman import Talisman
CALLPOWER_CSP = {
'default-src':'\'self\'',
'script-src':['\'self\'', '\'unsafe-inline\'', '\'unsafe-eval\'', # for local scripts
'cdnjs.cloudflare.com', 'ajax.cloudflare.com', 'media.twiliocdn.com', # required for jquery, twilio
'js-agent.newrelic.com', '*.nr-data.net'], # additional analytics platforms
'style-src': ['\'self\'', '\'unsafe-inline\'', 'fonts.googleapis.com'],
'font-src': ['\'self\'', 'data:', 'fonts.gstatic.com'],
'media-src': ['\'self\'', 'blob:', 'media.twiliocdn.com'],
'connect-src': ['\'self\'', 'https://*.twilio.com', 'wss://*.twilio.com', 'media.twiliocdn.com', 'openstates.org'],
'object-src': ['\'self\'', 'blob:'],
'img-src': ['\'self\'', 'data:']
}
# unsafe-inline needed to render <script> tags without nonce
# unsafe-eval needed to run bootstrap templates
talisman = Talisman()
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
limiter = Limiter(key_func=get_remote_address)
|
agpl-3.0
|
Python
|
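The pool_pre_ping workaround above subclasses Flask-SQLAlchemy to inject the option; with plain SQLAlchemy (1.2+) the same behavior is a single engine argument. A minimal sketch, with the sqlite URL as a stand-in:

from sqlalchemy import create_engine

# pool_pre_ping emits a lightweight ping on checkout and transparently
# recycles stale connections instead of failing on first use
engine = create_engine('sqlite:///example.db', pool_pre_ping=True)

with engine.connect() as conn:
    pass  # every connection handed out here has just been validated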
e8fdbc786b7a8391014199366a71b290b49efb4c
|
Fix views decorators (ref #141)
|
GeotrekCE/Geotrek-admin,mabhub/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,camillemonchicourt/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,johan--/Geotrek,camillemonchicourt/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,Anaethelion/Geotrek,camillemonchicourt/Geotrek,johan--/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek
|
caminae/trekking/views.py
|
caminae/trekking/views.py
|
from django.utils.decorators import method_decorator
from caminae.authent.decorators import trekking_manager_required
from caminae.mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
MapEntityDetail, MapEntityCreate, MapEntityUpdate, MapEntityDelete)
from .models import Trek, POI
from .filters import TrekFilter, POIFilter
from .forms import TrekForm, POIForm
class TrekLayer(MapEntityLayer):
model = Trek
class TrekList(MapEntityList):
model = Trek
filterform = TrekFilter
columns = ['name', 'departure', 'arrival']
class TrekJsonList(MapEntityJsonList, TrekList):
pass
class TrekDetail(MapEntityDetail):
model = Trek
def can_edit(self):
return self.request.user.profile.is_trekking_manager()
class TrekCreate(MapEntityCreate):
model = Trek
form_class = TrekForm
@method_decorator(trekking_manager_required('trekking:trek_list'))
def dispatch(self, *args, **kwargs):
return super(TrekCreate, self).dispatch(*args, **kwargs)
class TrekUpdate(MapEntityUpdate):
model = Trek
form_class = TrekForm
@method_decorator(trekking_manager_required('trekking:trek_detail'))
def dispatch(self, *args, **kwargs):
return super(TrekUpdate, self).dispatch(*args, **kwargs)
class TrekDelete(MapEntityDelete):
model = Trek
@method_decorator(trekking_manager_required('trekking:trek_detail'))
def dispatch(self, *args, **kwargs):
return super(TrekDelete, self).dispatch(*args, **kwargs)
class POILayer(MapEntityLayer):
model = POI
class POIList(MapEntityList):
model = POI
filterform = POIFilter
columns = ['name', 'type']
class POIJsonList(MapEntityJsonList, POIList):
pass
class POIDetail(MapEntityDetail):
model = POI
def can_edit(self):
return self.request.user.profile.is_trekking_manager()
class POICreate(MapEntityCreate):
model = POI
form_class = POIForm
@method_decorator(trekking_manager_required('trekking:poi_list'))
def dispatch(self, *args, **kwargs):
return super(POICreate, self).dispatch(*args, **kwargs)
class POIUpdate(MapEntityUpdate):
model = POI
form_class = POIForm
@method_decorator(trekking_manager_required('trekking:poi_detail'))
def dispatch(self, *args, **kwargs):
return super(POIUpdate, self).dispatch(*args, **kwargs)
class POIDelete(MapEntityDelete):
model = POI
@method_decorator(trekking_manager_required('trekking:poi_detail'))
def dispatch(self, *args, **kwargs):
return super(POIDelete, self).dispatch(*args, **kwargs)
|
from caminae.authent.decorators import trekking_manager_required
from caminae.mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
MapEntityDetail, MapEntityCreate, MapEntityUpdate, MapEntityDelete)
from .models import Trek, POI
from .filters import TrekFilter, POIFilter
from .forms import TrekForm, POIForm
class TrekLayer(MapEntityLayer):
model = Trek
class TrekList(MapEntityList):
model = Trek
filterform = TrekFilter
columns = ['name', 'departure', 'arrival']
class TrekJsonList(MapEntityJsonList, TrekList):
pass
class TrekDetail(MapEntityDetail):
model = Trek
def can_edit(self):
return self.request.user.profile.is_trekking_manager()
class TrekCreate(MapEntityCreate):
model = Trek
form_class = TrekForm
@trekking_manager_required('trekking:trek_list')
def dispatch(self, *args, **kwargs):
return super(TrekCreate, self).dispatch(*args, **kwargs)
class TrekUpdate(MapEntityUpdate):
model = Trek
form_class = TrekForm
@trekking_manager_required('trekking:trek_detail')
def dispatch(self, *args, **kwargs):
return super(TrekUpdate, self).dispatch(*args, **kwargs)
class TrekDelete(MapEntityDelete):
model = Trek
@trekking_manager_required('trekking:trek_detail')
def dispatch(self, *args, **kwargs):
return super(TrekDelete, self).dispatch(*args, **kwargs)
class POILayer(MapEntityLayer):
model = POI
class POIList(MapEntityList):
model = POI
filterform = POIFilter
columns = ['name', 'type']
class POIJsonList(MapEntityJsonList, POIList):
pass
class POIDetail(MapEntityDetail):
model = POI
def can_edit(self):
return self.request.user.profile.is_trekking_manager()
class POICreate(MapEntityCreate):
model = POI
form_class = POIForm
@trekking_manager_required('trekking:poi_list')
def dispatch(self, *args, **kwargs):
return super(TrekCreate, self).dispatch(*args, **kwargs)
class POIUpdate(MapEntityUpdate):
model = POI
form_class = POIForm
@trekking_manager_required('trekking:poi_detail')
def dispatch(self, *args, **kwargs):
return super(POIUpdate, self).dispatch(*args, **kwargs)
class POIDelete(MapEntityDelete):
model = POI
@trekking_manager_required('trekking:poi_detail')
def dispatch(self, *args, **kwargs):
return super(POIDelete, self).dispatch(*args, **kwargs)
|
bsd-2-clause
|
Python
|
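The fix above wraps each function decorator in Django's method_decorator before applying it to dispatch(), because a decorator written for plain view functions would otherwise collide with the method's implicit self. A minimal sketch of the same adapter on a toy class, assuming Django is importable — the shout decorator and Greeter class are invented for illustration:

from django.utils.decorators import method_decorator

def shout(func):
    # a decorator written for plain functions: it never sees `self`
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return wrapper

class Greeter(object):
    # method_decorator hands `shout` a bound method, so the implicit
    # `self` of greet() stays out of the decorator's way
    @method_decorator(shout)
    def greet(self, name):
        return 'hello, %s' % name

print(Greeter().greet('world'))  # -> HELLO, WORLD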
c0c3d63c6124549008a2dc17c1e691e799129444
|
Fix getting unwatched episodes from Plex
|
verdel/plex2myshows
|
plex2myshows/modules/plex/plex.py
|
plex2myshows/modules/plex/plex.py
|
class Plex(object):
def __init__(self, plex):
self.plex = plex
def get_watched_episodes(self, section_name):
watched_episodes = []
shows = self.plex.library.section(section_name).searchShows()
for show in shows:
watched_episodes.extend(show.watched())
return watched_episodes
|
class Plex(object):
def __init__(self, plex):
self.plex = plex
def get_watched_episodes(self, section_name):
watched_episodes = set(self.plex.library.section(section_name).searchEpisodes(unwatched=False))
return watched_episodes
|
mit
|
Python
|
b34c8b94202294f63ff88d2d8085222bfa50dc46
|
Sort on first name after last name
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
candidates/csv_helpers.py
|
candidates/csv_helpers.py
|
from __future__ import unicode_literals
from compat import BufferDictWriter
from .models import CSV_ROW_FIELDS
def _candidate_sort_by_name_key(row):
return (
row['name'].split()[-1],
row['name'].rsplit(None, 1)[0],
not row['election_current'],
row['election_date'],
row['election'],
row['post_label']
)
def _candidate_sort_by_post_key(row):
return (
not row['election_current'],
row['election_date'],
row['election'],
row['post_label'],
row['name'].split()[-1],
row['name'].rsplit(None, 1)[0],
)
def list_to_csv(candidates_list, group_by_post=False):
from .election_specific import EXTRA_CSV_ROW_FIELDS
csv_fields = CSV_ROW_FIELDS + EXTRA_CSV_ROW_FIELDS
writer = BufferDictWriter(fieldnames=csv_fields)
writer.writeheader()
if group_by_post:
sorted_rows = sorted(candidates_list, key=_candidate_sort_by_post_key)
else:
sorted_rows = sorted(candidates_list, key=_candidate_sort_by_name_key)
for row in sorted_rows:
writer.writerow(row)
return writer.output
|
from __future__ import unicode_literals
from compat import BufferDictWriter
from .models import CSV_ROW_FIELDS
def _candidate_sort_by_name_key(row):
return (
row['name'].split()[-1],
not row['election_current'],
row['election_date'],
row['election'],
row['post_label']
)
def _candidate_sort_by_post_key(row):
return (
not row['election_current'],
row['election_date'],
row['election'],
row['post_label'],
row['name'].split()[-1])
def list_to_csv(candidates_list, group_by_post=False):
from .election_specific import EXTRA_CSV_ROW_FIELDS
csv_fields = CSV_ROW_FIELDS + EXTRA_CSV_ROW_FIELDS
writer = BufferDictWriter(fieldnames=csv_fields)
writer.writeheader()
if group_by_post:
sorted_rows = sorted(candidates_list, key=_candidate_sort_by_post_key)
else:
sorted_rows = sorted(candidates_list, key=_candidate_sort_by_name_key)
for row in sorted_rows:
writer.writerow(row)
return writer.output
|
agpl-3.0
|
Python
|
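The new sort keys above split a candidate's name into surname (name.split()[-1]) and everything before it (name.rsplit(None, 1)[0]) so that candidates sharing a surname fall back to first-name order. A short sketch of that key in isolation, with made-up names:

def name_key(name):
    # (surname, rest-of-name): mirrors the two components used in
    # _candidate_sort_by_name_key above
    return (name.split()[-1], name.rsplit(None, 1)[0])

names = ['Ada Lovelace', 'Grace Hopper', 'Alan Turing', 'Annie Lovelace']
print(sorted(names, key=name_key))
# -> ['Grace Hopper', 'Ada Lovelace', 'Annie Lovelace', 'Alan Turing']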
d8da358042e6b02426804783b20e256398c6c467
|
Remove decimals from y-axis percentage label
|
davidrobles/mlnd-capstone-code
|
capstone/rl/utils/plot.py
|
capstone/rl/utils/plot.py
|
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from .callbacks import Callback
from ...game.players import GreedyQ, RandPlayer
from ...game.utils import play_series
class EpisodicWLDPlotter(Callback):
'''
Plots the episodic win, loss and draws of a learner
against a fixed opponent
'''
def __init__(self, game, opp_player=None, n_matches=1000,
period=1, filename='test.pdf'):
self.game = game
self.opp_player = opp_player
self.n_matches = n_matches
self.period = period
self.filename = filename
self.x = []
self.y_wins = []
self.y_draws = []
self.y_losses = []
def on_episode_end(self, episode, qf):
if episode % self.period != 0:
return
self._plot(episode, qf)
def _plot(self, episode, qf):
results = play_series(
game=self.game.copy(),
players=[GreedyQ(qf), self.opp_player],
n_matches=self.n_matches,
verbose=False
)
self.x.append(episode)
self.y_wins.append(results['W'] / self.n_matches)
self.y_draws.append(results['D'] / self.n_matches)
self.y_losses.append(results['L'] / self.n_matches)
def on_train_end(self, qf):
n_episodes = len(self.x) * self.period
self._plot(n_episodes - 1, qf)
fig = plt.figure()
ax = fig.add_subplot(111)
w_line, = ax.plot(self.x, self.y_wins, label='Win')
l_line, = ax.plot(self.x, self.y_losses, label='Loss')
d_line, = ax.plot(self.x, self.y_draws, label='Draw')
ax.set_xlim([0, n_episodes])
ax.set_ylim([0, 1.0])
plt.xlabel('Episodes')
formatter = FuncFormatter(lambda y, _: '{:d}%'.format(int(y * 100)))
plt.gca().yaxis.set_major_formatter(formatter)
plt.legend(handles=[w_line, l_line, d_line], loc=7)
plt.savefig(self.filename)
|
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from .callbacks import Callback
from ...game.players import GreedyQ, RandPlayer
from ...game.utils import play_series
class EpisodicWLDPlotter(Callback):
'''
Plots the episodic win, loss and draws of a learner
against a fixed opponent
'''
def __init__(self, game, opp_player=None, n_matches=1000,
period=1, filename='test.pdf'):
self.game = game
self.opp_player = opp_player
self.n_matches = n_matches
self.period = period
self.filename = filename
self.x = []
self.y_wins = []
self.y_draws = []
self.y_losses = []
def on_episode_end(self, episode, qf):
if episode % self.period != 0:
return
self._plot(episode, qf)
def _plot(self, episode, qf):
results = play_series(
game=self.game.copy(),
players=[GreedyQ(qf), self.opp_player],
n_matches=self.n_matches,
verbose=False
)
self.x.append(episode)
self.y_wins.append(results['W'] / self.n_matches)
self.y_draws.append(results['D'] / self.n_matches)
self.y_losses.append(results['L'] / self.n_matches)
def on_train_end(self, qf):
n_episodes = len(self.x) * self.period
self._plot(n_episodes - 1, qf)
fig = plt.figure()
ax = fig.add_subplot(111)
w_line, = ax.plot(self.x, self.y_wins, label='Win')
l_line, = ax.plot(self.x, self.y_losses, label='Loss')
d_line, = ax.plot(self.x, self.y_draws, label='Draw')
ax.set_xlim([0, n_episodes])
ax.set_ylim([0, 1.0])
plt.xlabel('Episodes')
formatter = FuncFormatter(lambda y, pos: '{}%'.format(y * 100))
plt.gca().yaxis.set_major_formatter(formatter)
plt.legend(handles=[w_line, l_line, d_line], loc=7)
plt.savefig(self.filename)
|
mit
|
Python
|
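The fix above casts the tick value to int inside the FuncFormatter lambda so the y-axis reads 50% instead of 50.0%. A standalone sketch of the same formatter, assuming matplotlib with the Agg backend as in the original:

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

fig, ax = plt.subplots()
ax.plot([0, 1], [0.0, 1.0])
# the second argument is the tick position, unused here
formatter = FuncFormatter(lambda y, _: '{:d}%'.format(int(y * 100)))
ax.yaxis.set_major_formatter(formatter)
fig.savefig('percent_axis.png')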
b77685c442fcc769727c443a59ac2bb620d90bdc
|
update copyright year
|
lheagy/casingResearch,lheagy/casingSimulations
|
casingSimulations/info.py
|
casingSimulations/info.py
|
__version__ = '0.1.0b0'
__author__ = 'Lindsey Heagy'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018-2019 Lindsey Heagy'
|
__version__ = '0.1.0b0'
__author__ = 'Lindsey Heagy'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Lindsey Heagy'
|
mit
|
Python
|
ff93b501d489f182617e7f8f497747f26e91f39e
|
add older experiments
|
ColumbiaCMB/kid_readout,ColumbiaCMB/kid_readout
|
kid_readout/analysis/resources/starcryo_experiments.py
|
kid_readout/analysis/resources/starcryo_experiments.py
|
import bisect
from kid_readout.utils.time_tools import date_to_unix_time
by_unix_time_table = [
('2014-07-03', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, copper shield, IR LED fiber', 'light'),
('2014-04-28', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, copper shield', 'light'),
('2014-04-16', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, fully taped', 'dark'),
('2014-04-10', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, Al tape over horns, copper shield', 'dark'),
('2014-04-04', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, Al tape over horns', 'dark'),
('2014-03-28', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, Al tape over a few horns', 'light'),
('2014-03-19', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, broken connection', 'light'),
('2014-02-27', 'STAR Cryo 4x5 0813f10 Cu horn package, LPF', 'light'),
('2014-01-28', 'STAR Cryo 4x5 0813f10 Cu horn package, no LPF', 'light'),
]
by_unix_time_table.sort(key = lambda x: date_to_unix_time(x[0]))
_unix_time_index = [date_to_unix_time(x[0]) for x in by_unix_time_table]
|
import bisect
from kid_readout.utils.time_tools import date_to_unix_time
by_unix_time_table = [('2014-04-28', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, copper shield', 'light'),
('2014-04-16', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, fully taped', 'dark'),
('2014-04-10', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, Al tape over horns, copper shield', 'dark'),
('2014-04-04', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, Al tape over horns', 'dark'),
('2014-03-28', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, Al tape over a few horns', 'light'),
('2014-03-19', 'STAR Cryo 4x5 0813f12 Al horn package, AR chip, LPF, broken connection', 'light'),
('2014-02-27', 'STAR Cryo 4x5 0813f10 Cu horn package, LPF', 'light'),
]
by_unix_time_table.sort(key = lambda x: date_to_unix_time(x[0]))
_unix_time_index = [date_to_unix_time(x[0]) for x in by_unix_time_table]
|
bsd-2-clause
|
Python
|
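The table above is kept sorted by date precisely so that bisect can map an arbitrary unix time onto the experiment running at that moment. A minimal sketch of such a lookup over a hypothetical pre-sorted table (the timestamps and labels below are invented):

import bisect

# hypothetical pre-sorted (unix_time, description) pairs
by_unix_time_table = [
    (1390000000, 'Cu horn package, no LPF'),
    (1393000000, 'Cu horn package, LPF'),
    (1398000000, 'Al horn package, copper shield'),
]
_unix_time_index = [t for t, _ in by_unix_time_table]

def experiment_at(unix_time):
    # bisect_right - 1 selects the last entry starting at or before unix_time
    i = bisect.bisect_right(_unix_time_index, unix_time) - 1
    if i < 0:
        raise ValueError('time precedes the first experiment')
    return by_unix_time_table[i][1]

print(experiment_at(1395000000))  # -> 'Cu horn package, LPF'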
c91a593507cea9c6ba2777eff741b9e7ec7fdf0f
|
Fix test to give more data on failure
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
tests/integration/cli/custom_module.py
|
tests/integration/cli/custom_module.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Daniel Mizyrycki ([email protected])`
tests.integration.cli.custom_module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test that salt-ssh sls with a custom module works.
$ cat srv/custom_module.sls
custom-module:
module.run:
- name: test.recho
- text: hello
$ cat srv/_modules/override_test.py
__virtualname__ = 'test'
def __virtual__():
return __virtualname__
def recho(text):
return text[::-1]
$ salt-ssh localhost state.sls custom_module
localhost:
olleh
This test can be run in a small test suite with:
$ python tests/runtests.py -C --ssh
'''
# Import Salt Libs
import integration
class SSHCustomModuleTest(integration.SSHCase):
'''
Test sls with custom module functionality using ssh
'''
def test_ssh_regular_module(self):
'''
Test regular module work using SSHCase environment
'''
expected = 'hello'
cmd = self.run_function('test.echo', arg=['hello'])
self.assertEqual(expected, cmd)
def test_ssh_custom_module(self):
'''
Test custom module work using SSHCase environment
'''
expected = 'hello'[::-1]
cmd = self.run_function('test.recho', arg=['hello'])
self.assertEqual(expected, cmd)
def test_ssh_sls_with_custom_module(self):
'''
Test sls with custom module work using SSHCase environment
'''
expected = {
"module_|-regular-module_|-test.echo_|-run": 'hello',
"module_|-custom-module_|-test.recho_|-run": 'olleh'}
cmd = self.run_function('state.sls', arg=['custom_module'])
for key in cmd:
if not isinstance(cmd, dict) or not isinstance(cmd[key], dict):
raise AssertionError('{0} is not a proper state return'
.format(cmd))
elif not cmd[key]['result']:
raise AssertionError(cmd[key]['comment'])
cmd_ret = cmd[key]['changes'].get('ret', None)
self.assertEqual(cmd_ret, expected[key])
if __name__ == '__main__':
from integration import run_tests
run_tests(SSHCustomModuleTest)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Daniel Mizyrycki ([email protected])`
tests.integration.cli.custom_module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test that salt-ssh sls with a custom module works.
$ cat srv/custom_module.sls
custom-module:
module.run:
- name: test.recho
- text: hello
$ cat srv/_modules/override_test.py
__virtualname__ = 'test'
def __virtual__():
return __virtualname__
def recho(text):
return text[::-1]
$ salt-ssh localhost state.sls custom_module
localhost:
olleh
This test can be run in a small test suite with:
$ python tests/runtests.py -C --ssh
'''
# Import Salt Libs
import integration
class SSHCustomModuleTest(integration.SSHCase):
'''
Test sls with custom module functionality using ssh
'''
def test_ssh_regular_module(self):
'''
Test regular module work using SSHCase environment
'''
expected = 'hello'
cmd = self.run_function('test.echo', arg=['hello'])
self.assertEqual(expected, cmd)
def test_ssh_custom_module(self):
'''
Test custom module work using SSHCase environment
'''
expected = 'hello'[::-1]
cmd = self.run_function('test.recho', arg=['hello'])
self.assertEqual(expected, cmd)
def test_ssh_sls_with_custom_module(self):
'''
Test sls with custom module work using SSHCase environment
'''
expected = {
"module_|-regular-module_|-test.echo_|-run": 'hello',
"module_|-custom-module_|-test.recho_|-run": 'olleh'}
cmd = self.run_function('state.sls', arg=['custom_module'])
for key in cmd:
if not cmd[key]['result']:
raise AssertionError(cmd[key]['comment'])
cmd_ret = cmd[key]['changes'].get('ret', None)
self.assertEqual(cmd_ret, expected[key])
if __name__ == '__main__':
from integration import run_tests
run_tests(SSHCustomModuleTest)
|
apache-2.0
|
Python
|
ccc8c0d885c9eaafeba2e91e248bf3febc484ab5
|
change default port to IANA and allow to change peer port
|
sileht/pifpaf,jd/pifpaf,jd/pifpaf,sileht/pifpaf,sileht/pifpaf
|
pifpaf/drivers/etcd.py
|
pifpaf/drivers/etcd.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pifpaf import drivers
class EtcdDriver(drivers.Driver):
DEFAULT_PORT = 2379
DEFAULT_PEER_PORT = 2380
def __init__(self, port=DEFAULT_PORT,
peer_port=DEFAULT_PEER_PORT,
**kwargs):
super(EtcdDriver, self).__init__(**kwargs)
self.port = port
self.peer_port = peer_port
@classmethod
def get_parser(cls, parser):
parser.add_argument("--port",
type=int,
default=cls.DEFAULT_PORT,
help="port to use for etcd")
parser.add_argument("--peer-port",
type=int,
default=cls.DEFAULT_PEER_PORT,
help="port to use for etcd peers")
return parser
def _setUp(self):
super(EtcdDriver, self)._setUp()
client_url = "http://localhost:%d" % self.port
peer_url = "http://localhost:%d" % self.peer_port
c, _ = self._exec(["etcd",
"--data-dir", self.tempdir,
"--listen-peer-urls", peer_url,
"--listen-client-urls", client_url,
"--advertise-client-urls", client_url],
wait_for_line="listening for client requests on")
self.addCleanup(self._kill, c.pid)
self.putenv("ETCD_PORT", str(self.port))
self.putenv("ETCD_PEER_PORT", str(self.peer_port))
self.putenv("HTTP_URL", "etcd://localhost:%d" % self.port)
self.putenv("URL", "etcd://localhost:%d" % self.port)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pifpaf import drivers
class EtcdDriver(drivers.Driver):
DEFAULT_PORT = 4001
def __init__(self, port=DEFAULT_PORT,
**kwargs):
super(EtcdDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_parser(cls, parser):
parser.add_argument("--port",
type=int,
default=cls.DEFAULT_PORT,
help="port to use for etcd")
return parser
def _setUp(self):
super(EtcdDriver, self)._setUp()
http_url = "http://localhost:%d" % self.port
c, _ = self._exec(["etcd",
"--data-dir=" + self.tempdir,
"--listen-client-urls=" + http_url,
"--advertise-client-urls=" + http_url],
wait_for_line="listening for client requests on")
self.addCleanup(self._kill, c.pid)
self.putenv("ETCD_PORT", str(self.port))
self.putenv("URL", "etcd://localhost:%d" % self.port)
|
apache-2.0
|
Python
|
9044657473e949138343132bbfb4c96b137c6abb
|
Use NotImplementedError
|
toslunar/chainerrl,toslunar/chainerrl
|
chainerrl/agents/sarsa.py
|
chainerrl/agents/sarsa.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from chainerrl.agents import dqn
class SARSA(dqn.DQN):
"""SARSA.
Unlike DQN, this agent uses actions that have been actually taken to
compute target Q values, thus is an on-policy algorithm.
"""
def _compute_target_values(self, exp_batch, gamma):
batch_next_state = exp_batch['next_state']
batch_next_action = exp_batch['next_action']
next_target_action_value = self.target_q_function(
batch_next_state)
next_q = next_target_action_value.evaluate_actions(
batch_next_action)
batch_rewards = exp_batch['reward']
batch_terminal = exp_batch['is_state_terminal']
return batch_rewards + self.gamma * (1.0 - batch_terminal) * next_q
def batch_act_and_train(self, batch_obs):
raise NotImplementedError('SARSA does not support batch training')
def batch_observe_and_train(self, batch_obs, batch_reward,
batch_done, batch_reset):
raise NotImplementedError('SARSA does not support batch training')
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
from chainerrl.agents import dqn
class SARSA(dqn.DQN):
"""SARSA.
Unlike DQN, this agent uses actions that have been actually taken to
compute target Q values, thus is an on-policy algorithm.
"""
def _compute_target_values(self, exp_batch, gamma):
batch_next_state = exp_batch['next_state']
batch_next_action = exp_batch['next_action']
next_target_action_value = self.target_q_function(
batch_next_state)
next_q = next_target_action_value.evaluate_actions(
batch_next_action)
batch_rewards = exp_batch['reward']
batch_terminal = exp_batch['is_state_terminal']
return batch_rewards + self.gamma * (1.0 - batch_terminal) * next_q
def batch_act_and_train(self, batch_obs):
raise RuntimeError('SARSA does not support batch training')
def batch_observe_and_train(self, batch_obs, batch_reward,
batch_done, batch_reset):
raise RuntimeError('SARSA does not support batch training')
|
mit
|
Python
|
6ab7c268d21ada1c30414551bdbb03190560ae55
|
Fix typo in runserver.py breaking debug mode.
|
chromakode/wake
|
runserver.py
|
runserver.py
|
#!/usr/bin/env python
import argparse
from wake import app
parser = argparse.ArgumentParser()
parser.add_argument(
'--host',
default='127.0.0.1',
help='hostname to listen on',
)
parser.add_argument(
'--port',
type=int,
default=5000,
help='port to listen on',
)
parser.add_argument(
'--debug',
type=bool,
default=True,
help='toggle tracebacks and debugger',
)
args = parser.parse_args()
app.run(host=args.host, port=args.port, debug=args.debug)
|
#!/usr/bin/env python
import argparse
from wake import app
parser = argparse.ArgumentParser()
parser.add_argument(
'--host',
default='127.0.0.1',
help='hostname to listen on',
)
parser.add_argument(
'--port',
type=int,
default=5000,
help='port to listen on',
)
parser.add_argument(
'--debug',
type=bool,
default=True,
help='toggle tracebacks and debugger',
)
args = parser.parse_args()
app.run(host=args.host, port=args.port, debug=app.debug)
|
bsd-3-clause
|
Python
|
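Separately from the fixed typo, type=bool in the script above is a known argparse pitfall: bool('False') evaluates to True, so the flag can never be switched off from the command line. A sketch of the conventional store_true/store_false pattern, offered only as a possible follow-up, not part of the commit:

import argparse

parser = argparse.ArgumentParser()
# a real boolean switch: absent -> default, present -> True/False
parser.add_argument('--debug', dest='debug', action='store_true',
                    help='enable tracebacks and debugger')
parser.add_argument('--no-debug', dest='debug', action='store_false')
parser.set_defaults(debug=True)

args = parser.parse_args(['--no-debug'])
print(args.debug)  # False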
64605573382f1c9fb2170d0cdbcd007f5ddae8d6
|
Fix to pass all test
|
ishikota/PyPokerEngine
|
tests/players/sample/console_player_test.py
|
tests/players/sample/console_player_test.py
|
from tests.base_unittest import BaseUnitTest
from pypokerengine.players.sample.console_player import PokerPlayer as ConsolePlayer
class ConsolePlayerTest(BaseUnitTest):
def setUp(self):
self.valid_actions = [\
{'action': 'fold', 'amount': 0},\
{'action': 'call', 'amount': 10},\
{'action': 'raise', 'amount': {'max': 105, 'min': 15}}\
]
self.round_state = {
'dealer_btn': 1,
'street': 'preflop',
'seats': [
{'stack': 85, 'state': 'participating', 'name': u'player1', 'uuid': 'ciglbcevkvoqzguqvnyhcb'},
{'stack': 100, 'state': 'participating', 'name': u'player2', 'uuid': 'zjttlanhlvpqzebrwmieho'}
],
'next_player': 1,
'community_card': [],
'pot': {
'main': {'amount': 15},
'side': []
}
}
self.action_histories = {
'action_histories': [
{'action': 'SMALLBLIND', 'amount': 5, 'add_amount': 5},
{'action': 'BIGBLIND', 'amount': 10, 'add_amount': 5}
]
}
def test_declare_fold(self):
mock_input = self.__gen_raw_input_mock(['f'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(None, self.valid_actions, self.round_state, self.action_histories)
self.eq('fold', action)
self.eq(0, amount)
def test_declare_call(self):
mock_input = self.__gen_raw_input_mock(['c'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(None, self.valid_actions, self.round_state, self.action_histories)
self.eq('call', action)
self.eq(10, amount)
def test_declare_valid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '15'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(None, self.valid_actions, self.round_state, self.action_histories)
self.eq('raise', action)
self.eq(15, amount)
def test_correct_invalid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '14', '105'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(None, self.valid_actions, self.round_state, self.action_histories)
self.eq('raise', action)
self.eq(105, amount)
def __gen_raw_input_mock(self, mock_returns):
counter = []
def raw_input_wrapper(self):
mock_return = mock_returns[len(counter)]
counter.append(0)
return mock_return
return raw_input_wrapper
|
from tests.base_unittest import BaseUnitTest
from pypokerengine.players.sample.console_player import PokerPlayer as ConsolePlayer
class ConsolePlayerTest(BaseUnitTest):
def setUp(self):
self.valid_actions = [\
{'action': 'fold', 'amount': 0},\
{'action': 'call', 'amount': 10},\
{'action': 'raise', 'amount': {'max': 105, 'min': 15}}\
]
def test_declare_fold(self):
mock_input = self.__gen_raw_input_mock(['f'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('fold', action)
self.eq(0, amount)
def test_declare_call(self):
mock_input = self.__gen_raw_input_mock(['c'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('call', action)
self.eq(10, amount)
def test_declare_valid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '15'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('raise', action)
self.eq(15, amount)
def test_correct_invalid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '14', '105'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('raise', action)
self.eq(105, amount)
def __gen_raw_input_mock(self, mock_returns):
counter = []
def raw_input_wrapper(self):
mock_return = mock_returns[len(counter)]
counter.append(0)
return mock_return
return raw_input_wrapper
|
mit
|
Python
|
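The __gen_raw_input_mock helper above replays canned console inputs by appending to a counter list on every call. An equivalent sketch using an iterator, shown purely as an alternative formulation rather than what the project ships:

def gen_input_mock(mock_returns):
    replies = iter(mock_returns)
    def raw_input_wrapper(self):
        # each call consumes the next canned reply in order
        return next(replies)
    return raw_input_wrapper

mock = gen_input_mock(['r', '15'])
print(mock(None))  # -> 'r'
print(mock(None))  # -> '15'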
c9cd0ed7b8d9d43c4143074489fcd5e14137b45a
|
implement list method main loop and quit action
|
CaptainDesAstres/Blender-Render-Manager,CaptainDesAstres/Simple-Blender-Render-Manager
|
queue.py
|
queue.py
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module that contain queue class'''
from renderingTask import renderingTask
import os
class queue:
'''class that contains the list of all the rendering tasks to manage'''
def __init__(self,xml=False):
    '''initialize the queue object with an empty task list, filled with values extracted from an xml object if one is passed to the function'''
self.tasks = []
if xml != False:
self.fromXml(xml)
def fromXml(self,xml):
'''extract rendering task parameters from an xml object and add them to the queue'''
if xml.tag == 'queue':
for t in xml.findall('task'):
self.add(renderingTask(xml = t))
def toXmlStr(self,head=False):
'''export rendering task queue to an xml syntax string '''
txt =''
if head:
txt+= '<?xml version="1.0" encoding="UTF-8"?>\n'
txt += '<queue>\n'
for r in self.tasks:
txt += r.toXmlStr()
txt += '</queue>\n'
return txt
def add(self,added):
'''add rendering task to the queue'''
if type(added) == renderingTask:
self.tasks.append(added)
def list(self, log, scriptSetting):
'''list task and access editing functions'''
os.system('clear')
log.menuIn('Rendering Queue')
while True:
choice = input("action?('q' to quit)").strip().lower()
try:
if choice in ['q', 'quit', 'cancel']:
choice = -1
else:
choice = int(choice)
except ValueError:
choice = -9999
if choice == -1:
log.menuOut()
return
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module that contain queue class'''
from renderingTask import renderingTask
class queue:
'''class that contains the list of all the rendering tasks to manage'''
def __init__(self,xml=False):
    '''initialize the queue object with an empty task list, filled with values extracted from an xml object if one is passed to the function'''
self.tasks = []
if xml != False:
self.fromXml(xml)
def fromXml(self,xml):
'''extract rendering task parameters from an xml object and add them to the queue'''
if xml.tag == 'queue':
for t in xml.findall('task'):
self.add(renderingTask(xml = t))
def toXmlStr(self,head=False):
'''export rendering task queue to an xml syntax string '''
txt =''
if head:
txt+= '<?xml version="1.0" encoding="UTF-8"?>\n'
txt += '<queue>\n'
for r in self.tasks:
txt += r.toXmlStr()
txt += '</queue>\n'
return txt
def add(self,added):
'''add rendering task to the queue'''
if type(added) == renderingTask:
self.tasks.append(added)
def list(self, log, scriptSetting):
'''list task and access editing functions'''
|
mit
|
Python
|
1bafd352110d186d1371d14714abd8de7e6e590f
|
Update prompt
|
0xddaa/pwndbg,anthraxx/pwndbg,0xddaa/pwndbg,pwndbg/pwndbg,pwndbg/pwndbg,chubbymaggie/pwndbg,cebrusfs/217gdb,cebrusfs/217gdb,anthraxx/pwndbg,pwndbg/pwndbg,cebrusfs/217gdb,pwndbg/pwndbg,disconnect3d/pwndbg,0xddaa/pwndbg,disconnect3d/pwndbg,cebrusfs/217gdb,anthraxx/pwndbg,zachriggle/pwndbg,anthraxx/pwndbg,chubbymaggie/pwndbg,zachriggle/pwndbg,disconnect3d/pwndbg
|
pwndbg/__init__.py
|
pwndbg/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gdb
import pwndbg.arch
import pwndbg.arguments
import pwndbg.disasm
import pwndbg.disasm.arm
import pwndbg.disasm.jump
import pwndbg.disasm.mips
import pwndbg.disasm.ppc
import pwndbg.disasm.sparc
import pwndbg.disasm.x86
import pwndbg.vmmap
import pwndbg.dt
import pwndbg.memory
import pwndbg.inthook
import pwndbg.elf
import pwndbg.proc
import pwndbg.regs
import pwndbg.stack
import pwndbg.stdio
import pwndbg.color
import pwndbg.typeinfo
import pwndbg.constants
import pwndbg.argv
import pwndbg.commands
import pwndbg.commands.hexdump
import pwndbg.commands.context
import pwndbg.commands.telescope
import pwndbg.commands.vmmap
import pwndbg.commands.dt
import pwndbg.commands.search
import pwndbg.commands.start
import pwndbg.commands.procinfo
import pwndbg.commands.auxv
import pwndbg.commands.windbg
import pwndbg.commands.ida
import pwndbg.commands.reload
import pwndbg.commands.rop
import pwndbg.commands.shell
import pwndbg.commands.aslr
import pwndbg.commands.misc
import pwndbg.commands.next
import pwndbg.commands.dumpargs
import pwndbg.commands.cpsr
import pwndbg.commands.argv
import pwndbg.commands.heap
__all__ = [
'arch',
'auxv',
'chain',
'color',
'compat',
'disasm',
'dt',
'elf',
'enhance',
'events',
'file',
'function',
'hexdump',
'ida',
'info',
'linkmap',
'malloc',
'memoize',
'memory',
'proc',
'regs',
'remote',
'search',
'stack',
'strings',
'symbol',
'typeinfo',
'ui',
'vmmap'
]
prompt = "pwndbg> "
prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
prompt = pwndbg.color.red(prompt)
prompt = pwndbg.color.bold(prompt)
pre_commands = """
set confirm off
set verbose off
set output-radix 0x10
set prompt %s
set height 0
set history expansion on
set history save on
set disassembly-flavor intel
set follow-fork-mode child
set backtrace past-main on
set step-mode on
set print pretty on
set width 0
set print elements 15
set input-radix 16
handle SIGALRM nostop print nopass
handle SIGSEGV stop print nopass
""".strip() % prompt
for line in pre_commands.strip().splitlines():
gdb.execute(line)
msg = "Loaded %i commands. Type pwndbg for a list." % len(pwndbg.commands._Command.commands)
print(pwndbg.color.red(msg))
@pwndbg.memoize.reset_on_stop
def prompt_hook(*a):
with pwndbg.stdio.stdio:
pwndbg.commands.context.context()
gdb.prompt_hook = prompt_hook
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gdb
import pwndbg.arch
import pwndbg.arguments
import pwndbg.disasm
import pwndbg.disasm.arm
import pwndbg.disasm.jump
import pwndbg.disasm.mips
import pwndbg.disasm.ppc
import pwndbg.disasm.sparc
import pwndbg.disasm.x86
import pwndbg.vmmap
import pwndbg.dt
import pwndbg.memory
import pwndbg.inthook
import pwndbg.elf
import pwndbg.proc
import pwndbg.regs
import pwndbg.stack
import pwndbg.stdio
import pwndbg.color
import pwndbg.typeinfo
import pwndbg.constants
import pwndbg.argv
import pwndbg.commands
import pwndbg.commands.hexdump
import pwndbg.commands.context
import pwndbg.commands.telescope
import pwndbg.commands.vmmap
import pwndbg.commands.dt
import pwndbg.commands.search
import pwndbg.commands.start
import pwndbg.commands.procinfo
import pwndbg.commands.auxv
import pwndbg.commands.windbg
import pwndbg.commands.ida
import pwndbg.commands.reload
import pwndbg.commands.rop
import pwndbg.commands.shell
import pwndbg.commands.aslr
import pwndbg.commands.misc
import pwndbg.commands.next
import pwndbg.commands.dumpargs
import pwndbg.commands.cpsr
import pwndbg.commands.argv
import pwndbg.commands.heap
__all__ = [
'arch',
'auxv',
'chain',
'color',
'compat',
'disasm',
'dt',
'elf',
'enhance',
'events',
'file',
'function',
'hexdump',
'ida',
'info',
'linkmap',
'malloc',
'memoize',
'memory',
'proc',
'regs',
'remote',
'search',
'stack',
'strings',
'symbol',
'typeinfo',
'ui',
'vmmap'
]
prompt = "pwn> "
prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
prompt = pwndbg.color.red(prompt)
prompt = pwndbg.color.bold(prompt)
pre_commands = """
set confirm off
set verbose off
set output-radix 0x10
set prompt %s
set height 0
set history expansion on
set history save on
set disassembly-flavor intel
set follow-fork-mode child
set backtrace past-main on
set step-mode on
set print pretty on
set width 0
set print elements 15
set input-radix 16
handle SIGALRM nostop print nopass
handle SIGSEGV stop print nopass
""".strip() % prompt
for line in pre_commands.strip().splitlines():
gdb.execute(line)
msg = "Loaded %i commands. Type pwndbg for a list." % len(pwndbg.commands._Command.commands)
print(pwndbg.color.red(msg))
@pwndbg.memoize.reset_on_stop
def prompt_hook(*a):
with pwndbg.stdio.stdio:
pwndbg.commands.context.context()
gdb.prompt_hook = prompt_hook
|
mit
|
Python
|
ea2faeb88d2b6ddc98a9a10c760574dca993673c
|
change func1 to accept args
|
armundle/scratch,armundle/scratch
|
py/decorator_ex.py
|
py/decorator_ex.py
|
def entryExitFunc(f):
def newFunc():
print "inside decorator function"
print "entering", f.__name__
f()
print "exited", f.__name__
return newFunc
class entryExit(object):
'''
If there are no decorator arguments, the function to be decorated is passed
to the constructor.
'''
def __init__(self, f):
self.f = f
print "entryExit.__init__"
'''
Note: The major constraint on the result of a decorator is that it be callable.
The __call__ method here achieves that.
'''
'''
The __call__ method is not called until the decorated function is called.
'''
def __call__(self, *args):
print "entryExit.__call__"
print "entering", self.f.__name__
self.f(*args)
print "exited", self.f.__name__
@entryExit
def func1(a1, a2, a3):
print "inside function 1"
print "spell args: ", a1, a2, a3
@entryExit
def func2():
print "inside function 2"
print "no args"
@entryExitFunc
def func3():
print "inside function 3"
if __name__ == "__main__":
func1("test", "multiple", "args")
print '\n'
func1("another", "round", "of args")
print '\n'
func2()
print '\n'
func3()
print '\n'
print "end of example"
|
def entryExitFunc(f):
def newFunc():
print "inside decorator function"
print "entering", f.__name__
f()
print "exited", f.__name__
return newFunc
class entryExit(object):
def __init__(self, f):
'''
If there are no decorator arguments, the function to be decorated is passed
to the constructor.
'''
self.f = f
'''
The major constraint on the result of a decorator is that it be callable.
The __call__ method here achieves that.
'''
'''
The __call__ method is not called until the decorated function is called.
'''
def __call__(self):
print "entering", self.f.__name__
self.f()
print "exited", self.f.__name__
@entryExit
def func1():
print "inside function 1"
@entryExit
def func2():
print "inside function 2"
@entryExitFunc
def func3():
print "inside function 3"
if __name__ == "__main__":
func1()
print '\n'
func2()
print '\n'
func3()
|
mit
|
Python
|
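Both decorator variants above replace the wrapped function, so func1.__name__ would report the wrapper's name. A minimal Python 3 sketch of the standard remedy, functools.wraps, applied to the function-based style (the entry_exit name is ours):

import functools

def entry_exit(f):
    @functools.wraps(f)  # preserves f.__name__ and f.__doc__ on the wrapper
    def new_func(*args, **kwargs):
        print('entering', f.__name__)
        result = f(*args, **kwargs)
        print('exited', f.__name__)
        return result
    return new_func

@entry_exit
def func1(a1, a2, a3):
    print('spell args:', a1, a2, a3)

func1('test', 'multiple', 'args')
print(func1.__name__)  # -> 'func1', not 'new_func'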
b553029859b55db2963b15694f5f17714ac8c079
|
Update Futures_demo.py
|
Chandlercjy/OnePy
|
examples/Futures_demo.py
|
examples/Futures_demo.py
|
import matplotlib.pyplot as plt
import OnePy as op
####### Strategy Demo
class MyStrategy(op.StrategyBase):
# Available attributes:
# in list format: self.cash, self.position, self.margin,
# self.total, self.unre_profit
def __init__(self,marketevent):
super(MyStrategy,self).__init__(marketevent)
def prenext(self):
# print sum(self.re_profit)
# print self.unre_profit[-1]
pass
def next(self):
"""这里写主要的策略思路"""
if self.i.SMA(period=5, index=-1) > self.i.SMA(period=10,index=-1):
self.Buy(2)
else:
self.Sell(1)
go = op.OnePiece()
data = op.Futures_CSVFeed(datapath='../data/IF0000_1min.csv',instrument='IF0000',
fromdate='2010-04-19',todate='2010-04-20')
data_list = [data]
portfolio = op.PortfolioBase
strategy = MyStrategy
broker = op.SimulatedBroker
go.set_backtest(data_list,[strategy],portfolio,broker,'Futures') # futures mode
go.set_commission(commission=15,margin=0.13,mult=10,commtype='FIX') # fixed commission
# go.set_commission(commission=0.00025,margin=0.15,mult=10,commtype='PCT') # percentage commission
go.set_cash(10000000) # set the initial capital
# go.set_pricetype('close') # set the fill price to close; if not set, defaults to open
# go.set_notify() # print the trade log
go.sunny() # start running the strategy
# print go.get_tlog() # print the trade records
go.plot(instrument='IF0000')
# simple plotting: delete the trailing 1 from each option below that you want plotted
# go.oldplot(['un_profit','re_profit','position1','cash1','total','margin1','avg_price1'])
|
import matplotlib.pyplot as plt
import OnePy as op
####### Strategy Demo
class MyStrategy(op.StrategyBase):
# Available attributes:
# in list format: self.cash, self.position, self.margin,
# self.total, self.unre_profit
def __init__(self,marketevent):
super(MyStrategy,self).__init__(marketevent)
def prenext(self):
# print sum(self.re_profit)
# print self.unre_profit[-1]
pass
def next(self):
"""这里写主要的策略思路"""
if self.i.SMA(period=5, index=-1) > self.i.SMA(period=10,index=-1):
self.Buy(2)
else:
self.Sell(1)
go = op.OnePiece()
data = op.Futures_CSVFeed(datapath='../data/IF0000_1min.csv',instrument='IF0000',
fromdate='2010-04-19',todate='2010-04-20',
timeframe=1)
data_list = [data]
portfolio = op.PortfolioBase
strategy = MyStrategy
broker = op.SimulatedBroker
go.set_backtest(data_list,[strategy],portfolio,broker,'Futures') # futures mode
go.set_commission(commission=15,margin=0.13,mult=10,commtype='FIX') # fixed commission
# go.set_commission(commission=0.00025,margin=0.15,mult=10,commtype='PCT') # percentage commission
go.set_cash(100000) # set the initial capital
# go.set_pricetype('close') # set the fill price to close; if not set, defaults to open
# go.set_notify() # print the trade log
go.sunny() # start running the strategy
# print go.get_tlog() # print the trade records
go.plot(instrument='IF0000')
# simple plotting: delete the trailing 1 from each option below that you want plotted
# go.oldplot(['un_profit','re_profit','position1','cash1','total','margin1','avg_price1'])
|
mit
|
Python
|
0ed07211d62044a42e1b0ff024f8feb20435270d
|
Use strings for IDs in Committee Popolo
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
pombola/south_africa/views/api.py
|
pombola/south_africa/views/api.py
|
from django.http import JsonResponse
from django.views.generic import ListView
from pombola.core.models import Organisation
# Output Popolo JSON suitable for WriteInPublic for any committees that have an
# email address.
class CommitteesPopoloJson(ListView):
queryset = Organisation.objects.filter(
kind__name='National Assembly Committees',
contacts__kind__slug='email'
)
def render_to_response(self, context, **response_kwargs):
return JsonResponse(
{
'persons': [
{
'id': str(committee.id),
'name': committee.name,
'email': committee.contacts.filter(kind__slug='email')[0].value,
'contact_details': []
}
for committee in context['object_list']
]
}
)
|
from django.http import JsonResponse
from django.views.generic import ListView
from pombola.core.models import Organisation
# Output Popolo JSON suitable for WriteInPublic for any committees that have an
# email address.
class CommitteesPopoloJson(ListView):
queryset = Organisation.objects.filter(
kind__name='National Assembly Committees',
contacts__kind__slug='email'
)
def render_to_response(self, context, **response_kwargs):
return JsonResponse(
{
'persons': [
{
'id': committee.id,
'name': committee.name,
'email': committee.contacts.filter(kind__slug='email')[0].value,
'contact_details': []
}
for committee in context['object_list']
]
}
)
|
agpl-3.0
|
Python
|
c606e5d1481ae82072cccd4b9edb6b7d73933277
|
update version in preparation for release
|
geggo/pyface,brett-patterson/pyface,pankajp/pyface,geggo/pyface
|
pyface/__init__.py
|
pyface/__init__.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" Reusable MVC-based components for Traits-based applications.
Part of the TraitsGUI project of the Enthought Tool Suite.
"""
__version__ = '4.1.0'
__requires__ = [
'traits',
]
|
#------------------------------------------------------------------------------
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" Reusable MVC-based components for Traits-based applications.
Part of the TraitsGUI project of the Enthought Tool Suite.
"""
__version__ = '4.0.1'
__requires__ = [
'traits',
]
|
bsd-3-clause
|
Python
|
3429e18dd112f4c5058d0e27662379c2860baded
|
Fix indentation on string_methods.py
|
exercism/python,jmluy/xpython,exercism/python,jmluy/xpython
|
exercises/concept/little-sisters-essay/string_methods.py
|
exercises/concept/little-sisters-essay/string_methods.py
|
def capitalize_title(title):
"""
:param title: str title string that needs title casing
:return: str title string in title case (first letters capitalized)
"""
pass
def check_sentence_ending(sentence):
"""
:param sentence: str a sentence to check.
:return: bool True if punctuated correctly with period, False otherwise.
"""
pass
def clean_up_spacing(sentence):
"""
:param sentence: str a sentence to clean of leading and trailing space characters.
:return: str a sentence that has been cleaned of leading and trailing space characters.
"""
pass
def replace_word_choice(sentence, old_word, new_word):
"""
:param sentence: str a sentence to replace words in.
:param new_word: str replacement word
:param old_word: str word to replace
:return: str input sentence with new words in place of old words
"""
pass
|
def capitalize_title(title):
"""
:param title: str title string that needs title casing
:return: str title string in title case (first letters capitalized)
"""
pass
def check_sentence_ending(sentence):
"""
:param sentence: str a sentence to check.
:return: bool True if punctuated correctly with period, False otherwise.
"""
pass
def clean_up_spacing(sentence):
"""
:param sentence: str a sentence to clean of leading and trailing space characters.
:return: str a sentence that has been cleaned of leading and trailing space characters.
"""
pass
def replace_word_choice(sentence, old_word, new_word):
"""
:param sentence: str a sentence to replace words in.
:param new_word: str replacement word
:param old_word: str word to replace
:return: str input sentence with new words in place of old words
"""
pass
|
mit
|
Python
|
d779c126e922b6b9907100ac4fc75de9d085b98a
|
Revert "Update runc4.py"
|
neurodata/ndgrutedb,openconnectome/m2g,neurodata/ndgrutedb,neurodata/ndgrutedb,openconnectome/m2g,neurodata/ndmg,neurodata/ndgrutedb,neurodata/ndgrutedb,neurodata/ndgrutedb,openconnectome/m2g,openconnectome/m2g,openconnectome/m2g,neurodata/ndgrutedb,openconnectome/m2g,openconnectome/m2g,openconnectome/m2g,neurodata/ndgrutedb
|
MR-OCP/MROCPdjango/ocpipeline/procs/runc4.py
|
MR-OCP/MROCPdjango/ocpipeline/procs/runc4.py
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# runc4.py
# Created by Greg Kiar on 2015-05-28.
# Email: TODO GK
import argparse
def runc4(nifti_paths, b_paths, opts, email):
print "I'm running!"
pass # TODO GK
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# runc4.py
# Created by Greg Kiar on 2015-05-28.
# Email: TODO GK
import argparse
def runc4(nifti_paths, b_paths, opts, email):
print "I'm running!"
pass # TODO GK
# parse inputs
# forge list files
# call m2g using qsub and list files as commandline args
def main():
    # runc4(niftis, bs, opts, email)
    pass  # body still stubbed; `pass` keeps the empty function syntactically valid
if __name__ == '__main__':
main()
|
apache-2.0
|
Python
|
217d1f94f03b5cda709798dda98380362b937bd3
|
update comment
|
ddebrunner/streamsx.topology,ddebrunner/streamsx.topology,wmarshall484/streamsx.topology,ddebrunner/streamsx.topology,ibmkendrick/streamsx.topology,wmarshall484/streamsx.topology,IBMStreams/streamsx.topology,wmarshall484/streamsx.topology,IBMStreams/streamsx.topology,ddebrunner/streamsx.topology,ibmkendrick/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,wmarshall484/streamsx.topology,IBMStreams/streamsx.topology,wmarshall484/streamsx.topology,ddebrunner/streamsx.topology,ibmkendrick/streamsx.topology,ddebrunner/streamsx.topology,ibmkendrick/streamsx.topology,IBMStreams/streamsx.topology,ibmkendrick/streamsx.topology,IBMStreams/streamsx.topology,ibmkendrick/streamsx.topology,ddebrunner/streamsx.topology,ibmkendrick/streamsx.topology,wmarshall484/streamsx.topology,wmarshall484/streamsx.topology,wmarshall484/streamsx.topology
|
samples/python/topology/games/fizz_buzz.py
|
samples/python/topology/games/fizz_buzz.py
|
from streamsx.topology.topology import Topology
import streamsx.topology.context
import fizz_buzz_functions
def main():
"""
Plays Fizz Buzz (https://en.wikipedia.org/wiki/Fizz_buzz)
Example:
python3 fizz_buzz.py
Output:
1
2
Fizz!
4
Buzz!
Fizz!
7
8
Fizz!
Buzz!
11
Fizz!
13
14
FizzBuzz!
...
"""
topo = Topology("fizz_buzz")
# Declare a stream of int values
counting = topo.source(fizz_buzz_functions.int_tuples)
# Print the tuples to standard output
play_fizz_buzz(counting).print()
# At this point the streaming topology (streaming) is
# declared, but no data is flowing. The topology
# must be submitted to a context to be executed.
# execute the topology by submitting to a standalone context
streamsx.topology.context.submit("STANDALONE", topo.graph)
def play_fizz_buzz(counting):
"""
Return a stream that plays Fizz Buzz based
upon the values in the input stream.
Transform an input stream of integers to a
stream of strings that follows
the Fizz Buzz rules based upon each value in the
input stream.
Args:
counting: input stream
Returns:
transformed output stream
"""
shouts = counting.transform(fizz_buzz_functions.fizz_buzz)
return shouts
if __name__ == '__main__':
main()
|
from streamsx.topology.topology import Topology
import streamsx.topology.context
import fizz_buzz_functions
def main():
"""
Plays Fizz Buzz (https://en.wikipedia.org/wiki/Fizz_buzz)
Example:
python3 fizz_buzz.py
Output:
1
2
Fizz!
4
Buzz!
Fizz!
7
8
Fizz!
Buzz!
11
Fizz!
13
14
FizzBuzz!
...
"""
topo = Topology("fizz_buzz")
# Declare an stream of int values
counting = topo.source(fizz_buzz_functions.int_tuples)
# Print the tuples to standard output
play_fizz_buzz(counting).print()
# At this point the streaming topology (streaming) is
# declared, but no data is flowing. The topology
# must be submitted to a context to be executed.
# execute the topology by submitting to a standalone context
streamsx.topology.context.submit("STANDALONE", topo.graph)
def play_fizz_buzz(counting):
"""
Return a stream that plays Fizz Buzz based
upon the values in the input stream.
Transform an input stream of integers to a
stream of strings that follows
the Fizz Buzz rules based upon each value in the
input stream.
Args:
counting: input stream
Returns:
transformed output stream
"""
shouts = counting.transform(fizz_buzz_functions.fizz_buzz)
return shouts
if __name__ == '__main__':
main()
|
apache-2.0
|
Python
|
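The sample above imports int_tuples and fizz_buzz from a fizz_buzz_functions module that is not shown. A plausible, purely hypothetical sketch of those two callables, reconstructed from the expected output in the docstring:

import itertools

def int_tuples():
    # an endless stream of integers starting at 1
    return itertools.count(1)

def fizz_buzz(n):
    if n % 15 == 0:
        return 'FizzBuzz!'
    if n % 3 == 0:
        return 'Fizz!'
    if n % 5 == 0:
        return 'Buzz!'
    return str(n)

print([fizz_buzz(n) for n in range(1, 16)])
# -> ['1', '2', 'Fizz!', '4', 'Buzz!', ..., 'FizzBuzz!']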
fca6289f6fe1e0e5605a7ea12a54395fe98d0425
|
Define rio tasks.
|
soasme/rio,soasme/rio,soasme/rio
|
rio/tasks.py
|
rio/tasks.py
|
# -*- coding: utf-8 -*-
"""
rio.tasks
~~~~~~~~~~
Implement of rio tasks based on celery.
"""
from os import environ
from celery import task
from celery.task.http import URL
from .core import celery
def get_webhook(url, payload):
return URL(url, app=celery, dispatcher=None).get_async(**payload)
def post_webhook(url, payload):
return URL(url, app=celery, dispatcher=None).post_async(**payload)
|
# -*- coding: utf-8 -*-
"""
rio.tasks
~~~~~~~~~~
Implement of rio tasks based on celery.
"""
from os import environ
from celery import Celery
from .conf import configure_app
def register_tasks(app):
"""Register tasks to application.
"""
pass
def create_app():
"""Celery application factory function."""
app = Celery('rio')
configure_app(app)
register_tasks(app)
return app
|
mit
|
Python
|
2b995c68c980f1f38e1e6c6bb69ab88b78353cce
|
Update version.
|
pyhmsa/pyhmsa
|
pyhmsa/__init__.py
|
pyhmsa/__init__.py
|
#!/usr/bin/env python
__author__ = "Philippe T. Pinard"
__email__ = "[email protected]"
__version__ = "0.1.6"
__copyright__ = "Copyright (c) 2013-2014 Philippe T. Pinard"
__license__ = "MIT"
# This is required to create a namespace package.
# A namespace package allows programs to be located in different directories or
# eggs.
__import__('pkg_resources').declare_namespace(__name__)
|
#!/usr/bin/env python
__author__ = "Philippe T. Pinard"
__email__ = "[email protected]"
__version__ = "0.1.5"
__copyright__ = "Copyright (c) 2013-2014 Philippe T. Pinard"
__license__ = "MIT"
# This is required to create a namespace package.
# A namespace package allows programs to be located in different directories or
# eggs.
__import__('pkg_resources').declare_namespace(__name__)
|
mit
|
Python
|
c46f46197589a89e98c8d5960d5c587a7c3dd6b0
|
delete load data part
|
w007878/Keras-GAN
|
train.py
|
train.py
|
import keras
import cv2
from load import load_args
from load import load_img
if __name__ == '__main__':
args = load_args()
print args
img = load_img(args.PATH)
|
import keras
import cv2
from load import load_args
if __name__ == '__main__':
args = load_args()
print args
|
mit
|
Python
|
4c025819cb34939c7b97b145155ee89c8f0b2e93
|
add concept of entity in the askomics abstraction
|
askomics/askomics,askomics/askomics,xgaia/askomics,xgaia/askomics,ofilangi/askomics,xgaia/askomics,ofilangi/askomics,askomics/askomics,ofilangi/askomics,askomics/askomics,ofilangi/askomics,xgaia/askomics
|
askomics/libaskomics/integration/AbstractedEntity.py
|
askomics/libaskomics/integration/AbstractedEntity.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import json
from askomics.libaskomics.ParamManager import ParamManager
from askomics.libaskomics.utils import pformat_generic_object
class AbstractedEntity(object):
"""
An AbstractedEntity represents the classes of the database.
It is defined by an uri and a label.
"""
def __init__(self, identifier):
self.uri = ":" + ParamManager.encodeToRDFURI(identifier)
self.label = identifier
self.log = logging.getLogger(__name__)
def get_uri(self):
return self.uri
def get_turtle(self):
"""
return the turtle code describing an AbstractedEntity
for the abstraction file generation.
"""
turtle = self.get_uri() + " rdf:type owl:Class ;\n"
turtle += (len(self.get_uri()) + 1) * " " + "displaySetting:entity \"true\"^^xsd:boolean ;\n"
turtle += (len(self.get_uri()) + 1) * " " + "rdfs:label " + json.dumps(self.label) + "^^xsd:string .\n\n"
return turtle
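    # Illustrative output for identifier "Gene", assuming encodeToRDFURI
    # returns the name unchanged (it may escape special characters):
    #   :Gene rdf:type owl:Class ;
    #         displaySetting:entity "true"^^xsd:boolean ;
    #         rdfs:label "Gene"^^xsd:string .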
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import json
from askomics.libaskomics.ParamManager import ParamManager
from askomics.libaskomics.utils import pformat_generic_object
class AbstractedEntity(object):
"""
An AbstractedEntity represents the classes of the database.
It is defined by an uri and a label.
"""
def __init__(self, identifier):
self.uri = ":" + ParamManager.encodeToRDFURI(identifier)
self.label = identifier
self.log = logging.getLogger(__name__)
def get_uri(self):
return self.uri
def get_turtle(self):
"""
return the turtle code describing an AbstractedEntity
for the abstraction file generation.
"""
turtle = self.get_uri() + " rdf:type owl:Class ;\n"
turtle += (len(self.get_uri()) + 1) * " " + "rdfs:label " + json.dumps(self.label) + "^^xsd:string .\n\n"
return turtle
|
agpl-3.0
|
Python
|
2b9d8dba5f421ad854574e9d1b7004578dd78346
|
Bump version to 4.1.1b2
|
platformio/platformio,platformio/platformio-core,platformio/platformio-core
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 1, "1b2")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation ecosystem for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"RISC-V, FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 1, "1b1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation ecosystem for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"RISC-V, FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "[email protected]"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
apache-2.0
|
Python
|
89a4d4dcf5533e2045d32282c3ad43c42d745a34
|
fix problem with wrong datatype
|
gousiosg/pullreqs-dnn,gousiosg/pullreqs-dnn
|
train.py
|
train.py
|
#!/usr/bin/env python
#
# (c) 2016 -- onwards Georgios Gousios <[email protected]>
#
import argparse
import pickle
import json
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Embedding, Bidirectional
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from config import *
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--lstm_output', type=int, default=256)
parser.add_argument('--embedding_output', type=int, default=512)
parser.add_argument('--checkpoint', type=bool, default=False)
args = parser.parse_args()
print("Loading data set for prefix %s" % args.prefix)
x_train = pickle.load(open(x_train_file % args.prefix))
y_train = pickle.load(open(y_train_file % args.prefix))
x_val = pickle.load(open(x_val_file % args.prefix))
y_val = pickle.load(open(y_val_file % args.prefix))
config = pickle.load(open(config_file % args.prefix))
print("Training on %d merged, %d unmerged PRs" % (y_train[y_train == 1].size,
y_train[y_train == 0].size))
config.update(vars(args))
print("Training configuration:")
print json.dumps(config, indent=1)
model = Sequential()
model.add(Embedding(config['vocabulary_size'], args.embedding_output, dropout=args.dropout))
model.add(LSTM(args.lstm_output, dropout_W=args.dropout, dropout_U=args.dropout))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy', 'fmeasure'])
print('Train...')
csv_logger = CSVLogger('traininglog_%s.csv' % args.prefix)
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)
callbacks = [csv_logger, early_stopping, reduce_lr]
if args.checkpoint:
checkpoint = ModelCheckpoint(checkpoint_file % args.prefix, monitor='val_loss')
    callbacks.append(checkpoint)  # list.insert() requires an index; append() is what is meant here
model.fit(x_train, y_train, batch_size=args.batch_size, nb_epoch=args.epochs,
validation_data=(x_val, y_val), callbacks=callbacks)
score, acc = model.evaluate(x_val, y_val, batch_size=args.batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
|
#!/usr/bin/env python
#
# (c) 2016 -- onwards Georgios Gousios <[email protected]>
#
import argparse
import pickle
import json
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Embedding, Bidirectional
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from config import *
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--lstm_output', type=float, default=256)
parser.add_argument('--embedding_output', type=float, default=512)
parser.add_argument('--checkpoint', type=bool, default=False)
args = parser.parse_args()
print("Loading data set for prefix %s" % args.prefix)
x_train = pickle.load(open(x_train_file % args.prefix))
y_train = pickle.load(open(y_train_file % args.prefix))
x_val = pickle.load(open(x_val_file % args.prefix))
y_val = pickle.load(open(y_val_file % args.prefix))
config = pickle.load(open(config_file % args.prefix))
print("Training on %d merged, %d unmerged PRs" % (y_train[y_train == 1].size,
y_train[y_train == 0].size))
config.update(vars(args))
print("Training configuration:")
print json.dumps(config, indent=1)
model = Sequential()
model.add(Embedding(config['vocabulary_size'], args.embedding_output, dropout=args.dropout))
model.add(LSTM(args.lstm_output, dropout_W=args.dropout, dropout_U=args.dropout))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy', 'fmeasure'])
print('Train...')
csv_logger = CSVLogger('traininglog_%s.csv' % args.prefix)
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)
callbacks = [csv_logger, early_stopping, reduce_lr]
if args.checkpoint:
checkpoint = ModelCheckpoint(checkpoint_file % args.prefix, monitor='val_loss')
    callbacks.append(checkpoint)  # list.insert() requires an index; append() is what is meant here
model.fit(x_train, y_train, batch_size=args.batch_size, nb_epoch=args.epochs,
validation_data=(x_val, y_val), callbacks=callbacks)
score, acc = model.evaluate(x_val, y_val, batch_size=args.batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
|
mit
|
Python
|
94756c1e7e6a164546b4808c8b8fb9db78e1990a
|
Update cluster_info.py
|
PlatformLSF/platform-python-lsf-api
|
examples/cluster_info.py
|
examples/cluster_info.py
|
#! /usr/bin/env python
from pythonlsf import lsf
if lsf.lsb_init("test") > 0:
exit(1)
print '\n Hosts in cluster: ', lsf.get_host_names()
print '\n Clustername: ', lsf.ls_getclustername(), '\n'
print '{0:15s} {1:20s} {2:20s} {3:5s} {4:4s}'.format('Hostname', 'Type',
'Model', 'Cores', 'Load')
for info in lsf.get_host_info():
    # Deal with the case when the hostname contains "-".
if '-' in info.hostName:
load = lsf.get_host_load("hname=" + "'" + info.hostName + "'", lsf.R15M)
else:
load = lsf.get_host_load("hname=" + info.hostName, lsf.R15M)
if load >= 65535:
load = -1
print '{0:15s} {1:20s} {2:20s} {3:5d} {4:4.2f}'.format(info.hostName,
info.hostType,
info.hostModel,
info.cores,
load)
resources = ""
    index = 0
    if info.nRes > 0:
        while True:
            item = lsf.stringArray_getitem(info.resources, index)
            if item:
                resources += item + " "
                index += 1
            else:
                break
print ' +--> Resources:', resources
|
#!/usr/local/bin/python2.7
from pythonlsf import lsf
print '\n Hosts in cluster: ', lsf.get_host_names()
print '\n Clustername: ', lsf.ls_getclustername(), '\n'
print '{0:15s} {1:20s} {2:20s} {3:5s} {4:4s}'.format('Hostname', 'Type',
'Model', 'Cores', 'Load')
for info in lsf.get_host_info():
    # Deal with the case when the hostname contains "-".
if '-' in info.hostName:
load = lsf.get_host_load("hname=" + "'" + info.hostName + "'", lsf.R15M)
else:
load = lsf.get_host_load("hname=" + info.hostName, lsf.R15M)
if load >= 65535:
load = -1
print '{0:15s} {1:20s} {2:20s} {3:5d} {4:4.2f}'.format(info.hostName,
info.hostType,
info.hostModel,
info.cores,
load)
resources = ""
    index = 0
    if info.nRes > 0:
        while True:
            item = lsf.stringArray_getitem(info.resources, index)
            if item:
                resources += item + " "
                index += 1
            else:
                break
print ' +--> Resources:', resources
|
epl-1.0
|
Python
|
e187ab0f5285378a891552b7cecba0bad47395ab
|
upgrade plenum version to 1.6
|
evernym/plenum,evernym/zeno
|
plenum/__metadata__.py
|
plenum/__metadata__.py
|
"""
plenum package metadata
"""
__title__ = 'indy-plenum'
__version_info__ = (1, 6)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Hyperledger"
__author_email__ = '[email protected]'
__maintainer__ = 'Hyperledger'
__maintainer_email__ = '[email protected]'
__url__ = 'https://github.com/hyperledger/indy-plenum'
__description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__long_description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__download_url__ = "https://github.com/hyperledger/indy-plenum/tarball/{}".format(__version__)
__license__ = "Apache 2.0"
__all__ = [
'__title__',
'__version_info__',
'__version__',
'__author__',
'__author_email__',
'__maintainer__',
'__maintainer_email__',
'__url__',
'__description__',
'__long_description__',
'__download_url__',
'__license__'
]
|
"""
plenum package metadata
"""
__title__ = 'indy-plenum'
__version_info__ = (1, 5)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Hyperledger"
__author_email__ = '[email protected]'
__maintainer__ = 'Hyperledger'
__maintainer_email__ = '[email protected]'
__url__ = 'https://github.com/hyperledger/indy-plenum'
__description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__long_description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__download_url__ = "https://github.com/hyperledger/indy-plenum/tarball/{}".format(__version__)
__license__ = "Apache 2.0"
__all__ = [
'__title__',
'__version_info__',
'__version__',
'__author__',
'__author_email__',
'__maintainer__',
'__maintainer_email__',
'__url__',
'__description__',
'__long_description__',
'__download_url__',
'__license__'
]
|
apache-2.0
|
Python
|
1a18445482c67b38810e330065e5ff04e772af4a
|
Fix from_email in IncomingLetter migrations
|
ad-m/foundation-manager,pilnujemy/pytamy,pilnujemy/pytamy,pilnujemy/pytamy,ad-m/foundation-manager,ad-m/foundation-manager,pilnujemy/pytamy,ad-m/foundation-manager
|
foundation/letters/migrations/0009_auto_20151216_0656.py
|
foundation/letters/migrations/0009_auto_20151216_0656.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def split_models(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
L = apps.get_model("letters", "Letter")
OL = apps.get_model("letters", "OutgoingLetter")
IL = apps.get_model("letters", "IncomingLetter")
for letter in L.objects.filter(incoming=True).all():
IL.objects.create(parent=letter,
temp_email=letter.email,
temp_sender=letter.sender_office)
for letter in L.objects.filter(incoming=False).all():
OL.objects.create(parent=letter,
temp_send_at=letter.send_at,
temp_sender=letter.sender_user,
temp_author=letter.author,
temp_email=letter.email)
class Migration(migrations.Migration):
dependencies = [
('letters', '0008_auto_20151216_0647'),
]
operations = [
migrations.RunPython(split_models),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def split_models(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
L = apps.get_model("letters", "Letter")
OL = apps.get_model("letters", "OutgoingLetter")
IL = apps.get_model("letters", "IncomingLetter")
for letter in L.objects.filter(incoming=True).all():
IL.objects.create(parent=letter,
temp_from_email=letter.email,
temp_sender=letter.sender_office)
for letter in L.objects.filter(incoming=False).all():
OL.objects.create(parent=letter,
temp_send_at=letter.send_at,
temp_sender=letter.sender_user,
temp_author=letter.author,
temp_email=letter.email)
class Migration(migrations.Migration):
dependencies = [
('letters', '0008_auto_20151216_0647'),
]
operations = [
migrations.RunPython(split_models),
]
|
bsd-3-clause
|
Python
|
11b293afd11b6d568644a559dff9299ec9dc916f
|
Add comments on current Timer abstraction
|
evernym/plenum,evernym/zeno
|
plenum/common/timer.py
|
plenum/common/timer.py
|
from abc import ABC, abstractmethod
from functools import wraps
from typing import Callable, NamedTuple
import time
from sortedcontainers import SortedListWithKey
# TODO: Consider renaming this into Scheduler?
class TimerService(ABC):
@abstractmethod
def get_current_time(self) -> float:
pass
@abstractmethod
# TODO: Swapping callback and delay would allow defaulting delay to zero,
# effectively simplifying use-case when we want delay execution of some code
# just to allow some other work to run
def schedule(self, delay: int, callback: Callable):
pass
@abstractmethod
def cancel(self, callback: Callable):
pass
class QueueTimer(TimerService):
TimerEvent = NamedTuple('TimerEvent', [('timestamp', float), ('callback', Callable)])
def __init__(self, get_current_time=time.perf_counter):
self._get_current_time = get_current_time
self._events = SortedListWithKey(key=lambda v: v.timestamp)
def queue_size(self):
return len(self._events)
def service(self):
while len(self._events) and self._events[0].timestamp <= self._get_current_time():
self._events.pop(0).callback()
def get_current_time(self) -> float:
return self._get_current_time()
def schedule(self, delay: float, callback: Callable):
timestamp = self._get_current_time() + delay
self._events.add(self.TimerEvent(timestamp=timestamp, callback=callback))
def cancel(self, callback: Callable):
indexes = [i for i, ev in enumerate(self._events) if ev.callback == callback]
for i in reversed(indexes):
del self._events[i]
class RepeatingTimer:
def __init__(self, timer: TimerService, interval: int, callback: Callable, active: bool = True):
@wraps(callback)
def wrapped_callback():
if not self._active:
return
callback()
self._timer.schedule(self._interval, self._callback)
self._timer = timer
self._interval = interval
self._callback = wrapped_callback
self._active = False
if active:
self.start()
def start(self):
if self._active:
return
self._active = True
self._timer.schedule(self._interval, self._callback)
def stop(self):
if not self._active:
return
self._active = False
self._timer.cancel(self._callback)
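# A usage sketch under illustrative names (not part of this module's API
# surface): drive a QueueTimer manually and let a RepeatingTimer fire
# every 5 time units.
#
#   timer = QueueTimer()
#   heartbeat = RepeatingTimer(timer, 5, lambda: print('beat'))
#   ...advance time...
#   timer.service()  # fires every queued callback whose timestamp has passed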
|
from abc import ABC, abstractmethod
from functools import wraps
from typing import Callable, NamedTuple
import time
from sortedcontainers import SortedListWithKey
class TimerService(ABC):
@abstractmethod
def get_current_time(self) -> float:
pass
@abstractmethod
def schedule(self, delay: int, callback: Callable):
pass
@abstractmethod
def cancel(self, callback: Callable):
pass
class QueueTimer(TimerService):
TimerEvent = NamedTuple('TimerEvent', [('timestamp', float), ('callback', Callable)])
def __init__(self, get_current_time=time.perf_counter):
self._get_current_time = get_current_time
self._events = SortedListWithKey(key=lambda v: v.timestamp)
def queue_size(self):
return len(self._events)
def service(self):
while len(self._events) and self._events[0].timestamp <= self._get_current_time():
self._events.pop(0).callback()
def get_current_time(self) -> float:
return self._get_current_time()
def schedule(self, delay: float, callback: Callable):
timestamp = self._get_current_time() + delay
self._events.add(self.TimerEvent(timestamp=timestamp, callback=callback))
def cancel(self, callback: Callable):
indexes = [i for i, ev in enumerate(self._events) if ev.callback == callback]
for i in reversed(indexes):
del self._events[i]
class RepeatingTimer:
def __init__(self, timer: TimerService, interval: int, callback: Callable, active: bool = True):
@wraps(callback)
def wrapped_callback():
if not self._active:
return
callback()
self._timer.schedule(self._interval, self._callback)
self._timer = timer
self._interval = interval
self._callback = wrapped_callback
self._active = False
if active:
self.start()
def start(self):
if self._active:
return
self._active = True
self._timer.schedule(self._interval, self._callback)
def stop(self):
if not self._active:
return
self._active = False
self._timer.cancel(self._callback)
|
apache-2.0
|
Python
|
317c19b2d2767276a426a4d058191dbaaf8f4c6f
|
Extend the duration of the tough_filters_cases page set.
|
hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,Fireblend/chromium-crosswalk,dednal/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,axinging/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,cltk/cltk... 
|
tools/perf/page_sets/tough_filters_cases.py
|
tools/perf/page_sets/tough_filters_cases.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughFiltersCasesPage(page_module.Page):
def RunSmoothness(self, action_runner):
action_runner.Wait(10)
class ToughFiltersCasesPageSet(page_set_module.PageSet):
"""
Description: Self-driven filters animation examples
"""
def __init__(self):
super(ToughFiltersCasesPageSet, self).__init__(
archive_data_file='data/tough_filters_cases.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
'http://letmespellitoutforyou.com/samples/svg/filter_terrain.svg',
'http://static.bobdo.net/Analog_Clock.svg',
]
for url in urls_list:
self.AddPage(ToughFiltersCasesPage(url, self))
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughFiltersCasesPage(page_module.Page):
def RunSmoothness(self, action_runner):
action_runner.Wait(5)
class ToughFiltersCasesPageSet(page_set_module.PageSet):
"""
Description: Self-driven filters animation examples
"""
def __init__(self):
super(ToughFiltersCasesPageSet, self).__init__(
archive_data_file='data/tough_filters_cases.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
'http://letmespellitoutforyou.com/samples/svg/filter_terrain.svg',
'http://static.bobdo.net/Analog_Clock.svg',
]
for url in urls_list:
self.AddPage(ToughFiltersCasesPage(url, self))
|
bsd-3-clause
|
Python
|
bdddee22a4e710e580e105cf187ce77c59f09d31
|
enable auto parallel and non-uniform k-points
|
mlouhivu/gpaw-accelerator-benchmarks,mlouhivu/gpaw-accelerator-benchmarks
|
silicon-crystal/silicon-crystal.py
|
silicon-crystal/silicon-crystal.py
|
###
### GPAW benchmark: Silicon Crystal
###
from __future__ import print_function
from ase.lattice import bulk
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.eigensolvers.rmm_diis import RMM_DIIS
from gpaw.mpi import size, rank
try:
from gpaw import use_mic
except ImportError:
use_mic = False
# no. of replicates in each dimension (increase to scale up the system)
x = 4
y = 4
z = 4
# other parameters
h = 0.22
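# Brillouin-zone sampling grid; the three components are independent, so a
# non-uniform grid such as (2, 2, 1) would also be valid here.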
kpts = (1,1,1)
txt = 'output.txt'
maxiter = 6
conv = {'eigenstates' : 1e-4, 'density' : 1e-2, 'energy' : 1e-3}
# output benchmark parameters
if rank == 0:
print("#"*60)
print("GPAW benchmark: Silicon Crystal")
print(" dimensions: x=%d, y=%d, z=%d" % (x, y, z))
print(" grid spacing: h=%f" % h)
print(" Brillouin-zone sampling: kpts=" + str(kpts))
print(" MPI task: %d out of %d" % (rank, size))
print(" using MICs: " + str(use_mic))
print("#"*60)
print("")
# setup the system
atoms = bulk('Si', cubic=True)
atoms = atoms.repeat((x, y, z))
calc = GPAW(h=h, nbands=-20, width=0.2,
kpts=kpts, xc='PBE',
maxiter=maxiter,
txt=txt, eigensolver=RMM_DIIS(niter=2),
parallel={'sl_auto': True},
mixer=Mixer(0.1, 5, 100),
)
atoms.set_calculator(calc)
# execute the run
try:
atoms.get_potential_energy()
except ConvergenceError:
pass
|
###
### GPAW benchmark: Silicon Crystal
###
from __future__ import print_function
from ase.lattice import bulk
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.eigensolvers.rmm_diis import RMM_DIIS
from gpaw.mpi import size, rank
try:
from gpaw import use_mic
except ImportError:
use_mic = False
# no. of replicates in each dimension (increase to scale up the system)
x = 4
y = 4
z = 4
# other parameters
h = 0.22
kpt = 1
txt = 'output.txt'
maxiter = 6
conv = {'eigenstates' : 1e-4, 'density' : 1e-2, 'energy' : 1e-3}
# output benchmark parameters
if rank == 0:
print("#"*60)
print("GPAW benchmark: Silicon Crystal")
print(" dimensions: x=%d, y=%d, z=%d" % (x, y, z))
print(" grid spacing: h=%f" % h)
print(" Brillouin-zone sampling: kpts=(%d,%d,%d)" % (kpt, kpt, kpt))
print(" MPI task: %d out of %d" % (rank, size))
print(" using MICs: " + repr(use_mic))
print("#"*60)
print("")
# setup the system
atoms = bulk('Si', cubic=True)
atoms = atoms.repeat((x, y, z))
calc = GPAW(h=h, nbands=-20, width=0.2,
kpts=(kpt,kpt,kpt), xc='PBE',
maxiter=maxiter,
txt=txt, eigensolver=RMM_DIIS(niter=2),
mixer=Mixer(0.1, 5, 100),
)
atoms.set_calculator(calc)
# execute the run
try:
atoms.get_potential_energy()
except ConvergenceError:
pass
|
mit
|
Python
|
130d966f933983dc366a3023ac78a2ba24bf064c
|
add flag for data set binarization
|
TheRiddance/dcgan
|
train.py
|
train.py
|
"""
Andrin Jenal, 2017
ETH Zurich
"""
import tensorflow as tf
from dcgan import DCGAN
import hdf5_dataset
from checkpoint_saver import CheckpointSaver
from visualizer import ImageVisualizer
flags = tf.app.flags
flags.DEFINE_string("dataset", "datasets/celeb_dataset_3k_colored.h5", "sample results dir")
flags.DEFINE_boolean("binarized", False, "data set binarization")
flags.DEFINE_string("data_dir", "results/", "checkpoint and logging results dir")
flags.DEFINE_integer("batch_size", 128, "batch size")
flags.DEFINE_integer("image_size", 64, "image size")
flags.DEFINE_integer("channels", 3, "color channels")
flags.DEFINE_integer("max_epoch", 500, "max epoch")
flags.DEFINE_integer("z_size", 100, "size of latent (feature?) space")
flags.DEFINE_float("learning_rate", 5e-4, "learning rate")
flags.DEFINE_integer("generation_step", 1, "generate random images")
FLAGS = flags.FLAGS
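# Illustrative invocation (paths and values are placeholders):
#   python train.py --dataset datasets/celeb_dataset_3k_colored.h5 \
#                   --binarized=True --max_epoch 100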
def main(_):
# create checkpoint saver
    # the checkpoint saver can create checkpoint files, which can later be used to restore a model state, but it also
# audits the model progress to a log file
checkpoint_saver = CheckpointSaver(FLAGS.data_dir)
checkpoint_saver.save_experiment_config(FLAGS.__dict__['__flags'])
# load training data
data_set, data_set_shape = hdf5_dataset.read_data_set(FLAGS.dataset, image_size=FLAGS.image_size, shape=(FLAGS.image_size, FLAGS.image_size, FLAGS.channels), binarized=FLAGS.binarized, validation=0)
train_data = data_set.train
# create a data visualizer
visualizer = ImageVisualizer(checkpoint_saver.get_experiment_dir(), image_size=FLAGS.image_size)
visualizer.training_data_sample(train_data)
# create the actual DCGAN model
dcgan_model = DCGAN(FLAGS.image_size, FLAGS.channels, z_size=FLAGS.z_size, learning_rate=FLAGS.learning_rate)
print("start", type(dcgan_model).__name__, "model training")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
dcgan_model.initialize_summaries(sess, checkpoint_saver.get_experiment_dir())
for epoch in range(FLAGS.max_epoch):
for images in train_data.next_batch(FLAGS.batch_size):
d_loss, g_loss = dcgan_model.update_params(sess, images)
msg = "epoch: %3d" % epoch + " Discriminator loss %.4f" % d_loss + " Generator loss %.4f" % g_loss
checkpoint_saver.audit_loss(msg)
dcgan_model.update_summaries(sess, images, epoch)
if epoch % FLAGS.generation_step == 0:
visualizer.save_generated_samples(dcgan_model.generate_samples(sess, num_samples=200), epoch)
if __name__ == '__main__':
tf.app.run()
|
"""
Andrin Jenal, 2017
ETH Zurich
"""
import tensorflow as tf
from dcgan import DCGAN
import hdf5_dataset
from checkpoint_saver import CheckpointSaver
from visualizer import ImageVisualizer
flags = tf.app.flags
flags.DEFINE_string("dataset", "datasets/celeb_dataset_3k_colored.h5", "sample results dir")
flags.DEFINE_string("data_dir", "results/", "checkpoint and logging results dir")
flags.DEFINE_integer("batch_size", 128, "batch size")
flags.DEFINE_integer("image_size", 64, "image size")
flags.DEFINE_integer("channels", 3, "color channels")
flags.DEFINE_integer("max_epoch", 500, "max epoch")
flags.DEFINE_integer("z_size", 100, "size of latent (feature?) space")
flags.DEFINE_float("learning_rate", 5e-4, "learning rate")
flags.DEFINE_integer("generation_step", 1, "generate random images")
FLAGS = flags.FLAGS
def main(_):
# create checkpoint saver
    # the checkpoint saver can create checkpoint files, which can later be used to restore a model state, but it also
# audits the model progress to a log file
checkpoint_saver = CheckpointSaver(FLAGS.data_dir)
checkpoint_saver.save_experiment_config(FLAGS.__dict__['__flags'])
# load training data
data_set, data_set_shape = hdf5_dataset.read_data_set(FLAGS.dataset, image_size=FLAGS.image_size, shape=(FLAGS.image_size, FLAGS.image_size, FLAGS.channels), binarized=False, validation=0)
train_data = data_set.train
# create a data visualizer
visualizer = ImageVisualizer(checkpoint_saver.get_experiment_dir(), image_size=FLAGS.image_size)
visualizer.training_data_sample(train_data)
# create the actual DCGAN model
dcgan_model = DCGAN(FLAGS.image_size, FLAGS.channels, z_size=FLAGS.z_size, learning_rate=FLAGS.learning_rate)
print("start", type(dcgan_model).__name__, "model training")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
dcgan_model.initialize_summaries(sess, checkpoint_saver.get_experiment_dir())
for epoch in range(FLAGS.max_epoch):
for images in train_data.next_batch(FLAGS.batch_size):
d_loss, g_loss = dcgan_model.update_params(sess, images)
msg = "epoch: %3d" % epoch + " Discriminator loss %.4f" % d_loss + " Generator loss %.4f" % g_loss
checkpoint_saver.audit_loss(msg)
dcgan_model.update_summaries(sess, images, epoch)
if epoch % FLAGS.generation_step == 0:
visualizer.save_generated_samples(dcgan_model.generate_samples(sess, num_samples=200), epoch)
if __name__ == '__main__':
tf.app.run()
|
bsd-3-clause
|
Python
|
9933920ebc49b4e275ff93bd6d918945ee77e9a4
|
Make keepalive tests under macOS less stressful
|
cherrypy/cheroot
|
cheroot/test/test_wsgi.py
|
cheroot/test/test_wsgi.py
|
"""Test wsgi."""
from concurrent.futures.thread import ThreadPoolExecutor
import pytest
import portend
import requests
from requests_toolbelt.sessions import BaseUrlSession as Session
from jaraco.context import ExceptionTrap
from cheroot import wsgi
from cheroot._compat import IS_MACOS, IS_WINDOWS
IS_SLOW_ENV = IS_MACOS or IS_WINDOWS
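# macOS and Windows environments are treated as slow: the keepalive test below
# relaxes the server timeout and scales down thread count and request volume.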
@pytest.fixture
def simple_wsgi_server():
"""Fucking simple wsgi server fixture (duh)."""
port = portend.find_available_local_port()
def app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello world!']
host = '::'
addr = host, port
server = wsgi.Server(addr, app, timeout=600 if IS_SLOW_ENV else 20)
url = 'http://localhost:{port}/'.format(**locals())
with server._run_in_thread() as thread:
yield locals()
def test_connection_keepalive(simple_wsgi_server):
"""Test the connection keepalive works (duh)."""
session = Session(base_url=simple_wsgi_server['url'])
pooled = requests.adapters.HTTPAdapter(
pool_connections=1, pool_maxsize=1000,
)
session.mount('http://', pooled)
def do_request():
with ExceptionTrap(requests.exceptions.ConnectionError) as trap:
resp = session.get('info')
resp.raise_for_status()
return bool(trap)
with ThreadPoolExecutor(max_workers=10 if IS_SLOW_ENV else 50) as pool:
tasks = [
pool.submit(do_request)
for n in range(250 if IS_SLOW_ENV else 1000)
]
failures = sum(task.result() for task in tasks)
assert not failures
|
"""Test wsgi."""
from concurrent.futures.thread import ThreadPoolExecutor
import pytest
import portend
import requests
from requests_toolbelt.sessions import BaseUrlSession as Session
from jaraco.context import ExceptionTrap
from cheroot import wsgi
@pytest.fixture
def simple_wsgi_server():
"""Fucking simple wsgi server fixture (duh)."""
port = portend.find_available_local_port()
def app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello world!']
host = '::'
addr = host, port
server = wsgi.Server(addr, app, timeout=20)
url = 'http://localhost:{port}/'.format(**locals())
with server._run_in_thread() as thread:
yield locals()
def test_connection_keepalive(simple_wsgi_server):
"""Test the connection keepalive works (duh)."""
session = Session(base_url=simple_wsgi_server['url'])
pooled = requests.adapters.HTTPAdapter(
pool_connections=1, pool_maxsize=1000,
)
session.mount('http://', pooled)
def do_request():
with ExceptionTrap(requests.exceptions.ConnectionError) as trap:
resp = session.get('info')
resp.raise_for_status()
return bool(trap)
with ThreadPoolExecutor(max_workers=50) as pool:
tasks = [
pool.submit(do_request)
for n in range(1000)
]
failures = sum(task.result() for task in tasks)
assert not failures
|
bsd-3-clause
|
Python
|
88327c5e0a7ba7af086ad461e20395a33215b96c
|
Update api.py
|
FNNDSC/ChRIS_ultron_backEnd,FNNDSC/ChRIS_ultron_backEnd,FNNDSC/ChRIS_ultron_backEnd,FNNDSC/ChRIS_ultron_backEnd
|
chris_backend/core/api.py
|
chris_backend/core/api.py
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from feeds import views
# API v1 endpoints
urlpatterns = format_suffix_patterns([
url(r'^v1/$', views.api_root),
url(r'^v1/feeds/$', views.FeedList.as_view(), name='feed-list'),
url(r'^v1/feeds/(?P<pk>[0-9]+)/$',
views.FeedDetail.as_view(), name='feed-detail'),
url(r'^v1/users/$', views.UserList.as_view(), name='user-list'),
url(r'^v1/users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view(), name='user-detail')
])
# Login and logout views for Django's browsable API
urlpatterns += [
url(r'^v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
]
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from feeds import views
# API v1 endpoints
urlpatterns = format_suffix_patterns([
url(r'^v1/$', views.api_root),
url(r'^v1/feeds/$', views.FeedList.as_view(), name='feed-list'),
url(r'^v1/feeds/(?P<pk>[0-9]+)/$',
views.FeedDetail.as_view(), name='feed-detail'),
url(r'^v1/users/$', views.UserList.as_view(), name='user-list'),
url(r'^v1/users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view(), name='user-detail')
])
# Login and logout views for Django's browsable API
urlpatterns += [
url(r'^v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
]
print('lolo')
|
mit
|
Python
|
51fb4cc79ecba178b811a1a0bb403c91317a116e
|
allow kwargs in ASE atoms converter
|
nisse3000/pymatgen,davidwaroquiers/pymatgen,davidwaroquiers/pymatgen,fraricci/pymatgen,dongsenfo/pymatgen,vorwerkc/pymatgen,tschaume/pymatgen,blondegeek/pymatgen,vorwerkc/pymatgen,gVallverdu/pymatgen,tschaume/pymatgen,montoyjh/pymatgen,fraricci/pymatgen,fraricci/pymatgen,montoyjh/pymatgen,tschaume/pymatgen,gVallverdu/pymatgen,mbkumar/pymatgen,blondegeek/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,fraricci/pymatgen,mbkumar/pymatgen,nisse3000/pymatgen,gVallverdu/pymatgen,mbkumar/pymatgen,gpetretto/pymatgen,gmatteo/pymatgen,mbkumar/pymatgen,vorwerkc/pymatgen,gpetretto/pymatgen,dongsenfo/pymatgen,richardtran415/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,montoyjh/pymatgen,tschaume/pymatgen,vorwerkc/pymatgen,gVallverdu/pymatgen,tschaume/pymatgen,dongsenfo/pymatgen,dongsenfo/pymatgen,richardtran415/pymatgen,gmatteo/pymatgen,nisse3000/pymatgen,gpetretto/pymatgen,davidwaroquiers/pymatgen,nisse3000/pymatgen,gpetretto/pymatgen,montoyjh/pymatgen,davidwaroquiers/pymatgen
|
pymatgen/io/ase.py
|
pymatgen/io/ase.py
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, absolute_import
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Structure
try:
from ase import Atoms
ase_loaded = True
except ImportError:
ase_loaded = False
class AseAtomsAdaptor(object):
"""
Adaptor serves as a bridge between ASE Atoms and pymatgen structure.
"""
@staticmethod
def get_atoms(structure, **kwargs):
"""
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
**kwargs: other keyword args to pass into the ASE Atoms constructor
Returns:
ASE Atoms object
"""
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True,
cell=cell, **kwargs)
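    # Illustrative call (``struct`` stands for any ordered pymatgen
    # Structure; ``tags`` is just one example of a forwarded kwarg):
    #   atoms = AseAtomsAdaptor.get_atoms(struct, tags=[0] * len(struct))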
@staticmethod
def get_structure(atoms, cls=None):
"""
Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
cls: The Structure class to instantiate (defaults to pymatgen structure)
Returns:
Equivalent pymatgen.core.structure.Structure
"""
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
cls = Structure if cls is None else cls
return cls(lattice, symbols, positions,
coords_are_cartesian=True)
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, absolute_import
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Structure
try:
from ase import Atoms
ase_loaded = True
except ImportError:
ase_loaded = False
class AseAtomsAdaptor(object):
"""
Adaptor serves as a bridge between ASE Atoms and pymatgen structure.
"""
@staticmethod
def get_atoms(structure):
"""
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
Returns:
ASE Atoms object
"""
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell)
@staticmethod
def get_structure(atoms, cls=None):
"""
Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
cls: The Structure class to instantiate (defaults to pymatgen structure)
Returns:
Equivalent pymatgen.core.structure.Structure
"""
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
cls = Structure if cls is None else cls
return cls(lattice, symbols, positions,
coords_are_cartesian=True)
|
mit
|
Python
|
6db9a65c7b734c7c421075cbae11b5b1df35980e
|
Remove RingBuffer TODO from midi_monitor example
|
stephenedie/jackclient-python,spatialaudio/jackclient-python
|
examples/midi_monitor.py
|
examples/midi_monitor.py
|
#!/usr/bin/env python3
"""JACK client that prints all received MIDI events."""
import jack
import binascii
client = jack.Client("MIDI-Monitor")
port = client.midi_inports.register("input")
@client.set_process_callback
def process(frames):
for offset, data in port.incoming_midi_events():
print("{0}: 0x{1}".format(client.last_frame_time + offset,
binascii.hexlify(data).decode()))
return jack.CALL_AGAIN
with client:
print("#" * 80)
print("press Return to quit")
print("#" * 80)
input()
|
#!/usr/bin/env python3
"""JACK client that prints all received MIDI events."""
import jack
import binascii
client = jack.Client("MIDI-Monitor")
port = client.midi_inports.register("input")
@client.set_process_callback
def process(frames):
for offset, data in port.incoming_midi_events():
# TODO: use ringbuffer
print("{0}: 0x{1}".format(client.last_frame_time + offset,
binascii.hexlify(data).decode()))
return jack.CALL_AGAIN
with client:
print("#" * 80)
print("press Return to quit")
print("#" * 80)
input()
|
mit
|
Python
|
e47b7e5952d4001459aee5ba570a7cc6d4c10d43
|
Add import of the InvalidDirectoryValueError to the directory package's test file
|
SizzlingVortex/classyfd
|
tests/unit/directory/test_directory.py
|
tests/unit/directory/test_directory.py
|
"""Contains the unit tests for the inner directory package"""
import unittest
import os
from classyfd import Directory, InvalidDirectoryValueError
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
def test_create_directory_object(self):
d = Directory(self.fake_path)
self.assertTrue(d)
return
if __name__ == "__main__":
unittest.main()
|
"""Contains the unit tests for the inner directory package"""
import unittest
import os
from classyfd import Directory
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
def test_create_directory_object(self):
d = Directory(self.fake_path)
self.assertTrue(d)
return
if __name__ == "__main__":
unittest.main()
|
mit
|
Python
|
91eddb82671842cfd1dd7aa58dc42d7ffd1d1550
|
call passed functions in get_function
|
lefnire/tensorforce,reinforceio/tensorforce
|
tensorforce/util/config_util.py
|
tensorforce/util/config_util.py
|
# Copyright 2016 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Utility functions concerning configurations
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six import callable
import importlib
def get_function(fn, param=None, default=None):
"""
Get function reference by full module path. Either returns the function reference or calls the function
if param is not None and returns the result.
:param fn: Callable object or String containing the full function path
    :param param: None to return the function reference, kwargs dict to return executed function
:param default: Default reference to return if str is None or empty
:return: Function reference, or result from function call
"""
if not fn:
return default
if callable(fn):
func = fn
else:
module_name, function_name = fn.rsplit('.', 1)
module = importlib.import_module(module_name)
func = getattr(module, function_name)
if isinstance(param, dict):
return func(**param)
else:
return func
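# Illustrative calls (module paths are examples only):
#   relu = get_function('tensorflow.nn.relu')                    # reference
#   zeros = get_function('numpy.zeros', param={'shape': (2, 2)}) # result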
|
# Copyright 2016 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Utility functions concerning configurations
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six import callable
import importlib
def get_function(fn, param=None, default=None):
"""
Get function reference by full module path. Either returns the function reference or calls the function
if param is not None and returns the result.
:param fn: Callable object or String containing the full function path
    :param param: None to return the function reference, kwargs dict to return executed function
:param default: Default reference to return if str is None or empty
:return: Function reference, or result from function call
"""
if not fn:
return default
if callable(fn):
return fn
module_name, function_name = fn.rsplit('.', 1)
module = importlib.import_module(module_name)
func = getattr(module, function_name)
if isinstance(param, dict):
return func(**param)
else:
return func
|
apache-2.0
|
Python
|
d6532c24675956c6dc093dd330be1b78d691994f
|
Build Test
|
shear/rppy
|
rppy/rppy.py
|
rppy/rppy.py
|
# rppy - a geophysical library for Python
# Copyright (C) 2015 Sean Matthew Contenti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
import matplotlib.pyplot as plt
|
# rppy - a geophysical library for Python
# Copyright (C) 2015 Sean Matthew Contenti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
|
bsd-2-clause
|
Python
|
7ab671fea7fda45be5994d85378bfb326eddd7fb
|
Fix invalid use of F() when creating user story
|
gauravjns/taiga-back,seanchen/taiga-back,joshisa/taiga-back,dycodedev/taiga-back,WALR/taiga-back,astronaut1712/taiga-back,Tigerwhit4/taiga-back,jeffdwyatt/taiga-back,bdang2012/taiga-back-casting,CMLL/taiga-back,obimod/taiga-back,coopsource/taiga-back,Rademade/taiga-back,dayatz/taiga-back,CoolCloud/taiga-back,astagi/taiga-back,19kestier/taiga-back,Tigerwhit4/taiga-back,rajiteh/taiga-back,forging2012/taiga-back,CMLL/taiga-back,astronaut1712/taiga-back,seanchen/taiga-back,rajiteh/taiga-back,crr0004/taiga-back,CMLL/taiga-back,jeffdwyatt/taiga-back,gauravjns/taiga-back,obimod/taiga-back,jeffdwyatt/taiga-back,seanchen/taiga-back,Zaneh-/bearded-tribble-back,joshisa/taiga-back,seanchen/taiga-back,Rademade/taiga-back,astagi/taiga-back,taigaio/taiga-back,gam-phon/taiga-back,dycodedev/taiga-back,bdang2012/taiga-back-casting,crr0004/taiga-back,CoolCloud/taiga-back,coopsource/taiga-back,dayatz/taiga-back,WALR/taiga-back,astagi/taiga-back,Zaneh-/bearded-tribble-back,Rademade/taiga-back,forging2012/taiga-back,gauravjns/taiga-back,frt-arch/taiga-back,gam-phon/taiga-back,CMLL/taiga-back,forging2012/taiga-back,EvgeneOskin/taiga-back,Zaneh-/bearded-tribble-back,dayatz/taiga-back,rajiteh/taiga-back,crr0004/taiga-back,bdang2012/taiga-back-casting,coopsource/taiga-back,gam-phon/taiga-back,frt-arch/taiga-back,coopsource/taiga-back,taigaio/taiga-back,CoolCloud/taiga-back,Rademade/taiga-back,CoolCloud/taiga-back,frt-arch/taiga-back,obimod/taiga-back,19kestier/taiga-back,joshisa/taiga-back,EvgeneOskin/taiga-back,dycodedev/taiga-back,astronaut1712/taiga-back,joshisa/taiga-back,taigaio/taiga-back,xdevelsistemas/taiga-back-community,forging2012/taiga-back,EvgeneOskin/taiga-back,dycodedev/taiga-back,Tigerwhit4/taiga-back,astagi/taiga-back,EvgeneOskin/taiga-back,19kestier/taiga-back,Rademade/taiga-back,WALR/taiga-back,rajiteh/taiga-back,bdang2012/taiga-back-casting,astronaut1712/taiga-back,WALR/taiga-back,gauravjns/taiga-back,jeffdwyatt/taiga-back,xdevelsistemas/taiga-back-community,Tigerwhit4/taiga-back,xdevelsistemas/taiga-back-community,obimod/taiga-back,crr0004/taiga-back,gam-phon/taiga-back
|
taiga/projects/occ/mixins.py
|
taiga/projects/occ/mixins.py
|
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taiga.base import exceptions as exc
class OCCResourceMixin(object):
"""
Rest Framework resource mixin for resources that need to have concurrent
accesses and editions controlled.
"""
def pre_save(self, obj):
current_version = obj.version
param_version = self.request.DATA.get('version', None)
if current_version != param_version:
raise exc.WrongArguments({"version": "The version doesn't match with the current one"})
if obj.id:
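            # models.F('version') + 1 defers the increment to the database,
            # so concurrent saves bump the counter atomically instead of
            # overwriting each other with stale Python-side values.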
obj.version = models.F('version') + 1
super().pre_save(obj)
class OCCModelMixin(models.Model):
"""
Generic model mixin that makes model compatible
with concurrency control system.
"""
version = models.IntegerField(null=False, blank=False, default=1, verbose_name=_("version"))
class Meta:
abstract = True
|
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taiga.base import exceptions as exc
class OCCResourceMixin(object):
"""
Rest Framework resource mixin for resources that need to have concurrent
accesses and editions controlled.
"""
def pre_save(self, obj):
current_version = obj.version
param_version = self.request.DATA.get('version', None)
if current_version != param_version:
raise exc.WrongArguments({"version": "The version doesn't match with the current one"})
obj.version = models.F('version') + 1
super().pre_save(obj)
class OCCModelMixin(models.Model):
"""
Generic model mixin that makes model compatible
with concurrency control system.
"""
version = models.IntegerField(null=False, blank=False, default=1, verbose_name=_("version"))
class Meta:
abstract = True
|
agpl-3.0
|
Python
|
4b7b2727a35cfcb0117b0ba4571da9a0ea81824a
|
Remove old reimplementation of routes.
|
joshisa/taiga-back,joshisa/taiga-back,gam-phon/taiga-back,bdang2012/taiga-back-casting,jeffdwyatt/taiga-back,rajiteh/taiga-back,crr0004/taiga-back,crr0004/taiga-back,EvgeneOskin/taiga-back,dayatz/taiga-back,Rademade/taiga-back,joshisa/taiga-back,Rademade/taiga-back,astronaut1712/taiga-back,forging2012/taiga-back,coopsource/taiga-back,dycodedev/taiga-back,frt-arch/taiga-back,astagi/taiga-back,Rademade/taiga-back,astronaut1712/taiga-back,astagi/taiga-back,seanchen/taiga-back,Tigerwhit4/taiga-back,seanchen/taiga-back,dayatz/taiga-back,taigaio/taiga-back,Tigerwhit4/taiga-back,gauravjns/taiga-back,jeffdwyatt/taiga-back,EvgeneOskin/taiga-back,joshisa/taiga-back,forging2012/taiga-back,Zaneh-/bearded-tribble-back,obimod/taiga-back,forging2012/taiga-back,Tigerwhit4/taiga-back,gauravjns/taiga-back,gam-phon/taiga-back,taigaio/taiga-back,obimod/taiga-back,astronaut1712/taiga-back,rajiteh/taiga-back,gauravjns/taiga-back,gam-phon/taiga-back,coopsource/taiga-back,WALR/taiga-back,Zaneh-/bearded-tribble-back,Zaneh-/bearded-tribble-back,jeffdwyatt/taiga-back,jeffdwyatt/taiga-back,xdevelsistemas/taiga-back-community,gauravjns/taiga-back,astagi/taiga-back,CoolCloud/taiga-back,obimod/taiga-back,bdang2012/taiga-back-casting,CoolCloud/taiga-back,seanchen/taiga-back,Rademade/taiga-back,Tigerwhit4/taiga-back,coopsource/taiga-back,dycodedev/taiga-back,crr0004/taiga-back,rajiteh/taiga-back,CMLL/taiga-back,CMLL/taiga-back,xdevelsistemas/taiga-back-community,CoolCloud/taiga-back,CMLL/taiga-back,dycodedev/taiga-back,WALR/taiga-back,Rademade/taiga-back,bdang2012/taiga-back-casting,obimod/taiga-back,WALR/taiga-back,xdevelsistemas/taiga-back-community,rajiteh/taiga-back,dayatz/taiga-back,astronaut1712/taiga-back,WALR/taiga-back,seanchen/taiga-back,19kestier/taiga-back,frt-arch/taiga-back,CMLL/taiga-back,EvgeneOskin/taiga-back,19kestier/taiga-back,bdang2012/taiga-back-casting,forging2012/taiga-back,astagi/taiga-back,crr0004/taiga-back,coopsource/taiga-back,frt-arch/taiga-back,19kestier/taiga-back,EvgeneOskin/taiga-back,dycodedev/taiga-back,CoolCloud/taiga-back,taigaio/taiga-back,gam-phon/taiga-back
|
greenmine/base/routers.py
|
greenmine/base/routers.py
|
# -*- coding: utf-8 -*-
from rest_framework import routers
class DefaultRouter(routers.DefaultRouter):
pass
__all__ = ["DefaultRouter"]
|
# -*- coding: utf-8 -*-
from rest_framework import routers
# Special router for actions.
actions_router = routers.Route(url=r'^{prefix}/{methodname}{trailing_slash}$',
mapping={'{httpmethod}': '{methodname}'},
name='{basename}-{methodnamehyphen}',
initkwargs={})
class DefaultRouter(routers.DefaultRouter):
routes = [
routers.DefaultRouter.routes[0],
actions_router,
routers.DefaultRouter.routes[2],
routers.DefaultRouter.routes[1]
]
__all__ = ["DefaultRouter"]
|
agpl-3.0
|
Python
|
0e2c092ce3472bf26db7d3b836eb230cfb002656
|
fix method naming conflict
|
r0fls/sanic,ashleysommer/sanic,lixxu/sanic,channelcat/sanic,yunstanford/sanic,lixxu/sanic,yunstanford/sanic,r0fls/sanic,channelcat/sanic,channelcat/sanic,ashleysommer/sanic,jrocketfingers/sanic,channelcat/sanic,jrocketfingers/sanic,lixxu/sanic,lixxu/sanic,Tim-Erwin/sanic,ashleysommer/sanic,ai0/sanic,yunstanford/sanic,Tim-Erwin/sanic,ai0/sanic,yunstanford/sanic
|
examples/sanic_peewee.py
|
examples/sanic_peewee.py
|
## You need the following additional packages for this example
# aiopg
# peewee_async
# peewee
## sanic imports
from sanic import Sanic
from sanic.response import json
## peewee_async related imports
import uvloop
import peewee
from peewee_async import Manager, PostgresqlDatabase
# we instantiate a custom loop so we can pass it to our db manager
loop = uvloop.new_event_loop()
database = PostgresqlDatabase(database='test',
host='127.0.0.1',
user='postgres',
password='mysecretpassword')
objects = Manager(database, loop=loop)
## from peewee_async docs:
# Also there’s no need to connect and re-connect before executing async queries
# with manager! It’s all automatic. But you can run Manager.connect() or
# Manager.close() when you need it.
# let's create a simple key value store:
class KeyValue(peewee.Model):
key = peewee.CharField(max_length=40, unique=True)
text = peewee.TextField(default='')
class Meta:
database = database
# create table synchronously
KeyValue.create_table(True)
# OPTIONAL: close synchronous connection
database.close()
# OPTIONAL: disable any future syncronous calls
objects.database.allow_sync = False # this will raise AssertionError on ANY sync call
app = Sanic('peewee_example')
@app.route('/post')
async def post(request):
await objects.create(KeyValue, key='my_first_async_db', text="I was inserted asynchronously!")
return json({'success': True})
@app.route('/get')
async def get(request):
all_objects = await objects.execute(KeyValue.select())
serialized_obj = []
for obj in all_objects:
serialized_obj.append({obj.key: obj.text})
return json({'objects': serialized_obj})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, loop=loop)
|
## You need the following additional packages for this example
# aiopg
# peewee_async
# peewee
## sanic imports
from sanic import Sanic
from sanic.response import json
## peewee_async related imports
import uvloop
import peewee
from peewee_async import Manager, PostgresqlDatabase
# we instantiate a custom loop so we can pass it to our db manager
loop = uvloop.new_event_loop()
database = PostgresqlDatabase(database='test',
host='127.0.0.1',
user='postgres',
password='mysecretpassword')
objects = Manager(database, loop=loop)
## from peewee_async docs:
# Also there’s no need to connect and re-connect before executing async queries
# with manager! It’s all automatic. But you can run Manager.connect() or
# Manager.close() when you need it.
# let's create a simple key value store:
class KeyValue(peewee.Model):
key = peewee.CharField(max_length=40, unique=True)
text = peewee.TextField(default='')
class Meta:
database = database
# create table synchronously
KeyValue.create_table(True)
# OPTIONAL: close synchronous connection
database.close()
# OPTIONAL: disable any future syncronous calls
objects.database.allow_sync = False # this will raise AssertionError on ANY sync call
app = Sanic('peewee_example')
@app.route('/post')
async def root(request):
await objects.create(KeyValue, key='my_first_async_db', text="I was inserted asynchronously!")
return json({'success': True})
@app.route('/get')
async def root(request):
all_objects = await objects.execute(KeyValue.select())
serialized_obj = []
for obj in all_objects:
serialized_obj.append({obj.key: obj.text})
return json({'objects': serialized_obj})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, loop=loop)
|
mit
|
Python
|
53e4a8a00d4b1c0bed0ae93bb48831b04f1fc12d
|
Exclude msaa on Mac bots Review URL: https://codereview.appspot.com/7055043
|
google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot
|
slave/skia_slave_scripts/run_gm.py
|
slave/skia_slave_scripts/run_gm.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia GM executable. """
from utils import shell_utils
from build_step import BuildStep
import errno
import os
import shutil
import sys
JSON_SUMMARY_FILENAME = 'actual-results.json'
class RunGM(BuildStep):
def _PreGM(self,):
print 'Removing %s' % self._gm_actual_dir
try:
shutil.rmtree(self._gm_actual_dir)
except:
pass
print 'Creating %s' % self._gm_actual_dir
try:
os.makedirs(self._gm_actual_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise e
def _RunModulo(self, cmd):
""" Run GM in multiple concurrent processes using the --modulo flag. """
subprocesses = []
retcodes = []
for idx in range(self._num_cores):
subprocesses.append(shell_utils.BashAsync(cmd + ['--modulo', str(idx),
str(self._num_cores)]))
for proc in subprocesses:
retcode = 0
try:
retcode = shell_utils.LogProcessToCompletion(proc)[0]
except:
retcode = 1
retcodes.append(retcode)
for retcode in retcodes:
if retcode != 0:
raise Exception('Command failed with code %d.' % retcode)
def _Run(self):
self._PreGM()
cmd = [self._PathToBinary('gm'),
'--writePath', self._gm_actual_dir,
'--writeJsonSummary', os.path.join(self._gm_actual_dir,
JSON_SUMMARY_FILENAME),
] + self._gm_args
# msaa16 is flaky on Macs (driver bug?) so we skip the test for now
if sys.platform == 'darwin':
cmd.extend(['--exclude-config', 'msaa16'])
self._RunModulo(cmd)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunGM))
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia GM executable. """
from utils import shell_utils
from build_step import BuildStep
import errno
import os
import shutil
import sys
JSON_SUMMARY_FILENAME = 'actual-results.json'
class RunGM(BuildStep):
def _PreGM(self,):
print 'Removing %s' % self._gm_actual_dir
try:
shutil.rmtree(self._gm_actual_dir)
except:
pass
print 'Creating %s' % self._gm_actual_dir
try:
os.makedirs(self._gm_actual_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise e
def _RunModulo(self, cmd):
""" Run GM in multiple concurrent processes using the --modulo flag. """
subprocesses = []
retcodes = []
for idx in range(self._num_cores):
subprocesses.append(shell_utils.BashAsync(cmd + ['--modulo', str(idx),
str(self._num_cores)]))
for proc in subprocesses:
retcode = 0
try:
retcode = shell_utils.LogProcessToCompletion(proc)[0]
except:
retcode = 1
retcodes.append(retcode)
for retcode in retcodes:
if retcode != 0:
raise Exception('Command failed with code %d.' % retcode)
def _Run(self):
self._PreGM()
cmd = [self._PathToBinary('gm'),
'--writePath', self._gm_actual_dir,
'--writeJsonSummary', os.path.join(self._gm_actual_dir,
JSON_SUMMARY_FILENAME),
] + self._gm_args
self._RunModulo(cmd)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunGM))
|
bsd-3-clause
|
Python
|
ff68546c69b68c4f83eb843f3ecb5789358d2f32
|
enable category select plugin by default
|
potato/searx,kdani3/searx,pointhi/searx,dzc34/searx,gugod/searx,asciimoo/searx,jibe-b/searx,gugod/searx,dzc34/searx,potato/searx,jcherqui/searx,dalf/searx,matejc/searx,jpope777/searx,potato/searx,jpope777/searx,jcherqui/searx,misnyo/searx,PwnArt1st/searx,PwnArt1st/searx,misnyo/searx,GreenLunar/searx,jcherqui/searx,gugod/searx,potato/searx,kdani3/searx,pointhi/searx,dzc34/searx,jcherqui/searx,framasoft/searx,GreenLunar/searx,kdani3/searx,asciimoo/searx,GreenLunar/searx,matejc/searx,framasoft/searx,pointhi/searx,framasoft/searx,jibe-b/searx,PwnArt1st/searx,matejc/searx,framasoft/searx,asciimoo/searx,pointhi/searx,GreenLunar/searx,jpope777/searx,dalf/searx,gugod/searx,dalf/searx,asciimoo/searx,jibe-b/searx,matejc/searx,dzc34/searx,jpope777/searx,kdani3/searx,dalf/searx,misnyo/searx,jibe-b/searx,PwnArt1st/searx,misnyo/searx
|
searx/plugins/search_on_category_select.py
|
searx/plugins/search_on_category_select.py
|
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <[email protected]>
'''
from flask.ext.babel import gettext
name = gettext('Search on category select')
description = gettext('Perform search immediately if a category selected. Disable to select multiple categories.')
default_on = True
js_dependencies = ('js/search_on_category_select.js',)
|
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <[email protected]>
'''
from flask.ext.babel import gettext
name = 'Search on category select'
description = gettext('Perform search immediately if a category selected')
default_on = False
js_dependencies = ('js/search_on_category_select.js',)
|
agpl-3.0
|
Python
|
e6a72c4987246e5c56863a7b98cdbe8be729a688
|
fix syntax errors
|
Krozark/django-slider,Krozark/django-slider,Krozark/django-slider
|
slider/templatetags/slider_tags.py
|
slider/templatetags/slider_tags.py
|
# -*- coding: utf-8 -*-
from django import template
from slider.models import SliderImage
import random
register = template.Library()
def get_random_item(l,max=None):
res= []
size = len(l)
indexs = range(0,size)
if max == None:
max = size
for i in range(0, max):
index = random.choice(indexs)
indexs.pop(index)
res += l[index]
return res
@register.assignment_tag
def get_slider_images(limit=False, randomize=True):
qs = SliderImage.objects.filter(is_visible=True)
if randomize is True and limit is True :
qs = get_random_item(qs,limit)
elif randomize is True:
qs = get_random_item(qs)
if limit is not False:
qs = qs[0:limit]
return qs
|
# -*- coding: utf-8 -*-
from django import template
from slider.models import SliderImage
import random
register = template.Library()
def get_random_item(l,max=None):
res= []
size = len(l)
indexs = range(0,size)
if max = None:
max = size
for i in range(0: max):
index = random.choice(indexs)
indexs.pop(index)
res += l[index]
return res
@register.assignment_tag
def get_slider_images(limit=False, randomize=True):
qs = SliderImage.objects.filter(is_visible=True)
if randomize is True and limit is True :
qs = get_random_item(qs,limit)
elif randomize is True:
qs = get_random_item(qs)
if limit is not False:
qs = qs[0:limit]
return qs
|
bsd-2-clause
|
Python
|
82b4ea673aefd73384eb442c1769211d55c74c14
|
Update test infrastructure
|
dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore,dbinetti/barberscore-django,dbinetti/barberscore-django,barberscore/barberscore-api
|
project/settings/test.py
|
project/settings/test.py
|
# Local
from .base import *
# Heroku
ALLOWED_HOSTS = [
'testserver',
]
# Redis
RQ_QUEUES['default']['ASYNC'] = False
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Cloudinary
CLOUDINARY_URL = None
MEDIA_URL = '/media/'
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
|
# Local
from .base import *
# Heroku
ALLOWED_HOSTS = [
'testserver',
]
# Redis
RQ_QUEUES['default']['ASYNC'] = False
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
bsd-2-clause
|
Python
|
e2ef73097ae220be4e52563e4e098aea228f82fa
|
rename polls hook
|
praekelt/molo-tuneme,praekelt/molo-tuneme,praekelt/molo-tuneme,praekelt/molo-tuneme
|
polls/wagtail_hooks.py
|
polls/wagtail_hooks.py
|
from django.conf.urls import url
from polls.admin import QuestionsModelAdmin
from polls.admin_views import QuestionResultsAdminView
from polls.models import PollsIndexPage
from wagtail.wagtailcore import hooks
from wagtail.contrib.modeladmin.options import modeladmin_register
from django.contrib.auth.models import User
@hooks.register('register_admin_urls')
def register_question_results_admin_view_url():
return [
url(r'polls/question/(?P<parent>\d+)/results/$',
QuestionResultsAdminView.as_view(),
name='question-results-admin'),
]
modeladmin_register(QuestionsModelAdmin)
@hooks.register('construct_main_menu')
def show_polls_entries_for_users_have_access(request, menu_items):
if not request.user.is_superuser and not User.objects.filter(
pk=request.user.pk, groups__name='Moderators').exists():
menu_items[:] = [
item for item in menu_items if item.name != 'polls']
@hooks.register('construct_explorer_page_queryset')
def hide_polls_index_page(parent_page, pages, request):
polls_index_page_pk = PollsIndexPage.objects.descendant_of(
request.site.root_page).first().pk
return pages.exclude(pk=polls_index_page_pk)
|
from django.conf.urls import url
from polls.admin import QuestionsModelAdmin
from polls.admin_views import QuestionResultsAdminView
from polls.models import PollsIndexPage
from wagtail.wagtailcore import hooks
from wagtail.contrib.modeladmin.options import modeladmin_register
from django.contrib.auth.models import User
@hooks.register('register_admin_urls')
def register_question_results_admin_view_url():
return [
url(r'polls/question/(?P<parent>\d+)/results/$',
QuestionResultsAdminView.as_view(),
name='question-results-admin'),
]
modeladmin_register(QuestionsModelAdmin)
@hooks.register('construct_main_menu')
def show_polls_entries_for_users_have_access(request, menu_items):
if not request.user.is_superuser and not User.objects.filter(
pk=request.user.pk, groups__name='Moderators').exists():
menu_items[:] = [
item for item in menu_items if item.name != 'polls']
@hooks.register('construct_explorer_page_queryset')
def show_main_language_only(parent_page, pages, request):
polls_index_page_pk = PollsIndexPage.objects.descendant_of(
request.site.root_page).first().pk
return pages.exclude(pk=polls_index_page_pk)
|
bsd-2-clause
|
Python
|
5fc16267239890acbf6c4d7ab4685c4a2f420360
|
allow empty domain in tests
|
qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
corehq/form_processor/utils/general.py
|
corehq/form_processor/utils/general.py
|
from django.conf import settings
from corehq.toggles import USE_SQL_BACKEND, NAMESPACE_DOMAIN, NEW_EXPORTS, TF_USES_SQLITE_BACKEND
from dimagi.utils.logging import notify_exception
def should_use_sql_backend(domain_name):
from corehq.apps.domain.models import Domain
if settings.UNIT_TESTING:
return _should_use_sql_backend_in_tests(domain_name)
# TODO: remove toggle once all domains have been migrated
toggle_enabled = USE_SQL_BACKEND.enabled(domain_name)
if toggle_enabled:
try:
# migrate domains in toggle
domain = Domain.get_by_name(domain_name)
if not domain.use_sql_backend:
domain.use_sql_backend = True
domain.save()
USE_SQL_BACKEND.set(domain_name, enabled=False, namespace=NAMESPACE_DOMAIN)
except Exception:
notify_exception(None, "Error migrating SQL BACKEND toggle", {
'domain': domain_name
})
return True
return toggle_enabled or Domain.get_by_name(domain_name).use_sql_backend
def _should_use_sql_backend_in_tests(domain_name):
"""The default return value is False unless the ``TESTS_SHOULD_USE_SQL_BACKEND`` setting
has been set or a Domain object with the same name exists."""
assert settings.UNIT_TESTING
from corehq.apps.domain.models import Domain
override = getattr(settings, 'TESTS_SHOULD_USE_SQL_BACKEND', None)
if override is not None:
return override
elif domain_name and getattr(settings, 'DB_ENABLED', True):
domain = Domain.get_by_name(domain_name)
return domain and domain.use_sql_backend
else:
return False
def use_new_exports(domain_name):
return NEW_EXPORTS.enabled(domain_name) or should_use_sql_backend(domain_name)
def use_sqlite_backend(domain_name):
return TF_USES_SQLITE_BACKEND.enabled(domain_name) or should_use_sql_backend(domain_name)
def is_commcarecase(obj):
from casexml.apps.case.models import CommCareCase
from corehq.form_processor.models import CommCareCaseSQL
return isinstance(obj, (CommCareCase, CommCareCaseSQL))
|
from django.conf import settings
from corehq.toggles import USE_SQL_BACKEND, NAMESPACE_DOMAIN, NEW_EXPORTS, TF_USES_SQLITE_BACKEND
from dimagi.utils.logging import notify_exception
def should_use_sql_backend(domain_name):
from corehq.apps.domain.models import Domain
if settings.UNIT_TESTING:
return _should_use_sql_backend_in_tests(domain_name)
# TODO: remove toggle once all domains have been migrated
toggle_enabled = USE_SQL_BACKEND.enabled(domain_name)
if toggle_enabled:
try:
# migrate domains in toggle
domain = Domain.get_by_name(domain_name)
if not domain.use_sql_backend:
domain.use_sql_backend = True
domain.save()
USE_SQL_BACKEND.set(domain_name, enabled=False, namespace=NAMESPACE_DOMAIN)
except Exception:
notify_exception(None, "Error migrating SQL BACKEND toggle", {
'domain': domain_name
})
return True
return toggle_enabled or Domain.get_by_name(domain_name).use_sql_backend
def _should_use_sql_backend_in_tests(domain_name):
"""The default return value is False unless the ``TESTS_SHOULD_USE_SQL_BACKEND`` setting
has been set or a Domain object with the same name exists."""
assert settings.UNIT_TESTING
from corehq.apps.domain.models import Domain
override = getattr(settings, 'TESTS_SHOULD_USE_SQL_BACKEND', None)
if override is not None:
return override
elif getattr(settings, 'DB_ENABLED', True):
domain = Domain.get_by_name(domain_name)
return domain and domain.use_sql_backend
else:
return False
def use_new_exports(domain_name):
return NEW_EXPORTS.enabled(domain_name) or should_use_sql_backend(domain_name)
def use_sqlite_backend(domain_name):
return TF_USES_SQLITE_BACKEND.enabled(domain_name) or should_use_sql_backend(domain_name)
def is_commcarecase(obj):
from casexml.apps.case.models import CommCareCase
from corehq.form_processor.models import CommCareCaseSQL
return isinstance(obj, (CommCareCase, CommCareCaseSQL))
|
bsd-3-clause
|
Python
|
a486d9bb6f498391997639b549b51b691490f4fa
|
Update settings.py
|
cartologic/cartoview-project-template
|
project_name/settings.py
|
project_name/settings.py
|
# -*- coding: utf-8 -*-
import os
from cartoview.settings import *
PROJECT_NAME = "{{project_name}}"
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# static settings section
STATICFILES_DIRS += [os.path.join(PROJECT_DIR, "static"), ]
MEDIA_ROOT = os.path.join(BASE_DIR, "uploaded")
MEDIA_URL = "/uploaded/"
LOCAL_MEDIA_URL = "/uploaded/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
APPS_DIR = os.path.abspath(os.path.join(BASE_DIR, "apps"))
try:
from .local_settings import *
except:
pass
# cartoview setings
TEMPLATES[0]["DIRS"] = CARTOVIEW_TEMPLATE_DIRS + TEMPLATES[0]["DIRS"]
from cartoview import app_manager
from past.builtins import execfile
app_manager_settings = os.path.join(
os.path.dirname(app_manager.__file__), "settings.py")
execfile(os.path.realpath(app_manager_settings))
load_apps(APPS_DIR)
INSTALLED_APPS += CARTOVIEW_APPS
for settings_file in APPS_SETTINGS:
try:
execfile(settings_file)
except Exception as e:
print(e.message)
|
# -*- coding: utf-8 -*-
import os
from cartoview.settings import *
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# static settings section
STATICFILES_DIRS += [os.path.join(PROJECT_DIR, "static"), ]
MEDIA_ROOT = os.path.join(BASE_DIR, "uploaded")
MEDIA_URL = "/uploaded/"
LOCAL_MEDIA_URL = "/uploaded/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
APPS_DIR = os.path.abspath(os.path.join(BASE_DIR, "apps"))
try:
from .local_settings import *
except:
pass
# cartoview setings
TEMPLATES[0]["DIRS"] = CARTOVIEW_TEMPLATE_DIRS + TEMPLATES[0]["DIRS"]
from cartoview import app_manager
from past.builtins import execfile
app_manager_settings = os.path.join(
os.path.dirname(app_manager.__file__), "settings.py")
execfile(os.path.realpath(app_manager_settings))
load_apps(APPS_DIR)
INSTALLED_APPS += CARTOVIEW_APPS
for settings_file in APPS_SETTINGS:
try:
execfile(settings_file)
except Exception as e:
print(e.message)
|
bsd-2-clause
|
Python
|
70b61dd599529009f9cf9631c9ae505dd210c23b
|
Fix flake8 issues with OtsuMultipleThreshold.py
|
thewtex/tomviz,cryos/tomviz,cjh1/tomviz,mathturtle/tomviz,thewtex/tomviz,cryos/tomviz,mathturtle/tomviz,cjh1/tomviz,mathturtle/tomviz,OpenChemistry/tomviz,OpenChemistry/tomviz,OpenChemistry/tomviz,thewtex/tomviz,cryos/tomviz,cjh1/tomviz,OpenChemistry/tomviz
|
tomviz/python/OtsuMultipleThreshold.py
|
tomviz/python/OtsuMultipleThreshold.py
|
def transform_scalars(dataset):
"""This filter performs semi-automatic multithresholding of a data set.
Voxels are automatically classified into a chosen number of classes such
that inter-class variance of the voxel values is minimized. The output is a
label map with one label per voxel class.
"""
try:
import itk
import vtk
from tomviz import utils
except Exception as exc:
print("Could not import necessary module(s) itk, vtk, or tomviz.utils")
print(exc)
#----USER SPECIFIED VARIABLES----#
###NUMBEROFTHRESHOLDS### # Specify number of thresholds between classes
###ENABLEVALLEYEMPHASIS### # Enable valley emphasis.
# Return values
returnValues = None
# Add a try/except around the ITK portion. ITK exceptions are
# passed up to the Python layer, so we can at least report what
# went wrong with the script, e.g,, unsupported image type.
try:
# Get the ITK image
itk_image = utils.convert_vtk_to_itk_image(dataset)
itk_input_image_type = type(itk_image)
# OtsuMultipleThresholdsImageFilter's wrapping requires that the input
# and output image types be the same.
# TODO - handle casting of float image types to some sensible integer
# format.
itk_threshold_image_type = itk_input_image_type
# Otsu multiple threshold filter
otsu_filter = itk.OtsuMultipleThresholdsImageFilter[
itk_input_image_type, itk_threshold_image_type].New()
otsu_filter.SetNumberOfThresholds(number_of_thresholds)
otsu_filter.SetValleyEmphasis(enable_valley_emphasis)
otsu_filter.SetInput(itk_image)
otsu_filter.Update()
print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(),))
itk_image_data = otsu_filter.GetOutput()
label_buffer = itk.PyBuffer[itk_threshold_image_type] \
.GetArrayFromImage(itk_image_data)
label_map_data_set = vtk.vtkImageData()
label_map_data_set.CopyStructure(dataset)
utils.set_label_map(label_map_data_set, label_buffer)
# Set up dictionary to return operator results
returnValues = {}
returnValues["label_map"] = label_map_data_set
except Exception as exc:
print("Exception encountered while running OtsuMultipleThreshold")
print(exc)
return returnValues
|
def transform_scalars(dataset):
"""This filter performs semi-automatic multithresholding of a data set.
Voxels are automatically classified into a chosen number of classes such that
inter-class variance of the voxel values is minimized. The output is a label
map with one label per voxel class.
"""
try:
import itk
import vtk
from tomviz import utils
except Exception as exc:
print("Could not import necessary module(s) itk, vtk, or tomviz.utils")
print(exc)
#----USER SPECIFIED VARIABLES----#
###NUMBEROFTHRESHOLDS### # Specify number of thresholds between classes
###ENABLEVALLEYEMPHASIS### # Enable valley emphasis.
# Return values
returnValues = None
# Add a try/except around the ITK portion. ITK exceptions are
# passed up to the Python layer, so we can at least report what
# went wrong with the script, e.g,, unsupported image type.
try:
# Get the ITK image
itk_image = utils.convert_vtk_to_itk_image(dataset)
itk_input_image_type = type(itk_image)
# OtsuMultipleThresholdsImageFilter's wrapping requires that the input
# and output image types be the same.
# TODO - handle casting of float image types to some sensible integer
# format.
itk_threshold_image_type = itk_input_image_type
# Otsu multiple threshold filter
otsu_filter = itk.OtsuMultipleThresholdsImageFilter[itk_input_image_type, itk_threshold_image_type].New()
otsu_filter.SetNumberOfThresholds(number_of_thresholds)
otsu_filter.SetValleyEmphasis(enable_valley_emphasis);
otsu_filter.SetInput(itk_image)
otsu_filter.Update()
print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(),))
itk_image_data = otsu_filter.GetOutput()
label_buffer = itk.PyBuffer[itk_threshold_image_type].GetArrayFromImage(itk_image_data)
label_map_data_set = vtk.vtkImageData()
label_map_data_set.CopyStructure(dataset)
utils.set_label_map(label_map_data_set, label_buffer);
# Set up dictionary to return operator results
returnValues = {}
returnValues["label_map"] = label_map_data_set
except Exception as exc:
print("Exception encountered while running OtsuMultipleThreshold")
print(exc)
return returnValues
|
bsd-3-clause
|
Python
|
3e1f330236fdb0af692099f91ee3435d273a7bad
|
Fix import error "No module named six.moves" for plugin sanity job
|
cisco-openstack/tempest,openstack/tempest,cisco-openstack/tempest,Juniper/tempest,masayukig/tempest,vedujoshi/tempest,masayukig/tempest,openstack/tempest,Juniper/tempest,vedujoshi/tempest
|
tools/generate-tempest-plugins-list.py
|
tools/generate-tempest-plugins-list.py
|
#! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.openstack.org Gerrit API
# working directory
# * network access to https://git.openstack.org/cgit
import json
import re
try:
# For Python 3.0 and later
from urllib.error import HTTPError as HTTPError
import urllib.request as urllib
except ImportError:
# Fall back to Python 2's urllib2
import urllib2 as urllib
from urllib2 import HTTPError as HTTPError
url = 'https://review.openstack.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_openstack_namespace(proj):
return proj.startswith('openstack/')
# Rather than returning a 404 for a nonexistent file, cgit delivers a
# 0-byte response to a GET request. It also does not provide a
# Content-Length in a HEAD response, so the way we tell if a file exists
# is to check the length of the entire GET response body.
def has_tempest_plugin(proj):
try:
r = urllib.urlopen(
"https://git.openstack.org/cgit/%s/plain/setup.cfg" % proj)
except HTTPError as err:
if err.code == 404:
return False
p = re.compile('^tempest\.test_plugins', re.M)
if p.findall(r.read().decode('utf-8')):
return True
else:
False
r = urllib.urlopen(url)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
# json library won't choke.
projects = sorted(filter(is_in_openstack_namespace, json.loads(r.read()[4:])))
# Retrieve projects having no deb, ui or spec namespace as those namespaces
# do not contains tempest plugins.
projects_list = [i for i in projects if not (i.startswith('openstack/deb-') or
i.endswith('-ui') or
i.endswith('-specs'))]
found_plugins = list(filter(has_tempest_plugin, projects_list))
# Every element of the found_plugins list begins with "openstack/".
# We drop those initial 10 octets when printing the list.
for project in found_plugins:
print(project[10:])
|
#! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.openstack.org Gerrit API
# working directory
# * network access to https://git.openstack.org/cgit
import json
import re
from six.moves import urllib
url = 'https://review.openstack.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_openstack_namespace(proj):
return proj.startswith('openstack/')
# Rather than returning a 404 for a nonexistent file, cgit delivers a
# 0-byte response to a GET request. It also does not provide a
# Content-Length in a HEAD response, so the way we tell if a file exists
# is to check the length of the entire GET response body.
def has_tempest_plugin(proj):
try:
r = urllib.request.urlopen(
"https://git.openstack.org/cgit/%s/plain/setup.cfg" % proj)
except urllib.error.HTTPError as err:
if err.code == 404:
return False
p = re.compile('^tempest\.test_plugins', re.M)
if p.findall(r.read().decode('utf-8')):
return True
else:
False
r = urllib.request.urlopen(url)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
# json library won't choke.
projects = sorted(filter(is_in_openstack_namespace, json.loads(r.read()[4:])))
# Retrieve projects having no deb, ui or spec namespace as those namespaces
# do not contains tempest plugins.
projects_list = [i for i in projects if not (i.startswith('openstack/deb-') or
i.endswith('-ui') or
i.endswith('-specs'))]
found_plugins = list(filter(has_tempest_plugin, projects_list))
# Every element of the found_plugins list begins with "openstack/".
# We drop those initial 10 octets when printing the list.
for project in found_plugins:
print(project[10:])
|
apache-2.0
|
Python
|
e065515362281039f459e5fa79292957f0435aa7
|
Fix copyright year
|
uber-common/opentracing-python-instrumentation,uber-common/opentracing-python-instrumentation
|
opentracing_instrumentation/client_hooks/_singleton.py
|
opentracing_instrumentation/client_hooks/_singleton.py
|
# Copyright (c) 2015,2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import functools
NOT_CALLED = 1
CALLED = 2
def singleton(func):
"""
This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!!
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if wrapper.__call_state__ == CALLED:
return
ret = func(*args, **kwargs)
wrapper.__call_state__ = CALLED
return ret
def reset():
wrapper.__call_state__ = NOT_CALLED
wrapper.reset = reset
reset()
# save original func to be able to patch and restore multiple times from
# unit tests
wrapper.__original_func = func
return wrapper
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import functools
NOT_CALLED = 1
CALLED = 2
def singleton(func):
"""
This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!!
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if wrapper.__call_state__ == CALLED:
return
ret = func(*args, **kwargs)
wrapper.__call_state__ = CALLED
return ret
def reset():
wrapper.__call_state__ = NOT_CALLED
wrapper.reset = reset
reset()
# save original func to be able to patch and restore multiple times from
# unit tests
wrapper.__original_func = func
return wrapper
|
mit
|
Python
|
d896082b282d17616573de2bcca4b383420d1e7a
|
Fix a bad import (get_version)
|
Agicia/lpod-python,lpod/lpod-docs,Agicia/lpod-python,lpod/lpod-docs
|
python/__init__.py
|
python/__init__.py
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2009 Itaapy, ArsAperta, Pierlis, Talend
# Import from itools
from itools.core import get_version
__version__ = get_version()
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2009 Itaapy, ArsAperta, Pierlis, Talend
# Import from itools
from itools.pkg import get_version
__version__ = get_version()
|
apache-2.0
|
Python
|
b78ce84f2a36789fc0fbb6b184b5c8d8ebb23234
|
Clarify py.test arguments in run_test.py
|
ericdill/bluesky,ericdill/bluesky
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
import sys
import pytest
if __name__ == '__main__':
# show output results from every test function
args = ['-v']
# show the message output for skipped and expected failure tests
args.append('-rxs')
# compute coverage stats for bluesky
args.extend(['--cov', 'bluesky'])
# call pytest and exit with the return code from pytest so that
# travis will fail correctly if tests fail
sys.exit(pytest.main(args))
|
#!/usr/bin/env python
import sys
import pytest
if __name__ == '__main__':
sys.exit(pytest.main())
|
bsd-3-clause
|
Python
|