Dataset schema (one row per commit; value ranges as reported by the dataset viewer):

column        type           range
commit        stringlengths  40 – 40
subject       stringlengths  4 – 1.73k
repos         stringlengths  5 – 127k
old_file      stringlengths  2 – 751
new_file      stringlengths  2 – 751
new_contents  stringlengths  1 – 8.98k
old_contents  stringlengths  0 – 6.59k
license       stringclasses  13 values
lang          stringclasses  23 values

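Each record below pairs a file's pre-commit text (old_contents) with its post-commit text (new_contents). As a quick orientation for working with rows of this shape, here is a minimal sketch using only the Python standard library; the `row` dict and the `show_edit` helper are illustrative assumptions keyed by the column names in the table above, not part of the dataset itself.

# Minimal sketch: recover a unified diff from one record of this dataset.
# Assumes `row` is a dict keyed by the schema's column names (see table above).
import difflib

def show_edit(row):
    """Print the short commit hash, the subject, and the old->new unified diff."""
    diff = difflib.unified_diff(
        row["old_contents"].splitlines(keepends=True),
        row["new_contents"].splitlines(keepends=True),
        fromfile=row["old_file"],
        tofile=row["new_file"],
    )
    print(row["commit"][:8], row["subject"])  # short hash + commit message
    print("".join(diff))

Run on the first record below, for example, this would print a diff showing the version bump from 0.2.21 to 0.3.1 and the dropped newspaper3k pin.
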
commit: b3f4c5211d33d8242b863f812421b37b22cd91cc
subject: update deps, setup.py
repos: psolbach/metadoc
old_file: setup.py
new_file: setup.py
new_contents:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
from subprocess import call
from setuptools import setup, find_packages
from setuptools.command.install import install as _install

version = '0.3.1'

def _post_install(dir):
    call([sys.executable, 'setup_post.py'],
         cwd=os.path.join(dir, 'metadoc'))

class CustomInstall(_install):
    """Do stuff after setup."""
    def run(self):
        _install.run(self)
        self.execute(_post_install, (self.install_lib,),
                     msg="Running post install task")

setup(
    name='metadoc',
    version=version,
    description="Post-truth era news article metadata service.",
    long_description="",
    classifiers=[
        # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Programming Language :: Python :: 3.5",
        "Topic :: Internet :: WWW/HTTP",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Operating System :: POSIX :: Linux",
        "Environment :: Web Environment",
    ],
    keywords=["scraping", "metadata", "news article"],
    author='Paul Solbach',
    author_email='[email protected]',
    url='https://github.com/psolbach/metadoc',
    license='MIT',
    cmdclass={'install': CustomInstall},
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'aiohttp==1.1.5',
        'asynctest==0.9.0',
        'bottle==0.12.10',
        'jmespath==0.9.0',
        'langdetect==1.0.7',
        'libextract==0.0.12',
        'nltk==3.2.1',
        'pytest==3.0.5',
        'pytest-cov==2.4.0',
        'numpy==1.11.2',
        'tldextract==2.0.2',
        'requests==2.12.2',
        'whois==0.7'
    ]
)

old_contents:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
from subprocess import call
from setuptools import setup, find_packages
from setuptools.command.install import install as _install

version = '0.2.21'

def _post_install(dir):
    call([sys.executable, 'setup_post.py'],
         cwd=os.path.join(dir, 'metadoc'))

class CustomInstall(_install):
    """Do stuff after setup."""
    def run(self):
        _install.run(self)
        self.execute(_post_install, (self.install_lib,),
                     msg="Running post install task")

setup(
    name='metadoc',
    version=version,
    description="Post-truth era news article metadata service.",
    long_description="",
    classifiers=[
        # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Programming Language :: Python :: 3.5",
        "Topic :: Internet :: WWW/HTTP",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Operating System :: POSIX :: Linux",
        "Environment :: Web Environment",
    ],
    keywords=["scraping", "metadata", "news article"],
    author='Paul Solbach',
    author_email='[email protected]',
    url='https://github.com/psolbach/metadoc',
    license='MIT',
    cmdclass={'install': CustomInstall},
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'aiohttp==1.1.5',
        'asynctest==0.9.0',
        'bottle==0.12.10',
        'jmespath==0.9.0',
        'langdetect==1.0.7',
        'libextract==0.0.12',
        'newspaper3k==0.1.7',
        'nltk==3.2.1',
        'pytest==3.0.5',
        'pytest-cov==2.4.0',
        'numpy==1.11.2',
        'tldextract==2.0.2',
        'requests==2.12.2',
        'whois==0.7'
    ]
)

license: mit
lang: Python

commit: 8978c1b49f43465bc3cd51b3ee51350d44ed9ae7
subject: Bump tqdm from 4.38.0 to 4.39.0
repos: glidernet/ogn-python,glidernet/ogn-python,Meisterschueler/ogn-python,Meisterschueler/ogn-python,glidernet/ogn-python,glidernet/ogn-python,Meisterschueler/ogn-python,Meisterschueler/ogn-python
old_file: setup.py
new_file: setup.py
new_contents:

#!/usr/bin/env python3

from os import path
from setuptools import setup, find_packages

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='ogn-python',
    version='0.5.0',
    description='A database backend for the Open Glider Network',
    long_description=long_description,
    url='https://github.com/glidernet/ogn-python',
    author='Konstantin Gründger aka Meisterschueler, Fabian P. Schmidt aka kerel, Dominic Spreitz',
    author_email='[email protected]',
    license='AGPLv3',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: GIS',
        'License :: OSI Approved :: GNU Affero General Public License v3',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='gliding ogn',
    packages=find_packages(exclude=['tests', 'tests.*']),
    install_requires=[
        'Flask==1.1.1',
        'Flask-SQLAlchemy==2.4.1',
        'Flask-Migrate==2.5.2',
        'Flask-Bootstrap==3.3.7.1',
        'Flask-WTF==0.14.2',
        'Flask-Caching==1.8.0',
        'geopy==1.20.0',
        'celery==4.3.0',
        'redis==3.3.11',
        'aerofiles==1.0.0',
        'geoalchemy2==0.6.3',
        'shapely==1.6.4.post2',
        'ogn-client==0.9.5',
        'psycopg2-binary==2.8.4',
        'mgrs==1.3.5',
        'xmlunittest==0.5.0',
        'tqdm==4.39.0',
        'requests==2.22.0',
    ],
    test_require=[
        'pytest==5.0.1',
        'flake8==1.1.1',
        'xmlunittest==0.4.0',
    ],
    zip_safe=False
)

old_contents:

#!/usr/bin/env python3

from os import path
from setuptools import setup, find_packages

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='ogn-python',
    version='0.5.0',
    description='A database backend for the Open Glider Network',
    long_description=long_description,
    url='https://github.com/glidernet/ogn-python',
    author='Konstantin Gründger aka Meisterschueler, Fabian P. Schmidt aka kerel, Dominic Spreitz',
    author_email='[email protected]',
    license='AGPLv3',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: GIS',
        'License :: OSI Approved :: GNU Affero General Public License v3',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='gliding ogn',
    packages=find_packages(exclude=['tests', 'tests.*']),
    install_requires=[
        'Flask==1.1.1',
        'Flask-SQLAlchemy==2.4.1',
        'Flask-Migrate==2.5.2',
        'Flask-Bootstrap==3.3.7.1',
        'Flask-WTF==0.14.2',
        'Flask-Caching==1.8.0',
        'geopy==1.20.0',
        'celery==4.3.0',
        'redis==3.3.11',
        'aerofiles==1.0.0',
        'geoalchemy2==0.6.3',
        'shapely==1.6.4.post2',
        'ogn-client==0.9.5',
        'psycopg2-binary==2.8.4',
        'mgrs==1.3.5',
        'xmlunittest==0.5.0',
        'tqdm==4.38.0',
        'requests==2.22.0',
    ],
    test_require=[
        'pytest==5.0.1',
        'flake8==1.1.1',
        'xmlunittest==0.4.0',
    ],
    zip_safe=False
)

license: agpl-3.0
lang: Python

commit: 05295bfa9edf99dfe66d21025088e00ae6152bfa
subject: bump version to 0.3.5
repos: MatthewCox/colour-valgrind
old_file: setup.py
new_file: setup.py
new_contents:

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

try:
    from pypandoc import convert
    read_md = lambda f: convert(f, 'rst')
except ImportError:
    print("warning: pypandoc module not found,"
          "could not convert markdown README to RST")
    read_md = lambda f: open(f, 'r').read()

config = {
    'name': 'colour-valgrind',
    'version': '0.3.5',
    'description': 'Wraps Valgrind to colour the output.',
    'long_description': read_md('README.md'),
    'author': 'Matthew Cox',
    'url': 'http://github.com/MatthewCox/colour-valgrind',
    'author_email': '[email protected]',
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Debuggers',
        'Topic :: Text Processing :: Filters',
        'Topic :: Utilities',
    ],
    'keywords': 'valgrind color colour filter',
    'license': 'MIT',
    'packages': ['colourvalgrind'],
    'install_requires': [
        'colorama',
        'regex',
        'six',
    ],
    'entry_points': {
        'console_scripts': ['colour-valgrind=colourvalgrind.command_line:main'],
    },
    'include_package_data': True,
}

setup(**config)

old_contents:

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

try:
    from pypandoc import convert
    read_md = lambda f: convert(f, 'rst')
except ImportError:
    print("warning: pypandoc module not found,"
          "could not convert markdown README to RST")
    read_md = lambda f: open(f, 'r').read()

config = {
    'name': 'colour-valgrind',
    'version': '0.3.4',
    'description': 'Wraps Valgrind to colour the output.',
    'long_description': read_md('README.md'),
    'author': 'Matthew Cox',
    'url': 'http://github.com/MatthewCox/colour-valgrind',
    'author_email': '[email protected]',
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Debuggers',
        'Topic :: Text Processing :: Filters',
        'Topic :: Utilities',
    ],
    'keywords': 'valgrind color colour filter',
    'license': 'MIT',
    'packages': ['colourvalgrind'],
    'install_requires': [
        'colorama',
        'regex',
        'six',
    ],
    'entry_points': {
        'console_scripts': ['colour-valgrind=colourvalgrind.command_line:main'],
    },
    'include_package_data': True,
}

setup(**config)

license: mit
lang: Python

commit: 57b17e6edcbe1e5400e3ede82292c1cd1c38f4e4
subject: Bump version
repos: Contraz/demosys-py
old_file: setup.py
new_file: setup.py
new_contents:

from setuptools import setup, find_packages
from pip.req import parse_requirements

def reqs_from_requirements_file():
    reqs = parse_requirements('requirements.txt', session='hack')
    return [str(r.req) for r in reqs]

setup(
    name="demosys-py",
    version="0.1.2",
    description="Modern OpenGL 4.1+ Prototype Framework inspired by Django",
    long_description=open('README.rst').read(),
    url="https://github.com/Contraz/demosys-py",
    author="Einar Forselv",
    author_email="[email protected]",
    maintainer="Einar Forselv",
    maintainer_email="[email protected]",
    packages=find_packages(),
    include_package_data=True,
    keywords = ['opengl', 'framework'],
    classifiers=[
        'Programming Language :: Python',
        'Environment :: MacOS X',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Graphics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    install_requires=reqs_from_requirements_file(),
    entry_points={'console_scripts': [
        'demosys_test = demosys_test.main:main',
        'demosys-admin = demosys.core.management:execute_from_command_line',
    ]},
)

old_contents:

from setuptools import setup, find_packages
from pip.req import parse_requirements

def reqs_from_requirements_file():
    reqs = parse_requirements('requirements.txt', session='hack')
    return [str(r.req) for r in reqs]

setup(
    name="demosys-py",
    version="0.1.1",
    description="Modern OpenGL 4.1+ Prototype Framework inspired by Django",
    long_description=open('README.rst').read(),
    url="https://github.com/Contraz/demosys-py",
    author="Einar Forselv",
    author_email="[email protected]",
    maintainer="Einar Forselv",
    maintainer_email="[email protected]",
    packages=find_packages(),
    include_package_data=True,
    keywords = ['opengl', 'framework'],
    classifiers=[
        'Programming Language :: Python',
        'Environment :: MacOS X',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Topic :: Multimedia :: Graphics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    install_requires=reqs_from_requirements_file(),
    entry_points={'console_scripts': [
        'demosys_test = demosys_test.main:main',
        'demosys-admin = demosys.core.management:execute_from_command_line',
    ]},
)

license: isc
lang: Python

commit: 2cc2bf3665246f1876e9c25911baf6e418a356db
subject: Add include_package_data=True to setup
repos: lamenezes/simple-model
old_file: setup.py
new_file: setup.py
new_contents:

import os
import sys
from pathlib import Path
from shutil import rmtree

from setuptools import setup, find_packages, Command

from simple_model.__version__ import __version__

here = Path(__file__).absolute().parent

with open(here / 'README.rst') as f:
    readme = '\n' + f.read()

class UploadCommand(Command):
    """Support setup.py publish."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except FileNotFoundError:
            pass

        self.status('Building Source distribution…')
        os.system('{0} setup.py sdist'.format(sys.executable))

        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')

        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(__version__))
        os.system('git push --tags')

        sys.exit()

setup(
    name='pysimplemodel',
    version=__version__,
    description='Data handling made easy',
    long_description='\n' + readme,
    url='https://github.com/lamenezes/simple-model',
    author='Luiz Menezes',
    author_email='[email protected]',
    packages=find_packages(exclude=['tests']),
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
    ],
    cmdclass={
        'upload': UploadCommand,
    },
    include_package_data=True,
)

old_contents:

import os
import sys
from pathlib import Path
from shutil import rmtree

from setuptools import setup, find_packages, Command

from simple_model.__version__ import __version__

here = Path(__file__).absolute().parent

with open(here / 'README.rst') as f:
    readme = '\n' + f.read()

class UploadCommand(Command):
    """Support setup.py publish."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except FileNotFoundError:
            pass

        self.status('Building Source distribution…')
        os.system('{0} setup.py sdist'.format(sys.executable))

        self.status('Uploading the package to PyPi via Twine…')
        os.system('twine upload dist/*')

        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(__version__))
        os.system('git push --tags')

        sys.exit()

setup(
    name='pysimplemodel',
    version=__version__,
    description='Data handling made easy',
    long_description='\n' + readme,
    url='https://github.com/lamenezes/simple-model',
    author='Luiz Menezes',
    author_email='[email protected]',
    packages=find_packages(exclude=['tests']),
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
    ],
    cmdclass={
        'upload': UploadCommand,
    },
)

license: mit
lang: Python

commit: e846fff0060a431187e607fa0852b00265aff709
subject: fix bug #141
repos: ranaroussi/qtpylib,ranaroussi/qtpylib,ranaroussi/qtpylib
old_file: setup.py
new_file: setup.py
new_contents:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""QTPyLib: Quantitative Trading Python Library
(https://github.com/ranaroussi/qtpylib)

Simple, event-driven algorithmic trading system written in Python 3,
that supports backtesting and live trading using Interactive Brokers
for market data and order execution.
"""

from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='QTPyLib',
    version='1.5.83',
    description='Quantitative Trading Python Library',
    long_description=long_description,
    url='https://github.com/ranaroussi/qtpylib',
    author='Ran Aroussi',
    author_email='[email protected]',
    license='LGPL',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: Office/Business :: Financial',
        'Topic :: Office/Business :: Financial :: Investment',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    platforms = ['any'],
    keywords='qtpylib qtpy algotrading algo trading interactive brokers tws ibgw ibpy ezibpy',
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'demo', 'demos', 'examples']),
    install_requires=[
        'python-dateutil>=2.5.3','ezibpy>=1.12.66',
        'flask>=0.11.1','numpy>=1.11.1','pandas>=0.22.0','pymysql>=0.7.6',
        'pytz>=2016.6.1','requests>=2.10.0','pyzmq>=15.2.1',
        'nexmo>=1.2.0','twilio>=5.4.0','ibpy2>=0.8.0',
    ],
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
    include_package_data=True,
    package_data={
        'static': ['qtpylib/_webapp/*'],
        'db': ['qtpylib/schema.sql*']
    },
)

old_contents:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""QTPyLib: Quantitative Trading Python Library
(https://github.com/ranaroussi/qtpylib)

Simple, event-driven algorithmic trading system written in Python 3,
that supports backtesting and live trading using Interactive Brokers
for market data and order execution.
"""

from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='QTPyLib',
    version='1.5.83',
    description='Quantitative Trading Python Library',
    long_description=long_description,
    url='https://github.com/ranaroussi/qtpylib',
    author='Ran Aroussi',
    author_email='[email protected]',
    license='LGPL',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: Office/Business :: Financial',
        'Topic :: Office/Business :: Financial :: Investment',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    platforms = ['any'],
    keywords='qtpylib qtpy algotrading algo trading interactive brokers tws ibgw ibpy ezibpy',
    packages=find_packages(exclude=['contrib', 'docs', 'tests', 'demo', 'demos', 'examples']),
    install_requires=[
        'python-dateutil>=2.5.3','ezibpy>=1.12.66',
        'flask>=0.11.1','numpy>=1.11.1','pandas>=0.22.0','pymysql>=0.7.6',
        'pytz>=2016.6.1','requests>=2.10.0','pyzmq>=15.2.1',
        'nexmo>=1.2.0','twilio>=5.4.0','ibpy2>=0.8.0',
    ],
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
    include_package_data=True,
    package_data={
        'static': 'qtpylib/_webapp/*',
        'db': 'qtpylib/schema.sql*'
    },
)

license: apache-2.0
lang: Python

commit: 990d1a364dcfd62e700daba9945c35f96fbdfa5b
subject: Order the main extensions list by name.
repos: GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web,magcius/sweettooth,magcius/sweettooth
old_file: sweettooth/extensions/urls.py
new_file: sweettooth/extensions/urls.py
new_contents:

from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.views.generic.list_detail import object_list

from extensions import views, models

upload_patterns = patterns('',
    url(r'^$', views.upload_file, dict(pk=None), name='extensions-upload-file'),
    url(r'^new-version/(?P<pk>\d+)/$', views.upload_file, name='extensions-upload-file'),
)

ajax_patterns = patterns('',
    url(r'^edit/(?P<pk>\d+)', views.ajax_inline_edit_view, name='extensions-ajax-inline'),
    url(r'^submit/(?P<pk>\d+)', views.ajax_submit_and_lock_view, name='extensions-ajax-submit'),
    url(r'^upload/screenshot/(?P<pk>\d+)', views.ajax_upload_screenshot_view, name='extensions-ajax-screenshot'),
    url(r'^upload/icon/(?P<pk>\d+)', views.ajax_upload_icon_view, name='extensions-ajax-icon'),
    url(r'^detail/', views.ajax_details_view, name='extensions-ajax-details'),
)

shell_patterns = patterns('',
    url(r'^extension-query/', views.ajax_query_view),
    url(r'^extension-info/', views.ajax_details_view),
    url(r'^download-extension/(?P<uuid>.+)\.shell-extension\.zip$', views.download),
)

urlpatterns = patterns('',
    url(r'^$', object_list,
        dict(queryset=models.Extension.objects.visible().order_by('name'),
             paginate_by=10,
             template_object_name='extension',
             template_name='extensions/list.html'),
        name='extensions-index'),

    # we ignore PK of extension, and get extension from version PK
    url(r'^extension/(?P<ext_pk>\d+)/(?P<slug>.+)/version/(?P<pk>\d+)/$',
        views.extension_version_view, name='extensions-version-detail'),

    url(r'^extension/(?P<pk>\d+)/(?P<slug>.+)/$',
        views.extension_latest_version_view, name='extensions-detail'),
    url(r'^extension/(?P<pk>\d+)/$',
        views.extension_latest_version_view, dict(slug=None), name='extensions-detail'),

    url(r'^local/', direct_to_template, dict(template='extensions/local.html'), name='extensions-local'),

    url(r'^upload/', include(upload_patterns)),
    url(r'^ajax/', include(ajax_patterns)),
    url(r'', include(shell_patterns)),
)

old_contents:

from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.views.generic.list_detail import object_list

from extensions import views, models

upload_patterns = patterns('',
    url(r'^$', views.upload_file, dict(pk=None), name='extensions-upload-file'),
    url(r'^new-version/(?P<pk>\d+)/$', views.upload_file, name='extensions-upload-file'),
)

ajax_patterns = patterns('',
    url(r'^edit/(?P<pk>\d+)', views.ajax_inline_edit_view, name='extensions-ajax-inline'),
    url(r'^submit/(?P<pk>\d+)', views.ajax_submit_and_lock_view, name='extensions-ajax-submit'),
    url(r'^upload/screenshot/(?P<pk>\d+)', views.ajax_upload_screenshot_view, name='extensions-ajax-screenshot'),
    url(r'^upload/icon/(?P<pk>\d+)', views.ajax_upload_icon_view, name='extensions-ajax-icon'),
    url(r'^detail/', views.ajax_details_view, name='extensions-ajax-details'),
)

shell_patterns = patterns('',
    url(r'^extension-query/', views.ajax_query_view),
    url(r'^extension-info/', views.ajax_details_view),
    url(r'^download-extension/(?P<uuid>.+)\.shell-extension\.zip$', views.download),
)

urlpatterns = patterns('',
    url(r'^$', object_list,
        dict(queryset=models.Extension.objects.visible(),
             paginate_by=10,
             template_object_name='extension',
             template_name='extensions/list.html'),
        name='extensions-index'),

    # we ignore PK of extension, and get extension from version PK
    url(r'^extension/(?P<ext_pk>\d+)/(?P<slug>.+)/version/(?P<pk>\d+)/$',
        views.extension_version_view, name='extensions-version-detail'),

    url(r'^extension/(?P<pk>\d+)/(?P<slug>.+)/$',
        views.extension_latest_version_view, name='extensions-detail'),
    url(r'^extension/(?P<pk>\d+)/$',
        views.extension_latest_version_view, dict(slug=None), name='extensions-detail'),

    url(r'^local/', direct_to_template, dict(template='extensions/local.html'), name='extensions-local'),

    url(r'^upload/', include(upload_patterns)),
    url(r'^ajax/', include(ajax_patterns)),
    url(r'', include(shell_patterns)),
)

license: agpl-3.0
lang: Python

commit: b5a4708009e78c5727f2a54c54056df21983e958
subject: Fix SlackOAuth
repos: singingwolfboy/flask-dance-slack
old_file: slack.py
new_file: slack.py
new_contents:

import os
import sys
import logging
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.urls import url_encode, url_decode
import flask
from flask import Flask, redirect, url_for
from flask_dance.consumer import OAuth2ConsumerBlueprint
from raven.contrib.flask import Sentry
from requests.auth import AuthBase
from urlobject import URLObject

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
sentry = Sentry(app)
app.secret_key = os.environ.get("FLASK_SECRET_KEY", "supersekrit")
app.config["SLACK_OAUTH_CLIENT_ID"] = os.environ.get("SLACK_OAUTH_CLIENT_ID")
app.config["SLACK_OAUTH_CLIENT_SECRET"] = os.environ.get("SLACK_OAUTH_CLIENT_SECRET")

class SlackOAuth(AuthBase):
    """
    Slack wants the access token to be passed in a `token` GET parameter
    or POST parameter, rather than using the `Authorization: Bearer` header.
    This is annoying, but we can make it work using this custom Auth object.
    """
    def __init__(self, blueprint):
        self.blueprint = blueprint

    def __call__(self, r):
        if self.blueprint.token:
            access_token = self.blueprint.token.get("access_token")
        else:
            access_token = None
        if not access_token:
            return r

        if r.method == "GET":
            url = URLObject(r.url)
            if not "token" in url.query_dict:
                url = url.add_query_param("token", access_token)
            r.url = url
        elif r.method == "POST":
            args = url_decode(r.body)
            args.setdefault("token", access_token)
            r.body = url_encode(args)
        return r

slack_bp = OAuth2ConsumerBlueprint("slack", __name__,
    base_url="https://slack.com/api/",
    authorization_url="https://slack.com/oauth/authorize",
    token_url="https://slack.com/api/oauth.access",
    scope=["identify", "chat:write:bot"],
)
slack_bp.auth = SlackOAuth(slack_bp)
slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID"
slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET"

app.register_blueprint(slack_bp, url_prefix="/login")

@app.route("/")
def index():
    slack = slack_bp.session
    if not slack.authorized:
        return redirect(url_for("slack.login"))
    resp = slack.post("chat.postMessage", data={
        "channel": "#general",
        "text": "ping",
        "icon_emoji": ":robot_face:",
    })
    assert resp.ok, resp.text
    return resp.text

if __name__ == "__main__":
    app.run()

old_contents:

import os
import sys
import logging
from werkzeug.contrib.fixers import ProxyFix
import flask
from flask import Flask, redirect, url_for
from flask_dance.consumer import OAuth2ConsumerBlueprint
from raven.contrib.flask import Sentry
from requests.auth import AuthBase

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
sentry = Sentry(app)
app.secret_key = os.environ.get("FLASK_SECRET_KEY", "supersekrit")
app.config["SLACK_OAUTH_CLIENT_ID"] = os.environ.get("SLACK_OAUTH_CLIENT_ID")
app.config["SLACK_OAUTH_CLIENT_SECRET"] = os.environ.get("SLACK_OAUTH_CLIENT_SECRET")

class SlackOAuth(AuthBase):
    """
    Slack wants the access token to be passed in a `token` GET parameter
    or POST parameter, rather than using the `Authorization: Bearer` header.
    This is annoying, but we can make it work using this custom Auth object.
    """
    def __init__(self, blueprint):
        self.blueprint = blueprint

    def __call__(self, r):
        if self.blueprint.token:
            access_token = self.blueprint.token.get("access_token")
        else:
            access_token = None
        if access_token:
            r.data.setdefault('token', access_token)
        return r

slack_bp = OAuth2ConsumerBlueprint("slack", __name__,
    base_url="https://slack.com/api/",
    authorization_url="https://slack.com/oauth/authorize",
    token_url="https://slack.com/api/oauth.access",
    scope=["identify", "chat:write:bot"],
)
slack_bp.auth = SlackOAuth(slack_bp)
slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID"
slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET"

app.register_blueprint(slack_bp, url_prefix="/login")

@app.route("/")
def index():
    slack = slack_bp.session
    if not slack.authorized:
        return redirect(url_for("slack.login"))
    resp = slack.post("chat.postMessage", data={
        "channel": "#general",
        "text": "ping",
        "icon_emoji": ":robot_face:",
    })
    assert resp.ok, resp.text
    return resp.text

if __name__ == "__main__":
    app.run()

license: mit
lang: Python

commit: e145d8012efbfb373dfe566845f3957777a3da5a
subject: Clean up an unnecessary variable.
repos: mjessome/sqltd
old_file: sqltd.py
new_file: sqltd.py
new_contents:

#!/usr/bin/env python

import sqlite3
import sys
import re

def runPage(db, html):
    def parseStrings(s, query=[False]):
        output = ''
        if s == '<sql>':
            query[0] = True
        elif s == '</sql>':
            query[0] = False
        elif query[0]:
            result = dbExecute(db, s)
            output = '<table>\n%s</table>\n' % (''.join(makeTable(result)))
        else:
            output = ''.join(s)
        return output
    split = re.split('(<sql>|</sql>)', html)
    return ''.join([parseStrings(s) for s in split])

def dbConnect(db_path):
    db = sqlite3.connect(db_path)
    return db

def dbExecute(db, query):
    #May need a way to check that the query is valid
    c = db.cursor()
    c.execute(query)
    return c

def makeTable(rows):
    def makeRow(row):
        return ''.join(['<tr>\n',
                        ''.join([' <td>%s</td>\n' % str(col) for col in row]),
                        '</tr>\n'])
    header = ''.join(['<th>%s</th>\n' % (field[0]) for field in rows.description])
    output = ''.join([makeRow(row) for row in rows.fetchall()])
    return "%s%s" %(header, output)

if __name__ == "__main__":
    if len(sys.argv) >= 2:
        DB_PATH = sys.argv[1];
    else:
        print "No sqlite database specified."
        exit(1)
    db = dbConnect(DB_PATH)
    if(not db):
        print "Error opening database"
        exit(1);
    print runPage(db, ''.join(sys.stdin))

old_contents:

#!/usr/bin/env python

import sqlite3
import sys
import re

def runPage(db, html):
    def parseStrings(s, query=[False]):
        output = ''
        if s == '<sql>':
            query[0] = True
        elif s == '</sql>':
            query[0] = False
        elif query[0]:
            result = dbExecute(db, s)
            output = '<table>\n%s</table>\n' % (''.join(makeTable(result)))
        else:
            output = ''.join(s)
        return output
    split = re.split('(<sql>|</sql>)', html)
    output = ''
    return ''.join([parseStrings(s) for s in split])

def dbConnect(db_path):
    db = sqlite3.connect(db_path)
    return db

def dbExecute(db, query):
    #May need a way to check that the query is valid
    c = db.cursor()
    c.execute(query)
    return c

def makeTable(rows):
    def makeRow(row):
        return ''.join(['<tr>\n',
                        ''.join([' <td>%s</td>\n' % str(col) for col in row]),
                        '</tr>\n'])
    header = ''.join(['<th>%s</th>\n' % (field[0]) for field in rows.description])
    output = ''.join([makeRow(row) for row in rows.fetchall()])
    return "%s%s" %(header, output)

if __name__ == "__main__":
    if len(sys.argv) >= 2:
        DB_PATH = sys.argv[1];
    else:
        print "No sqlite database specified."
        exit(1)
    db = dbConnect(DB_PATH)
    if(not db):
        print "Error opening database"
        exit(1);
    print runPage(db, ''.join(sys.stdin))

license: mit
lang: Python

commit: 49e9fbbd00a7732faa716e5e930ec63dbaa18983
subject: fix gumbel unit test
repos: kengz/Unity-Lab,kengz/Unity-Lab
old_file: test/lib/test_distribution.py
new_file: test/lib/test_distribution.py
new_contents:

from flaky import flaky
from slm_lab.lib import distribution
import pytest
import torch

@pytest.mark.parametrize('pdparam_type', [
    'probs', 'logits'
])
def test_argmax(pdparam_type):
    pdparam = torch.tensor([1.1, 10.0, 2.1])
    # test both probs or logits
    pd = distribution.Argmax(**{pdparam_type: pdparam})
    for _ in range(10):
        assert pd.sample().item() == 1
    assert torch.equal(pd.probs, torch.tensor([0., 1., 0.]))

@flaky
@pytest.mark.parametrize('pdparam_type', [
    'probs', 'logits'
])
def test_gumbel_categorical(pdparam_type):
    pdparam = torch.tensor([1.1, 10.0, 2.1])
    pd = distribution.GumbelSoftmax(**{pdparam_type: pdparam, 'temperature': torch.tensor(1.0)})
    for _ in range(10):
        assert torch.is_tensor(pd.sample())

@pytest.mark.parametrize('pdparam_type', [
    'probs', 'logits'
])
def test_multicategorical(pdparam_type):
    pdparam0 = torch.tensor([10.0, 0.0, 0.0])
    pdparam1 = torch.tensor([0.0, 10.0, 0.0])
    pdparam2 = torch.tensor([0.0, 0.0, 10.0])
    pdparams = [pdparam0, pdparam1, pdparam2]
    # use a probs
    pd = distribution.MultiCategorical(**{pdparam_type: pdparams})
    assert isinstance(pd.probs, list)
    # test probs only since if init from logits, probs will be close but not precise
    if pdparam_type == 'probs':
        assert torch.equal(pd.probs[0], torch.tensor([1., 0., 0.]))
        assert torch.equal(pd.probs[1], torch.tensor([0., 1., 0.]))
        assert torch.equal(pd.probs[2], torch.tensor([0., 0., 1.]))
    for _ in range(10):
        assert torch.equal(pd.sample(), torch.tensor([0, 1, 2]))

old_contents:

from flaky import flaky
from slm_lab.lib import distribution
import pytest
import torch

@pytest.mark.parametrize('pdparam_type', [
    'probs', 'logits'
])
def test_argmax(pdparam_type):
    pdparam = torch.tensor([1.1, 10.0, 2.1])
    # test both probs or logits
    pd = distribution.Argmax(**{pdparam_type: pdparam})
    for _ in range(10):
        assert pd.sample().item() == 1
    assert torch.equal(pd.probs, torch.tensor([0., 1., 0.]))

@flaky
@pytest.mark.parametrize('pdparam_type', [
    'probs', 'logits'
])
def test_gumbel_categorical(pdparam_type):
    pdparam = torch.tensor([1.1, 10.0, 2.1])
    pd = distribution.GumbelSoftmax(**{pdparam_type: pdparam})
    for _ in range(10):
        assert torch.is_tensor(pd.sample())

@pytest.mark.parametrize('pdparam_type', [
    'probs', 'logits'
])
def test_multicategorical(pdparam_type):
    pdparam0 = torch.tensor([10.0, 0.0, 0.0])
    pdparam1 = torch.tensor([0.0, 10.0, 0.0])
    pdparam2 = torch.tensor([0.0, 0.0, 10.0])
    pdparams = [pdparam0, pdparam1, pdparam2]
    # use a probs
    pd = distribution.MultiCategorical(**{pdparam_type: pdparams})
    assert isinstance(pd.probs, list)
    # test probs only since if init from logits, probs will be close but not precise
    if pdparam_type == 'probs':
        assert torch.equal(pd.probs[0], torch.tensor([1., 0., 0.]))
        assert torch.equal(pd.probs[1], torch.tensor([0., 1., 0.]))
        assert torch.equal(pd.probs[2], torch.tensor([0., 0., 1.]))
    for _ in range(10):
        assert torch.equal(pd.sample(), torch.tensor([0, 1, 2]))

license: mit
lang: Python

commit: f27e08b0dcace5b9f49c5b2a211347a2f50f8254
subject: Use tags or direct url
repos: atulya2109/Stats-Royale-Python
old_file: stats.py
new_file: stats.py
new_contents:

from bs4 import BeautifulSoup
import requests

def statsRoyale(tag):
    if not tag.find('/') == -1:
        tag = tag[::-1]
        pos = tag.find('/')
        tag = tag[:pos]
        tag = tag[::-1]
    link = 'http://statsroyale.com/profile/' + tag
    response = (requests.get(link)).text
    soup = BeautifulSoup(response, 'html.parser')
    description = soup.find_all('div', {'class':'description'})
    content = soup.find_all('div', {'class':'content'})
    stats = {}
    for i in range(len(description)):
        description_text = ((description[i].get_text()).replace(' ', '_')).lower()
        content_text = content[i].get_text()
        stats[description_text] = content_text
    if stats['clan'] == 'No Clan':
        stats['clan'] = None
    return stats

stats = statsRoyale(tag='9890JJJV')
print stats

old_contents:

from bs4 import BeautifulSoup
import requests

def statsRoyale(tag):
    link = 'http://statsroyale.com/profile/' + tag
    response = (requests.get(link)).text
    soup = BeautifulSoup(response, 'html.parser')
    stats = {}
    content = soup.find_all('div', {'class':'content'})
    stats['clan'] = content[0].get_text()
    if stats['clan'] == 'No Clan':
        stats['clan'] = None
    stats['highest_trophies'] = content[1].get_text()
    stats['last_known_trophies'] = content[2].get_text()
    stats['challenge_cards_won'] = content[3].get_text()
    stats['tournament_cards_won'] = content[4].get_text()
    stats['total_donations'] = content[5].get_text()
    stats['best_session_rank'] = content[6].get_text()
    stats['previous_session_rank'] = content[7].get_text()
    stats['legendary_trophies'] = content[8].get_text()
    stats['wins'] = content[9].get_text()
    stats['losses'] = content[10].get_text()
    stats['3_crown_wins'] = content[11].get_text()
    return stats

stats = statsRoyale(tag='9890JJJV')
print stats

license: mit
lang: Python

commit: f8ae46f22a9b5b1fc8215ac26aed6dfddf25c224
subject: set AUTOSYNTH_MULTIPLE_COMMITS=true for context aware commits (#320)
repos: googleapis/nodejs-cloud-container,googleapis/nodejs-cloud-container
old_file: synth.py
new_file: synth.py
new_contents:

import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess

logging.basicConfig(level=logging.DEBUG)

AUTOSYNTH_MULTIPLE_COMMITS = True

# Run the gapic generator
gapic = gcp.GAPICMicrogenerator()
version = 'v1'
library = gapic.typescript_library(
    'container',
    generator_args={
        "grpc-service-config": f"google/container/{version}/container_grpc_service_config.json",
        "package-name": f"@google-cloud/container"
    },
    proto_path=f'/google/container/{version}',
    version=version)
s.copy(
    library,
    excludes=['package.json', 'README.md', 'src/index.ts'],
)

# Copy templated files
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)

# fix broken doc links
s.replace("src/v1/doc/google/container/v1/doc_cluster_service.js",
          "https:\/\/cloud\.google\.com\/kubernetes-engine\/docs\/reference\/rest\/v1\/projects\.zones\.clusters\.nodePool",
          "https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools#resource-nodepool")
s.replace('src/v1/*.ts',
          '/compute/docs/zones',
          'https://cloud.google.com/compute/docs/regions-zones/')
s.replace('src/v1/*.ts',
          '/compute/docs/networks-and-firewalls',
          'https://cloud.google.com/vpc/docs/firewalls')
s.replace('src/v1/*.ts',
          "/container-engine/reference/rest/v1/projects.zones.clusters",
          "https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters")

# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'compile-protos'])
subprocess.run(['npm', 'run', 'fix'])

old_contents:

import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess

logging.basicConfig(level=logging.DEBUG)

# Run the gapic generator
gapic = gcp.GAPICMicrogenerator()
version = 'v1'
library = gapic.typescript_library(
    'container',
    generator_args={
        "grpc-service-config": f"google/container/{version}/container_grpc_service_config.json",
        "package-name": f"@google-cloud/container"
    },
    proto_path=f'/google/container/{version}',
    version=version)
s.copy(
    library,
    excludes=['package.json', 'README.md', 'src/index.ts'],
)

# Copy templated files
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)

# fix broken doc links
s.replace("src/v1/doc/google/container/v1/doc_cluster_service.js",
          "https:\/\/cloud\.google\.com\/kubernetes-engine\/docs\/reference\/rest\/v1\/projects\.zones\.clusters\.nodePool",
          "https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools#resource-nodepool")
s.replace('src/v1/*.ts',
          '/compute/docs/zones',
          'https://cloud.google.com/compute/docs/regions-zones/')
s.replace('src/v1/*.ts',
          '/compute/docs/networks-and-firewalls',
          'https://cloud.google.com/vpc/docs/firewalls')
s.replace('src/v1/*.ts',
          "/container-engine/reference/rest/v1/projects.zones.clusters",
          "https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters")

# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'compile-protos'])
subprocess.run(['npm', 'run', 'fix'])

license: apache-2.0
lang: Python

commit: d54d202970610a59cb7fd60e51483c6e0db93d60
subject: update synth scripts to document/utilize apiEndpoint instead of serviceAddress option (#2165)
repos: googleapis/google-cloud-php-text-to-speech,googleapis/google-cloud-php-text-to-speech
old_file: synth.py
new_file: synth.py
new_contents:

# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script is used to synthesize generated parts of this library."""

import os
# https://github.com/googleapis/artman/pull/655#issuecomment-507784277
os.environ["SYNTHTOOL_ARTMAN_VERSION"] = "0.29.1"

import synthtool as s
import synthtool.gcp as gcp
import logging

logging.basicConfig(level=logging.DEBUG)

gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()

v1_library = gapic._generate_code(
    'texttospeech', 'v1', 'php',
    config_path='artman_texttospeech_v1.yaml',
    artman_output_name='google-cloud-texttospeech-v1')

s.copy(v1_library / f'src/')
s.copy(v1_library / f'proto/src/GPBMetadata/Google/Cloud/Texttospeech', f'metadata')
s.copy(v1_library / f'proto/src/Google/Cloud/TextToSpeech', f'src')
s.copy(v1_library / f'tests')

# document and utilize apiEndpoint instead of serviceAddress
s.replace(
    "**/Gapic/*GapicClient.php",
    r"'serviceAddress' =>",
    r"'apiEndpoint' =>")
s.replace(
    "**/Gapic/*GapicClient.php",
    r"@type string \$serviceAddress",
    r"""@type string $serviceAddress
     *     **Deprecated**. This option will be removed in a future major release. Please
     *     utilize the `$apiEndpoint` option instead.
     * @type string $apiEndpoint""")
s.replace(
    "**/Gapic/*GapicClient.php",
    r"\$transportConfig, and any \$serviceAddress",
    r"$transportConfig, and any `$apiEndpoint`")

# fix copyright year
s.replace(
    'src/V1/**/*Client.php',
    r'Copyright \d{4}',
    r'Copyright 2018')
s.replace(
    'tests/**/V1/*Test.php',
    r'Copyright \d{4}',
    r'Copyright 2018')

old_contents:

# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script is used to synthesize generated parts of this library."""

import os
# https://github.com/googleapis/artman/pull/655#issuecomment-507784277
os.environ["SYNTHTOOL_ARTMAN_VERSION"] = "0.29.1"

import synthtool as s
import synthtool.gcp as gcp
import logging

logging.basicConfig(level=logging.DEBUG)

gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()

v1_library = gapic._generate_code(
    'texttospeech', 'v1', 'php',
    config_path='artman_texttospeech_v1.yaml',
    artman_output_name='google-cloud-texttospeech-v1')

s.copy(v1_library / f'src/')
s.copy(v1_library / f'proto/src/GPBMetadata/Google/Cloud/Texttospeech', f'metadata')
s.copy(v1_library / f'proto/src/Google/Cloud/TextToSpeech', f'src')
s.copy(v1_library / f'tests')

# fix copyright year
s.replace(
    'src/V1/**/*Client.php',
    r'Copyright \d{4}',
    r'Copyright 2018')
s.replace(
    'tests/**/V1/*Test.php',
    r'Copyright \d{4}',
    r'Copyright 2018')

license: apache-2.0
lang: Python

commit: 30d108b3a206d938ef67c112bc6c953a12c606af
subject: Allow specifying custom host and port when starting app
repos: rlucioni/typesetter,rlucioni/typesetter,rlucioni/typesetter
old_file: tasks.py
new_file: tasks.py
new_contents:

"""Task functions for use with Invoke."""
from invoke import task

@task
def clean(context):
    cmd = '$(npm bin)/gulp clean'
    context.run(cmd)

@task
def requirements(context):
    steps = [
        'pip install -r requirements.txt',
        'npm install',
        '$(npm bin)/bower install',
    ]
    cmd = ' && '.join(steps)
    context.run(cmd)

@task
def run(context, host='127.0.0.1', port='5000'):
    steps = [
        'open http://{host}:{port}/',
        'FLASK_APP=typesetter/typesetter.py FLASK_DEBUG=1 flask run --host={host} --port={port}',
    ]
    steps = [step.format(host=host, port=port) for step in steps]
    cmd = ' && '.join(steps)
    context.run(cmd)

@task
def static(context):
    cmd = '$(npm bin)/gulp'
    context.run(cmd)

old_contents:

"""Task functions for use with Invoke."""
from invoke import task

@task
def clean(context):
    cmd = '$(npm bin)/gulp clean'
    context.run(cmd)

@task
def requirements(context):
    steps = [
        'pip install -r requirements.txt',
        'npm install',
        '$(npm bin)/bower install',
    ]
    cmd = ' && '.join(steps)
    context.run(cmd)

@task
def run(context):
    steps = [
        'open http://127.0.0.1:5000/',
        'FLASK_APP=typesetter/typesetter.py FLASK_DEBUG=1 flask run',
    ]
    cmd = ' && '.join(steps)
    context.run(cmd)

@task
def static(context):
    cmd = '$(npm bin)/gulp'
    context.run(cmd)

license: mit
lang: Python

commit: c05b06577785bdf34f1fcd051ecf6d4398d2f77e
subject: Add new release task w/ API doc prebuilding
repos: thusoy/paramiko,CptLemming/paramiko,rcorrieri/paramiko,redixin/paramiko,Automatic/paramiko,jaraco/paramiko,esc/paramiko,ameily/paramiko,zarr12steven/paramiko,dorianpula/paramiko,mirrorcoder/paramiko,jorik041/paramiko,thisch/paramiko,dlitz/paramiko,paramiko/paramiko,digitalquacks/paramiko,fvicente/paramiko,SebastianDeiss/paramiko,anadigi/paramiko,varunarya10/paramiko,zpzgone/paramiko,torkil/paramiko,mhdaimi/paramiko,reaperhulk/paramiko,selboo/paramiko,remram44/paramiko,toby82/paramiko,davidbistolas/paramiko
old_file: tasks.py
new_file: tasks.py
new_contents:

from os.path import join
from shutil import rmtree, move

from invoke import Collection, ctask as task
from invocations import docs as _docs
from invocations.packaging import publish

d = 'sites'

# Usage doc/API site (published as docs.paramiko.org)
docs_path = join(d, 'docs')
docs_build = join(docs_path, '_build')
docs = Collection.from_module(_docs, name='docs', config={
    'sphinx.source': docs_path,
    'sphinx.target': docs_build,
})

# Main/about/changelog site ((www.)?paramiko.org)
www_path = join(d, 'www')
www = Collection.from_module(_docs, name='www', config={
    'sphinx.source': www_path,
    'sphinx.target': join(www_path, '_build'),
})

# Until we move to spec-based testing
@task
def test(ctx):
    ctx.run("python test.py --verbose")

@task
def coverage(ctx):
    ctx.run("coverage run --source=paramiko test.py --verbose")

# Until we stop bundling docs w/ releases. Need to discover use cases first.
@task('docs') # Will invoke the API doc site build
def release(ctx):
    # Move the built docs into where Epydocs used to live
    rmtree('docs')
    move(docs_build, 'docs')
    # Publish
    publish(ctx)

ns = Collection(test, coverage, release, docs=docs, www=www)

old_contents:

from os.path import join

from invoke import Collection, ctask as task
from invocations import docs as _docs

d = 'sites'

# Usage doc/API site (published as docs.paramiko.org)
path = join(d, 'docs')
docs = Collection.from_module(_docs, name='docs', config={
    'sphinx.source': path,
    'sphinx.target': join(path, '_build'),
})

# Main/about/changelog site ((www.)?paramiko.org)
path = join(d, 'www')
www = Collection.from_module(_docs, name='www', config={
    'sphinx.source': path,
    'sphinx.target': join(path, '_build'),
})

# Until we move to spec-based testing
@task
def test(ctx):
    ctx.run("python test.py --verbose")

@task
def coverage(ctx):
    ctx.run("coverage run --source=paramiko test.py --verbose")

ns = Collection(test, coverage, docs=docs, www=www)

license: lgpl-2.1
lang: Python

commit: b7e42d4a231cc1c34e193e2bd719c134f7f29b0a
subject: Use a minimum of 1% completness to not ship empty translations.
repos: agustin380/django-localflavor,jieter/django-localflavor,rsalmaso/django-localflavor,infoxchange/django-localflavor,maisim/django-localflavor,thor/django-localflavor,zarelit/django-localflavor,M157q/django-localflavor,django/django-localflavor
old_file: tasks.py
new_file: tasks.py
new_contents:

import os
import os.path
import sys

from invoke import run, task

@task
def clean():
    run('git clean -Xfd')

@task
def test(country='all'):
    print('Python version: ' + sys.version)
    test_cmd = 'coverage run `which django-admin.py` test --settings=tests.settings'
    flake_cmd = 'flake8 --ignore=W801,E128,E501,W402'
    country = os.environ.get('COUNTRY', country)

    # Fix issue #49
    cwp = os.path.dirname(os.path.abspath(__name__))
    pythonpath = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    pythonpath.append(os.path.join(cwp, 'tests'))
    os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath)

    if country == 'all':
        run('{0} localflavor'.format(flake_cmd))
        run('{0} tests'.format(test_cmd))
        run('coverage report')
    elif country not in os.listdir('localflavor'):
        print('The country {0!r} is not supported yet.'.format(country))
    else:
        run('{0} localflavor/{1}'.format(flake_cmd, country))
        run('{0} tests.test_{1}'.format(test_cmd, country))
        run('coverage report -m --include=localflavor/{0}/*'.format(country))

@task
def compile_translations():
    run('cd localflavor; django-admin.py compilemessages; cd ..')

@task(post=[compile_translations])
def pull_translations(locale=None):
    if locale:
        run('tx pull -f -l {0}'.format(locale))
    else:
        run('tx pull --minimum-perc=1 -f -a')

@task(post=[compile_translations])
def make_translations(locale=None):
    if locale:
        run('cd localflavor; '
            'django-admin.py makemessages -l {0}; '.format(locale))
    else:
        run('cd localflavor; django-admin.py makemessages -a')

@task
def docs():
    run('cd docs; make html; cd ..')

old_contents:

import os
import os.path
import sys

from invoke import run, task

@task
def clean():
    run('git clean -Xfd')

@task
def test(country='all'):
    print('Python version: ' + sys.version)
    test_cmd = 'coverage run `which django-admin.py` test --settings=tests.settings'
    flake_cmd = 'flake8 --ignore=W801,E128,E501,W402'
    country = os.environ.get('COUNTRY', country)

    # Fix issue #49
    cwp = os.path.dirname(os.path.abspath(__name__))
    pythonpath = os.environ.get('PYTHONPATH', '').split(os.pathsep)
    pythonpath.append(os.path.join(cwp, 'tests'))
    os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath)

    if country == 'all':
        run('{0} localflavor'.format(flake_cmd))
        run('{0} tests'.format(test_cmd))
        run('coverage report')
    elif country not in os.listdir('localflavor'):
        print('The country {0!r} is not supported yet.'.format(country))
    else:
        run('{0} localflavor/{1}'.format(flake_cmd, country))
        run('{0} tests.test_{1}'.format(test_cmd, country))
        run('coverage report -m --include=localflavor/{0}/*'.format(country))

@task
def translations(pull=False, locale=None):
    if pull:
        if locale:
            run('tx pull -l {0}'.format(locale))
        else:
            run('tx pull -a')
    if locale:
        run('cd localflavor; django-admin.py makemessages -l {0}; '
            'django-admin.py compilemessages -l {0}; cd ..'.format(locale))
    else:
        run('cd localflavor; django-admin.py makemessages -a; '
            'django-admin.py compilemessages; cd ..')

@task
def docs():
    run('cd docs; make html; cd ..')

license: bsd-3-clause
lang: Python

commit: f62626799eddfea04ffad5005de09305a18f287d
subject: Add linux dependencies task.
repos: okuser/okcupyd,okuser/okcupyd,IvanMalison/okcupyd,IvanMalison/okcupyd
old_file: tasks.py
new_file: tasks.py
new_contents:

from invoke import Collection, task, run

from okcupyd import tasks

ns = Collection()
ns.add_collection(tasks, name='okcupyd')

@ns.add_task
@task(default=True)
def install():
    run("python setup.py install")

@ns.add_task
@task
def pypi():
    run("python setup.py sdist upload -r pypi")

@ns.add_task
@task(aliases='r')
def rerecord(rest):
    run('tox -e py27 -- --record --credentials test_credentials {0} -s'
        .format(rest), pty=True)
    run('tox -e py27 -- --resave --scrub --credentials test_credentials {0} -s'
        .format(rest), pty=True)

@ns.add_task
@task(aliases='r1')
def rerecord_one(rest):
    run('tox -e py27 -- --record --credentials test_credentials -k {0} -s'
        .format(rest), pty=True)
    run('tox -e py27 -- --resave --scrub --credentials test_credentials -k {0} -s'
        .format(rest), pty=True)

@ns.add_task
@task
def rerecord_failing():
    result = run("tox -e py27 | grep test_ | grep \u2015 | sed 's:\\\u2015::g'",
                 hide='out')
    for test_name in result.stdout.split('\n'):
        rerecord_one(rest=test_name.strip())

linux_dependencies = ('zlib1g-dev', 'libxml2-dev', 'libxslt1-dev',
                      'python-dev', 'libncurses5-dev')

@ns.add_task
@task(aliases='linux_dep')
def install_linux_dependencies():
    for package in linux_pacakges:
        run('{0} {1}'.format(install_command, package), pty=False)

old_contents:

from invoke import Collection, task, run

from okcupyd import tasks

ns = Collection()
ns.add_collection(tasks, name='okcupyd')

@ns.add_task
@task(default=True)
def install():
    run("python setup.py install")

@ns.add_task
@task
def pypi():
    run("python setup.py sdist upload -r pypi")

@ns.add_task
@task(aliases='r')
def rerecord(rest):
    run('tox -e py27 -- --record --credentials test_credentials {0} -s'
        .format(rest), pty=True)
    run('tox -e py27 -- --resave --scrub --credentials test_credentials {0} -s'
        .format(rest), pty=True)

@ns.add_task
@task(aliases='r1')
def rerecord_one(rest):
    run('tox -e py27 -- --record --credentials test_credentials -k {0} -s'
        .format(rest), pty=True)
    run('tox -e py27 -- --resave --scrub --credentials test_credentials -k {0} -s'
        .format(rest), pty=True)

@ns.add_task
@task
def rerecord_failing():
    result = run("tox -e py27 | grep test_ | grep \u2015 | sed 's:\\\u2015::g'",
                 hide='out')
    for test_name in result.stdout.split('\n'):
        rerecord_one(rest=test_name.strip())

license: mit
lang: Python

commit: 61deb461f2a36413cbb6108e7e0e86fc81f44891
subject: Update to work with invoke >= 0.13
repos: mopidy/mopidy,jcass77/mopidy,jcass77/mopidy,adamcik/mopidy,jodal/mopidy,jodal/mopidy,jcass77/mopidy,kingosticks/mopidy,adamcik/mopidy,kingosticks/mopidy,mopidy/mopidy,jodal/mopidy,kingosticks/mopidy,adamcik/mopidy,mopidy/mopidy
old_file: tasks.py
new_file: tasks.py
new_contents:

import sys

from invoke import run, task

@task
def docs(ctx, watch=False, warn=False):
    if watch:
        return watcher(docs)
    run('make -C docs/ html', warn=warn)

@task
def test(ctx, path=None, coverage=False, watch=False, warn=False):
    if watch:
        return watcher(test, path=path, coverage=coverage)
    path = path or 'tests/'
    cmd = 'pytest'
    if coverage:
        cmd += ' --cov=mopidy --cov-report=term-missing'
    cmd += ' %s' % path
    run(cmd, pty=True, warn=warn)

@task
def lint(ctx, watch=False, warn=False):
    if watch:
        return watcher(lint)
    run('flake8', warn=warn)

@task
def update_authors(ctx):
    # Keep authors in the order of appearance and use awk to filter out dupes
    run("git log --format='- %aN <%aE>' --reverse | awk '!x[$0]++' > AUTHORS")

def watcher(task, *args, **kwargs):
    while True:
        run('clear')
        kwargs['warn'] = True
        task(*args, **kwargs)
        try:
            run(
                'inotifywait -q -e create -e modify -e delete '
                '--exclude ".*\.(pyc|sw.)" -r docs/ mopidy/ tests/')
        except KeyboardInterrupt:
            sys.exit()

old_contents:

import sys

from invoke import run, task

@task
def docs(watch=False, warn=False):
    if watch:
        return watcher(docs)
    run('make -C docs/ html', warn=warn)

@task
def test(path=None, coverage=False, watch=False, warn=False):
    if watch:
        return watcher(test, path=path, coverage=coverage)
    path = path or 'tests/'
    cmd = 'pytest'
    if coverage:
        cmd += ' --cov=mopidy --cov-report=term-missing'
    cmd += ' %s' % path
    run(cmd, pty=True, warn=warn)

@task
def lint(watch=False, warn=False):
    if watch:
        return watcher(lint)
    run('flake8', warn=warn)

@task
def update_authors():
    # Keep authors in the order of appearance and use awk to filter out dupes
    run("git log --format='- %aN <%aE>' --reverse | awk '!x[$0]++' > AUTHORS")

def watcher(task, *args, **kwargs):
    while True:
        run('clear')
        kwargs['warn'] = True
        task(*args, **kwargs)
        try:
            run(
                'inotifywait -q -e create -e modify -e delete '
                '--exclude ".*\.(pyc|sw.)" -r docs/ mopidy/ tests/')
        except KeyboardInterrupt:
            sys.exit()

license: apache-2.0
lang: Python

commit: 45f098b3664a11ef51cd66a11773bab923b02c91
subject: Make all exceptions inherit from ValueError
repos: arthurdejong/python-stdnum,arthurdejong/python-stdnum,arthurdejong/python-stdnum
old_file: stdnum/exceptions.py
new_file: stdnum/exceptions.py
new_contents:

# exceptions.py - collection of stdnum exceptions
# coding: utf-8
#
# Copyright (C) 2013-2022 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA

"""Collection of exceptions.

The validation functions of stdnum should raise one of the below
exceptions when validation of the number fails.
"""

__all__ = ['ValidationError', 'InvalidFormat', 'InvalidChecksum',
           'InvalidLength', 'InvalidComponent']

class ValidationError(ValueError):
    """Top-level error for validating numbers.

    This exception should normally not be raised, only subclasses of this
    exception."""

    def __str__(self):
        """Return the exception message."""
        return ''.join(self.args[:1]) or getattr(self, 'message', '')

class InvalidFormat(ValidationError):  # noqa N818
    """Something is wrong with the format of the number.

    This generally means characters or delimiters that are not allowed are
    part of the number or required parts are missing."""

    message = 'The number has an invalid format.'

class InvalidChecksum(ValidationError):  # noqa N818
    """The number's internal checksum or check digit does not match."""

    message = "The number's checksum or check digit is invalid."

class InvalidLength(InvalidFormat):  # noqa N818
    """The length of the number is wrong."""

    message = 'The number has an invalid length.'

class InvalidComponent(ValidationError):  # noqa N818
    """One of the parts of the number has an invalid reference.

    Some part of the number refers to some external entity like a country
    code, a date or a predefined collection of values. The number contains
    some invalid reference."""

    message = 'One of the parts of the number are invalid or unknown.'

old_contents:

# exceptions.py - collection of stdnum exceptions
# coding: utf-8
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA

"""Collection of exceptions.

The validation functions of stdnum should raise one of the below
exceptions when validation of the number fails.
"""

__all__ = ['ValidationError', 'InvalidFormat', 'InvalidChecksum',
           'InvalidLength', 'InvalidComponent']

class ValidationError(Exception):
    """Top-level error for validating numbers.

    This exception should normally not be raised, only subclasses of this
    exception."""

    def __str__(self):
        """Return the exception message."""
        return ''.join(self.args[:1]) or getattr(self, 'message', '')

class InvalidFormat(ValidationError):  # noqa N818
    """Something is wrong with the format of the number.

    This generally means characters or delimiters that are not allowed are
    part of the number or required parts are missing."""

    message = 'The number has an invalid format.'

class InvalidChecksum(ValidationError):  # noqa N818
    """The number's internal checksum or check digit does not match."""

    message = "The number's checksum or check digit is invalid."

class InvalidLength(InvalidFormat):  # noqa N818
    """The length of the number is wrong."""

    message = 'The number has an invalid length.'

class InvalidComponent(ValidationError):  # noqa N818
    """One of the parts of the number has an invalid reference.

    Some part of the number refers to some external entity like a country
    code, a date or a predefined collection of values. The number contains
    some invalid reference."""

    message = 'One of the parts of the number are invalid or unknown.'

license: lgpl-2.1
lang: Python

ad9f0c488bf761fe83714377fce06ed93d2ec5f3
Update navigation controller
PHSCRC/boxbot
controller/navigation.py
controller/navigation.py
import asyncio


class DriveMotorControl:
    def __init__(self, left=0, right=1):
        self.loop = asyncio.get_event_loop()
        self._left = open("/var/run/motor{}".format(left),"w")
        self._right = open("/var/run/motor{}".format(right),"w")
        self.__left = 0
        self.__right = 0
        self.loop.run_until_complete(self.connect())

    @asyncio.coroutine
    def connect(self):
        self._left, lpr = yield from self.loop.connect_write_pipe(asyncio.Protocol, self._left)
        self._right, rpr = yield from self.loop.connect_write_pipe(asyncio.Protocol, self._right)

    @property
    def left(self):
        return self.__left

    @left.setter
    def left(self, val):
        self.__left = val
        self._left.write("{}\n".format(val).encode())

    @property
    def right(self):
        return self.__right

    @right.setter
    def right(self, val):
        self.__right = val
        self._right.write("{}\n".format(val).encode())

    def stop(self):
        self.left = 0
        self.right = 0

    def forward(self):
        self.left = 1
        self.right = -1

    def turnright(self):
        self.right = 1
        self.left = 1
        return self.loop.call_later(1, self.stop)

    def turnleft(self):
        self.right = -1
        self.left = -1
        return self.loop.call_later(1, self.stop)
import asyncio


class ControlProtocol:
    def connection_made(transport):
        pass

    def connection_lost(exc):
        pass


class DriveMotorControl:
    def __init__(self, left=0, right=1):
        self.loop = asyncio.get_event_loop()
        left = open("/var/run/motor{}".format(left),"w")
        right = open("/var/run/motor{}".format(right),"w")
        self.__left = 0
        self.__right = 0
        self._left, lpr = self.loop.connect_write_pipe(asyncio.Protocol, left)
        self._right, rpr = self.loop.connect_write_pipe(asyncio.Protocol, right)

    @property
    def left(self):
        return self.__left

    @left.setter
    def setleft(self, val):
        self.__left = val
        self._left.write("{}\n".format(val))

    @property
    def right(self):
        return self.__right

    @right.setter
    def setright(self, val):
        self.__right = val
        self._right.write("{}\n".format(val))

    def stop(self):
        self.left = 0
        self.right = 0

    def forward(self):
        self.left = 1
        self.right = -1

    def turnright(self):
        self.right = 1
        self.left = 1
        self.call_later(1, self.stop)

    def turnleft(self):
        self.right = -1
        self.left = -1
        self.call_later(1, self.stop)
mit
Python
63a2deeb5602eb9834232a592bac16501bb8c8de
Fix program name when using __main__
pjbull/cookiecutter,dajose/cookiecutter,michaeljoseph/cookiecutter,luzfcb/cookiecutter,terryjbates/cookiecutter,audreyr/cookiecutter,stevepiercy/cookiecutter,hackebrot/cookiecutter,stevepiercy/cookiecutter,pjbull/cookiecutter,dajose/cookiecutter,hackebrot/cookiecutter,luzfcb/cookiecutter,michaeljoseph/cookiecutter,terryjbates/cookiecutter,audreyr/cookiecutter
cookiecutter/__main__.py
cookiecutter/__main__.py
"""Allow cookiecutter to be executable through `python -m cookiecutter`.""" from __future__ import absolute_import from .cli import main if __name__ == "__main__": # pragma: no cover main(prog_name="cookiecutter")
"""Allow cookiecutter to be executable through `python -m cookiecutter`.""" from __future__ import absolute_import from .cli import main if __name__ == "__main__": main()
bsd-3-clause
Python
f4ba2cba93222b4dd494caf487cdd6be4309e41a
Update labels for application form
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
studygroups/forms.py
studygroups/forms.py
from django import forms

from studygroups.models import StudyGroupSignup, Application
from localflavor.us.forms import USPhoneNumberField


class ApplicationForm(forms.ModelForm):
    mobile = USPhoneNumberField(required=False)

    class Meta:
        model = Application
        labels = {
            'name': 'Please tell us what to call you',
            'mobile': 'What is your mobile number?',
            'contact_method': 'Preferred Method of Contact.',
            'computer_access': 'Do you have access to a computer outside of the library?',
            'goals': 'In one sentence, please explain your goals for taking this course.',
            'support': 'A successful study group requires the support of all of its members. How will you help your peers achieve their goals?',
            'study_groups': 'Which course are you applying for? (by applying for a specific course, you agree to attend sessions at the specified time and location).',
        }
        widgets = {
            'study_groups': forms.CheckboxSelectMultiple,
        }
        fields = '__all__'


class SignupForm(forms.ModelForm):
    mobile = USPhoneNumberField(required=False)

    class Meta:
        model = StudyGroupSignup
        exclude = []
        widgets = {
            'study_group': forms.HiddenInput
        }


class EmailForm(forms.Form):
    study_group_id = forms.IntegerField(widget=forms.HiddenInput)
    subject = forms.CharField()
    body = forms.CharField(widget=forms.Textarea)
    sms_body = forms.CharField(max_length=160, widget=forms.Textarea)
from django import forms

from studygroups.models import StudyGroupSignup, Application
from localflavor.us.forms import USPhoneNumberField


class ApplicationForm(forms.ModelForm):
    mobile = USPhoneNumberField(required=False)

    class Meta:
        model = Application
        labels = {
            'name': 'Please tell us what to call you',
            'mobile': 'What is your mobile number?',
            'contact_method': 'Please tell us how would you perfer us to contact us',
            'computer_access': 'Do you have normal everyday access to the computer?',
            'goals': 'Please tell what your learning goals are',
            'support': '',
        }
        widgets = {
            'study_groups': forms.CheckboxSelectMultiple,
        }
        fields = '__all__'


class SignupForm(forms.ModelForm):
    mobile = USPhoneNumberField(required=False)

    class Meta:
        model = StudyGroupSignup
        exclude = []
        widgets = {
            'study_group': forms.HiddenInput
        }


class EmailForm(forms.Form):
    study_group_id = forms.IntegerField(widget=forms.HiddenInput)
    subject = forms.CharField()
    body = forms.CharField(widget=forms.Textarea)
    sms_body = forms.CharField(max_length=160, widget=forms.Textarea)
mit
Python
95686be0b45e350791c85c757acd450623b14d60
test OK
josuebrunel/yahoo-fantasy-sport,unpairestgood/yahoo-fantasy-sport,unpairestgood/yahoo-fantasy-sport,josuebrunel/yahoo-fantasy-sport
tests.py
tests.py
import pdb
import logging
import unittest

from yahoo_oauth import OAuth2, OAuth1

from fantasy_sport import FantasySport
from fantasy_sport.utils import pretty_json, pretty_xml

logging.getLogger('yahoo_oauth').setLevel(logging.WARNING)


class TestFantasySport(unittest.TestCase):

    def setUp(self,):
        oauth = OAuth1(None, None, from_file='oauth.json',base_url='http://fantasysports.yahooapis.com/fantasy/v2/')
        self.yfs = FantasySport(oauth)

    def test_get_games_info(self,):
        response = self.yfs.get_games_info(['nfl'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_games_info_with_login(self,):
        response = self.yfs.get_games_info(['mlb'], use_login=True)
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_leagues(self):
        response = self.yfs.get_leagues(['238.l.627060'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_leagues_with_multiple_keys(self,):
        self.yfs.fmt = 'xml'
        response = self.yfs.get_leagues(('238.l.627060','238.l.627062'))
        self.yfs.fmt = 'json'
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_xml(response.content))

    def test_get_leagues_scoreboard(self):
        response = self.yfs.get_leagues_scoreboard(['238.l.627060'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_leagues_settings(self):
        response = self.yfs.get_leagues_settings(['238.l.627060','238.l.627062'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))
import pdb
import logging
import unittest

from yahoo_oauth import OAuth2, OAuth1

from fantasy_sport import FantasySport
from fantasy_sport.utils import pretty_json, pretty_xml

logging.getLogger('yahoo_oauth').setLevel(logging.WARNING)


class TestFantasySport(unittest.TestCase):

    def setUp(self,):
        oauth = OAuth1(None, None, from_file='oauth.json',base_url='http://fantasysports.yahooapis.com/fantasy/v2/')
        self.yfs = FantasySport(oauth)

    def test_get_games_info(self,):
        response = self.yfs.get_games_info(['nfl'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_games_info_with_login(self,):
        response = self.yfs.get_games_info(['mlb'], use_login=True)
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_leagues(self):
        response = self.yfs.get_leagues(['238.l.627060'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))

    def test_get_leagues_with_multiple_keys(self,):
        self.yfs.fmt = 'xml'
        response = self.yfs.get_leagues(('238.l.627060','238.l.627062'))
        self.yfs.fmt = 'json'
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_xml(response.content))

    def test_get_leagues_scoreboard(self):
        response = self.yfs.get_leagues_scoreboard(['238.l.627060'])
        self.assertEqual(response.status_code, 200)
        logging.debug(pretty_json(response.content))
mit
Python
90bbb6604fdbb16c5a9d4390a429f2ce1c31035c
Add more tests, pend all dysfunctional tests.
fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator
tests/acceptance/test_rate.py
tests/acceptance/test_rate.py
import re

from pytest import fixture
from pytest import mark

from adhocracy_core.testing import annotator_login
from .shared import wait
from .shared import get_column_listing
from .shared import get_list_element
from .shared import get_listing_create_form
from .shared import login_god

from .test_comment import create_comment


class TestRate:

    def test_create(self, browser):
        login_god(browser)
        comment = create_comment(browser, 'comment1')
        assert comment is not None

    @mark.skipif(True, reason="pending because of bad weather")
    def test_upvote(self, browser):
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
        button = rateable.find_by_css('.rate-pro')
        button.click()

        def check_result():
            total = rateable.find_by_css('.rate-difference')
            return total[0].text == '+1'

        assert wait(check_result)

    def test_downvote(self, browser):
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
        button = rateable.find_by_css('.rate-contra')
        button.click()

        def check_result():
            total = rateable.find_by_css('.rate-difference')
            return total[0].text == '-1'

        assert wait(check_result)

    def test_neutralvote(self, browser):
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
        button = rateable.find_by_css('.rate-neutral')
        button.click()

        def check_result():
            total = rateable.find_by_css('.rate-difference')
            return total[0].text == '0'

        assert wait(check_result)

    @mark.skipif(True, reason="pending because of bad weather")
    def test_detaillist(self, browser):
        # FIXME: the button appears to be surprisingly click
        # resistant. since we don't have any clues as to why, we
        # postponed the investigations.
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment').first
        button = rateable.find_by_css('.rate-difference').first
        button.click()

        def check_result():
            try:
                auditTrail = rateable.find_by_css('.rate-details').first
                print(auditTrail)
                return "god" in auditTrail.text and "0" in auditTrail.text
            except Exception as e:
                print(e)
                return False

        assert wait(check_result)

    @mark.skipif(True, reason="pending because of bad weather")
    def test_multi_rateable(self, browser):
        # FIXME: all rate widgets are totalled over all others. there is
        # something wrong with the filter for the rating target (object).
        # write a test for that, then fix it!
        pass

    @mark.skipif(True, reason="pending because of bad weather")
    def test_multi_user(self, browser):
        # FIXME: test many users and more interesting totals and audit
        # trails.
        pass

    @mark.skipif(True, reason="pending because of bad weather")
    def test_authorisations(self, browser):
        # FIXME: test replacing god user with one that is allowed to
        # rate, but not much more.
        pass
from pytest import fixture
from pytest import mark

from adhocracy_core.testing import annotator_login
from .shared import wait
from .shared import get_column_listing
from .shared import get_list_element
from .shared import get_listing_create_form
from .shared import login_god

from .test_comment import create_comment


class TestRate:

    def test_create(self, browser):
        login_god(browser)
        comment = create_comment(browser, 'comment1')
        assert comment is not None

    def test_upvote(self, browser):
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
        pro_button = rateable.find_by_css('.rate-pro')
        pro_button.click()

        def check_result():
            total = rateable.find_by_css('.rate-difference')
            return total[0].text == '+1'

        assert wait(check_result)

    def test_downvote(self, browser):
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
        pro_button = rateable.find_by_css('.rate-contra')
        pro_button.click()

        def check_result():
            total = rateable.find_by_css('.rate-difference')
            return total[0].text == '-1'

        assert wait(check_result)

    def test_neutralvote(self, browser):
        rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
        pro_button = rateable.find_by_css('.rate-neutral')
        pro_button.click()

        def check_result():
            total = rateable.find_by_css('.rate-difference')
            return total[0].text == '0'

        assert wait(check_result)

    # FIXME: test detail list.
    # FIXME: test replacing god user with one that is allowed to rate, but not much more.
    # FIXME: test manu users and more interesting totals.
agpl-3.0
Python
dc57d4b95e39f756858dc1d73c8f221f0bb1956c
add stubs
vastcharade/Vintageous,wbcustc/Vintageous,guillermooo-forks/Vintageous,himacro/Vintageous,denim2x/Vintageous,guillermooo-forks/Vintageous,gerardroche/Vintageous,himacro/Vintageous,denim2x/Vintageous,guillermooo-forks/Vintageous,zhangtuoparis13/Vintageous,zhangtuoparis13/Vintageous,xushuwei202/Vintageous,xushuwei202/Vintageous,denim2x/Vintageous,zhangtuoparis13/Vintageous,vastcharade/Vintageous,himacro/Vintageous,wbcustc/Vintageous,wbcustc/Vintageous,xushuwei202/Vintageous
tests/commands/test__vi_cc.py
tests/commands/test__vi_cc.py
import unittest

from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE

from Vintageous.tests.commands import set_text
from Vintageous.tests.commands import add_selection
from Vintageous.tests.commands import get_sel
from Vintageous.tests.commands import first_sel
from Vintageous.tests.commands import BufferTest


class Test_vi_cc_InModeInternalNormal(BufferTest):
    def testSelectsWholeLine(self):
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 2), (1, 2)))

        self.view.run_command('_vi_cc_motion', {'mode': _MODE_INTERNAL_NORMAL, 'count': 1})

        self.assertEqual(self.R((1, 0), (1, 7)), first_sel(self.view))

    def testDeletesWholeLine(self):
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 7)))

        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})

        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\nfoo bar\n')

    def testKeepsLeadingWhitespace(self):
        set_text(self.view, ''.join(('foo bar\n\t foo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 10)))

        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})

        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\t \nfoo bar\n')

    @unittest.skip("Implement")
    def testCanDeleteWithCount(self):
        self.assertTrue(False)

    @unittest.skip("Implement")
    def testDeletedLinesAreYanked(self):
        self.assertTrue(False)
import unittest

from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE

from Vintageous.tests.commands import set_text
from Vintageous.tests.commands import add_selection
from Vintageous.tests.commands import get_sel
from Vintageous.tests.commands import first_sel
from Vintageous.tests.commands import BufferTest


class Test_vi_cc_InModeInternalNormal(BufferTest):
    def testSelectsWholeLine(self):
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 2), (1, 2)))

        self.view.run_command('_vi_cc_motion', {'mode': _MODE_INTERNAL_NORMAL, 'count': 1})

        self.assertEqual(self.R((1, 0), (1, 7)), first_sel(self.view))

    def testDeletesWholeLine(self):
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 7)))

        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})

        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\nfoo bar\n')

    def testKeepsLeadingWhitespace(self):
        set_text(self.view, ''.join(('foo bar\n\t foo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 10)))

        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})

        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\t \nfoo bar\n')

    @unittest.skip("Implement this")
    def testCanDeleteWithCount(self):
        self.assertTrue(False)
mit
Python
6755255332039ab3c0ea60346f61420b52e2f474
Fix intermittent failure in l10n language selector test
sgarrity/bedrock,TheoChevalier/bedrock,craigcook/bedrock,mkmelin/bedrock,TheoChevalier/bedrock,Sancus/bedrock,hoosteeno/bedrock,sylvestre/bedrock,schalkneethling/bedrock,Sancus/bedrock,analytics-pros/mozilla-bedrock,sylvestre/bedrock,glogiotatidis/bedrock,jgmize/bedrock,kyoshino/bedrock,mkmelin/bedrock,gerv/bedrock,davehunt/bedrock,alexgibson/bedrock,jpetto/bedrock,sylvestre/bedrock,sgarrity/bedrock,gerv/bedrock,craigcook/bedrock,ericawright/bedrock,analytics-pros/mozilla-bedrock,gauthierm/bedrock,davehunt/bedrock,pascalchevrel/bedrock,flodolo/bedrock,mozilla/bedrock,MichaelKohler/bedrock,alexgibson/bedrock,MichaelKohler/bedrock,gauthierm/bedrock,flodolo/bedrock,ericawright/bedrock,sgarrity/bedrock,analytics-pros/mozilla-bedrock,hoosteeno/bedrock,jpetto/bedrock,flodolo/bedrock,gauthierm/bedrock,gauthierm/bedrock,CSCI-462-01-2017/bedrock,jgmize/bedrock,mkmelin/bedrock,ericawright/bedrock,CSCI-462-01-2017/bedrock,CSCI-462-01-2017/bedrock,Sancus/bedrock,ericawright/bedrock,TheJJ100100/bedrock,glogiotatidis/bedrock,alexgibson/bedrock,sgarrity/bedrock,kyoshino/bedrock,MichaelKohler/bedrock,MichaelKohler/bedrock,TheJJ100100/bedrock,craigcook/bedrock,gerv/bedrock,pascalchevrel/bedrock,kyoshino/bedrock,mozilla/bedrock,jpetto/bedrock,TheoChevalier/bedrock,sylvestre/bedrock,glogiotatidis/bedrock,schalkneethling/bedrock,schalkneethling/bedrock,mozilla/bedrock,hoosteeno/bedrock,alexgibson/bedrock,CSCI-462-01-2017/bedrock,mozilla/bedrock,analytics-pros/mozilla-bedrock,mkmelin/bedrock,flodolo/bedrock,kyoshino/bedrock,davehunt/bedrock,TheJJ100100/bedrock,gerv/bedrock,TheoChevalier/bedrock,pascalchevrel/bedrock,craigcook/bedrock,Sancus/bedrock,jpetto/bedrock,hoosteeno/bedrock,pascalchevrel/bedrock,davehunt/bedrock,glogiotatidis/bedrock,jgmize/bedrock,schalkneethling/bedrock,TheJJ100100/bedrock,jgmize/bedrock
tests/functional/test_l10n.py
tests/functional/test_l10n.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import random

import pytest

from ..pages.home import HomePage


@pytest.mark.nondestructive
def test_change_language(base_url, selenium):
    page = HomePage(base_url, selenium).open()
    initial = page.footer.language
    # avoid selecting the same language or locales that have homepage redirects
    excluded = [initial, 'ja', 'ja-JP-mac', 'zh-TW', 'zh-CN']
    available = [l for l in page.footer.languages if l not in excluded]
    new = random.choice(available)
    page.footer.select_language(new)
    assert '/{0}/'.format(new) in selenium.current_url, 'Language is not in URL'
    assert new == page.footer.language, 'Language has not been selected'
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import random

import pytest

from ..pages.home import HomePage


@pytest.mark.nondestructive
def test_change_language(base_url, selenium):
    page = HomePage(base_url, selenium).open()
    initial = page.footer.language
    # avoid selecting the same language or locales that have homepage redirects
    excluded = [initial, 'ja', 'ja-JP-mac', 'zh-TW', 'zh-CN']
    available = [l for l in page.footer.languages if l not in excluded]
    new = random.choice(available)
    page.footer.select_language(new)
    assert new in selenium.current_url, 'Language is not in URL'
    assert new == page.footer.language, 'Language has not been selected'
mpl-2.0
Python
da6c8f6daee4baa3798ab2c4b49fbc780e46ee3a
Rename test case for ObjectLoader to match
ironfroggy/straight.plugin,pombredanne/straight.plugin
tests.py
tests.py
#!/usr/bin/env python

import sys
import os
import unittest

from straight.plugin import loaders


class ModuleLoaderTestCase(unittest.TestCase):

    def setUp(self):
        self.loader = loaders.ModuleLoader()
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'more-test-plugins'))
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'some-test-plugins'))

    def tearDown(self):
        del sys.path[-1]
        del sys.path[-1]

    def test_load(self):
        modules = list(self.loader.load('testplugin'))
        assert len(modules) == 2, modules

    def test_plugin(self):
        assert self.loader.load('testplugin')[0].do(1) == 2


class ObjectLoaderTestCase(unittest.TestCase):

    def setUp(self):
        self.loader = loaders.ObjectLoader()
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'more-test-plugins'))
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'some-test-plugins'))

    def tearDown(self):
        del sys.path[-1]
        del sys.path[-1]

    def test_load_all(self):
        objects = list(self.loader.load('testplugin'))

        self.assertEqual(len(objects), 2, str(objects)[:100] + ' ...')


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python

import sys
import os
import unittest

from straight.plugin import loaders


class ModuleLoaderTestCase(unittest.TestCase):

    def setUp(self):
        self.loader = loaders.ModuleLoader()
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'more-test-plugins'))
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'some-test-plugins'))

    def tearDown(self):
        del sys.path[-1]
        del sys.path[-1]

    def test_load(self):
        modules = list(self.loader.load('testplugin'))
        assert len(modules) == 2, modules

    def test_plugin(self):
        assert self.loader.load('testplugin')[0].do(1) == 2


class SelectiveLoaderTestCase(unittest.TestCase):

    def setUp(self):
        self.loader = loaders.ObjectLoader()
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'more-test-plugins'))
        sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'some-test-plugins'))

    def tearDown(self):
        del sys.path[-1]
        del sys.path[-1]

    def test_load_all(self):
        objects = list(self.loader.load('testplugin'))

        self.assertEqual(len(objects), 2, str(objects)[:100] + ' ...')


if __name__ == '__main__':
    unittest.main()
mit
Python
a18e195734983849a90786a4631987466952a232
Set version to 0.4.2 in __init__.py
vovanbo/trafaretrecord,vovanbo/trafaretrecord
lib/recordclass/__init__.py
lib/recordclass/__init__.py
# The MIT License (MIT)
#
# Copyright (c) <2011-2014> <Shibzukhov Zaur, szport at gmail dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from .memoryslots import memoryslots, itemgetset
from .record import recordclass

__version__ = '0.4.2'
# The MIT License (MIT)
#
# Copyright (c) <2011-2014> <Shibzukhov Zaur, szport at gmail dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from .memoryslots import memoryslots, itemgetset
from .record import recordclass

__version__ = '0.4'
mit
Python
d537ea32462c7ef46634d1527702c4c4a6d37e1e
Fix UDF test, take two
caseyching/Impala,theyaa/Impala,lirui-intel/Impala,ibmsoe/ImpalaPPC,gistic/PublicSpatialImpala,mapr/impala,cgvarela/Impala,rampage644/impala-cut,cchanning/Impala,cloudera/recordservice,caseyching/Impala,grundprinzip/Impala,placrosse/ImpalaToGo,kapilrastogi/Impala,brightchen/Impala,brightchen/Impala,tempbottle/Impala,rampage644/impala-cut,kapilrastogi/Impala,andybab/Impala,gistic/PublicSpatialImpala,bratatidas9/Impala-1,ImpalaToGo/ImpalaToGo,henryr/Impala,scalingdata/Impala,scalingdata/Impala,henryr/Impala,lirui-intel/Impala,tempbottle/Impala,henryr/Impala,placrosse/ImpalaToGo,gerashegalov/Impala,ibmsoe/ImpalaPPC,brightchen/Impala,ImpalaToGo/ImpalaToGo,theyaa/Impala,ImpalaToGo/ImpalaToGo,brightchen/Impala,cloudera/recordservice,XiaominZhang/Impala,gistic/PublicSpatialImpala,rampage644/impala-cut,lirui-intel/Impala,bowlofstew/Impala,cchanning/Impala,AtScaleInc/Impala,cloudera/recordservice,lnliuxing/Impala,lirui-intel/Impala,cloudera/recordservice,gistic/PublicSpatialImpala,cchanning/Impala,ibmsoe/ImpalaPPC,rdblue/Impala,rampage644/impala-cut,theyaa/Impala,cloudera/recordservice,theyaa/Impala,caseyching/Impala,rdblue/Impala,grundprinzip/Impala,scalingdata/Impala,bratatidas9/Impala-1,cgvarela/Impala,cgvarela/Impala,andybab/Impala,lnliuxing/Impala,andybab/Impala,theyaa/Impala,kapilrastogi/Impala,cloudera/recordservice,kapilrastogi/Impala,XiaominZhang/Impala,lnliuxing/Impala,theyaa/Impala,caseyching/Impala,grundprinzip/Impala,tempbottle/Impala,rampage644/impala-cut,gerashegalov/Impala,cchanning/Impala,caseyching/Impala,kapilrastogi/Impala,cgvarela/Impala,caseyching/Impala,rdblue/Impala,rdblue/Impala,ImpalaToGo/ImpalaToGo,lirui-intel/Impala,rdblue/Impala,gerashegalov/Impala,cchanning/Impala,tempbottle/Impala,bowlofstew/Impala,rdblue/Impala,caseyching/Impala,rdblue/Impala,XiaominZhang/Impala,cgvarela/Impala,mapr/impala,bowlofstew/Impala,XiaominZhang/Impala,henryr/Impala,gerashegalov/Impala,cloudera/recordservice,theyaa/Impala,ibmsoe/ImpalaPPC,mapr/impala,grundprinzip/Impala,cchanning/Impala,gistic/PublicSpatialImpala,andybab/Impala,mapr/impala,lnliuxing/Impala,bowlofstew/Impala,bowlofstew/Impala,grundprinzip/Impala,henryr/Impala,bowlofstew/Impala,scalingdata/Impala,lnliuxing/Impala,ImpalaToGo/ImpalaToGo,scalingdata/Impala,gerashegalov/Impala,mapr/impala,bratatidas9/Impala-1,kapilrastogi/Impala,AtScaleInc/Impala,AtScaleInc/Impala,lirui-intel/Impala,grundprinzip/Impala,tempbottle/Impala,bratatidas9/Impala-1,placrosse/ImpalaToGo,gistic/PublicSpatialImpala,AtScaleInc/Impala,XiaominZhang/Impala,henryr/Impala,brightchen/Impala,bratatidas9/Impala-1,bowlofstew/Impala,gerashegalov/Impala,brightchen/Impala,AtScaleInc/Impala,scalingdata/Impala,tempbottle/Impala,lnliuxing/Impala,placrosse/ImpalaToGo,XiaominZhang/Impala,bratatidas9/Impala-1,placrosse/ImpalaToGo,AtScaleInc/Impala,bratatidas9/Impala-1,lnliuxing/Impala,gerashegalov/Impala,ImpalaToGo/ImpalaToGo,ibmsoe/ImpalaPPC,cgvarela/Impala,rampage644/impala-cut,cchanning/Impala,andybab/Impala,brightchen/Impala,cgvarela/Impala,lirui-intel/Impala,kapilrastogi/Impala,andybab/Impala,placrosse/ImpalaToGo,XiaominZhang/Impala,ibmsoe/ImpalaPPC,tempbottle/Impala,ibmsoe/ImpalaPPC
tests/query_test/test_udfs.py
tests/query_test/test_udfs.py
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.

from tests.common.test_vector import *
from tests.common.impala_test_suite import *

class TestUdfs(ImpalaTestSuite):
  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestUdfs, cls).add_test_dimensions()
    # UDFs require codegen
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('exec_option')['disable_codegen'] == False)
    # There is no reason to run these tests using all dimensions.
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and\
        v.get_value('table_format').compression_codec == 'none')

  # This must run serially because other tests executing 'invalidate metadata' will nuke
  # all loaded functions.
  # TODO: This can be run in parallel once functions are persisted correctly.
  @pytest.mark.execute_serially
  def test_udfs(self, vector):
    self.run_test_case('QueryTest/udf', vector)
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.

from tests.common.test_vector import *
from tests.common.impala_test_suite import *

class TestUdfs(ImpalaTestSuite):
  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestUdfs, cls).add_test_dimensions()
    # UDFs require codegen
    cls.TestMatrix.add_constraint(
        lambda v: v.get_value('exec_option')['disable_codegen'] == False)
    # There is no reason to run these tests using all dimensions.
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and\
        v.get_value('table_format').compression_codec == 'none')

  def test_udfs(self, vector):
    self.run_test_case('QueryTest/udf', vector)
apache-2.0
Python
9c0a83da524831cf557e24ad0a61c160c856dec9
move definitions to the bottom again
fungusakafungus/cloudformation-jsonschema
tools.py
tools.py
# coding: utf-8
from pyquery import PyQuery as q
import json
from collections import OrderedDict

this = None
BASE = 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/'


def load(filename='resource.json'):
    schema = json.load(open(filename), object_pairs_hook=OrderedDict)
    return schema


def get_pq(uri=BASE + 'aws-template-resource-type-ref.html'):
    h = q(uri, headers={
        'user-agent': 'https://github.com/fungusakafungus/cloudformation-jsonschema'
    })
    h.make_links_absolute()
    return h


def all_resource_properties_hrefs():
    h = get_pq(BASE + 'aws-product-property-reference.html')
    res = OrderedDict(
        (a1.attr('href'), a1.text())
        for a1 in [q(a) for a in h('#main-col-body li a')]
    )
    return res


def all_resource_hrefs():
    h = get_pq(BASE + 'aws-template-resource-type-ref.html')
    all_resource_hrefs = OrderedDict(
        (a1.text().strip(), a1.attr('href'))
        for a1 in [q(a) for a in h('#main-col-body li a')])
    return all_resource_hrefs


def write(schema, filename='resource.json'):
    with open(filename, 'w') as f:
        f.write(json.dumps(schema, indent=4, separators=(',', ': ')))


def print_(schema):
    return json.dumps(schema, indent=4)


def all_resource_patterns_by_name():
    h = get_pq(BASE + 'aws-template-resource-type-ref.html')
    all_resource_patterns_by_name = OrderedDict(
        (
            a.strip(),
            {'properties': {'Type': {'enum': [a.strip()]}}}
        )
        for a in h('#main-col-body li a').map(lambda x: this.text)
    )
    return all_resource_patterns_by_name


def resources_dict(schema):
    if 'definitions' not in schema:
        schema['definitions'] = OrderedDict(
            {'resource_types': OrderedDict()}
        )
    return schema['definitions']['resource_types']


def get_oneOf():
    res_names = all_resource_patterns_by_name().keys()
    return [{"$ref": "#/definitions/resource_types/" + i} for i in res_names]


def update_all_resource_patterns_by_name(schema):
    o = resources_dict(schema)
    new = all_resource_patterns_by_name()
    new.update(o)
    schema['oneOf'] = get_oneOf()
    schema['definitions']['resource_types'] = new
    # put definitions last
    schema['definitions'] = schema.pop('definitions')
# coding: utf-8
from pyquery import PyQuery as q
import json
from collections import OrderedDict

this = None
BASE = 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/'


def load(filename='resource.json'):
    schema = json.load(open(filename), object_pairs_hook=OrderedDict)
    return schema


def get_pq(uri=BASE + 'aws-template-resource-type-ref.html'):
    h = q(uri, headers={
        'user-agent': 'https://github.com/fungusakafungus/cloudformation-jsonschema'
    })
    h.make_links_absolute()
    return h


def all_resource_properties_hrefs():
    h = get_pq(BASE + 'aws-product-property-reference.html')
    res = OrderedDict(
        (a1.attr('href'), a1.text())
        for a1 in [q(a) for a in h('#main-col-body li a')]
    )
    return res


def all_resource_hrefs():
    h = get_pq(BASE + 'aws-template-resource-type-ref.html')
    all_resource_hrefs = OrderedDict(
        (a1.text().strip(), a1.attr('href'))
        for a1 in [q(a) for a in h('#main-col-body li a')])
    return all_resource_hrefs


def write(schema, filename='resource.json'):
    with open(filename, 'w') as f:
        f.write(json.dumps(schema, indent=4, separators=(',', ': ')))


def print_(schema):
    return json.dumps(schema, indent=4)


def all_resource_patterns_by_name():
    h = get_pq(BASE + 'aws-template-resource-type-ref.html')
    all_resource_patterns_by_name = OrderedDict(
        (
            a.strip(),
            {'properties': {'Type': {'enum': [a.strip()]}}}
        )
        for a in h('#main-col-body li a').map(lambda x: this.text)
    )
    return all_resource_patterns_by_name


def resources_dict(schema):
    if 'definitions' not in schema:
        schema['definitions'] = OrderedDict(
            {'resource_types': OrderedDict()}
        )
    return schema['definitions']['resource_types']


def get_oneOf():
    res_names = all_resource_patterns_by_name().keys()
    return [{"$ref": "#/definitions/resource_types/" + i} for i in res_names]


def update_all_resource_patterns_by_name(schema):
    o = resources_dict(schema)
    new = all_resource_patterns_by_name()
    new.update(o)
    schema['oneOf'] = get_oneOf()
    schema['definitions']['resource_types'] = new
mit
Python
586cd6c864fdbdb3ac20aa49bdc6c550fa93aa2f
fix a testdir straggler
pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments
tests/test_latex_formatter.py
tests/test_latex_formatter.py
# -*- coding: utf-8 -*-
"""
    Pygments LaTeX formatter tests
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :copyright: 2006-2007 by Georg Brandl.
    :license: BSD, see LICENSE for more details.
"""

import os
import unittest
import tempfile

from pygments.formatters import LatexFormatter
from pygments.lexers import PythonLexer

from support import test_file


class LatexFormatterTest(unittest.TestCase):

    def test_valid_output(self):
        tokensource = list(PythonLexer().get_tokens(file(test_file()).read()))
        fmt = LatexFormatter(full=True)

        handle, pathname = tempfile.mkstemp('.tex')
        # place all output files in /tmp too
        old_wd = os.getcwd()
        os.chdir(os.path.dirname(pathname))
        tfile = os.fdopen(handle, 'w+b')
        fmt.format(tokensource, tfile)
        tfile.close()

        try:
            try:
                import subprocess
                ret = subprocess.Popen(['latex', '-interaction=nonstopmode',
                                        pathname],
                                       stdout=subprocess.PIPE).wait()
            except ImportError:
                # Python 2.3 - no subprocess module
                ret = os.popen('latex -interaction=nonstopmode "%s"'
                               % pathname).close()
                if ret == 32512: raise OSError  # not found
        except OSError:
            # latex not available
            pass
        else:
            self.failIf(ret, 'latex run reported errors')
        os.unlink(pathname)
        os.chdir(old_wd)
# -*- coding: utf-8 -*-
"""
    Pygments LaTeX formatter tests
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :copyright: 2006-2007 by Georg Brandl.
    :license: BSD, see LICENSE for more details.
"""

import os
import unittest
import tempfile

from pygments.formatters import LatexFormatter
from pygments.lexers import PythonLexer


class LatexFormatterTest(unittest.TestCase):

    def test_valid_output(self):
        tokensource = list(PythonLexer().get_tokens(file(
            os.path.join(testdir, testfile)).read()))
        fmt = LatexFormatter(full=True)

        handle, pathname = tempfile.mkstemp('.tex')
        # place all output files in /tmp too
        old_wd = os.getcwd()
        os.chdir(os.path.dirname(pathname))
        tfile = os.fdopen(handle, 'w+b')
        fmt.format(tokensource, tfile)
        tfile.close()

        try:
            try:
                import subprocess
                ret = subprocess.Popen(['latex', '-interaction=nonstopmode',
                                        pathname],
                                       stdout=subprocess.PIPE).wait()
            except ImportError:
                # Python 2.3 - no subprocess module
                ret = os.popen('latex -interaction=nonstopmode "%s"'
                               % pathname).close()
                if ret == 32512: raise OSError  # not found
        except OSError:
            # latex not available
            pass
        else:
            self.failIf(ret, 'latex run reported errors')
        os.unlink(pathname)
        os.chdir(old_wd)
bsd-2-clause
Python
07cfc39e50251384ddb647ccc7f73c98ed8cf7b9
Save model with an interval of 1000 steps
vanhuyz/CycleGAN-TensorFlow,vanhuyz/CycleGAN-TensorFlow
train.py
train.py
import tensorflow as tf
from model import CycleGAN
from reader import Reader
from datetime import datetime
import os

X_TRAIN_FILE = 'data/tfrecords/apple.tfrecords'
Y_TRAIN_FILE = 'data/tfrecords/orange.tfrecords'
BATCH_SIZE = 1


def train():
    current_time = datetime.now().strftime("%Y%m%d-%H%M")
    checkpoints_dir = "checkpoints/{}".format(current_time)
    os.makedirs(checkpoints_dir, exist_ok=True)

    graph = tf.Graph()
    cycle_gan = CycleGAN()
    with graph.as_default():
        X_reader = Reader(X_TRAIN_FILE, batch_size=BATCH_SIZE, name='X')
        Y_reader = Reader(Y_TRAIN_FILE, batch_size=BATCH_SIZE, name='Y')

        x = X_reader.feed()
        y = Y_reader.feed()

        G_loss, D_Y_loss, F_loss, D_X_loss, summary_op = cycle_gan.model(x, y)
        optimizer = cycle_gan.optimize(G_loss, D_Y_loss, F_loss, D_X_loss)

        saver = tf.train.Saver()
        train_writer = tf.summary.FileWriter(checkpoints_dir, graph)

    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            step = 0
            while not coord.should_stop():
                _, G_loss_val, D_Y_loss_val, F_loss_val, D_X_loss_val, summary = \
                    sess.run([optimizer, G_loss, D_Y_loss, F_loss, D_X_loss, summary_op])

                train_writer.add_summary(summary, step)
                train_writer.flush()

                if step % 100 == 0:
                    print('-----------Step %d:-------------' % step)
                    print('  G_loss   : {}'.format(G_loss_val))
                    print('  D_Y_loss : {}'.format(D_Y_loss_val))
                    print('  F_loss   : {}'.format(F_loss_val))
                    print('  D_X_loss : {}'.format(D_X_loss_val))

                if step % 1000 == 0:
                    save_path = saver.save(sess, checkpoints_dir + "/model.ckpt", global_step=step)
                    print("Model saved in file: %s" % save_path)

                step += 1
        except KeyboardInterrupt:
            print('Interrupted')
            coord.request_stop()
        except Exception as e:
            coord.request_stop(e)
        finally:
            save_path = saver.save(sess, checkpoints_dir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            # When done, ask the threads to stop.
            coord.request_stop()
            coord.join(threads)


if __name__ == '__main__':
    train()
import tensorflow as tf
from model import CycleGAN
from reader import Reader
from datetime import datetime
import os

X_TRAIN_FILE = 'data/tfrecords/apple.tfrecords'
Y_TRAIN_FILE = 'data/tfrecords/orange.tfrecords'
BATCH_SIZE = 1


def train():
    current_time = datetime.now().strftime("%Y%m%d-%H%M")
    checkpoints_dir = "checkpoints/{}".format(current_time)
    os.makedirs(checkpoints_dir, exist_ok=True)

    graph = tf.Graph()
    cycle_gan = CycleGAN()
    with graph.as_default():
        X_reader = Reader(X_TRAIN_FILE, batch_size=BATCH_SIZE, name='X')
        Y_reader = Reader(Y_TRAIN_FILE, batch_size=BATCH_SIZE, name='Y')

        x = X_reader.feed()
        y = Y_reader.feed()

        G_loss, D_Y_loss, F_loss, D_X_loss, summary_op = cycle_gan.model(x, y)
        optimizer = cycle_gan.optimize(G_loss, D_Y_loss, F_loss, D_X_loss)

        saver = tf.train.Saver()
        train_writer = tf.summary.FileWriter(checkpoints_dir, graph)

    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            step = 0
            while not coord.should_stop():
                _, G_loss_val, D_Y_loss_val, F_loss_val, D_X_loss_val, summary = \
                    sess.run([optimizer, G_loss, D_Y_loss, F_loss, D_X_loss, summary_op])

                train_writer.add_summary(summary, step)
                train_writer.flush()

                print('-----------Step %d:-------------' % step)
                print('  G_loss   : {}'.format(G_loss_val))
                print('  D_Y_loss : {}'.format(D_Y_loss_val))
                print('  F_loss   : {}'.format(F_loss_val))
                print('  D_X_loss : {}'.format(D_X_loss_val))

                if step % 10 == 0:
                    save_path = saver.save(sess, checkpoints_dir + "/model.ckpt")
                    print("Model saved in file: %s" % save_path)

                step += 1
        except KeyboardInterrupt:
            print('Interrupted')
            coord.request_stop()
        except Exception as e:
            coord.request_stop(e)
        finally:
            save_path = saver.save(sess, checkpoints_dir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            # When done, ask the threads to stop.
            coord.request_stop()
            coord.join(threads)


if __name__ == '__main__':
    train()
mit
Python
1970bad9d9933432154de2042c4ed74a8696b7f0
fix timeout when no options are specified
caibo2014/teuthology,ivotron/teuthology,michaelsevilla/teuthology,t-miyamae/teuthology,yghannam/teuthology,michaelsevilla/teuthology,ktdreyer/teuthology,ktdreyer/teuthology,tchaikov/teuthology,dmick/teuthology,robbat2/teuthology,ceph/teuthology,yghannam/teuthology,dmick/teuthology,caibo2014/teuthology,zhouyuan/teuthology,zhouyuan/teuthology,dreamhost/teuthology,robbat2/teuthology,tchaikov/teuthology,ceph/teuthology,ivotron/teuthology,SUSE/teuthology,dmick/teuthology,SUSE/teuthology,SUSE/teuthology,dreamhost/teuthology,t-miyamae/teuthology
teuthology/task/thrashosds.py
teuthology/task/thrashosds.py
import contextlib
import logging
import ceph_manager
from teuthology import misc as teuthology

log = logging.getLogger(__name__)

@contextlib.contextmanager
def task(ctx, config):
    """
    "Thrash" the OSDs by randomly marking them out/down (and then back
    in) until the task is ended. This loops, and every op_delay seconds
    it randomly chooses to add or remove an OSD (even odds) unless there
    are fewer than min_out OSDs out of the cluster, or more than min_in
    OSDs in the cluster.

    All commands are run on mon0 and it stops when __exit__ is called.

    The config is optional, and is a dict containing some or all of:

    min_in: (default 2) the minimum number of OSDs to keep in the
       cluster

    min_out: (default 0) the minimum number of OSDs to keep out of the
       cluster

    op_delay: (5) the length of time to sleep between changing an
       OSD's status

    clean_interval: (60) the approximate length of time to loop before
       waiting until the cluster goes clean. (In reality this is used
       to probabilistically choose when to wait, and the method used
       makes it closer to -- but not identical to -- the half-life.)

    chance_down: (0) the probability that the thrasher will mark an
       OSD down rather than marking it out. (The thrasher will not
       consider that OSD out of the cluster, since presently an OSD
       wrongly marked down will mark itself back up again.) This value
       can be either an integer (eg, 75) or a float probability (eg
       0.75).

    timeout: (360) the number of seconds to wait for the cluster
       to become clean before the task exits. If this doesn't happen,
       an exception will be raised.

    example:

    tasks:
    - ceph:
    - thrashosds:
        chance_down: 10
        op_delay: 3
        min_in: 1
        timeout: 600
    - interactive:
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'thrashosds task only accepts a dict for configuration'
    log.info('Beginning thrashosds...')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        logger=log.getChild('ceph_manager'),
        )
    thrash_proc = ceph_manager.Thrasher(
        manager,
        config,
        logger=log.getChild('thrasher')
        )
    try:
        yield
    finally:
        log.info('joining thrashosds')
        thrash_proc.do_join()
        manager.wait_till_clean(config.get('timeout', 360))
import contextlib
import logging
import ceph_manager
from teuthology import misc as teuthology

log = logging.getLogger(__name__)

@contextlib.contextmanager
def task(ctx, config):
    """
    "Thrash" the OSDs by randomly marking them out/down (and then back
    in) until the task is ended. This loops, and every op_delay seconds
    it randomly chooses to add or remove an OSD (even odds) unless there
    are fewer than min_out OSDs out of the cluster, or more than min_in
    OSDs in the cluster.

    All commands are run on mon0 and it stops when __exit__ is called.

    The config is optional, and is a dict containing some or all of:

    min_in: (default 2) the minimum number of OSDs to keep in the
       cluster

    min_out: (default 0) the minimum number of OSDs to keep out of the
       cluster

    op_delay: (5) the length of time to sleep between changing an
       OSD's status

    clean_interval: (60) the approximate length of time to loop before
       waiting until the cluster goes clean. (In reality this is used
       to probabilistically choose when to wait, and the method used
       makes it closer to -- but not identical to -- the half-life.)

    chance_down: (0) the probability that the thrasher will mark an
       OSD down rather than marking it out. (The thrasher will not
       consider that OSD out of the cluster, since presently an OSD
       wrongly marked down will mark itself back up again.) This value
       can be either an integer (eg, 75) or a float probability (eg
       0.75).

    timeout: (360) the number of seconds to wait for the cluster
       to become clean before the task exits. If this doesn't happen,
       an exception will be raised.

    example:

    tasks:
    - ceph:
    - thrashosds:
        chance_down: 10
        op_delay: 3
        min_in: 1
        timeout: 600
    - interactive:
    """
    log.info('Beginning thrashosds...')
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        logger=log.getChild('ceph_manager'),
        )
    thrash_proc = ceph_manager.Thrasher(
        manager,
        config,
        logger=log.getChild('thrasher')
        )
    try:
        yield
    finally:
        log.info('joining thrashosds')
        thrash_proc.do_join()
        manager.wait_till_clean(config.get('timeout', 360))
mit
Python
e7b7709784e105114d490eaab655a16e9842a1ed
optimize post processor shouldn't run 'call' with shell and pipe.
ui/django-thumbnails
thumbnails/post_processors.py
thumbnails/post_processors.py
import imghdr
import os
from subprocess import call
import tempfile
import uuid

from django.core.files import File


def get_or_create_temp_dir():
    temp_dir = os.path.join(tempfile.gettempdir(), 'thumbnails')
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)
    return temp_dir


def process(thumbnail_file, **kwargs):
    """
    Post processors are functions that receive file objects,
    performs necessary operations and return the results as
    file objects.
    """
    from . import conf
    for processor in conf.POST_PROCESSORS:
        processor['processor'](thumbnail_file, **processor['kwargs'])
    return thumbnail_file


def optimize(thumbnail_file, jpg_command=None, png_command=None, gif_command=None):
    """
    A post processing function to optimize file size. Accepts commands
    to optimize JPG, PNG and GIF images as arguments. Example:

    THUMBNAILS = {
        # Other options...
        'POST_PROCESSORS': [
            {
                'processor': 'thumbnails.post_processors.optimize',
                'png_command': 'optipng -force -o7 "%(filename)s"',
                'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
            },
        ],
    }
    """
    temp_dir = get_or_create_temp_dir()
    thumbnail_filename = os.path.join(temp_dir, "%s" % uuid.uuid4().hex)
    f = open(thumbnail_filename, 'wb')
    f.write(thumbnail_file.read())
    f.close()

    # Detect filetype
    filetype = imghdr.what(thumbnail_filename)

    # Construct command to optimize image based on filetype
    command = None
    if filetype == "jpg" or filetype == "jpeg":
        command = jpg_command
    elif filetype == "png":
        command = png_command
    elif filetype == "gif":
        command = gif_command

    # Run Command
    if command:
        command = command % {'filename': thumbnail_filename}
        try:
            call(command)
        except OSError:
            raise OSError('Error while optimizing %s image' % filetype)

    optimized_file = File(open(thumbnail_filename, 'rb'))
    # Call _get_size() to prevent Django < 1.5 from throwing an AttributeError.
    # This is fixed in https://github.com/django/django/commit/5c954136eaef3d98d532368deec4c19cf892f664
    # and can be removed when we stop supporting Django 1.4
    optimized_file._get_size()
    os.remove(thumbnail_filename)
    return optimized_file
import imghdr
import os
from subprocess import call, PIPE
import tempfile
import uuid

from django.core.files import File


def get_or_create_temp_dir():
    temp_dir = os.path.join(tempfile.gettempdir(), 'thumbnails')
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)
    return temp_dir


def process(thumbnail_file, **kwargs):
    """
    Post processors are functions that receive file objects,
    performs necessary operations and return the results as
    file objects.
    """
    from . import conf
    for processor in conf.POST_PROCESSORS:
        processor['processor'](thumbnail_file, **processor['kwargs'])
    return thumbnail_file


def optimize(thumbnail_file, jpg_command=None, png_command=None, gif_command=None):
    """
    A post processing function to optimize file size. Accepts commands
    to optimize JPG, PNG and GIF images as arguments. Example:

    THUMBNAILS = {
        # Other options...
        'POST_PROCESSORS': [
            {
                'processor': 'thumbnails.post_processors.optimize',
                'png_command': 'optipng -force -o7 "%(filename)s"',
                'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
            },
        ],
    }
    """
    temp_dir = get_or_create_temp_dir()
    thumbnail_filename = os.path.join(temp_dir, "%s" % uuid.uuid4().hex)
    f = open(thumbnail_filename, 'wb')
    f.write(thumbnail_file.read())
    f.close()

    # Detect filetype
    filetype = imghdr.what(thumbnail_filename)

    # Construct command to optimize image based on filetype
    command = None
    if filetype == "jpg" or filetype == "jpeg":
        command = jpg_command
    elif filetype == "png":
        command = png_command
    elif filetype == "gif":
        command = gif_command

    # Run Command
    if command:
        command = command % {'filename': thumbnail_filename}
        call(command, shell=True, stdout=PIPE)

    optimized_file = File(open(thumbnail_filename, 'rb'))
    # _get_size() is needed to prevent Django < 1.5 from throwing an AttributeError.
    # This is fixed in https://github.com/django/django/commit/5c954136eaef3d98d532368deec4c19cf892f664
    # and can be removed when we stop supporting Django 1.4
    optimized_file._get_size()
    os.remove(thumbnail_filename)
    return optimized_file
mit
Python
27e30c4172f2da79168640799188f0394b88c9ec
Fix circular import between querysets.workflow and models.domain
botify-labs/python-simple-workflow,botify-labs/python-simple-workflow
swf/models/domain.py
swf/models/domain.py
# -*- coding: utf-8 -*-
from boto.swf.exceptions import SWFResponseError, SWFDomainAlreadyExistsError

from swf.constants import REGISTERED
from swf.core import ConnectedSWFObject
from swf.exceptions import AlreadyExistsError, DoesNotExistError


class Domain(ConnectedSWFObject):
    """Simple Workflow Domain wrapper

    Params
    ------
    * name:
        * type: String
        * value: Name of the domain to register (unique)

    * retention_period
        * type: Integer
        * value: Domain's workflow executions records retention in days

    * status
        * type: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}
        * value: the domain status

    * description
        * type: String
        * value: Textual description of the domain
    """

    def __init__(self, name,
                 status=REGISTERED,
                 description=None,
                 retention_period=30, *args, **kwargs):
        super(Domain, self).__init__(*args, **kwargs)

        self.name = name
        self.status = status
        self.retention_period = retention_period
        self.description = description

    def save(self):
        """Creates the domain amazon side"""
        try:
            self.connection.register_domain(self.name,
                                            str(self.retention_period),
                                            self.description)
        except SWFDomainAlreadyExistsError:
            raise AlreadyExistsError("Domain %s already exists amazon-side" % self.name)

    def delete(self):
        """Deprecates the domain amazon side"""
        try:
            self.connection.deprecate_domain(self.name)
        except SWFResponseError as e:
            if e.error_code == 'UnknownResourceFault':
                raise DoesNotExistError("Domain %s does not exist amazon-side" % self.name)

    def workflows(self, status=REGISTERED):
        """Lists the current domain's workflows"""
        from swf.querysets.workflow import WorkflowTypeQuerySet
        qs = WorkflowTypeQuerySet(self.name)
        return qs.all(registration_status=status)

    @property
    def executions(self):
        pass

    def __repr__(self):
        return '<{} name={} status={}>'.format(
            self.__class__.__name__,
            self.name,
            self.status)
# -*- coding: utf-8 -*-
from boto.swf.exceptions import SWFResponseError, SWFDomainAlreadyExistsError

from swf.constants import REGISTERED
from swf.core import ConnectedSWFObject
from swf.querysets.workflow import WorkflowTypeQuerySet
from swf.exceptions import AlreadyExistsError, DoesNotExistError


class Domain(ConnectedSWFObject):
    """Simple Workflow Domain wrapper

    Params
    ------
    * name:
        * type: String
        * value: Name of the domain to register (unique)

    * retention_period
        * type: Integer
        * value: Domain's workflow executions records retention in days

    * status
        * type: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}
        * value: the domain status

    * description
        * type: String
        * value: Textual description of the domain
    """

    def __init__(self, name,
                 status=REGISTERED,
                 description=None,
                 retention_period=30, *args, **kwargs):
        super(Domain, self).__init__(*args, **kwargs)

        self.name = name
        self.status = status
        self.retention_period = retention_period
        self.description = description

    def save(self):
        """Creates the domain amazon side"""
        try:
            self.connection.register_domain(self.name,
                                            str(self.retention_period),
                                            self.description)
        except SWFDomainAlreadyExistsError:
            raise AlreadyExistsError("Domain %s already exists amazon-side" % self.name)

    def delete(self):
        """Deprecates the domain amazon side"""
        try:
            self.connection.deprecate_domain(self.name)
        except SWFResponseError as e:
            if e.error_code == 'UnknownResourceFault':
                raise DoesNotExistError("Domain %s does not exist amazon-side" % self.name)

    def workflows(self, status=REGISTERED):
        """Lists the current domain's workflows"""
        qs = WorkflowTypeQuerySet(self.name)
        return qs.all(registration_status=status)

    @property
    def executions(self):
        pass

    def __repr__(self):
        return '<{} name={} status={}>'.format(
            self.__class__.__name__,
            self.name,
            self.status)
mit
Python
157a09187bccfbfae9b4698159f3a889cb619dd6
Call resp.json()
molly/boston-snowbot
utils.py
utils.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015–2020 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from datetime import datetime
import os
import requests

__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
headers = {'User-Agent': 'Boston Snowbot (https://github.com/molly/boston-snowbot)'}


def log(message):
    """Write message to a logfile."""
    with open(os.path.join(__location__, "snowbot.log"), 'a') as f:
        f.write("\n" + datetime.today().strftime("%H:%M %Y-%m-%d") + " " + message)


def fetch(url, is_json = False):
    """Make a request to a URL, and handle errors as needed."""
    try:
        resp = requests.get(url, headers=headers, timeout=5)
    except requests.exceptions.Timeout:
        log("Request timed out when trying to hit {}".format(url))
    except requests.exceptions.ConnectionError:
        log("Connection error when trying to hit {}".format(url))
    except requests.exceptions.HTTPError:
        log("HTTP error when trying to hit {}".format(url))
    else:
        if is_json:
            return resp.json()
        return resp.text
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2015–2020 Molly White # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from datetime import datetime import os import requests __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) headers = {'User-Agent': 'Boston Snowbot (https://github.com/molly/boston-snowbot)'} def log(message): """Write message to a logfile.""" with open(os.path.join(__location__, "snowbot.log"), 'a') as f: f.write("\n" + datetime.today().strftime("%H:%M %Y-%m-%d") + " " + message) def fetch(url, is_json = False): """Make a request to a URL, and handle errors as needed.""" try: resp = requests.get(url, headers=headers, timeout=5) except requests.exceptions.Timeout: log("Request timed out when trying to hit {}".format(url)) except requests.exceptions.ConnectionError: log("Connection error when trying to hit {}".format(url)) except requests.exceptions.HTTPError: log("HTTP error when trying to hit {}".format(url)) else: if is_json: return resp.json return resp.text
mit
Python
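The one-character diff above is easy to miss: `resp.json` is the bound method object, while `resp.json()` is the parsed payload. A standalone sketch of the difference (the URL is an illustration):

import requests

resp = requests.get("https://httpbin.org/json", timeout=5)  # illustrative URL
print(type(resp.json))    # <class 'method'> -- what the old code returned
print(type(resp.json()))  # dict -- the decoded JSON body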
e2dd97f16f4f8223c25dbaf661863b3e7323a302
add more make errors. i now need to add context lines.
escapewindow/mozharness,kartikgupta0909/mozilla-mozharness,escapewindow/mozharness,kartikgupta0909/build-mozharness,mozilla/build-mozharness,kartikgupta0909/build-mozharness,simar7/build-mozharness,armenzg/build-mozharness,armenzg/build-mozharness,simar7/build-mozharness,lissyx/build-mozharness,mozilla/build-mozharness,lundjordan/mozharness,lundjordan/mozharness,mrrrgn/build-mozharness,lissyx/build-mozharness,kartikgupta0909/gittest,walac/build-mozharness,kartikgupta0909/gittest,walac/build-mozharness,mrrrgn/build-mozharness,kartikgupta0909/mozilla-mozharness
mozharness/base/errors.py
mozharness/base/errors.py
#!/usr/bin/env python """Generic error regexes. We could also create classes that generate these, but with the appropriate level (please don't die on any errors; please die on any warning; etc.) """ # ErrorLists {{{1 """ TODO: more of these. We could have a generic shell command error list (e.g. File not found, permission denied) that others could be based on. """ # For ssh, scp, rsync over ssh SSHErrorList=[ {'substr': 'Name or service not known', 'level': 'error'}, {'substr': 'Could not resolve hostname', 'level': 'error'}, {'substr': 'POSSIBLE BREAK-IN ATTEMPT', 'level': 'warning'}, {'substr': 'Network error:', 'level': 'error'}, {'substr': 'Access denied', 'level': 'error'}, {'substr': 'Authentication refused', 'level': 'error'}, {'substr': 'Out of memory', 'level': 'error'}, {'substr': 'Connection reset by peer', 'level': 'warning'}, {'substr': 'Host key verification failed', 'level': 'error'}, {'substr': 'command not found', 'level': 'error'}, {'substr': 'WARNING:', 'level': 'warning'}, {'substr': 'rsync error:', 'level': 'error'}, {'substr': 'Broken pipe:', 'level': 'error'}, {'substr': 'connection unexpectedly closed:', 'level': 'error'}, ] HgErrorList=[ {'regex': '^abort:', 'level': 'error'}, {'substr': 'command not found', 'level': 'error'}, {'substr': 'unknown exception encountered', 'level': 'error'}, ] PythonErrorList=[ {'substr': 'Traceback (most recent call last)', 'level': 'error'}, {'substr': 'SyntaxError: ', 'level': 'error'}, {'substr': 'TypeError: ', 'level': 'error'}, {'substr': 'NameError: ', 'level': 'error'}, {'substr': 'ZeroDivisionError: ', 'level': 'error'}, {'substr': 'command not found', 'level': 'error'}, ] # TODO determine if I've got enough from # http://www.gnu.org/software/automake/manual/make/Error-Messages.html MakefileErrorList = [ {'substr': 'No rule to make target ', 'level': 'error'}, {'regex': 'akefile.*was not found\.', 'level': 'error'}, {'regex': 'Stop\.$', 'level': 'error'}, {'regex': ':\d+: error:', 'level': 'error'}, {'regex': 'make\[\d+\]: \*\*\* \[.*\] Error \d+', 'level': 'error'}, {'substr': 'Warning: ', 'level': 'warning'}, ] # __main__ {{{1 if __name__ == '__main__': """TODO: unit tests. """ pass
#!/usr/bin/env python """Generic error regexes. We could also create classes that generate these, but with the appropriate level (please don't die on any errors; please die on any warning; etc.) """ # ErrorLists {{{1 """ TODO: more of these. We could have a generic shell command error list (e.g. File not found, permission denied) that others could be based on. """ # For ssh, scp, rsync over ssh SSHErrorList=[ {'substr': 'Name or service not known', 'level': 'error'}, {'substr': 'Could not resolve hostname', 'level': 'error'}, {'substr': 'POSSIBLE BREAK-IN ATTEMPT', 'level': 'warning'}, {'substr': 'Network error:', 'level': 'error'}, {'substr': 'Access denied', 'level': 'error'}, {'substr': 'Authentication refused', 'level': 'error'}, {'substr': 'Out of memory', 'level': 'error'}, {'substr': 'Connection reset by peer', 'level': 'warning'}, {'substr': 'Host key verification failed', 'level': 'error'}, {'substr': 'command not found', 'level': 'error'}, {'substr': 'WARNING:', 'level': 'warning'}, {'substr': 'rsync error:', 'level': 'error'}, {'substr': 'Broken pipe:', 'level': 'error'}, {'substr': 'connection unexpectedly closed:', 'level': 'error'}, ] HgErrorList=[ {'regex': '^abort:', 'level': 'error'}, {'substr': 'command not found', 'level': 'error'}, {'substr': 'unknown exception encountered', 'level': 'error'}, ] PythonErrorList=[ {'substr': 'Traceback (most recent call last)', 'level': 'error'}, {'substr': 'SyntaxError: ', 'level': 'error'}, {'substr': 'TypeError: ', 'level': 'error'}, {'substr': 'NameError: ', 'level': 'error'}, {'substr': 'ZeroDivisionError: ', 'level': 'error'}, {'substr': 'command not found', 'level': 'error'}, ] # TODO determine if I've got enough from # http://www.gnu.org/software/automake/manual/make/Error-Messages.html MakefileErrorList = [ {'substr': 'No rule to make target ', 'level': 'error'}, {'regex': 'akefile.*was not found\.', 'level': 'error'}, {'regex': 'Stop\.$', 'level': 'error'}, ] # __main__ {{{1 if __name__ == '__main__': """TODO: unit tests. """ pass
mpl-2.0
Python
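A sketch of how an error list in this format might be consumed; the scanner function is hypothetical, but it honors the `substr`/`regex`/`level` keys used in the record above.

import re

def classify_line(line, error_list):
    """Return the level of the first matching check, or None (sketch)."""
    for check in error_list:
        if 'substr' in check and check['substr'] in line:
            return check['level']
        if 'regex' in check and re.search(check['regex'], line):
            return check['level']
    return None

MakefileErrorList = [
    {'substr': 'No rule to make target ', 'level': 'error'},
    {'regex': 'Stop\\.$', 'level': 'error'},
]
print(classify_line("make: *** No rule to make target 'all'.  Stop.",
                    MakefileErrorList))  # -> error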
c967f59da33dec46ccbe73d7e7878e01715da236
Add docstrings and comments to video module
richgieg/RichEmu86
video.py
video.py
import graphics class VideoController(): """Represents a computer system's video controller.""" def power_on(self): """Powers on this video controller.""" print("VideoController.power_on()") self._create_terminal_window() def _create_terminal_window(self): # Creates the terminal window using John Zelle's graphics module. win = graphics.GraphWin("RichEmu86", 890, 408) win.setBackground("black") s = "RichEmu86 " * 8 i = 0 x = 446 y = 12 height = 16 fontSize = 14 for i in range(0, 25): t = graphics.Text(graphics.Point(x, y), s) t.setSize(fontSize) t.setFace("courier") t.setTextColor("white") t.draw(win) y = y + height win.getMouse() win.close()
import graphics class VideoController(): def power_on(self): print("VideoController.power_on()") self._create_terminal_window() def _create_terminal_window(self): win = graphics.GraphWin("RichEmu86", 890, 408) win.setBackground("black") s = "RichEmu86 " * 8 i = 0 x = 446 y = 12 height = 16 fontSize = 14 for i in range(0, 25): t = graphics.Text(graphics.Point(x, y), s) t.setSize(fontSize) t.setFace("courier") t.setTextColor("white") t.draw(win) y = y + height win.getMouse() win.close()
mit
Python
c705ef83607e09b2ed6e2b8d14aa6a6a7f9f57ea
Update __init__.py
selvakarthik21/newspaper,selvakarthik21/newspaper
newspaperdemo/__init__.py
newspaperdemo/__init__.py
from flask import Flask, request, render_template, redirect, url_for from newspaper import Article from xml.etree import ElementTree app = Flask(__name__) # Debug logging import logging import sys # Defaults to stdout logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) try: log.info('Logging to console') except: _, ex, _ = sys.exc_info() log.error(ex.message) @app.route('/') def index(): return render_template('index.html') @app.route('/articles/show') def show_article(): url_to_clean = request.args.get('url_to_clean') if not url_to_clean: return redirect(url_for('index')) article = Article(url_to_clean) article.download() article.parse() try: html_string = ElementTree.tostring(article.clean_top_node) except: html_string = "Error converting html to string." try: article.nlp() except: log.error("Couldn't process with NLP") a = { 'html': html_string, 'authors': str(', '.join(article.authors)), 'title': article.title, 'text': article.text, 'top_image': article.top_image, 'videos': str(', '.join(article.movies)), 'keywords': str(', '.join(article.keywords)), 'summary': article.summary } return render_template('article/index.html', article=a, url=url_to_clean)
from flask import Flask, request, render_template, redirect, url_for, json from newspaper import Article from xml.etree import ElementTree app = Flask(__name__) # Debug logging import logging import sys # Defaults to stdout logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) try: log.info('Logging to console') except: _, ex, _ = sys.exc_info() log.error(ex.message) @app.route('/') def index(): return render_template('index.html') @app.route('/scrape') def show_article(): url_to_clean = request.args.get('url_to_clean') if not url_to_clean: a= { 'authors': '', 'title': '', 'text': '', 'keywords': '', 'summary': '' } response = app.response_class( response=json.dumps(a), status=200, mimetype='application/json' ) return response article = Article(url_to_clean) article.download() article.parse() try: html_string = ElementTree.tostring(article.clean_top_node) except: html_string = "Error converting html to string." try: article.nlp() except: log.error("Couldn't process with NLP") a = { 'authors': str(', '.join(article.authors)), 'title': article.title, 'text': article.text, 'keywords': str(', '.join(article.keywords)), 'summary': article.summary } response = app.response_class( response=json.dumps(a), status=200, mimetype='application/json' ) return response
mit
Python
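A sketch of calling the `/articles/show` route defined above once the Flask app is running; the host, port, and article URL are illustrations.

import requests

resp = requests.get(
    "http://localhost:5000/articles/show",   # default Flask dev server; illustrative
    params={"url_to_clean": "https://example.com/some-article"},
)
print(resp.status_code)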
844270b6eee2eabfaa1b43c73ed8ffcab833586f
Bump to version 0.19.4
reubano/meza,reubano/tabutils,reubano/meza,reubano/meza,reubano/tabutils,reubano/tabutils
tabutils/__init__.py
tabutils/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: sw=4:ts=4:expandtab """ tabutils ~~~~~~~~ Provides methods for reading and processing data from tabular formatted files Attributes: CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal strings. ENCODING (str): Default file encoding. DEFAULT_DATETIME (obj): Default datetime object """ from __future__ import ( absolute_import, division, print_function, with_statement, unicode_literals) from datetime import datetime as dt __title__ = 'tabutils' __package_name__ = 'tabutils' __author__ = 'Reuben Cummings' __description__ = 'tabular data utility methods' __email__ = '[email protected]' __version__ = '0.19.4' __license__ = 'MIT' __copyright__ = 'Copyright 2015 Reuben Cummings' CURRENCIES = ('$', '£', '€') ENCODING = 'utf-8' DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: sw=4:ts=4:expandtab """ tabutils ~~~~~~~~ Provides methods for reading and processing data from tabular formatted files Attributes: CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal strings. ENCODING (str): Default file encoding. DEFAULT_DATETIME (obj): Default datetime object """ from __future__ import ( absolute_import, division, print_function, with_statement, unicode_literals) from datetime import datetime as dt __title__ = 'tabutils' __package_name__ = 'tabutils' __author__ = 'Reuben Cummings' __description__ = 'tabular data utility methods' __email__ = '[email protected]' __version__ = '0.19.3' __license__ = 'MIT' __copyright__ = 'Copyright 2015 Reuben Cummings' CURRENCIES = ('$', '£', '€') ENCODING = 'utf-8' DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
mit
Python
c7665ba1988215fd27f0eb7f547a34104d8b921f
add MA
harry0519/nsnqt
nsnqtlib/tkpi/momentum.py
nsnqtlib/tkpi/momentum.py
import numpy as np import pandas as pd #Moving average def MA(data=[], timeperiod=10): ma = [] ma_a = pd.DataFrame(data,columns=['MA']).rolling(window=timeperiod).mean() for i in ma_a['MA']: ma.append(i) return ma #MACD related indicators #Moving average: there will be unstable period in the beginning #input: list of close price def EMA(close=[], timeperiod=10): ema = [] current = close[0] for i in close: current = (current*(timeperiod-1)+ 2*i)/(timeperiod+1) ema.append(current) return ema def DIF(close=[], fastperiod=12, slowperiod=26): dif = [] s_ema = EMA(close, slowperiod) f_ema = EMA(close, fastperiod) for i in range(len(close)): dif.append(f_ema[i]-s_ema[i]) return dif def DEA(close=[], fastperiod=12, slowperiod=26, signalperiod=9): dif = DIF(close,fastperiod,slowperiod) return EMA(dif, signalperiod) def MACD(close=[], fastperiod=12, slowperiod=26, signalperiod=9): macd = [] dif = DIF(close,fastperiod,slowperiod) dea = EMA(dif, signalperiod) for i in range(len(close)): macd.append(2*(dif[i]-dea[i])) return macd # 夏普比率: 平均收益率/收益率标准差 #Sharpe Ratio: Sharpe ratio = Excess return / Standard deviation #input: # erp: Portfolio expected return rate # within fixed timeperiod (e.g.yearly/monthly) # rf: risk-free/expect rate of interest def sharpe(erp=[], rf=0): a = np.array(erp) return (np.mean(a)-rf)/np.std(a,ddof=1) #最大回撤率 #Max draw down #input: # worth: net worth ratio history # period: To be added.... # >0 means short-term MADD within input period -> worth list def MDD(worth=[],period=0): current_mdd = mdd = 0 for i in range(len(worth)): if period>0 and i>period: j = i-period else: j = 0 if i > 0: current_mdd = max(worth[int(j):int(i)])-worth[i] if mdd < current_mdd: mdd = current_mdd return mdd #To be added: #DMI related indicators #KDJ #RSI #BIAS if __name__ == '__main__': test = [11.9,10.8,20.0,9.1,7.9,4.1,31.2,16,29.9,15.1,11,12] print(MA(test,3))
import numpy as np #MACD related indicators #Moving average: there will be unstable period in the beginning #input: list of close price def EMA(close=[], timeperiod=10): ema = [] current = close[0] for i in close: current = (current*(timeperiod-1)+ 2*i)/(timeperiod+1) ema.append(current) return ema def DIF(close=[], fastperiod=12, slowperiod=26): dif = [] s_ema = EMA(close, slowperiod) f_ema = EMA(close, fastperiod) for i in range(len(close)): dif.append(f_ema[i]-s_ema[i]) return dif def DEA(close=[], fastperiod=12, slowperiod=26, signalperiod=9): dif = DIF(close,fastperiod,slowperiod) return EMA(dif, signalperiod) def MACD(close=[], fastperiod=12, slowperiod=26, signalperiod=9): macd = [] dif = DIF(close,fastperiod,slowperiod) dea = EMA(dif, signalperiod) for i in range(len(close)): macd.append(2*(dif[i]-dea[i])) return macd # 夏普比率: 平均收益率/收益率标准差 #Sharpe Ratio: Sharpe ratio = Excess return / Standard deviation #input: # erp: Portfolio expected return rate # within fixed timeperiod (e.g.yearly/monthly) # rf: risk-free/expect rate of interest def sharpe(erp=[], rf=0): a = np.array(erp) return (np.mean(a)-rf)/np.std(a,ddof=1) #最大回撤率 #Max draw down #input: # worth: net worth ratio history # period: To be added.... # >0 means short-term MADD within input period -> worth list def MDD(worth=[],period=0): current_mdd = mdd = 0 for i in range(len(worth)): if period>0 and i>period: j = i-period else: j = 0 if i > 0: current_mdd = max(worth[int(j):int(i)])-worth[i] if mdd < current_mdd: mdd = current_mdd return mdd #To be added: #DMI related indicators #KDJ #RSI #BIAS
bsd-2-clause
Python
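A short usage sketch for the risk metrics above; the import path is taken from the record, and the sample series are made up.

from nsnqtlib.tkpi.momentum import MA, sharpe, MDD  # path from the record above

monthly_returns = [0.02, -0.01, 0.03, 0.01, -0.02, 0.04]  # illustrative data
print(sharpe(monthly_returns, rf=0.005))  # mean excess return / sample std dev

net_worth = [1.0, 1.1, 1.05, 1.2, 0.9, 1.0]
print(MDD(net_worth))  # 0.3: peak of 1.2 followed by a trough of 0.9

print(MA([1, 2, 3, 4, 5], timeperiod=2))  # [nan, 1.5, 2.5, 3.5, 4.5]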
1045f8a2cedf86a401a2868f4092f5d416e8f3e9
Bump to 0.26
Calysto/octave_kernel,Calysto/octave_kernel
octave_kernel/__init__.py
octave_kernel/__init__.py
"""An Octave kernel for Jupyter""" __version__ = '0.26.0'
"""An Octave kernel for Jupyter""" __version__ = '0.25.1'
bsd-3-clause
Python
dcc89b0d4757a4d2e0a541172ce3ded1f7e92014
Create CDAP's HDFS directory
cdapio/cdap-ambari-service,cdapio/cdap-ambari-service
package/scripts/master.py
package/scripts/master.py
import sys import ambari_helpers as helpers from resource_management import * class Master(Script): def install(self, env): print 'Install the CDAP Master'; import params self.configure(env) # Add repository file helpers.add_repo(params.files_dir + params.repo_file, params.os_repo_dir) # Install any global packages self.install_packages(env) # Install package helpers.package('cdap-master') def start(self, env): print 'Start the CDAP Master'; import params self.configure(env) create_hdfs_dir(params.hdfs_namespace, params.hdfs_user, 755) Execute('service cdap-master start') def stop(self, env): print 'Stop the CDAP Master'; import params self.configure(env) Execute('service cdap-master stop') def status(self, env): print 'Status of the CDAP Master'; import params self.configure(env) Execute('service cdap-master status') def configure(self, env): print 'Configure the CDAP Master'; if __name__ == "__main__": Master().execute()
import sys import ambari_helpers as helpers from resource_management import * class Master(Script): def install(self, env): print 'Install the CDAP Master'; import params self.configure(env) # Add repository file helpers.add_repo(params.files_dir + params.repo_file, params.os_repo_dir) # Install any global packages self.install_packages(env) # Install package helpers.package('cdap-master') def start(self, env): print 'Start the CDAP Master'; import params self.configure(env) Execute('service cdap-master start') def stop(self, env): print 'Stop the CDAP Master'; import params self.configure(env) Execute('service cdap-master stop') def status(self, env): print 'Status of the CDAP Master'; import params self.configure(env) Execute('service cdap-master status') def configure(self, env): print 'Configure the CDAP Master'; if __name__ == "__main__": Master().execute()
apache-2.0
Python
00bfbae48af80fd12db31aecc663373dce3fa1a8
Format code
megaprojectske/megaprojects.co.ke,megaprojectske/megaprojects.co.ke,megaprojectske/megaprojects.co.ke
megaprojects/core/models.py
megaprojects/core/models.py
import uuid from django.conf import settings from django.db import models class TimeStampedModel(models.Model): """ An abstract base class model that provides self-updating ``created`` and ``modified`` fields. """ created = models.DateTimeField( auto_now_add=True, help_text='The time when this entity was created.') changed = models.DateTimeField( auto_now=True, help_text='The time when this entity was most recently saved.') class Meta: abstract = True class BaseModel(TimeStampedModel): """ Abstract model for main entities. Provides a ``title`` and ``uuid`` field. """ title = models.CharField( max_length=255, help_text='The title of this entity, always treated as non-markup plain text.') uuid = models.CharField('UUID', max_length=255, unique=True, help_text='Unique Key: Universally unique identifier for this entity.') def __unicode__(self): return self.title def save(self, *args, **kwargs): if not self.uuid: self.uuid = uuid.uuid4() super(BaseModel, self).save(*args, **kwargs) class Meta: abstract = True class AuthorModel(BaseModel): """ Builds upon ``BaseModel`` by adding a ``author`` field. """ author = models.ForeignKey( settings.AUTH_USER_MODEL, help_text='The user that owns this entity; Initially, this is the user that created it.') class Meta: abstract = True class ImageModel(BaseModel): """ Abstract base class model for Image fields. """ alt = models.CharField(max_length=255, blank=True, help_text='Alternative image text, for the image\'s \'alt\' attribute.') status = models.BooleanField( default=True, help_text='Boolean indicating whether the entity is published (visible to non-administrators).') reviewed = models.BooleanField( help_text='Object has been reviewed (quality control).') thumbnail = models.BooleanField(help_text='Boolean indicating whether the entity is the main model thumbnail.') def __unicode__(self): return self.uuid class Meta: abstract = True
import uuid from django.conf import settings from django.db import models class TimeStampedModel(models.Model): """ An abstract base class model that provides self-updating ``created`` and ``modified`` fields. """ created = models.DateTimeField( auto_now_add=True, help_text='The time when this entity was created.') changed = models.DateTimeField( auto_now=True, help_text='The time when this entity was most recently saved.') class Meta: abstract = True class BaseModel(TimeStampedModel): """ Abstract model for main entities. Provides a ``title`` and ``uuid`` field. """ title = models.CharField( max_length=255, help_text='The title of this entity, always treated as non-markup plain text.') uuid = models.CharField('UUID', max_length=255, unique=True, help_text='Unique Key: Universally unique identifier for this entity.') def __unicode__(self): return self.title def save(self, *args, **kwargs): if not self.uuid: self.uuid = uuid.uuid4() super(BaseModel, self).save(*args, **kwargs) class Meta: abstract = True class AuthorModel(BaseModel): """ Builds upon ``BaseModel`` by adding a ``author`` field. """ author = models.ForeignKey( settings.AUTH_USER_MODEL, help_text='The user that owns this entity; Initially, this is the user that created it.') class Meta: abstract = True class ImageModel(BaseModel): """ Abstract base class model for Image fields. """ alt = models.CharField(max_length=255, blank=True, help_text='Alternative image text, for the image\'s \'alt\' attribute.') status = models.BooleanField( default=True, help_text='Boolean indicating whether the entity is published (visible to non-administrators).') reviewed = models.BooleanField( help_text='Object has been reviewed (quality control).') thumbnail = models.BooleanField(help_text='Set as main object thumbnail.') def __unicode__(self): return self.uuid class Meta: abstract = True
apache-2.0
Python
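A sketch of concrete models built on the abstract bases above; the import path and field names are hypothetical, and the `on_delete`-less ForeignKey matches the pre-Django-2.0 style used in the record.

from django.db import models
from core.models import AuthorModel, ImageModel  # hypothetical import path

class Project(AuthorModel):
    """Inherits title, uuid, author, created and changed."""
    body = models.TextField(blank=True)

class ProjectImage(ImageModel):
    """Inherits alt, status, reviewed and thumbnail."""
    project = models.ForeignKey('Project')
    image = models.ImageField(upload_to='projects')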
cdcae64d095a7cbab99e439bc37ee7009fe5c482
Mark version 0.3.1
sebasmagri/mezzanine_polls
mezzanine_polls/__init__.py
mezzanine_polls/__init__.py
__version__ = 0.3.1
__version__ = 0.3
bsd-2-clause
Python
afaa2dd700a7474b81b981b266ee5aaa977d28d5
Update team_rank_request.py to use response.json()
prcutler/nflpool,prcutler/nflpool
team_rank_request.py
team_rank_request.py
import requests from requests.auth import HTTPBasicAuth import secret import json x = 0 y = 0 parameters = 'teamstats' response = requests.get( 'https://www.mysportsfeeds.com/api/feed/pull/nfl/2016-2017-regular/playoff_team_standings.json?teamstats', auth=HTTPBasicAuth(secret.msf_username, secret.msf_pw)) data = response.json() # rawdata = response.content # data = json.loads(rawdata.decode()) teamlist = data["playoffteamstandings"]["conference"][0]["teamentry"] for afc_team_list in teamlist: afc_team_name = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["Name"] afc_team_city = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["City"] afc_team_id = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["ID"] afc_team_abbr = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["Abbreviation"] afc_rank = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["rank"] print((afc_team_name), (afc_team_city), (afc_team_id), (afc_team_abbr), afc_rank) x = x + 1 for nfc_team_list in teamlist: nfc_team_name = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["Name"] nfc_team_city = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["City"] nfc_team_id = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["ID"] nfc_team_abbr = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["Abbreviation"] nfc_rank = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["rank"] y = y + 1 print((nfc_team_name), (nfc_team_city), (nfc_team_id), (nfc_team_abbr), nfc_rank) last_update = data["playoffteamstandings"]["lastUpdatedOn"] print(last_update)
import requests from requests.auth import HTTPBasicAuth import secret import json x = 0 y = 0 parameters = 'teamstats' response = requests.get( 'https://www.mysportsfeeds.com/api/feed/pull/nfl/2016-2017-regular/playoff_team_standings.json?teamstats', auth=HTTPBasicAuth(secret.msf_username, secret.msf_pw)) rawdata = response.content data = json.loads(rawdata.decode()) teamlist = data["playoffteamstandings"]["conference"][0]["teamentry"] for afc_team_list in teamlist: afc_team_name = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["Name"] afc_team_city = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["City"] afc_team_id = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["ID"] afc_team_abbr = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["team"]["Abbreviation"] afc_rank = data["playoffteamstandings"]["conference"][0]["teamentry"][x]["rank"] print((afc_team_name), (afc_team_city), (afc_team_id), (afc_team_abbr), afc_rank) x = x + 1 for nfc_team_list in teamlist: nfc_team_name = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["Name"] nfc_team_city = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["City"] nfc_team_id = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["ID"] nfc_team_abbr = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["team"]["Abbreviation"] nfc_rank = data["playoffteamstandings"]["conference"][1]["teamentry"][y]["rank"] y = y + 1 print((nfc_team_name), (nfc_team_city), (nfc_team_id), (nfc_team_abbr), nfc_rank) last_update = data["playoffteamstandings"]["lastUpdatedOn"] print(last_update)
mit
Python
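A sketch of the AFC loop above without the manual index counter; `data` is the parsed response from the record and the field names are taken verbatim from it.

for entry in data["playoffteamstandings"]["conference"][0]["teamentry"]:
    team = entry["team"]
    print(team["Name"], team["City"], team["ID"], team["Abbreviation"],
          entry["rank"])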
f326dd569e9240c2b883e9c5f436728f321a0c61
Add TransactionMiddleware
allanlei/django-multitenant
tenant/middleware.py
tenant/middleware.py
from django.core.urlresolvers import resolve from django.shortcuts import get_object_or_404 from django.db import transaction from tenant.models import Tenant from tenant.utils import connect_tenant_provider, disconnect_tenant_provider class TenantMiddleware(object): def process_request(self, request): request.tenant = None name = resolve(request.path).kwargs.get('tenant') or request.GET.get('tenant') if name: tenant = get_object_or_404(Tenant, name=name) request.tenant = tenant connect_tenant_provider(request, tenant.name) def process_response(self, request, response): disconnect_tenant_provider(request) request.tenant = None return response class TransactionMiddleware(object): def get_tenant(self, request): tenant = getattr(request, 'tenant', None) if tenant: return tenant.ident def process_request(self, request): """Enters transaction management""" transaction.enter_transaction_management(using=self.get_tenant(request)) transaction.managed(True, using=self.get_tenant(request)) def process_exception(self, request, exception): """Rolls back the database and leaves transaction management""" if transaction.is_dirty(using=self.get_tenant(request)): transaction.rollback(using=self.get_tenant(request)) transaction.leave_transaction_management(using=self.get_tenant(request)) def process_response(self, request, response): """Commits and leaves transaction management.""" if transaction.is_managed(using=self.get_tenant(request)): if transaction.is_dirty(using=self.get_tenant(request)): transaction.commit(using=self.get_tenant(request)) transaction.leave_transaction_management(using=self.get_tenant(request)) return response
from django.core.urlresolvers import resolve from django.shortcuts import get_object_or_404 from tenant.models import Tenant from tenant.utils import connect_tenant_provider, disconnect_tenant_provider class TenantMiddleware(object): def process_request(self, request): request.tenant = None name = resolve(request.path).kwargs.get('tenant') or request.GET.get('tenant') if name: tenant = get_object_or_404(Tenant, name=name) request.tenant = tenant connect_tenant_provider(request, tenant.name) def process_response(self, request, response): disconnect_tenant_provider(request) request.tenant = None return response
bsd-3-clause
Python
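A sketch of wiring both middlewares into settings (old-style MIDDLEWARE_CLASSES, matching the process_request/process_response hooks used above). TenantMiddleware must come first so request.tenant is resolved before the transaction is opened on that database; on the response path Django runs the hooks in reverse, so the commit happens while request.tenant is still set.

MIDDLEWARE_CLASSES = (
    'tenant.middleware.TenantMiddleware',        # resolves request.tenant first
    'tenant.middleware.TransactionMiddleware',   # then opens a transaction on that DB
    # ... rest of the stack ...
)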
5168c98ea9b903a06cb52c79da81fe598abcb570
use correct import
missionpinball/mpf,missionpinball/mpf
mpf/devices/shot_profile.py
mpf/devices/shot_profile.py
"""Shot profiles.""" from mpf.core.mode import Mode from mpf.core.system_wide_device import SystemWideDevice from mpf.core.mode_device import ModeDevice class ShotProfile(ModeDevice, SystemWideDevice): """A shot profile.""" config_section = 'shot_profiles' collection = 'shot_profiles' class_label = 'shot_profile' def device_removed_from_mode(self, mode: Mode) -> None: """Remove from mode.""" pass
"""Shot profiles.""" from mpfmc.core.mode import Mode from mpf.core.system_wide_device import SystemWideDevice from mpf.core.mode_device import ModeDevice class ShotProfile(ModeDevice, SystemWideDevice): """A shot profile.""" config_section = 'shot_profiles' collection = 'shot_profiles' class_label = 'shot_profile' def device_removed_from_mode(self, mode: Mode) -> None: """Remove from mode.""" pass
mit
Python
6e6e2e03da2f4ef141b51843ca16fdb52f0770ca
Use tokenized no-reply address in send_test_email.
dhcrzf/zulip,timabbott/zulip,hackerkid/zulip,brainwane/zulip,showell/zulip,synicalsyntax/zulip,punchagan/zulip,timabbott/zulip,synicalsyntax/zulip,andersk/zulip,hackerkid/zulip,jackrzhang/zulip,shubhamdhama/zulip,jackrzhang/zulip,showell/zulip,synicalsyntax/zulip,eeshangarg/zulip,shubhamdhama/zulip,synicalsyntax/zulip,brainwane/zulip,tommyip/zulip,hackerkid/zulip,dhcrzf/zulip,zulip/zulip,eeshangarg/zulip,rishig/zulip,tommyip/zulip,tommyip/zulip,zulip/zulip,punchagan/zulip,kou/zulip,eeshangarg/zulip,punchagan/zulip,shubhamdhama/zulip,brainwane/zulip,andersk/zulip,rht/zulip,rht/zulip,brainwane/zulip,zulip/zulip,punchagan/zulip,showell/zulip,zulip/zulip,rishig/zulip,jackrzhang/zulip,rht/zulip,dhcrzf/zulip,kou/zulip,timabbott/zulip,dhcrzf/zulip,tommyip/zulip,showell/zulip,dhcrzf/zulip,jackrzhang/zulip,brainwane/zulip,tommyip/zulip,kou/zulip,zulip/zulip,brainwane/zulip,shubhamdhama/zulip,synicalsyntax/zulip,jackrzhang/zulip,timabbott/zulip,kou/zulip,shubhamdhama/zulip,jackrzhang/zulip,showell/zulip,shubhamdhama/zulip,kou/zulip,hackerkid/zulip,eeshangarg/zulip,kou/zulip,rht/zulip,zulip/zulip,rishig/zulip,hackerkid/zulip,timabbott/zulip,eeshangarg/zulip,rishig/zulip,punchagan/zulip,rishig/zulip,rishig/zulip,brainwane/zulip,punchagan/zulip,tommyip/zulip,eeshangarg/zulip,andersk/zulip,andersk/zulip,shubhamdhama/zulip,eeshangarg/zulip,dhcrzf/zulip,showell/zulip,andersk/zulip,hackerkid/zulip,rht/zulip,synicalsyntax/zulip,andersk/zulip,hackerkid/zulip,dhcrzf/zulip,rht/zulip,punchagan/zulip,synicalsyntax/zulip,kou/zulip,timabbott/zulip,tommyip/zulip,rht/zulip,jackrzhang/zulip,andersk/zulip,rishig/zulip,zulip/zulip,timabbott/zulip,showell/zulip
zerver/management/commands/send_test_email.py
zerver/management/commands/send_test_email.py
from typing import Any from django.conf import settings from django.core.mail import mail_admins, mail_managers, send_mail from django.core.management import CommandError from django.core.management.commands import sendtestemail from zerver.lib.send_email import FromAddress class Command(sendtestemail.Command): def handle(self, *args: Any, **kwargs: str) -> None: if settings.WARN_NO_EMAIL: raise CommandError("Outgoing email not yet configured, see\n " "https://zulip.readthedocs.io/en/latest/production/email.html") message = ("Success! If you receive this message, you've " "successfully configured sending email from your " "Zulip server. Remember that you need to restart " "the Zulip server with /home/zulip/deployments/current/scripts/restart-server " "after changing the settings in /etc/zulip before your changes will take effect.") send_mail("Zulip email test", message, FromAddress.SUPPORT, kwargs['email']) send_mail("Zulip noreply email test", message, FromAddress.tokenized_no_reply_address(), kwargs['email']) if kwargs['managers']: mail_managers("Zulip manager email test", "This email was sent to the site managers.") if kwargs['admins']: mail_admins("Zulip admins email test", "This email was sent to the site admins.")
from typing import Any from django.conf import settings from django.core.mail import mail_admins, mail_managers, send_mail from django.core.management import CommandError from django.core.management.commands import sendtestemail from zerver.lib.send_email import FromAddress class Command(sendtestemail.Command): def handle(self, *args: Any, **kwargs: str) -> None: if settings.WARN_NO_EMAIL: raise CommandError("Outgoing email not yet configured, see\n " "https://zulip.readthedocs.io/en/latest/production/email.html") message = ("Success! If you receive this message, you've " "successfully configured sending email from your " "Zulip server. Remember that you need to restart " "the Zulip server with /home/zulip/deployments/current/scripts/restart-server " "after changing the settings in /etc/zulip before your changes will take effect.") send_mail("Zulip email test", message, FromAddress.SUPPORT, kwargs['email']) send_mail("Zulip noreply email test", message, FromAddress.NOREPLY, kwargs['email']) if kwargs['managers']: mail_managers("Zulip manager email test", "This email was sent to the site managers.") if kwargs['admins']: mail_admins("Zulip admins email test", "This email was sent to the site admins.")
apache-2.0
Python
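A sketch of invoking the command above programmatically; it assumes a configured Django settings module, the recipient address is an illustration, and the managers/admins options come from Django's sendtestemail base command that this class extends.

from django.core.management import call_command

# Sketch: requires Django settings to be configured; raises CommandError
# if outgoing email is not yet set up (see WARN_NO_EMAIL above).
call_command('send_test_email', '[email protected]',
             managers=True, admins=True)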
a971b84541b991bbc14be73e94b633c88edcd567
Remove unused vars
PolyFloyd/ledcubesim,PolyFloyd/ledcubesim,PolyFloyd/ledcubesim
programs/ledcube/audio.py
programs/ledcube/audio.py
# # Copyright (c) 2014 PolyFloyd # import numpy.fft import os class Source: def get_spectrum(self, signal): signal = numpy.array([(s + 1) / 2 for s in signal], dtype=float) spectrum = numpy.abs(numpy.fft.rfft(signal)) freqs = numpy.fft.fftfreq(spectrum.size, 1 / self.get_sample_rate()) spectrum = spectrum[1:] return (spectrum, freqs) def get_input(self): return self.input def set_input(self, input): if type(input) == str: self.input = os.fdopen(os.open(input, os.O_RDONLY), 'rb') else: self.input = input def get_signal(self, seconds): return [self.get_next_sample() for i in range(0, int(self.get_sample_rate() * seconds))] def get_next_sample(self): pass # virtual def get_sample_rate(self): pass # virtual class PCMSource(Source): def __init__(self, input_file, sample_rate, sample_bits, sample_endianness='little', sample_sign='signed'): assert(sample_endianness == 'little' or sample_endianness == 'big') assert(sample_sign == 'signed' or sample_sign == 'unsigned') self.set_input(input_file) self.sample_rate = sample_rate self.sample_bits = sample_bits self.sample_endianness = sample_endianness self.sample_sign = sample_sign def sample_from_raw_data(self, raw_data): intval = int.from_bytes(raw_data, self.sample_endianness, signed=self.sample_sign == 'signed') return intval / (2 ** (len(raw_data) * 8 - 1)) def get_next_sample(self): return self.sample_from_raw_data(self.get_input().read(self.sample_bits // 8)) def get_sample_rate(self): return self.sample_rate
# # Copyright (c) 2014 PolyFloyd # import io import numpy.fft import os import pyaudio class Source: def get_spectrum(self, signal): n = len(signal) signal = numpy.array([(s + 1) / 2 for s in signal], dtype=float) spectrum = numpy.abs(numpy.fft.rfft(signal)) freqs = numpy.fft.fftfreq(spectrum.size, 1 / self.get_sample_rate()) spectrum = spectrum[1:] return (spectrum, freqs) def get_input(self): return self.input def set_input(self, input): if type(input) == str: self.input = os.fdopen(os.open(input, os.O_RDONLY), 'rb') else: self.input = input def get_signal(self, seconds): return [self.get_next_sample() for i in range(0, int(self.get_sample_rate() * seconds))] def get_next_sample(self): pass # virtual def get_sample_rate(self): pass # virtual class PCMSource(Source): def __init__(self, input_file, sample_rate, sample_bits, sample_endianness='little', sample_sign='signed'): assert(sample_endianness == 'little' or sample_endianness == 'big') assert(sample_sign == 'signed' or sample_sign == 'unsigned') self.set_input(input_file) self.sample_rate = sample_rate self.sample_bits = sample_bits self.sample_endianness = sample_endianness self.sample_sign = sample_sign def sample_from_raw_data(self, raw_data): intval = int.from_bytes(raw_data, self.sample_endianness, signed=self.sample_sign == 'signed') return intval / (2 ** (len(raw_data) * 8 - 1)) def get_next_sample(self): return self.sample_from_raw_data(self.get_input().read(self.sample_bits // 8)) def get_sample_rate(self): return self.sample_rate
mit
Python
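A usage sketch for PCMSource above: read one second of 16-bit little-endian signed PCM and take its spectrum. The file path is an illustration and the import path is an assumption inferred from the repo layout.

from ledcube.audio import PCMSource  # hypothetical import path

src = PCMSource('capture.raw', sample_rate=44100, sample_bits=16)  # illustrative file
signal = src.get_signal(1.0)                # one second of samples in [-1, 1)
spectrum, freqs = src.get_spectrum(signal)
print(len(signal), len(spectrum))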
50628685c310703fb24f266dfd4d72b666eecfa4
Update version to 1.0.0 (not yet tagged)
desihub/desisurvey,desihub/desisurvey
py/desisurvey/_version.py
py/desisurvey/_version.py
__version__ = '1.0.0'
__version__ = '0.8.2.dev415'
bsd-3-clause
Python
5934d94c9644eaea850a27773db5890b68078477
Load all api items
alexandermendes/pybossa-analyst,alexandermendes/pybossa-analyst,alexandermendes/pybossa-analyst,LibCrowds/libcrowds-analyst
pybossa_analyst/client.py
pybossa_analyst/client.py
# -*- coding: utf8 -*- """API client module for pybossa-analyst.""" import enki class PyBossaClient(object): """A class for interacting with PyBossa.""" def __init__(self, app=None): """Init method.""" self.app = app if app is not None: # pragma: no cover self.init_app(app) def init_app(self, app): self.api_key = app.config['API_KEY'] self.endpoint = app.config['ENDPOINT'] enki.pbclient.set('api_key', self.api_key) enki.pbclient.set('endpoint', self.endpoint) def _load(self, func, query): items = func(**query) last_fetched = items while self._not_exhausted(last_fetched, query): query['last_id'] = last_fetched[-1].id last_fetched = func(**query) items += last_fetched return items def _not_exhausted(self, last_fetched, query): return (len(last_fetched) != 0 and len(last_fetched) == query['limit'] and query.get('id') is None) def get_results(self, project_id, **kwargs): """Return results.""" query = dict(project_id=project_id, all='1', limit=100, **kwargs) if kwargs.get('limit'): return enki.pbclient.find_results(**query) return self._load(enki.pbclient.find_results, query) def get_tasks(self, project_id, **kwargs): """Return tasks.""" query = dict(project_id=project_id, all='1', limit=100, **kwargs) if kwargs.get('limit'): return enki.pbclient.find_tasks(**query) return self._load(enki.pbclient.find_tasks, query) def get_task_runs(self, project_id, **kwargs): """Return task runs.""" query = dict(project_id=project_id, all='1', limit=100, **kwargs) if kwargs.get('limit'): return enki.pbclient.find_taskruns(**query) return self._load(enki.pbclient.find_taskruns, query) def get_projects(self, **kwargs): """Return projects.""" query = dict(all='1', limit=100, **kwargs) if kwargs.get('limit'): return enki.pbclient.find_project(**query) return self._load(enki.pbclient.find_project, query) def update_result(self, result): """Update a result.""" return enki.pbclient.update_result(result) def get_task_run_dataframe(self, project_id, task_id): """Return a dataframe containing all task run info for a task.""" p = self.get_projects(id=project_id, limit=1)[0] e = enki.Enki(self.api_key, self.endpoint, p.short_name, all=1) e.get_tasks(task_id=task_id) e.get_task_runs() t = e.tasks[0] return e.task_runs_df[t.id]
# -*- coding: utf8 -*- """API client module for pybossa-analyst.""" import enki class PyBossaClient(object): """A class for interacting with PyBossa.""" def __init__(self, app=None): """Init method.""" self.app = app if app is not None: # pragma: no cover self.init_app(app) def init_app(self, app): self.api_key = app.config['API_KEY'] self.endpoint = app.config['ENDPOINT'] enki.pbclient.set('api_key', self.api_key) enki.pbclient.set('endpoint', self.endpoint) def get_results(self, project_id, **kwargs): """Return results.""" return enki.pbclient.find_results(project_id, all=1, **kwargs) def get_tasks(self, project_id, **kwargs): """Return tasks.""" return enki.pbclient.find_tasks(project_id, all=1, **kwargs) def get_task_runs(self, project_id, **kwargs): """Return task runs.""" return enki.pbclient.find_taskruns(project_id, all=1, **kwargs) def get_projects(self, **kwargs): """Return projects.""" return enki.pbclient.find_project(all=1, **kwargs) def update_result(self, result): """Update a result.""" return enki.pbclient.update_result(result) def get_task_run_dataframe(self, project_id, task_id): """Return a dataframe containing all task run info for a task.""" p = self.get_projects(id=project_id)[0] e = enki.Enki(self.api_key, self.endpoint, p.short_name, all=1) e.get_tasks(task_id=task_id) e.get_task_runs() t = e.tasks[0] return e.task_runs_df[t.id]
unknown
Python
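The `_load` helper above implements keyset pagination: request pages of `limit` items, pass the last seen id forward, and stop on a short page. A generic sketch of the same pattern:

def fetch_all(fetch_page, limit=100):
    """Drain a paginated endpoint by chasing last_id (generic sketch)."""
    query = {'limit': limit}
    page = fetch_page(**query)
    items = list(page)
    while len(page) == limit:            # a short page means we're done
        query['last_id'] = page[-1].id   # keyset cursor: resume after the last item
        page = fetch_page(**query)
        items += page
    return items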
73b7d0670414ec65a152d239a5c5c60464ce8ff9
Fix bad import. Fixes #2196
davidwaroquiers/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,fraricci/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,gVallverdu/pymatgen,gVallverdu/pymatgen,gmatteo/pymatgen,davidwaroquiers/pymatgen,gmatteo/pymatgen,davidwaroquiers/pymatgen,vorwerkc/pymatgen,vorwerkc/pymatgen,vorwerkc/pymatgen,fraricci/pymatgen
pymatgen/core/__init__.py
pymatgen/core/__init__.py
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This package contains core modules and classes for representing structures and operations on them. """ import os try: from ruamel import yaml except ImportError: try: import ruamel_yaml as yaml # type: ignore # noqa except ImportError: import yaml # type: ignore # noqa from .composition import Composition # noqa from .lattice import Lattice # noqa from .operations import SymmOp # noqa from .periodic_table import DummySpecies, Element, Species # noqa from .sites import PeriodicSite, Site # noqa from .structure import IMolecule, IStructure, Molecule, Structure # noqa from .units import ArrayWithUnit, FloatWithUnit, Unit # noqa __author__ = "Pymatgen Development Team" __email__ = "[email protected]" __maintainer__ = "Shyue Ping Ong" __maintainer_email__ = "[email protected]" __version__ = "2022.0.10" SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml") def _load_pmg_settings(): # Load environment variables by default as backup d = {} for k, v in os.environ.items(): if k.startswith("PMG_"): d[k] = v elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]: d["PMG_" + k] = v # Override anything in env vars with that in yml file try: with open(SETTINGS_FILE, "rt") as f: d_yml = yaml.safe_load(f) d.update(d_yml) except IOError: # If there are any errors, default to using environment variables # if present. pass d = d or {} return dict(d) SETTINGS = _load_pmg_settings() locals().update(SETTINGS)
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This package contains core modules and classes for representing structures and operations on them. """ import os try: from ruamal import yaml except ImportError: try: import ruamel_yaml as yaml # type: ignore # noqa except ImportError: import yaml # type: ignore # noqa from .composition import Composition # noqa from .lattice import Lattice # noqa from .operations import SymmOp # noqa from .periodic_table import DummySpecies, Element, Species # noqa from .sites import PeriodicSite, Site # noqa from .structure import IMolecule, IStructure, Molecule, Structure # noqa from .units import ArrayWithUnit, FloatWithUnit, Unit # noqa __author__ = "Pymatgen Development Team" __email__ = "[email protected]" __maintainer__ = "Shyue Ping Ong" __maintainer_email__ = "[email protected]" __version__ = "2022.0.10" SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml") def _load_pmg_settings(): # Load environment variables by default as backup d = {} for k, v in os.environ.items(): if k.startswith("PMG_"): d[k] = v elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]: d["PMG_" + k] = v # Override anything in env vars with that in yml file try: with open(SETTINGS_FILE, "rt") as f: d_yml = yaml.safe_load(f) d.update(d_yml) except IOError: # If there are any errors, default to using environment variables # if present. pass d = d or {} return dict(d) SETTINGS = _load_pmg_settings() locals().update(SETTINGS)
mit
Python
d55f2b98822faa7d71f5fce2bfa980f8265e0610
Use take() instead of takeSample() in PySpark kmeans example.
rednaxelafx/apache-spark,maropu/spark,ron8hu/spark,ddna1021/spark,mzl9039/spark,sachintyagi22/spark,sureshthalamati/spark,map222/spark,nchammas/spark,mzl9039/spark,xflin/spark,yanboliang/spark,dotunolafunmiloye/spark,cin/spark,xuanyuanking/spark,andrewor14/iolap,koeninger/spark,joseph-torres/spark,esi-mineset/spark,punchagan/spark... wait
python/examples/kmeans.py
python/examples/kmeans.py
""" This example requires numpy (http://www.numpy.org/) """ import sys import numpy as np from pyspark import SparkContext def parseVector(line): return np.array([float(x) for x in line.split(' ')]) def closestPoint(p, centers): bestIndex = 0 closest = float("+inf") for i in range(len(centers)): tempDist = np.sum((p - centers[i]) ** 2) if tempDist < closest: closest = tempDist bestIndex = i return bestIndex if __name__ == "__main__": if len(sys.argv) < 5: print >> sys.stderr, \ "Usage: PythonKMeans <master> <file> <k> <convergeDist>" exit(-1) sc = SparkContext(sys.argv[1], "PythonKMeans") lines = sc.textFile(sys.argv[2]) data = lines.map(parseVector).cache() K = int(sys.argv[3]) convergeDist = float(sys.argv[4]) # TODO: change this after we port takeSample() #kPoints = data.takeSample(False, K, 34) kPoints = data.take(K) tempDist = 1.0 while tempDist > convergeDist: closest = data.map( lambda p : (closestPoint(p, kPoints), (p, 1))) pointStats = closest.reduceByKey( lambda (x1, y1), (x2, y2): (x1 + x2, y1 + y2)) newPoints = pointStats.map( lambda (x, (y, z)): (x, y / z)).collect() tempDist = sum(np.sum((kPoints[x] - y) ** 2) for (x, y) in newPoints) for (x, y) in newPoints: kPoints[x] = y print "Final centers: " + str(kPoints)
""" This example requires numpy (http://www.numpy.org/) """ import sys import numpy as np from pyspark import SparkContext def parseVector(line): return np.array([float(x) for x in line.split(' ')]) def closestPoint(p, centers): bestIndex = 0 closest = float("+inf") for i in range(len(centers)): tempDist = np.sum((p - centers[i]) ** 2) if tempDist < closest: closest = tempDist bestIndex = i return bestIndex if __name__ == "__main__": if len(sys.argv) < 5: print >> sys.stderr, \ "Usage: PythonKMeans <master> <file> <k> <convergeDist>" exit(-1) sc = SparkContext(sys.argv[1], "PythonKMeans") lines = sc.textFile(sys.argv[2]) data = lines.map(parseVector).cache() K = int(sys.argv[3]) convergeDist = float(sys.argv[4]) kPoints = data.takeSample(False, K, 34) tempDist = 1.0 while tempDist > convergeDist: closest = data.map( lambda p : (closestPoint(p, kPoints), (p, 1))) pointStats = closest.reduceByKey( lambda (x1, y1), (x2, y2): (x1 + x2, y1 + y2)) newPoints = pointStats.map( lambda (x, (y, z)): (x, y / z)).collect() tempDist = sum(np.sum((kPoints[x] - y) ** 2) for (x, y) in newPoints) for (x, y) in newPoints: kPoints[x] = y print "Final centers: " + str(kPoints)
apache-2.0
Python
d680fca8bef783bd6fad7c71989ca51fb4725bc8
upgrade to latest ChatExchange and fix API usage
NickVolynkin/SmokeDetector,NickVolynkin/SmokeDetector,ArtOfCode-/SmokeDetector,Charcoal-SE/SmokeDetector,Charcoal-SE/SmokeDetector,ArtOfCode-/SmokeDetector
ws.py
ws.py
#requires https://pypi.python.org/pypi/websocket-client/ import websocket import threading import json,os,sys,getpass,time from findspam import FindSpam from ChatExchange.chatexchange.client import * import HTMLParser parser=HTMLParser.HTMLParser() if("ChatExchangeU" in os.environ): username=os.environ["ChatExchangeU"] else: print "Username: " username=raw_input() if("ChatExchangeP" in os.environ): password=os.environ["ChatExchangeP"] else: password=getpass.getpass("Password: ") lasthost=None lastid=None wrap=Client("stackexchange.com") wrap.login(username,password) wrapm=Client("meta.stackexchange.com") wrapm.login(username,password) s="[ [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector) ] SmokeDetector started" room = wrap.get_room("11540") roomm = wrapm.get_room("89") room.send_message(s) def checkifspam(data): return True global lasthost,lastid d=json.loads(json.loads(data)["data"]) s= d["titleEncodedFancy"] print time.strftime("%Y-%m-%d %H:%M:%S"),parser.unescape(s).encode("ascii",errors="replace") site = d["siteBaseHostAddress"] site=site.encode("ascii",errors="replace") sys.stdout.flush() test=FindSpam.testpost(s,site) if (0<len(test)): if(lastid==d["id"] and lasthost == d["siteBaseHostAddress"]): return False # Don't repost. Reddit will hate you. lastid=d["id"] lasthost = d["siteBaseHostAddress"] return True return False def handlespam(data): try: d=json.loads(json.loads(data)["data"]) reason=",".join(FindSpam.testpost(d["titleEncodedFancy"],d["siteBaseHostAddress"])) s="[ [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector) ] %s: [%s](%s) on `%s`" % (reason,d["titleEncodedFancy"],d["url"],d["siteBaseHostAddress"]) print parser.unescape(s).encode('ascii',errors='replace') room.send_message(s) roomm.send_message(s) except UnboundLocalError: print "NOP" ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/") ws.send("155-questions-active") room.join() def watcher(ev,wrap2): if ev.type_id != 1: return; if(ev.content.startswith("!!/stappit")): if(str(ev.data["user_id"]) in ["31768","103081","73046"]): room.send_message("Goodbye, cruel world") os._exit(1) room.watch_socket(watcher) while True: a=ws.recv() if(a!= None and a!= ""): if(checkifspam(a)): threading.Thread(target=handlespam,args=(a,)).start() s="[ [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector) ] SmokeDetector aborted" room.sendMessage(s)
#requires https://pypi.python.org/pypi/websocket-client/ import websocket import threading import json,os,sys,getpass,time from findspam import FindSpam from ChatExchange.chatexchange.client import * import HTMLParser parser=HTMLParser.HTMLParser() if("ChatExchangeU" in os.environ): username=os.environ["ChatExchangeU"] else: print "Username: " username=raw_input() if("ChatExchangeP" in os.environ): password=os.environ["ChatExchangeP"] else: password=getpass.getpass("Password: ") lasthost=None lastid=None wrap=Client("stackexchange.com") wrap.login(username,password) wrapm=Client("meta.stackexchange.com") wrapm.login(username,password) s="[ [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector) ] SmokeDetector started" room = wrap.get_room("11540") roomm = wrapm.get_room("89") room.send_message(s) def checkifspam(data): global lasthost,lastid d=json.loads(json.loads(data)["data"]) s= d["titleEncodedFancy"] print time.strftime("%Y-%m-%d %H:%M:%S"),parser.unescape(s).encode("ascii",errors="replace") site = d["siteBaseHostAddress"] site=site.encode("ascii",errors="replace") sys.stdout.flush() test=FindSpam.testpost(s,site) if (0<len(test)): if(lastid==d["id"] and lasthost == d["siteBaseHostAddress"]): return False # Don't repost. Reddit will hate you. lastid=d["id"] lasthost = d["siteBaseHostAddress"] return True return False def handlespam(data): try: d=json.loads(json.loads(data)["data"]) reason=",".join(FindSpam.testpost(d["titleEncodedFancy"],d["siteBaseHostAddress"])) s="[ [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector) ] %s: [%s](%s) on `%s`" % (reason,d["titleEncodedFancy"],d["url"],d["siteBaseHostAddress"]) print parser.unescape(s).encode('ascii',errors='replace') wrap.sendMessage("11540",s) wrapm.sendMessage("89",s) except UnboundLocalError: print "NOP" ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/") ws.send("155-questions-active") room.join() def watcher(ev,wrap2): if ev.type_id != 1: return; if(msg.content.startswith("!!/stappit")): if(str(msg.data["user_id"]) in ["31768","103081","73046"]): room.send_message("11540","Goodbye, cruel world") os._exit(1) room.watch_socket(watcher) while True: a=ws.recv() if(a!= None and a!= ""): if(checkifspam(a)): threading.Thread(target=handlespam,args=(a,)).start() s="[ [SmokeDetector](https://github.com/Charcoal-SE/SmokeDetector) ] SmokeDetector aborted" room.sendMessage(s)
apache-2.0
Python
af5b574fb785e65fd1292bb28e90d005be1ecd03
Fix pep8 errors
meomancer/field-campaigner,meomancer/field-campaigner,meomancer/field-campaigner
reporter/test/test_osm.py
reporter/test/test_osm.py
# coding=utf-8 """Test cases for the OSM module. :copyright: (c) 2013 by Tim Sutton :license: GPLv3, see LICENSE for more details. """ import os from reporter.utilities import LOGGER from reporter.osm import load_osm_document, extract_buildings_shapefile from reporter.test.helpers import FIXTURE_PATH from reporter.test.logged_unittest import LoggedTestCase class OsmTestCase(LoggedTestCase): """Test the OSM retrieval functions.""" def test_load_osm_document(self): """Check that we can fetch an osm doc and that it caches properly.""" # # NOTE - INTERNET CONNECTION NEEDED FOR THIS TEST # myUrl = ('http://overpass-api.de/api/interpreter?data=' '(node(-34.03112731086964,20.44997155666351,' '-34.029571310785315,20.45501410961151);<;);out+meta;') myFilePath = '/tmp/test_load_osm_document.osm' if os.path.exists(myFilePath): os.remove(myFilePath) # We test twice - once to ensure its fetched from the overpass api # and once to ensure the cached file is used on second access # Note: There is a small chance the second test could fail if it # exactly straddles the cache expiry time. try: myFile = load_osm_document(myFilePath, myUrl) except: myMessage = 'load_osm_document from overpass failed %s' % myUrl LOGGER.exception(myMessage) raise myString = myFile.read() myMessage = 'load_osm_document from overpass content check failed.' assert 'Jacoline' in myString, myMessage #myFile = load_osm_document(myFilePath, myUrl) myFileTime = os.path.getmtime(myFilePath) # # This one should be cached now.... # load_osm_document(myFilePath, myUrl) myFileTime2 = os.path.getmtime(myFilePath) myMessage = 'load_osm_document cache test failed.' self.assertEqual(myFileTime, myFileTime2, myMessage) def test_extract_buildings_shapefile(self): """Test the osm to shp converter.""" myZipPath = extract_buildings_shapefile(FIXTURE_PATH) print myZipPath self.assertTrue(os.path.exists(myZipPath), myZipPath)
# coding=utf-8 """Test cases for the OSM module. :copyright: (c) 2013 by Tim Sutton :license: GPLv3, see LICENSE for more details. """ import os from reporter.utilities import LOGGER from reporter.osm import load_osm_document, extract_buildings_shapefile from reporter.test.helpers import FIXTURE_PATH from reporter.test.logged_unittest import LoggedTestCase class OsmTestCase(LoggedTestCase): """Test the OSM retrieval functions.""" def test_load_osm_document(self): """Check that we can fetch an osm doc and that it caches properly.""" # # NOTE - INTERNET CONNECTION NEEDED FOR THIS TEST # myUrl = ('http://overpass-api.de/api/interpreter?data=' '(node(-34.03112731086964,20.44997155666351,' '-34.029571310785315,20.45501410961151);<;);out+meta;') myFilePath = '/tmp/test_load_osm_document.osm' if os.path.exists(myFilePath): os.remove(myFilePath) # We test twice - once to ensure its fetched from the overpass api # and once to ensure the cached file is used on second access # Note: There is a small chance the second test could fail if it # exactly straddles the cache expiry time. try: myFile = load_osm_document(myFilePath, myUrl) except: myMessage = 'load_osm_document from overpass test failed %s' % myUrl LOGGER.exception(myMessage) raise myString = myFile.read() myMessage = 'load_osm_document from overpass content check failed.' assert 'Jacoline' in myString, myMessage #myFile = load_osm_document(myFilePath, myUrl) myFileTime = os.path.getmtime(myFilePath) # # This one should be cached now.... # load_osm_document(myFilePath, myUrl) myFileTime2 = os.path.getmtime(myFilePath) myMessage = 'load_osm_document cache test failed.' self.assertEqual(myFileTime, myFileTime2, myMessage) def test_extract_buildings_shapefile(self): """Test the osm to shp converter.""" myZipPath = extract_buildings_shapefile(FIXTURE_PATH) print myZipPath self.assertTrue(os.path.exists(myZipPath), myZipPath)
bsd-3-clause
Python
acf2d57fa49a5ed25275a279d04946178f8cedde
Fix formatting and add docstrings
McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research
reporting_scripts/base.py
reporting_scripts/base.py
import csv from pymongo import MongoClient class BaseEdX(object): def __init__(self, args): self.url = args.url client = MongoClient(self.url) self.db = client[args.db_name] self.collections = None self.output_directory = args.output_directory self.row_limit = args.row_limit self.csv_data = None self.list_of_headers = None def generate_csv(self, csv_data, list_of_headers, output_file): """ Genersate csv report from generated data and given list of headers """ self.csv_data = csv_data self.list_of_headers = list_of_headers number_of_rows = len(csv_data) + 1 if number_of_rows <= self.row_limit: self._write_to_csv(self.output_file) else: if number_of_rows % self.row_limit: number_of_splits = number_of_rows // self.row_limit + 1 else: number_of_splits = number_of_rows // self.row_limit for index in xrange(number_of_splits): self._write_to_csv(output_file.split('.')[0] + '_' + str(index) + '.csv', index) def _write_to_csv(self, output_file, number_of_splits=0): """ Helper method to write rows to csv files """ with open(output_file, 'w') as csv_file: writer = csv.writer(csv_file) writer.writerow(self.list_of_headers) for row in self.csv_data[number_of_splits * self.row_limit : (number_of_splits + 1) * self.row_limit]: # This loop looks for unicode objects and encodes them to ASCII to avoif Unicode errors, # for e.g. UnicodeEncodeError: 'ascii' codec can't encode character u'\xf1' for index,item in enumerate(row[:]): if type(item) is unicode: row[index] = item.encode('ascii', 'ignore') writer.writerow(row)
import csv from pymongo import MongoClient class BaseEdX(object): def __init__(self, args): self.url = args.url client = MongoClient(self.url) self.db = client[args.db_name] self.collections = None self.output_directory = args.output_directory self.row_limit = args.row_limit self.csv_data = None self.list_of_headers = None def generate_csv(self, csv_data, list_of_headers, output_file): self.csv_data = csv_data self.list_of_headers = list_of_headers number_of_rows = len(csv_data) + 1 if number_of_rows <= self.row_limit: self._write_to_csv(self.output_file) else: if number_of_rows % self.row_limit: number_of_splits = number_of_rows // self.row_limit + 1 else: number_of_splits = number_of_rows // self.row_limit def _write_to_csv(self, output_file, number_of_splits=0): with open(output_file, 'w') as csv_file: writer = csv.writer(csv_file) writer.writerow(self.list_of_headers) for row in self.csv_data[number_of_splits * self.row_limit : (number_of_splits + 1) * self.row_limit]: # This loop looks for unicode objects and encodes them to ASCII to avoif Unicode errors, # for e.g. UnicodeEncodeError: 'ascii' codec can't encode character u'\xf1' for index,item in enumerate(row[:]): if type(item) is unicode: row[index] = item.encode('ascii', 'ignore') writer.writerow(row) for index in xrange(number_of_splits): self._write_to_csv(output_file.split('.')[0] + '_' + str(index) + '.csv', index)
mit
Python
0e58ef45f45df0192be6c52cd34df5f1b5c5a028
correct if condition
sathishcodes/Webhookskeleton-py,sathishcodes/Webhookskeleton-py
app.py
app.py
#!/usr/bin/env python import urllib import json import os from flask import Flask from flask import request from flask import make_response # Flask app should start in global layout app = Flask(__name__) @app.route('/webhook', methods=['POST']) def webhook(): req = request.get_json(silent=True, force=True) print("Request:") print(json.dumps(req, indent=4)) res = makeWebhookResult(req) res = json.dumps(res, indent=4) print(res) r = make_response(res) r.headers['Content-Type'] = 'application/json' return r def makeWebhookResult(req): if req.get("result").get("action") == "tell.hours": result = req.get("result") parameters = result.get("parameters") timetype = parameters.get("time-type") portaltype = parameters.get("portal-types") DteTime = {'CS':'9 hours', 'PTO':'8 hours'} StaffitTime = {'CS':'8 hours', 'PTO':'8 hours'} #if time-type == "DTE" speech = "You should book" + str(DteTime[timetype]) + "for" + timetype #speech = "Webhook called!!" print("Response:") print(speech) return { "speech": speech, "displayText": speech, #"data": {}, # "contextOut": [], "source": "apiai-onlinestore-shipping" } if __name__ == '__main__': port = int(os.getenv('PORT', 5000)) print "Starting app on port %d" % port app.run(debug=True, port=port, host='0.0.0.0')
#!/usr/bin/env python import urllib import json import os from flask import Flask from flask import request from flask import make_response # Flask app should start in global layout app = Flask(__name__) @app.route('/webhook', methods=['POST']) def webhook(): req = request.get_json(silent=True, force=True) print("Request:") print(json.dumps(req, indent=4)) res = makeWebhookResult(req) res = json.dumps(res, indent=4) print(res) r = make_response(res) r.headers['Content-Type'] = 'application/json' return r def makeWebhookResult(req): if req.get("result").get("action") = "tell.hours": result = req.get("result") parameters = result.get("parameters") timetype = parameters.get("time-type") portaltype = parameters.get("portal-types") DteTime = {'CS':'9 hours', 'PTO':'8 hours'} StaffitTime = {'CS':'8 hours', 'PTO':'8 hours'} #if time-type == "DTE" speech = "You should book" + str(DteTime[timetype]) + "for" + timetype #speech = "Webhook called!!" print("Response:") print(speech) return { "speech": speech, "displayText": speech, #"data": {}, # "contextOut": [], "source": "apiai-onlinestore-shipping" } if __name__ == '__main__': port = int(os.getenv('PORT', 5000)) print "Starting app on port %d" % port app.run(debug=True, port=port, host='0.0.0.0')
apache-2.0
Python
e33a06ad4d4a7494f925a96e9d272e32e4dc18ba
Return JSON when an error occurs
philipbl/SpeakerCast,philipbl/talk_feed
app.py
app.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from flask import Flask, request, json from flask.ext.cors import CORS import database import rsser import logging import threading logging.basicConfig(level=logging.INFO) logging.getLogger("gospellibrary").setLevel(logging.WARNING) logging.getLogger("requests").setLevel(logging.WARNING) logger = logging.getLogger(__name__) # Update data base in background logger.info("Updating database in background...") threading.Thread(target=database.update_database).start() logger.info("Starting server...") app = Flask(__name__) CORS(app) @app.route('/speakercast/speakers') def speakers(): logger.info("Getting speakers") speakers = [{'name': name, 'talks': count} for count, name in database.get_all_speaker_and_counts()] return json.dumps(speakers) @app.route('/speakercast/generate', methods=['POST', 'OPTIONS']) def generate(): if request.method == 'OPTIONS': return "" data = json.loads(request.data) speakers = data.get('speakers') if speakers is None: logger.error("No \"speakers\" field in request data!") return json.dumps({"error": "No \"speakers\" field in request data!"}) if len(speakers) == 0: logger.warning("Speaker list was empty. Ignoring request.") return json.dumps({"error": "Speaker list was empty. Ignoring request."}) id_ = database.generate_id(speakers) logger.info("Generated id ({}) for {}".format(id_, speakers)) return id_ @app.route('/speakercast/feed/<id>') def feed(id): speakers = database.get_speakers(id) if speakers is None: # TODO: Send some error logger.error("No speakers match {}!".format(id)) return json.dumps({"error": "No speakers match {}!".format(id)}) talks = database.get_talks(speakers) logger.info("Creating RSS feed for {}: {}".format(id, speakers)) return rsser.create_rss_feed(talks=talks, speakers=list(speakers)) if __name__ == "__main__": app.run(debug=True)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from flask import Flask, request, json from flask.ext.cors import CORS import database import rsser import logging import threading logging.basicConfig(level=logging.INFO) logging.getLogger("gospellibrary").setLevel(logging.WARNING) logging.getLogger("requests").setLevel(logging.WARNING) logger = logging.getLogger(__name__) # Update data base in background logger.info("Updating database in background...") threading.Thread(target=database.update_database).start() logger.info("Starting server...") app = Flask(__name__) CORS(app) @app.route('/speakercast/speakers') def speakers(): logger.info("Getting speakers") speakers = [{'name': name, 'talks': count} for count, name in database.get_all_speaker_and_counts()] return json.dumps(speakers) @app.route('/speakercast/generate', methods=['POST', 'OPTIONS']) def generate(): if request.method == 'OPTIONS': return "" data = json.loads(request.data) speakers = data.get('speakers') if speakers is None: logger.error("No \"speakers\" field in request data!") return "ERROR" if len(speakers) == 0: logger.warning("Speaker list was empty. Ignoring request.") return "" id_ = database.generate_id(speakers) logger.info("Generated id ({}) for {}".format(id_, speakers)) return id_ @app.route('/speakercast/feed/<id>') def feed(id): speakers = database.get_speakers(id) if speakers is None: # TODO: Send some error logger.error("No speakers match {}!".format(id)) return "ERROR" talks = database.get_talks(speakers) logger.info("Creating RSS feed for {}: {}".format(id, speakers)) return rsser.create_rss_feed(talks=talks, speakers=list(speakers)) if __name__ == "__main__": app.run(debug=True)
bsd-3-clause
Python
d177d63ebc87208fdba4227377b2e1aebda8f077
Add port code for Heroku
smizell/maze_server
app.py
app.py
import os from flask import Flask, Response, request from hypermedia_resource import HypermediaResource from hypermedia_resource.wrappers import HypermediaResponse, ResponseBuilder import maze app = Flask(__name__) # Helper functions for the views def maze_resource(type_of): """ Sets up a HypermediaResource for the resource """ resource = HypermediaResource() resource.meta.attributes.add("type", type_of) return resource def maze_response(resource): """ Build a HypermediaResponse """ response_builder = ResponseBuilder("application/vnd.amundsen.maze+xml") response = response_builder.build(resource, request.headers.get("Accept")) return Response(response.body, mimetype=response.media_type) # Route and views @app.route('/', methods=["GET"]) def root(): """ Root resource """ resource = maze_resource(type_of='item') resource.links.add(rel='start', href=maze.link_to_cell(0)) return maze_response(resource) @app.route('/cells/999', methods=["GET"]) def exit(): """ Exit resource """ resource = maze_resource(type_of='completed') resource.links.add(rel='start', href=maze.link_to_cell(0)) return maze_response(resource) @app.route('/cells/<cell_num>', methods=["GET"]) def cell(cell_num): """ Cell resource """ resource = maze_resource(type_of='cell') links = maze.get_links_for_cell(int(cell_num)) for rel, link in links.iteritems(): resource.links.add(rel=rel, href=link) return maze_response(resource) if __name__ == "__main__": port = int(os.environ.get("PORT", 5000)) app.run(host='0.0.0.0', port=port)
from flask import Flask, Response, request from hypermedia_resource import HypermediaResource from hypermedia_resource.wrappers import HypermediaResponse, ResponseBuilder import maze app = Flask(__name__) # Helper functions for the views def maze_resource(type_of): """ Sets up a HypermediaResource for the resource """ resource = HypermediaResource() resource.meta.attributes.add("type", type_of) return resource def maze_response(resource): """ Build a HypermediaResponse """ response_builder = ResponseBuilder("application/vnd.amundsen.maze+xml") response = response_builder.build(resource, request.headers.get("Accept")) return Response(response.body, mimetype=response.media_type) # Route and views @app.route('/', methods=["GET"]) def root(): """ Root resource """ resource = maze_resource(type_of='item') resource.links.add(rel='start', href=maze.link_to_cell(0)) return maze_response(resource) @app.route('/cells/999', methods=["GET"]) def exit(): """ Exit resource """ resource = maze_resource(type_of='completed') resource.links.add(rel='start', href=maze.link_to_cell(0)) return maze_response(resource) @app.route('/cells/<cell_num>', methods=["GET"]) def cell(cell_num): """ Cell resource """ resource = maze_resource(type_of='cell') links = maze.get_links_for_cell(int(cell_num)) for rel, link in links.iteritems(): resource.links.add(rel=rel, href=link) return maze_response(resource) if __name__ == "__main__": app.debug = True app.run()
mit
Python
096cd81f318e8446855fb806772c674328adc6b2
Create app.py
rajeshrao04/news-api
app.py
app.py
#!/usr/bin/env python from __future__ import print_function from future.standard_library import install_aliases install_aliases() from urllib.parse import urlparse, urlencode from urllib.request import urlopen, Request from urllib.error import HTTPError import json import os from flask import Flask from flask import request from flask import make_response # Flask app should start in global layout app = Flask(__name__) @app.route('/webhook', methods=['POST']) def webhook(): req = request.get_json(silent=True, force=True) print("Request:") print(json.dumps(req, indent=4)) res = processRequest(req) res = json.dumps(res, indent=4) # print(res) r = make_response(res) r.headers['Content-Type'] = 'application/json' return r def processRequest(req): if req.get("result").get("action") != "news.search": return {} baseurl = "https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=60969da0a38e4cf1aad619158c413030" if yql_query is None: return {} yql_url = baseurl + urlencode({'': yql_query}) + "&format=json" result = urlopen(yql_url).read() data = json.loads(result) res = makeWebhookResult(data) return res def makeYqlQuery(req): result = req.get("result") parameters = result.get("parameters") news = parameters.get("news.search") if news is None: return None def makeWebhookResult(res): articles = res.get('articles') if articles is None: return {} author = articles.get('author') if author is None: return {} title = articles.get('title') if title is None: return {} description= articles.get('description') url = articles.get('url') #units = channel.get('units') #condition = item.get('condition') #if condition is None: # return {} print(json.dumps(item, indent=4)) speech = "latest news" +author.get()+""+title.get()+""+description.get()+""+url.get() #print("Response:") print(speech) return { "speech": speech, "displayText": speech, # "data": data, # "contextOut": [], "source": "apiai-news-search" } if __name__ == '__main__': port = int(os.getenv('PORT', 5000)) print("Starting app on port %d" % port) app.run(debug=False, port=port, host='0.0.0.0')
#!/usr/bin/env python from __future__ import print_function from future.standard_library import install_aliases install_aliases() from urllib.parse import urlparse, urlencode from urllib.request import urlopen, Request from urllib.error import HTTPError import json import os from flask import Flask from flask import request from flask import make_response # Flask app should start in global layout app = Flask(__name__) @app.route('/webhook', methods=['POST']) def webhook(): req = request.get_json(silent=True, force=True) print("Request:") print(json.dumps(req, indent=4)) res = processRequest(req) res = json.dumps(res, indent=4) # print(res) r = make_response(res) r.headers['Content-Type'] = 'application/json' return r def processRequest(req): if req.get("result").get("action") != "news.search": return {} baseurl = "https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=60969da0a38e4cf1aad619158c413030" if yql_query is None: return {} yql_url = baseurl + urlencode({'': yql_query}) + "&format=json" result = urlopen(yql_url).read() data = json.loads(result) res = makeWebhookResult(data) return res #def makeYqlQuery(req): # result = req.get("result") # parameters = result.get("parameters") #city = parameters.get("news.search") #if city is None: # return None def makeWebhookResult(res): articles = res.get('articles') if articles is None: return {} author = articles.get('author') if author is None: return {} title = articles.get('title') if title is None: return {} description= articles.get('description') url = articles.get('url') #units = channel.get('units') #condition = item.get('condition') #if condition is None: # return {} print(json.dumps(item, indent=4)) speech = "latest news" +author.get()+""+title.get()+""+description.get()+""+url.get() #print("Response:") print(speech) return { "speech": speech, "displayText": speech, # "data": data, # "contextOut": [], "source": "apiai-news-search" } if __name__ == '__main__': port = int(os.getenv('PORT', 5000)) print("Starting app on port %d" % port) app.run(debug=False, port=port, host='0.0.0.0')
apache-2.0
Python
8fc38abecd4a9cba6579c7a422b957748115f450
disable CSRF token
Mouleshwar/Flask-S3-Uploader,themouli/Flask-S3-Uploader,Mouleshwar/Flask-S3-Uploader,themouli/Flask-S3-Uploader
app.py
app.py
from flask import Flask, render_template, flash from flask_wtf import Form from flask_wtf.file import FileField from tools import s3_upload import json app = Flask(__name__) app.config.from_object('config') class UploadForm(Form): example = FileField('Example File') @app.route('/', methods=['POST', 'GET']) def upload_page(): form = UploadForm(csrf_enabled=False) if form.validate_on_submit(): output = s3_upload(form.example) flash('{src} uploaded to S3 as {dst} and its urs is {url}'.format(src=form.example.data.filename, dst=output.split(" ")[0], url=output.split(" ")[1])) response = {} response['url'] = output.split(" ")[1] return json.dumps(response, indent=4) return render_template('example.html', form=form) if __name__ == '__main__': app.run()
from flask import Flask, render_template, flash from flask_wtf import Form from flask_wtf.file import FileField from tools import s3_upload import json app = Flask(__name__) app.config.from_object('config') class UploadForm(Form): example = FileField('Example File') @app.route('/', methods=['POST', 'GET']) def upload_page(): form = UploadForm() if form.validate_on_submit(): output = s3_upload(form.example) flash('{src} uploaded to S3 as {dst} and its urs is {url}'.format(src=form.example.data.filename, dst=output.split(" ")[0], url=output.split(" ")[1])) response = {} response['url'] = output.split(" ")[1] return json.dumps(response, indent=4) return render_template('example.html', form=form) if __name__ == '__main__': app.run()
mit
Python
e33174b6fcf8110478ec84016781ed65df7eb055
Add web interface to utility
tildecross/tildex-notify
app.py
app.py
#!notify/bin/python3 import hug import os from pushbullet import Pushbullet @hug.get() @hug.cli() def create_note(title: hug.types.text, content: hug.types.text): api_key = os.environ["PB_API_KEY"] pb = Pushbullet(api_key) pb.push_note(title, content) if __name__ == '__main__': create_note.interface.cli()
#!notify/bin/python3 import hug import os from pushbullet import Pushbullet @hug.cli() def create_note(title: hug.types.text, content: hug.types.text): api_key = os.environ["PB_API_KEY"] pb = Pushbullet(api_key) pb.push_note(title, content) if __name__ == '__main__': create_note.interface.cli()
isc
Python
ed3ee7caae9ce754e2ec098e8889bfbed2198aa6
Print log msg before trying to write to log file
jiko/lovecraft_ebooks
bot.py
bot.py
#!/usr/bin/python import init_twit as tw import markovgen, time, re, random, codecs # make a separate file for these reusable functions: bot.py # main bot-specific app logic in app.py corpus_file = 'corpus.txt' with open(corpus_file) as text: markov = markovgen.Markov(text) def log(msg): print msg with codecs.open('log','a','utf-8') as f: f.write(msg+"\n") def genTweet(): wc = random.randint(6,18) return markov.generate_markov_text(size=wc) def tweet(status,irtsi=None,at=None): try: if at and irtsi: status = "@"+at+" "+status tw.poster.statuses.update(status=status,in_reply_to_status_id=irtsi) else: pass tw.poster.statuses.update(status=status) except tw.TwitterError as error: log(error.response_data) else: if irtsi: status = "In reply to "+irtsi+": "+status log(status) def reply(txt,mention): asker = mention['from_user'] log(asker + " said " + mention['text']) status_id = str(mention['id']) if tw.last_id_replied < status_id: tw.last_id_replied = status_id while len(txt) > 123: txt = genTweet() tweet(txt,status_id,asker) while True: results = [] #results = tw.twitter.search(q="@"+tw.handle,since_id=tw.last_id_replied)['results'] #retweets = re.compile('rt\s',flags=re.I) #results = [response for response in results if not retweets.search(response['text'])] if not results: log("Nobody's talking to me...") else: [reply(genTweet(),result) for result in results] tweet(genTweet()) log("Sweet Dreams...") time.sleep(7600) # waits for two hours
#!/usr/bin/python import init_twit as tw import markovgen, time, re, random, codecs # make a separate file for these reusable functions: bot.py # main bot-specific app logic in app.py corpus_file = 'corpus.txt' with open(corpus_file) as text: markov = markovgen.Markov(text) def log(msg): with codecs.open('log','a','utf-8') as f: f.write(msg+"\n") print msg def genTweet(): wc = random.randint(6,18) return markov.generate_markov_text(size=wc) def tweet(status,irtsi=None,at=None): try: if at and irtsi: status = "@"+at+" "+status tw.poster.statuses.update(status=status,in_reply_to_status_id=irtsi) else: pass tw.poster.statuses.update(status=status) except tw.TwitterError as error: log(error.response_data) else: if irtsi: status = "In reply to "+irtsi+": "+status log(status) def reply(txt,mention): asker = mention['from_user'] log(asker + " said " + mention['text']) status_id = str(mention['id']) if tw.last_id_replied < status_id: tw.last_id_replied = status_id while len(txt) > 123: txt = genTweet() tweet(txt,status_id,asker) while True: results = [] #results = tw.twitter.search(q="@"+tw.handle,since_id=tw.last_id_replied)['results'] #retweets = re.compile('rt\s',flags=re.I) #results = [response for response in results if not retweets.search(response['text'])] if not results: log("Nobody's talking to me...") else: [reply(genTweet(),result) for result in results] tweet(genTweet()) log("Sweet Dreams...") time.sleep(7600) # waits for two hours
mit
Python
132b422e81c8a3f3de4d1600acdc6a71327bfc1e
Update bro.py
jacobdshimer/Bro-Log-Utility-Script
bro.py
bro.py
def bro(verbose, files=[]): import subprocess import os import time files = files.split("\n") files.pop(-1) if verbose == False: epoch = time.time() path = os.getcwd() os.mkdir(path + "/" + str(epoch)) os.chdir(path + "/" + str(epoch)) for file in files: subprocess.check_output(["bro","-r",file]) newfiles = subprocess.check_output(["ls"]).split() for newfile in newfiles: combinedfile = open("combined_" + str(newfile),"a+") newfile = open(newfile,"r") combinedfile.write(newfile) combinedfile.close() newfile.close() elif verbose == True: count = 1 epoch = time.time() path = os.getcwd() os.mkdir(path + "/" + str(epoch)) os.chdir(path + "/" + str(epoch)) print "Creating the folder " + str(epoch) + "in order to store Bro Logs safely." for file in files: print "Working on " + str(count) + " out of " + str(len(files)) subprocess.check_output(["bro","-r",file]) newfiles = subprocess.check_output(["ls"]).split() for newfile in newfiles: combinedfile = open("combined_" + str(newfile),"a+") newfile = open(newfile,"r") combinedfile.write(newfile) combinedfile.close() newfile.close() count += 1
def combiner(verbose, files=[]): import subprocess import os import time files = files.split("\n") files.pop(-1) if verbose == False: epoch = time.time() path = os.getcwd() os.mkdir(path + "/" + str(epoch)) os.chdir(path + "/" + str(epoch)) for file in files: subprocess.check_output(["bro","-r",file]) newfiles = subprocess.check_output(["ls"]).split() for newfile in newfiles: combinedfile = open("combined_" + str(newfile),"a+") newfile = open(newfile,"r") combinedfile.write(newfile) combinedfile.close() newfile.close() elif verbose == True: count = 1 epoch = time.time() path = os.getcwd() os.mkdir(path + "/" + str(epoch)) os.chdir(path + "/" + str(epoch)) print "Creating the folder " + str(epoch) + "in order to store Bro Logs safely." for file in files: print "Working on " + str(count) + " out of " + str(len(files)) subprocess.check_output(["bro","-r",file]) newfiles = subprocess.check_output(["ls"]).split() for newfile in newfiles: combinedfile = open("combined_" + str(newfile),"a+") newfile = open(newfile,"r") combinedfile.write(newfile) combinedfile.close() newfile.close() count += 1
mit
Python
4aad9aeb5acf0c8aba609a53f20107ec48cdfa2b
Initialise GPIO
thomas-vl/car
car.py
car.py
import time, os, sys import wiringpi as io class light(object): def __init__(self, pin): #make pins into output io.pinMode(pin,1) #set output low io.digitalWrite(pin,0) #set variables self.status = 0 self.pin = pin def on(self): #turn light on io.digitalWrite(self.pin,1) self.status = 1 def off(self): io.digitalWrite(self.pin,0) self.status = 0 def blink(self,times): for _ in times: self.on() time.sleep(2) self.off() time.sleep(2) #initialise try: io.wiringPiSetupGpio() except: print "GPIO issue", sys.exc_info()[0] lightFL = light(21) lightFL.on() lights = { "LeftFront":{"pin":21,"status":0}, "RightFront":{"pin":16,"status":0} } def lightCtrl(names,status): for i in names: io.digitalWrite(lights[i]["pin"],status) lights[i]["status"] = status def initResource(): try: io.wiringPiSetupGpio() except: print "GPIO issue", sys.exc_info()[0] for key, value in lights.items(): #make pins into output io.pinMode(value["pin"],1) #set output low io.digitalWrite(value["pin"],0) def lightTest(): lightCtrl(["LeftFront"],1) time.sleep(3) lightCtrl(["LeftFront"],0) lightCtrl(["RightFront"],1) time.sleep(3) lightCtrl(["RightFront"],0) time.sleep(1) lightCtrl(["LeftFront","RightFront"],1) time.sleep(3) lightCtrl(["LeftFront","RightFront"],0) #initResource() #lightTest()
import time, os, sys import wiringpi as io class light(object): def __init__(self, pin): #make pins into output io.pinMode(pin,1) #set output low io.digitalWrite(pin,0) #set variables self.status = 0 self.pin = pin def on(self): #turn light on io.digitalWrite(self.pin,1) self.status = 1 def off(self): io.digitalWrite(self.pin,0) self.status = 0 def blink(self,times): for _ in times: self.on() time.sleep(2) self.off() time.sleep(2) lightFL = light(21) lightFL.on() lights = { "LeftFront":{"pin":21,"status":0}, "RightFront":{"pin":16,"status":0} } def lightCtrl(names,status): for i in names: io.digitalWrite(lights[i]["pin"],status) lights[i]["status"] = status def initResource(): try: io.wiringPiSetupGpio() except: print "GPIO issue", sys.exc_info()[0] for key, value in lights.items(): #make pins into output io.pinMode(value["pin"],1) #set output low io.digitalWrite(value["pin"],0) def lightTest(): lightCtrl(["LeftFront"],1) time.sleep(3) lightCtrl(["LeftFront"],0) lightCtrl(["RightFront"],1) time.sleep(3) lightCtrl(["RightFront"],0) time.sleep(1) lightCtrl(["LeftFront","RightFront"],1) time.sleep(3) lightCtrl(["LeftFront","RightFront"],0) #initResource() #lightTest()
mit
Python
43a26a77d84fb8547564518a8469be69ed852cf1
add discourse to csp
HTTPArchive/beta.httparchive.org,rviscomi/beta.httparchive.org,rviscomi/beta.httparchive.org,HTTPArchive/beta.httparchive.org,HTTPArchive/beta.httparchive.org,rviscomi/beta.httparchive.org
csp.py
csp.py
csp = { 'default-src': '\'self\'', 'style-src': [ '\'self\'', '\'unsafe-inline\'', 'fonts.googleapis.com' ], 'script-src': [ '\'self\'', 'cdn.httparchive.org', 'www.google-analytics.com', 'use.fontawesome.com', 'cdn.speedcurve.com', 'spdcrv.global.ssl.fastly.net' ], 'font-src': [ '\'self\'', 'fonts.gstatic.com' ], 'connect-src': [ '\'self\'', 'cdn.httparchive.org', 'discuss.httparchive.org', 'raw.githubusercontent.com', 'www.webpagetest.org' ], 'img-src': [ '\'self\'', 'discuss.httparchive.org', 'avatars.discourse.org', 'www.google-analytics.com', 's.g.doubleclick.net', 'stats.g.doubleclick.net' ] }
csp = { 'default-src': '\'self\'', 'style-src': [ '\'self\'', '\'unsafe-inline\'', 'fonts.googleapis.com' ], 'script-src': [ '\'self\'', 'cdn.httparchive.org', 'www.google-analytics.com', 'use.fontawesome.com', 'cdn.speedcurve.com', 'spdcrv.global.ssl.fastly.net' ], 'font-src': [ '\'self\'', 'fonts.gstatic.com' ], 'connect-src': [ '\'self\'', 'cdn.httparchive.org', 'discuss.httparchive.org', 'raw.githubusercontent.com', 'www.webpagetest.org' ], 'img-src': [ '\'self\'', 'discuss.httparchive.org', 'www.google-analytics.com', 's.g.doubleclick.net', 'stats.g.doubleclick.net' ] }
apache-2.0
Python
5000dc1045d2771b85528b60991e9ac2aad7d69d
fix dict merge bug
mfuentesg/SyncSettings,adnedelcu/SyncSettings
sync_settings/libs/exceptions.py
sync_settings/libs/exceptions.py
# -*- coding: utf-8 -*- import json import sys import traceback class GistException(Exception): def to_json(self): json_error = json.loads(json.dumps(self.args[0])) trace = traceback.extract_tb(sys.exc_info()[2])[-1] return dict({ 'filename': str(trace[0]), 'line': str(trace[1]) }, **json_error)
# -*- coding: utf-8 -*- import json import sys import traceback class GistException(Exception): def to_json(self): json_error = json.loads(json.dumps(self.args[0])) trace = traceback.extract_tb(sys.exc_info()[2])[-1] return json_error.update({ 'filename': str(trace[0]), 'line': str(trace[1]) })
mit
Python
94c30a0efe0c3597678c64f46735ca7cd9990ccd
Revert settings.py, keeping only the admin schema addition
miguelcleon/ODM2-Admin,ocefpaf/ODM2-Admin,miguelcleon/ODM2-Admin,ocefpaf/ODM2-Admin,ocefpaf/ODM2-Admin,ocefpaf/ODM2-Admin,miguelcleon/ODM2-Admin,miguelcleon/ODM2-Admin
templatesAndSettings/settings.py
templatesAndSettings/settings.py
""" Keep this file untracked """ # SECURITY WARNING: keep the secret key used in production secret! secret_key = 'random_secret_key_like_so_7472873649836' media_root = 'C:/Users/leonmi/Google Drive/ODM2Djangoadmin/ODM2CZOData/upfiles/' media_url = '/odm2testapp/upfiles/' # Application definition custom_template_path = '/admin/ODM2CZOData/' #admin_shortcuts_path = '/admin/' url_path = 'admin/' static_root = 'C:/Users/leonmi/Google Drive/ODM2Djangoadmin/static' debug = True template_debug = True # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases ODM2_configs = { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'x', 'USER': 'x', 'PASSWORD': 'x', 'HOST': 'x', #micro server '52.20.81.11' 'PORT': 'x', 'OPTIONS': { 'options': '-c search_path=admin,odm2,odm2extra' } } # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ static_url = '/static/' from templatesAndSettings.base import *
""" Keep this file untracked """ # SECURITY WARNING: keep the secret key used in production secret! secret_key = 'random_secret_key_like_so_7472873649836' media_root = '/Users/lsetiawan/Desktop/shared_ubuntu/APL/ODM2/ODM2-Admin/ODM2CZOData/upfiles/' media_url = '/odm2testapp/upfiles/' # Application definition custom_template_path = '/admin/ODM2CZOData/' #admin_shortcuts_path = '/admin/' url_path = 'admin/' static_root = '/Users/lsetiawan/Desktop/shared_ubuntu/APL/ODM2/ODM2-Admin/static'#'C:/Users/leonmi/Google Drive/ODM2Djangoadmin/static' debug = True template_debug = True # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases ODM2_configs = { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'odm2', 'USER': 'lsetiawan', 'PASSWORD': '', 'HOST': 'localhost', #micro server '52.20.81.11' 'PORT': '5432', 'OPTIONS': { 'options': '-c search_path=admin,odm2,odm2extra' } } # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ static_url = '/static/' from templatesAndSettings.base import *
mit
Python
2536526a383d1b2a921277970584ef5d3ba6073d
revert LIF base model
mwalton/artificial-olfaction,mwalton/artificial-olfaction,mwalton/artificial-olfaction
lif.py
lif.py
from numpy import * from pylab import * ## setup parameters and state variables T = 200 # total time to simulate (msec) dt = 0.125 # simulation time step (msec) time = arange(0, T+dt, dt) # time array t_rest = 0 # initial refractory time ## LIF properties Vm = zeros(len(time)) # potential (V) trace over time Rm = 1 # resistance (kOhm) Cm = 10 # capacitance (uF) tau_m = Rm*Cm # time constant (msec) tau_ref = 4 # refractory period (msec) Vth = 1 # spike threshold (V) V_spike = 0.5 # spike delta (V) I = 1.5 ## Input stimulus #I = 1.5 # input current (A) ## iterate over each time step for i, t in enumerate(time): if t > t_rest: Vm[i] = Vm[i-1] + (-Vm[i-1] + I*Rm) / tau_m * dt if Vm[i] >= Vth: Vm[i] += V_spike t_rest = t + tau_ref ## plot membrane potential trace plot(time, Vm) title('Leaky Integrate-and-Fire Example') ylabel('Membrane Potential (V)') xlabel('Time (msec)') ylim([0,2]) show()
from numpy import * from pylab import * ## setup parameters and state variables T = 1000 # total time to simulate (msec) dt = 0.125 # simulation time step (msec) time = arange(0, T+dt, dt) # time array t_rest = 0 # initial refractory time ## LIF properties Vm = zeros(len(time)) # potential (V) trace over time Rm = 1 # resistance (kOhm) Cm = 10 # capacitance (uF) tau_m = Rm*Cm # time constant (msec) tau_ref = 4 # refractory period (msec) Vth = 1 # spike threshold (V) V_spike = 0.5 # spike delta (V) ## Input stimulus #I = 1.5 # input current (A) ## iterate over each time step for i, t in enumerate(time): if t > t_rest: I = (math.sin(t / 50) + 1) Vm[i] = Vm[i-1] + (-Vm[i-1] + I*Rm) / tau_m * dt if Vm[i] >= Vth: Vm[i] += V_spike t_rest = t + tau_ref ## plot membrane potential trace plot(time, Vm) title('Leaky Integrate-and-Fire Example') ylabel('Membrane Potential (V)') xlabel('Time (msec)') ylim([0,2]) show()
mit
Python
3228b640d74dd1b06e9d96fb8265cc8c952074f6
solve Flatten layer issue
AlexandruBurlacu/keras_squeezenet,AlexandruBurlacu/keras_squeezenet
run.py
run.py
from keras.models import Model, Sequential from keras.layers import (Activation, Dropout, AveragePooling2D, Input, Flatten, MaxPooling2D, Convolution2D) from firemodule import FireModule from keras.datasets import cifar10, mnist from keras.optimizers import SGD from keras.utils import np_utils import numpy as np datasets = { "mnist": mnist, "cifar": cifar10 } (x_train, y_train), (x_test, y_test) = datasets["cifar"].load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. y_train = np_utils.to_categorical(y_train) y_test = np_utils.to_categorical(y_test) inputs = Input(x_train.shape[1:]) layer = Convolution2D(96, 7, 7)(inputs) layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(layer) layer = FireModule(16, 64)(layer) layer = FireModule(16, 64)(layer) layer = FireModule(32, 128)(layer) layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(layer) layer = FireModule(32, 128)(layer) layer = FireModule(48, 192)(layer) layer = FireModule(48, 192)(layer) layer = FireModule(64, 256)(layer) layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(layer) layer = FireModule(64, 256)(layer) layer = Dropout(0.5)(layer) layer = Convolution2D(10, 1, 1)(layer) layer = AveragePooling2D((2, 2))(layer) layer = Flatten()(layer) layer = Activation("softmax")(layer) model = Model(input = inputs, output = layer) model.compile(loss = "categorical_crossentropy", optimizer = "rmsprop", metrics = ["accuracy"]) model.fit(x_train, y_train) model.predict(x_test, y_test) model.save("squeezenet.dump")
from keras.models import Model from keras.layers import (Activation, Dropout, AveragePooling2D, Input, Flatten, MaxPooling2D, Convolution2D) from firemodule import FireModule from keras.datasets import cifar10, mnist from keras.optimizers import SGD from keras.utils import np_utils import numpy as np datasets = { "mnist": mnist, "cifar": cifar10 } (x_train, y_train), (x_test, y_test) = datasets["cifar"].load_data() x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. y_train = np_utils.to_categorical(y_train) y_test = np_utils.to_categorical(y_test) inputs = Input(x_train.shape[1:]) layer = Convolution2D(96, 7, 7)(inputs) layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(layer) layer = FireModule(16, 64)(layer) layer = FireModule(16, 64)(layer) layer = FireModule(32, 128)(layer) layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(layer) layer = FireModule(32, 128)(layer) layer = FireModule(48, 192)(layer) layer = FireModule(48, 192)(layer) layer = FireModule(64, 256)(layer) layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(layer) layer = FireModule(64, 256)(layer) layer = Dropout(0.5)(layer) layer = Convolution2D(10, 1, 1)(layer) layer = AveragePooling2D((10, 10))(layer) layer = Flatten()(layer) layer = Activation("softmax")(layer) model = Model(input = inputs, output = layer) model.compile(x_train, y_train, optimizer = SGD(0.01, momentum = 0.85)) model.predict(x_test, y_test) model.save("squeezenet.dump")
mit
Python
454740f2657efa88efa16abdba93dc427bcf4d70
Add try/except to capture all exceptions that might be raised anywhere. TODO: capture exceptions in specific places and raise them so the main catch can log them
manishgs/pdf-processor,anjesh/pdf-processor,manishgs/pdf-processor,anjesh/pdf-processor
run.py
run.py
from PdfProcessor import * import argparse from datetime import datetime import ConfigParser import ProcessLogger import traceback parser = argparse.ArgumentParser(description='Processes the pdf and extracts the text') parser.add_argument('-i','--infile', help='File path of the input pdf file.', required=True) parser.add_argument('-o','--outdir', help='File name of the output csv file.', required=True) results = parser.parse_args() try: logger = ProcessLogger.getLogger('run') logger.info("Processing started at %s ", str(datetime.now())) logger.info("input: %s", results.infile) logger.info("outdir: %s", results.outdir) configParser = ConfigParser.RawConfigParser() configParser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.config')) pdfProcessor = PDFProcessor(results.infile, results.outdir) pdfProcessor.setConfigParser(configParser) if pdfProcessor.isStructured(): pdfProcessor.extractTextFromStructuredDoc() else: pdfProcessor.extractTextFromScannedDoc() pdfProcessor.writeStats() except OSError as e: logger.error("OSError: %s [%s] in %s", e.strerror, e.errno, e.filename); logger.debug(traceback.format_exception(*sys.exc_info())) except Exception as e: logger.error("Exception: %s ", e); logger.debug(traceback.format_exception(*sys.exc_info())) logger.info("Processing ended at %s ", str(datetime.now()));
from PdfProcessor import * import argparse from datetime import datetime import ConfigParser import ProcessLogger parser = argparse.ArgumentParser(description='Processes the pdf and extracts the text') parser.add_argument('-i','--infile', help='File path of the input pdf file.', required=True) parser.add_argument('-o','--outdir', help='File name of the output csv file.', required=True) results = parser.parse_args() logger = ProcessLogger.getLogger('run') logger.info("Processing started at %s ", str(datetime.now())) logger.info("input: %s", results.infile) logger.info("outdir: %s", results.outdir) configParser = ConfigParser.RawConfigParser() configParser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.config')) pdfProcessor = PDFProcessor(results.infile, results.outdir) pdfProcessor.setConfigParser(configParser) if pdfProcessor.isStructured(): pdfProcessor.extractTextFromStructuredDoc() else: pdfProcessor.extractTextFromScannedDoc() pdfProcessor.writeStats() logger.info("Processing ended at %s ", str(datetime.now()));
mit
Python
ec09e3b35d431232feb0df1577b3fe6578b68704
Remove old SSL code from run.py
virtool/virtool,virtool/virtool,igboyes/virtool,igboyes/virtool
run.py
run.py
import logging import os import sys import json import uvloop import asyncio from aiohttp import web from setproctitle import setproctitle from virtool.app import create_app from virtool.app_init import get_args, configure sys.dont_write_bytecode = True logger = logging.getLogger("aiohttp.server") setproctitle("virtool") args = get_args() configure(verbose=args.verbose) asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) loop = asyncio.get_event_loop() settings_path = os.path.join(sys.path[0], "settings.json") skip_setup = os.path.isfile(settings_path) try: with open(settings_path, "r") as handle: settings_temp = json.load(handle) except FileNotFoundError: settings_temp = dict() if __name__ == "__main__": app = create_app( loop, skip_setup=skip_setup, force_version=args.force_version, no_sentry=args.no_sentry ) host = args.host or settings_temp.get("server_host", "localhost") if args.port: port = int(args.port) else: port = settings_temp.get("server_port", 9950) web.run_app(app, host=host, port=port)
import logging import os import sys import ssl import json import uvloop import asyncio from aiohttp import web from setproctitle import setproctitle from virtool.app import create_app from virtool.app_init import get_args, configure sys.dont_write_bytecode = True logger = logging.getLogger("aiohttp.server") setproctitle("virtool") args = get_args() configure(verbose=args.verbose) asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) loop = asyncio.get_event_loop() settings_path = os.path.join(sys.path[0], "settings.json") skip_setup = os.path.isfile(settings_path) try: with open(settings_path, "r") as handle: settings_temp = json.load(handle) except FileNotFoundError: settings_temp = dict() if __name__ == "__main__": ssl_context = None if settings_temp.get("use_ssl", False): cert_path = settings_temp.get("cert_path", None) key_path = settings_temp.get("key_path", None) if cert_path and key_path: ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain(cert_path, key_path) app = create_app( loop, skip_setup=skip_setup, force_version=args.force_version, no_sentry=args.no_sentry ) host = args.host or settings_temp.get("server_host", "localhost") if args.port: port = int(args.port) else: port = settings_temp.get("server_port", 9950) web.run_app(app, host=host, port=port, ssl_context=ssl_context)
mit
Python
c15de13fa8dae840349463f6853f3edd3784ba6d
Update connect_db.py
zelongc/cloud-project,zelongc/cloud-project,zelongc/cloud-project
connect_db.py
connect_db.py
#!/usr/bin/python from couchdb import Server # server = Server() # connects to the local_server # >>> remote_server = Server('http://example.com:5984/') # >>> secure_remote_server = Server('https://username:[email protected]:5984/') class db_server(object): def __init__(self,username,login): self.secure_server=Server('http://%s:%[email protected]:5984' %(username,login)) self.db=self.secure_server["tweet"] def insert(self,data): try: doc_id,doc_rev=self.db.save(data) except Exception as e: with open('log','a') as f: f.write(str(e)+'\n') f.write((data['_id']+'\n'))
from couchdb import Server
# server = Server() # connects to the local_server
# >>> remote_server = Server('http://example.com:5984/')
# >>> secure_remote_server = Server('https://username:[email protected]:5984/')


class db_server(object):
    def __init__(self,username,login):
        self.secure_server=Server('http://%s:%[email protected]:5984' %(username,login))
        self.db=self.secure_server["tweet"]

    def insert(self,data):
        try:
            doc_id,doc_rev=self.db.save(data)
        except Exception as e:
            with open('log','a') as f:
                f.write(str(e)+'\n')
                f.write((data['_id']+'\n'))
mit
Python
76a7b89cd8c935dec87ac89ec36b174c9a0636c4
change lambda with broken typing to def
willmcgugan/rich
rich/pager.py
rich/pager.py
from abc import ABC, abstractmethod
from typing import Any, Callable


class Pager(ABC):
    """Base class for a pager."""

    @abstractmethod
    def show(self, content: str) -> None:
        """Show content in pager.

        Args:
            content (str): Content to be displayed.
        """


class SystemPager(Pager):
    """Uses the pager installed on the system."""

    def _pager(self, content: str) -> Any:
        return __import__("pydoc").pager(content)

    def show(self, content: str) -> None:
        """Use the same pager used by pydoc."""
        self._pager(content)


if __name__ == "__main__":  # pragma: no cover
    from .__main__ import make_test_card
    from .console import Console

    console = Console()
    with console.pager(styles=True):
        console.print(make_test_card())
from abc import ABC, abstractmethod
from typing import Any, Callable


class Pager(ABC):
    """Base class for a pager."""

    @abstractmethod
    def show(self, content: str) -> None:
        """Show content in pager.

        Args:
            content (str): Content to be displayed.
        """


class SystemPager(Pager):
    """Uses the pager installed on the system."""

    _pager: Callable[[Any, str], Any] = lambda self, content: __import__("pydoc").pager(
        content
    )

    def show(self, content: str) -> None:
        """Use the same pager used by pydoc."""
        self._pager(content)


if __name__ == "__main__":  # pragma: no cover
    from .__main__ import make_test_card
    from .console import Console

    console = Console()
    with console.pager(styles=True):
        console.print(make_test_card())
mit
Python
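For context on the change above: assigning a lambda to a class attribute gives static type checkers no clean way to account for the implicit self parameter, so the Callable-annotated attribute was swapped for an ordinary method. A minimal standalone sketch of the difference (the class names and print body here are illustrative stand-ins, not rich's actual code):

from typing import Any, Callable

class Broken:
    # a class-level Callable attribute: the implicit self argument and the
    # annotation disagree, which is what trips the type checker
    _pager: Callable[[Any, str], Any] = lambda self, content: print(content)

class Fixed:
    def _pager(self, content: str) -> Any:
        # a plain def is typed like any other method
        return print(content)

Fixed()._pager("hello")  # prints: hello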
9bd5662194007d995c924d2d57f6af5c75075472
fix dashboard json output
mutantmonkey/ctfengine,mutantmonkey/ctfengine
ctfengine/views.py
ctfengine/views.py
import hashlib from flask import abort, flash, jsonify, render_template, request, redirect, \ url_for from ctfengine import app from ctfengine import database from ctfengine import lib from ctfengine import models @app.route('/') def index(): scores = models.Handle.topscores() total_points = database.conn.query(models.Flag.total_points()).first()[0] if request.wants_json(): return jsonify({ 'scores': [(x.handle, x.score) for x in scores], 'total_points': total_points, }) return render_template('index.html', scores=scores, total_points=total_points) @app.route('/submit', methods=['POST']) def submit_flag(): entered_handle = request.form['handle'].strip() entered_flag = request.form['flag'].strip() if len(entered_handle) <= 0 or len(entered_flag) <= 0: return make_error("Please enter a handle and a flag.") flag = models.Flag.get(entered_flag) if not flag: return make_error(request, "That is not a valid flag.") # search for handle handle = models.Handle.get(entered_handle) if not handle: handle = models.Handle(entered_handle, 0) database.conn.add(handle) database.conn.commit() existing_entry = models.FlagEntry.query.filter( models.FlagEntry.handle == handle.id, models.FlagEntry.flag == flag.id).first() if existing_entry: return make_error(request, "You may not resubmit flags.") # update points for user handle.score += flag.points database.conn.commit() # log flag submission entry = models.FlagEntry(handle.id, flag.id, request.remote_addr, request.user_agent.string) database.conn.add(entry) database.conn.commit() # mark machine as dirty if necessary if flag.machine: machine = database.conn.query(models.Machine).get(flag.machine) machine.dirty = True database.conn.commit() if request.wants_json(): return jsonify(entry.serialize()) flash("Flag scored.") return redirect(url_for('index')) @app.route('/dashboard') def dashboard(): machines = database.conn.query(models.Machine).all() if request.wants_json(): return jsonify({ 'machines': [m.serialize() for m in machines], }) return render_template('dashboard.html', machines=machines) def make_error(request, msg, code=400): if request.wants_json(): response = jsonify({'message': msg}) response.status_code = code return response else: flash(msg) return redirect(url_for('index'))
import hashlib from flask import abort, flash, jsonify, render_template, request, redirect, \ url_for from ctfengine import app from ctfengine import database from ctfengine import lib from ctfengine import models @app.route('/') def index(): scores = models.Handle.topscores() total_points = database.conn.query(models.Flag.total_points()).first()[0] if request.wants_json(): return jsonify({ 'scores': [(x.handle, x.score) for x in scores], 'total_points': total_points, }) return render_template('index.html', scores=scores, total_points=total_points) @app.route('/submit', methods=['POST']) def submit_flag(): entered_handle = request.form['handle'].strip() entered_flag = request.form['flag'].strip() if len(entered_handle) <= 0 or len(entered_flag) <= 0: return make_error("Please enter a handle and a flag.") flag = models.Flag.get(entered_flag) if not flag: return make_error(request, "That is not a valid flag.") # search for handle handle = models.Handle.get(entered_handle) if not handle: handle = models.Handle(entered_handle, 0) database.conn.add(handle) database.conn.commit() existing_entry = models.FlagEntry.query.filter( models.FlagEntry.handle == handle.id, models.FlagEntry.flag == flag.id).first() if existing_entry: return make_error(request, "You may not resubmit flags.") # update points for user handle.score += flag.points database.conn.commit() # log flag submission entry = models.FlagEntry(handle.id, flag.id, request.remote_addr, request.user_agent.string) database.conn.add(entry) database.conn.commit() # mark machine as dirty if necessary if flag.machine: machine = database.conn.query(models.Machine).get(flag.machine) machine.dirty = True database.conn.commit() if request.wants_json(): return jsonify(entry.serialize()) flash("Flag scored.") return redirect(url_for('index')) @app.route('/dashboard') def dashboard(): machines = database.conn.query(models.Machine).all() if request.wants_json(): return jsonify({ 'machines': machines, }) return render_template('dashboard.html', machines=machines) def make_error(request, msg, code=400): if request.wants_json(): response = jsonify({'message': msg}) response.status_code = code return response else: flash(msg) return redirect(url_for('index'))
isc
Python
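The dashboard fix above comes down to one rule: Flask's jsonify can only encode JSON-native types, so ORM objects must be converted to plain dicts before serialization. A self-contained sketch of that pattern (the Machine class here is a stand-in, not the real ctfengine model):

from flask import Flask, jsonify

app = Flask(__name__)

class Machine:
    def __init__(self, name, dirty):
        self.name = name
        self.dirty = dirty

    def serialize(self):
        # plain dicts are JSON-safe; model instances are not
        return {'name': self.name, 'dirty': self.dirty}

@app.route('/dashboard')
def dashboard():
    machines = [Machine('web1', False), Machine('db1', True)]
    return jsonify({'machines': [m.serialize() for m in machines]})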
65e8aba17517247770ba27d796016c49fa41e0ab
correct handling of measure.ref() and aggregation selection in statutils' calculated aggregations
ubreddy/cubes,pombredanne/cubes,zejn/cubes,cesarmarinhorj/cubes,cesarmarinhorj/cubes,she11c0de/cubes,zejn/cubes,jell0720/cubes,zejn/cubes,pombredanne/cubes,noyeitan/cubes,noyeitan/cubes,jell0720/cubes,pombredanne/cubes,jell0720/cubes,ubreddy/cubes,ubreddy/cubes,she11c0de/cubes,cesarmarinhorj/cubes,noyeitan/cubes,she11c0de/cubes
cubes/statutils.py
cubes/statutils.py
from collections import deque from cubes.model import Attribute def _wma(values): n = len(values) denom = n * (n + 1) / 2 total = 0.0 idx = 1 for val in values: total += float(idx) * float(val) idx += 1 return round(total / denom, 4) def _sma(values): # use all the values return round(reduce(lambda i, c: c + i, values, 0.0) / len(values), 2) def weighted_moving_average_factory(measure, drilldown_paths): return _moving_average_factory(measure, drilldown_paths, _wma, 'wma') def simple_moving_average_factory(measure, drilldown_paths): return _moving_average_factory(measure, drilldown_paths, _sma, 'sma') def _moving_average_factory(measure, drilldown_paths, avg_func, aggregation_name): if not drilldown_paths: return lambda item: None # if the level we're drilling to doesn't have aggregation_units configured, # we're not doing any calculations key_drilldown_paths = [] num_units = None for path in drilldown_paths: relevant_level = path[2][-1] these_num_units = None if relevant_level.info: these_num_units = relevant_level.info.get('aggregation_units', None) if these_num_units is None: key_drilldown_paths.append(path) else: num_units = these_num_units if num_units is None or not isinstance(num_units, int) or num_units < 2: return lambda item: None # determine the measure on which to calculate. measure_ref = measure.ref() for agg in measure.aggregations: if agg == aggregation_name: continue if agg != "identity": measure_ref += "_" + agg break field_name = measure_ref + '_' + aggregation_name # if no key_drilldown_paths, the key is always the empty tuple. def key_extractor(item): vals = [] for dim, hier, levels in key_drilldown_paths: for level in levels: vals.append( item.get(level.key.ref()) ) return tuple(vals) by_value_map = {} def f(item): by_value = key_extractor(item) val_list = by_value_map.get(by_value) if val_list is None: val_list = deque() by_value_map[by_value] = val_list val = item.get(measure_ref) if val is not None: val_list.append(val) while len(val_list) > num_units: val_list.popleft() if len(val_list) >= num_units: item[field_name] = avg_func(val_list) return f
from collections import deque from cubes.model import Attribute def _wma(values): n = len(values) denom = n * (n + 1) / 2 total = 0.0 idx = 1 for val in values: total += float(idx) * float(val) idx += 1 return round(total / denom, 4) def _sma(values): # use all the values return round(reduce(lambda i, c: c + i, values, 0.0) / len(values), 2) def weighted_moving_average_factory(measure, drilldown_paths): return _moving_average_factory(measure, drilldown_paths, _wma, '_wma') def simple_moving_average_factory(measure, drilldown_paths): return _moving_average_factory(measure, drilldown_paths, _sma, '_sma') def _moving_average_factory(measure, drilldown_paths, avg_func, field_suffix): if not drilldown_paths: return lambda item: None # if the level we're drilling to doesn't have aggregation_units configured, # we're not doing any calculations relevant_level = drilldown_paths[-1][2][-1] if not relevant_level.info: return lambda item: None num_units = relevant_level.info.get('aggregation_units', None) if num_units is None or not isinstance(num_units, int) or num_units < 2: return lambda item: None def key_extractor(item): vals = [] for dim, hier, levels in drilldown_paths[:-1]: for level in levels: vals.append( item.get(level.key.ref()) ) return tuple(vals) field_name = measure.ref() + field_suffix by_value_map = {} def f(item): by_value = key_extractor(item) val_list = by_value_map.get(by_value) if val_list is None: val_list = deque() by_value_map[by_value] = val_list val = item.get(measure.ref()) if val is not None: val_list.append(val) while len(val_list) > num_units: val_list.popleft() if len(val_list) >= num_units: item[field_name] = avg_func(val_list) return f
mit
Python
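The core mechanism in both versions above is a sliding window kept in a deque: values accumulate until num_units are present, then the oldest is dropped on each step. Stripped of the cubes plumbing, the trick looks like this (a simplified sketch, not the library's API):

from collections import deque

def moving_averages(values, num_units):
    window = deque()
    averages = []
    for value in values:
        window.append(value)
        while len(window) > num_units:
            window.popleft()          # drop the oldest reading
        if len(window) >= num_units:  # emit only once the window is full
            averages.append(sum(window) / float(num_units))
    return averages

print(moving_averages([1, 2, 3, 4, 5], 3))  # [2.0, 3.0, 4.0]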
582cacac1411312ad5e5dc132562883693f3877a
bump version
brentp/cyvcf2,brentp/cyvcf2,brentp/cyvcf2
cyvcf2/__init__.py
cyvcf2/__init__.py
from .cyvcf2 import (VCF, Variant, Writer, r_ as r_unphased,
                     par_relatedness, par_het)
Reader = VCFReader = VCF
__version__ = "0.8.7"
from .cyvcf2 import (VCF, Variant, Writer, r_ as r_unphased,
                     par_relatedness, par_het)
Reader = VCFReader = VCF
__version__ = "0.8.6"
mit
Python
e53ae572ac6c232a6afc01ae9ad2988ea1ef456a
Bump version.
Crandy/robobrowser,palaniyappanBala/robobrowser,jmcarp/robobrowser,emijrp/robobrowser,rcutmore/robobrowser
robobrowser/__init__.py
robobrowser/__init__.py
__version__ = '0.4.1'

from .browser import RoboBrowser
__version__ = '0.4.0'

from .browser import RoboBrowser
bsd-3-clause
Python
70b4be757d671bc86876b4568632bb6fe6064001
Fix a Django deprecation warning
fabiocaccamo/django-admin-interface,fabiocaccamo/django-admin-interface,fabiocaccamo/django-admin-interface
admin_interface/templatetags/admin_interface_tags.py
admin_interface/templatetags/admin_interface_tags.py
# -*- coding: utf-8 -*-
from django import template

from admin_interface.models import Theme

register = template.Library()


@register.simple_tag(takes_context = True)
def get_admin_interface_theme(context):
    theme = None
    request = context.get('request', None)
    if request:
        theme = getattr(request, 'admin_interface_theme', None)
    if not theme:
        theme = Theme.get_active_theme()
        if request:
            request.admin_interface_theme = theme
    return theme
# -*- coding: utf-8 -*-
from django import template

from admin_interface.models import Theme

register = template.Library()


@register.assignment_tag(takes_context = True)
def get_admin_interface_theme(context):
    theme = None
    request = context.get('request', None)
    if request:
        theme = getattr(request, 'admin_interface_theme', None)
    if not theme:
        theme = Theme.get_active_theme()
        if request:
            request.admin_interface_theme = theme
    return theme
mit
Python
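The deprecation being fixed above: as of Django 1.9, simple_tag supports the {% ... as variable %} form directly, assignment_tag was deprecated, and Django 2.0 removed it. A minimal sketch of the surviving API (the tag name and context key are illustrative, not from this package):

from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def get_current_theme(context):
    # usable in templates as {% get_current_theme as theme %}
    return context.get('theme', 'default')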
2f3b5a6e0600f92ae0803ad3df44948dd5408444
comment out stdout log handler
bweck/cssbot
cssbot/log.py
cssbot/log.py
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import logging
from datetime import date
import utils

def __configure_logging():
    # configure the base logger for the pkg
    l = logging.getLogger("cssbot")
    l.setLevel(logging.DEBUG)

    # format.
    formatter = logging.Formatter("%(asctime)s : [%(levelname)s] %(name)s : %(message)s")

    # stdout handler.
    # ch = logging.StreamHandler()
    # ch.setLevel(logging.WARN)
    # ch.setFormatter(formatter)
    # l.addHandler(ch)

    # file handler
    today = date.today()
    log_date = "%d%02d" % (today.year, today.month) #"%d%02d%02d" % (today.year, today.month, today.day)
    fh = logging.FileHandler("log/cssbot-%s.log" % log_date)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    l.addHandler(fh)

def getLogger(name=None):
    if not name:
        name = "cssbot"
    return logging.getLogger(name)

# utils.dirs.switch_cwd_to_script_loc()
__configure_logging()
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import logging
from datetime import date
import utils

def __configure_logging():
    # configure the base logger for the pkg
    l = logging.getLogger("cssbot")
    l.setLevel(logging.DEBUG)

    # format.
    formatter = logging.Formatter("%(asctime)s : [%(levelname)s] %(name)s : %(message)s")

    # stdout handler.
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    ch.setFormatter(formatter)
    l.addHandler(ch)

    # file handler
    today = date.today()
    log_date = "%d%02d" % (today.year, today.month) #"%d%02d%02d" % (today.year, today.month, today.day)
    fh = logging.FileHandler("log/cssbot-%s.log" % log_date)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    l.addHandler(fh)

def getLogger(name=None):
    if not name:
        name = "cssbot"
    return logging.getLogger(name)

# utils.dirs.switch_cwd_to_script_loc()
__configure_logging()
mit
Python
057110e3aa4007ad7221873029bed383ee1e0e3b
Remove platform check
Mortal/aiotkinter
aiotkinter.py
aiotkinter.py
import asyncio import tkinter class _TkinterSelector(asyncio.selectors._BaseSelectorImpl): def __init__(self): super().__init__() self._tk = tkinter.Tk(useTk=0) self._ready = [] def register(self, fileobj, events, data=None): key = super().register(fileobj, events, data) mask = 0 if events & asyncio.selectors.EVENT_READ: mask |= tkinter.READABLE if events & asyncio.selectors.EVENT_WRITE: mask |= tkinter.WRITABLE def ready(fd, mask): assert key.fd == fd events = 0 if mask & tkinter.READABLE: events |= asyncio.selectors.EVENT_READ if mask & tkinter.WRITABLE: events |= asyncio.selectors.EVENT_WRITE self._ready.append((key, events)) self._tk.createfilehandler(key.fd, mask, ready) return key def unregister(self, fileobj): key = super().unregister(fileobj) self._tk.deletefilehandler(key.fd) return key def select(self, timeout=None): self._ready = [] if timeout is not None: timeout = int(timeout*1000) token = self._tk.createtimerhandler(timeout, lambda: True) self._tk.dooneevent() if timeout is not None: token.deletetimerhandler() return self._ready class TkinterEventLoopPolicy(asyncio.DefaultEventLoopPolicy): def new_event_loop(self): try: return self._loop_factory(selector=_TkinterSelector()) except TypeError: raise Exception('The default event loop is not a selector event loop')
import asyncio import tkinter import sys if sys.platform == 'win32': raise ImportError('%s is not available on your platform'.format(__name__)) class _TkinterSelector(asyncio.selectors._BaseSelectorImpl): def __init__(self): super().__init__() self._tk = tkinter.Tk(useTk=0) self._ready = [] def register(self, fileobj, events, data=None): key = super().register(fileobj, events, data) mask = 0 if events & asyncio.selectors.EVENT_READ: mask |= tkinter.READABLE if events & asyncio.selectors.EVENT_WRITE: mask |= tkinter.WRITABLE def ready(fd, mask): assert key.fd == fd events = 0 if mask & tkinter.READABLE: events |= asyncio.selectors.EVENT_READ if mask & tkinter.WRITABLE: events |= asyncio.selectors.EVENT_WRITE self._ready.append((key, events)) self._tk.createfilehandler(key.fd, mask, ready) return key def unregister(self, fileobj): key = super().unregister(fileobj) self._tk.deletefilehandler(key.fd) return key def select(self, timeout=None): self._ready = [] if timeout is not None: timeout = int(timeout*1000) token = self._tk.createtimerhandler(timeout, lambda: True) self._tk.dooneevent() if timeout is not None: token.deletetimerhandler() return self._ready class TkinterEventLoopPolicy(asyncio.DefaultEventLoopPolicy): def new_event_loop(self): try: return self._loop_factory(selector=_TkinterSelector()) except TypeError: raise Exception('The default event loop is not a selector event loop')
mit
Python
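Worth noting about the guard that was deleted above: besides being unnecessary, it mixed %-style and str.format placeholders, so '%s is not available on your platform'.format(__name__) never substituted the module name into the message. The two working spellings, for reference:

name = "aiotkinter"
# %-interpolation uses the % operator with a %s placeholder ...
print('%s is not available on your platform' % name)
# ... while str.format needs {} placeholders
print('{} is not available on your platform'.format(name))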
7ab744fe8464ce85a27431adf94039c45551010f
Remove Google analytics code.
gustavofoa/dicasdejava.com.br,gustavofoa/dicasdejava.com.br,gustavofoa/dicasdejava.com.br,gustavofoa/dicasdejava.com.br,gustavofoa/dicasdejava.com.br
publishconf.py
publishconf.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals # This file is only used if you use `make publish` or # explicitly specify it as your config file. import os import sys sys.path.append(os.curdir) from pelicanconf import * SITEURL = 'https://dicasdejava.com.br' RELATIVE_URLS = False FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml' DELETE_OUTPUT_DIRECTORY = True # Plugins PLUGIN_PATHS = ['./pelican-plugins'] PLUGINS = ['sitemap', 'minify', 'tag_cloud'] SITEMAP = { 'format': 'xml', 'exclude': ['autor/', 'tag/', 'categoria/', 'arquivo/'], 'priorities': { 'articles': 0.5, 'indexes': 0.5, 'pages': 0.5 }, 'changefreqs': { 'articles': 'monthly', 'indexes': 'daily', 'pages': 'monthly' } } # Following items are often useful when publishing DISQUS_SITENAME = "dicas-de-java" MINIFY = { 'remove_comments': True, 'remove_all_empty_space': True, 'remove_optional_attribute_quotes': False }
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals # This file is only used if you use `make publish` or # explicitly specify it as your config file. import os import sys sys.path.append(os.curdir) from pelicanconf import * SITEURL = 'https://dicasdejava.com.br' RELATIVE_URLS = False FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml' DELETE_OUTPUT_DIRECTORY = True # Plugins PLUGIN_PATHS = ['./pelican-plugins'] PLUGINS = ['sitemap', 'minify', 'tag_cloud'] SITEMAP = { 'format': 'xml', 'exclude': ['autor/', 'tag/', 'categoria/', 'arquivo/'], 'priorities': { 'articles': 0.5, 'indexes': 0.5, 'pages': 0.5 }, 'changefreqs': { 'articles': 'monthly', 'indexes': 'daily', 'pages': 'monthly' } } # Following items are often useful when publishing DISQUS_SITENAME = "dicas-de-java" GOOGLE_ANALYTICS = "UA-39997045-4" MINIFY = { 'remove_comments': True, 'remove_all_empty_space': True, 'remove_optional_attribute_quotes': False }
mit
Python
07a4cb667e702a1cbb758a3761ec41b89fa98313
Add options to python script
colloquium/rhevm-api,markmc/rhevm-api,markmc/rhevm-api,colloquium/rhevm-api,markmc/rhevm-api
python/test.py
python/test.py
#!/usr/bin/env python # Copyright (C) 2010 Red Hat, Inc. # # This is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of # the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this software; if not, write to the Free # Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA, or see the FSF site: http://www.fsf.org. import http import xmlfmt import yamlfmt import jsonfmt import sys import getopt opts = { 'host' : 'localhost', 'port' : 8080, 'impl' : "dummy", } if len(sys.argv) > 1: options, oargs = getopt.getopt(sys.argv[1:], "h:p:i:", ["host=", "port=", "impl="]) for opt, a in options: if opt in ("-h", "--host"): opts['host'] = a if opt in ("-p", "--port"): opts['port'] = a if opt in ("-i", "--impl"): opts['impl'] = a opts['uri'] = 'http://%(host)s:%(port)s/rhevm-api-%(impl)s/' % opts links = http.HEAD_for_links(opts) for fmt in [xmlfmt, yamlfmt, jsonfmt]: print "=== ", fmt.MEDIA_TYPE, " ===" for host in fmt.parseHostCollection(http.GET(opts, links['hosts'], fmt.MEDIA_TYPE)): print fmt.parseHost(http.GET(opts, host.link.href, fmt.MEDIA_TYPE)) for vm in fmt.parseVmCollection(http.GET(opts, links['vms'], fmt.MEDIA_TYPE)): print fmt.parseVM(http.GET(opts, vm.link.href, fmt.MEDIA_TYPE)) foo_vm = fmt.VM() foo_vm.name = 'foo' foo_vm = fmt.parseVM(http.POST(opts, links['vms'], foo_vm.dump(), fmt.MEDIA_TYPE)) bar_host = fmt.Host() bar_host.name = 'bar' bar_host = fmt.parseHost(http.POST(opts, links['hosts'], bar_host.dump(), fmt.MEDIA_TYPE)) print http.POST(opts, foo_vm.link.href + "/start", type = fmt.MEDIA_TYPE) print http.GET(opts, foo_vm.link.href, type = fmt.MEDIA_TYPE) foo_vm.name = 'bar' print http.PUT(opts, foo_vm.link.href, foo_vm.dump(), fmt.MEDIA_TYPE) bar_host.name = 'foo' print http.PUT(opts, bar_host.link.href, bar_host.dump(), fmt.MEDIA_TYPE) print http.DELETE(opts, foo_vm.link.href) print http.DELETE(opts, bar_host.link.href)
#!/usr/bin/env python # Copyright (C) 2010 Red Hat, Inc. # # This is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of # the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this software; if not, write to the Free # Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA, or see the FSF site: http://www.fsf.org. opts = { 'host' : 'localhost', 'port' : 8080, 'impl' : "dummy", } opts['uri'] = 'http://%(host)s:%(port)s/rhevm-api-%(impl)s-war/' % opts import http import xmlfmt import yamlfmt import jsonfmt links = http.HEAD_for_links(opts) for fmt in [xmlfmt, yamlfmt, jsonfmt]: print "=== ", fmt.MEDIA_TYPE, " ===" for host in fmt.parseHostCollection(http.GET(opts, links['hosts'], fmt.MEDIA_TYPE)): print fmt.parseHost(http.GET(opts, host.link.href, fmt.MEDIA_TYPE)) for vm in fmt.parseVmCollection(http.GET(opts, links['vms'], fmt.MEDIA_TYPE)): print fmt.parseVM(http.GET(opts, vm.link.href, fmt.MEDIA_TYPE)) foo_vm = fmt.VM() foo_vm.name = 'foo' foo_vm = fmt.parseVM(http.POST(opts, links['vms'], foo_vm.dump(), fmt.MEDIA_TYPE)) bar_host = fmt.Host() bar_host.name = 'bar' bar_host = fmt.parseHost(http.POST(opts, links['hosts'], bar_host.dump(), fmt.MEDIA_TYPE)) print http.POST(opts, foo_vm.link.href + "/start", type = fmt.MEDIA_TYPE) print http.GET(opts, foo_vm.link.href, type = fmt.MEDIA_TYPE) foo_vm.name = 'bar' print http.PUT(opts, foo_vm.link.href, foo_vm.dump(), fmt.MEDIA_TYPE) bar_host.name = 'foo' print http.PUT(opts, bar_host.link.href, bar_host.dump(), fmt.MEDIA_TYPE) print http.DELETE(opts, foo_vm.link.href) print http.DELETE(opts, bar_host.link.href)
lgpl-2.1
Python
4656f7834f2c56f9dffcb775a5c9833304a3a55f
Fix doctests with Python 2
jtauber/pyuca
pyuca/utils.py
pyuca/utils.py
""" utilities for formatting the datastructures used in pyuca. Useful mostly for debugging output. """ from __future__ import unicode_literals def hexstrings2int(hexstrings): """ list of hex strings to list of integers >>> hexstrings2int(["0000", "0001", "FFFF"]) [0, 1, 65535] """ return [int(hexstring, 16) for hexstring in hexstrings] def int2hexstrings(number_list): """ list of integers to list of 4-digit hex strings >>> int2hexstrings([0, 1, 65535]) ['0000', '0001', 'FFFF'] """ return [str("{:04X}".format(n)) for n in number_list] def format_collation_elements(collation_elements): """ format collation element array (list of list of integer weights) >>> str(format_collation_elements([[1, 2, 3], [4, 5]])) '[0001.0002.0003], [0004.0005]' >>> format_collation_elements(None) """ if collation_elements is None: return None else: return ", ".join( "[" + ".".join( int2hexstrings(collation_element) ) + "]" for collation_element in collation_elements ) def format_sort_key(sort_key): """ format sort key (list of integers) with | level boundaries >>> str(format_sort_key([1, 0, 65535])) '0001 | FFFF' """ return " ".join( ("{:04X}".format(x) if x else "|") for x in sort_key )
""" utilities for formatting the datastructures used in pyuca. Useful mostly for debugging output. """ from __future__ import unicode_literals def hexstrings2int(hexstrings): """ list of hex strings to list of integers >>> hexstrings2int(["0000", "0001", "FFFF"]) [0, 1, 65535] """ return [int(hexstring, 16) for hexstring in hexstrings] def int2hexstrings(number_list): """ list of integers to list of 4-digit hex strings >>> int2hexstrings([0, 1, 65535]) ['0000', '0001', 'FFFF'] """ return ["{:04X}".format(n) for n in number_list] def format_collation_elements(collation_elements): """ format collation element array (list of list of integer weights) >>> format_collation_elements([[1, 2, 3], [4, 5]]) '[0001.0002.0003], [0004.0005]' >>> format_collation_elements(None) """ if collation_elements is None: return None else: return ", ".join( "[" + ".".join( int2hexstrings(collation_element) ) + "]" for collation_element in collation_elements ) def format_sort_key(sort_key): """ format sort key (list of integers) with | level boundaries >>> format_sort_key([1, 0, 65535]) '0001 | FFFF' """ return " ".join( ("{:04X}".format(x) if x else "|") for x in sort_key )
mit
Python
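The str() wrappers added to the doctests above exist because the module imports unicode_literals: under Python 2 these functions return unicode objects, whose repr carries a u prefix, so a doctest expecting '0001 | FFFF' sees u'0001 | FFFF' and fails. A quick illustration of the mismatch:

value = u'0001 | FFFF'
# Python 2: repr(value) == "u'0001 | FFFF'"  -> doctest mismatch
# Python 3: repr(value) == "'0001 | FFFF'"
# Wrapping in str() yields the native string type on both versions,
# so the doctest output is identical:
print(str(value))  # 0001 | FFFF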
a23374939583b3954baa1418f12ce309442d31ff
Mark certain resources as uncompressible
heiths/allura,apache/incubator-allura,apache/allura,Bitergia/allura,lym/allura-git,apache/allura,apache/allura,leotrubach/sourceforge-allura,apache/incubator-allura,heiths/allura,heiths/allura,apache/incubator-allura,apache/incubator-allura,heiths/allura,lym/allura-git,leotrubach/sourceforge-allura,Bitergia/allura,Bitergia/allura,heiths/allura,Bitergia/allura,lym/allura-git,apache/allura,leotrubach/sourceforge-allura,lym/allura-git,lym/allura-git,apache/allura,leotrubach/sourceforge-allura,Bitergia/allura
pyforge/pyforge/lib/widgets/form_fields.py
pyforge/pyforge/lib/widgets/form_fields.py
from pylons import c from pyforge.model import User from formencode import validators as fev import ew class MarkdownEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.markdown_edit' validator = fev.UnicodeString() params=['name','value','show_label'] show_label=True name=None value=None def resources(self): yield ew.resource.JSLink('js/jquery.markitup.pack.js', compress=False) yield ew.resource.JSLink('js/jquery.markitup.markdown.js') yield ew.resource.JSLink('js/sf_markitup.js') yield ew.resource.CSSLink('css/markitup.css', compress=False) yield ew.resource.CSSLink('css/markitup_markdown.css', compress=False) yield ew.resource.CSSLink('css/markitup_sf.css') class UserTagEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.user_tag_edit' validator = fev.UnicodeString() params=['name','user_tags', 'className', 'show_label'] show_label=True name=None user_tags=None className='' def resources(self): yield ew.resource.JSLink('js/jquery.tag.editor.js') class LabelEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.label_edit' validator = fev.UnicodeString() params=['name', 'className', 'show_label', 'value'] show_label=True name=None value=None className='' def resources(self): yield ew.resource.JSLink('js/jquery.tag.editor.js') class ProjectUserSelect(ew.InputField): template='genshi:pyforge.lib.widgets.templates.project_user_select' params=['name', 'value', 'size', 'all', 'users', 'show_label'] show_label=True name=None value=None size=None all=False def __init__(self, **kw): self.users = User.query.find({'_id':{'$in':[role.user_id for role in c.project.roles]}}).all() if not isinstance(self.value, list): self.value=[self.value] super(ProjectUserSelect, self).__init__(**kw) class AttachmentList(ew.Widget): template='genshi:pyforge.lib.widgets.templates.attachment_list' params=['attachments','edit_mode'] attachments=None edit_mode=None class SubmitButton(ew.SubmitButton): attrs={'class':'ui-state-default ui-button ui-button-text'}
from pylons import c from pyforge.model import User from formencode import validators as fev import ew class MarkdownEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.markdown_edit' validator = fev.UnicodeString() params=['name','value','show_label'] show_label=True name=None value=None def resources(self): yield ew.resource.JSLink('js/jquery.markitup.pack.js') yield ew.resource.JSLink('js/jquery.markitup.markdown.js') yield ew.resource.JSLink('js/sf_markitup.js') yield ew.resource.CSSLink('css/markitup.css') yield ew.resource.CSSLink('css/markitup_markdown.css') yield ew.resource.CSSLink('css/markitup_sf.css') class UserTagEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.user_tag_edit' validator = fev.UnicodeString() params=['name','user_tags', 'className', 'show_label'] show_label=True name=None user_tags=None className='' def resources(self): yield ew.resource.JSLink('js/jquery.tag.editor.js') class LabelEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.label_edit' validator = fev.UnicodeString() params=['name', 'className', 'show_label', 'value'] show_label=True name=None value=None className='' def resources(self): yield ew.resource.JSLink('js/jquery.tag.editor.js') class ProjectUserSelect(ew.InputField): template='genshi:pyforge.lib.widgets.templates.project_user_select' params=['name', 'value', 'size', 'all', 'users', 'show_label'] show_label=True name=None value=None size=None all=False def __init__(self, **kw): self.users = User.query.find({'_id':{'$in':[role.user_id for role in c.project.roles]}}).all() if not isinstance(self.value, list): self.value=[self.value] super(ProjectUserSelect, self).__init__(**kw) class AttachmentList(ew.Widget): template='genshi:pyforge.lib.widgets.templates.attachment_list' params=['attachments','edit_mode'] attachments=None edit_mode=None class SubmitButton(ew.SubmitButton): attrs={'class':'ui-state-default ui-button ui-button-text'}
apache-2.0
Python
2b5e94f6c301932eb9387bba9a80414a714e2b38
Tidy up the references
studiawan/pygraphc
pygraphc/abstraction/ClusterAbstraction.py
pygraphc/abstraction/ClusterAbstraction.py
class ClusterAbstraction(object): """Get cluster abstraction based on longest common substring [jtjacques2010]_. References ---------- .. [jtjacques2010] jtjacques, Longest common substring from more than two strings - Python. http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python. """ @staticmethod def dp_lcs(graph, clusters): """The processed string are preprocessed message from raw event log messages. Parameters ---------- graph : graph A graph to be processed. clusters : dict[list] Dictionary containing a list of node identifier per cluster. Returns ------- abstraction : dict[str] Dictionary of abstraction string per cluster. """ abstraction = {} for cluster_id, nodes in clusters.iteritems(): data = [] for node_id in nodes: data.append(graph.node[node_id]['preprocessed_event']) abstraction[cluster_id] = ClusterAbstraction.lcs(data) return abstraction @staticmethod def lcs(data): """Get longest common substring from multiple string. Parameters ---------- data : list[str] List of string to be processed. Returns ------- substr : str A single string as longest common substring. """ substr = '' if len(data) > 1 and len(data[0]) > 0: for i in range(len(data[0])): for j in range(len(data[0]) - i + 1): if j > len(substr) and all(data[0][i:i + j] in x for x in data): substr = data[0][i:i + j] return substr
class ClusterAbstraction(object): """Get cluster abstraction based on longest common substring [jtjacques2010]_. References ---------- .. [jtjacques2010] jtjacques, Longest common substring from more than two strings - Python. http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python. """ @staticmethod def dp_lcs(graph, clusters): """The processed string are preprocessed message from raw event log messages. Parameters ---------- graph : graph A graph to be processed. clusters : dict[list] Dictionary containing a list of node identifier per cluster. Returns ------- abstraction : dict[str] Dictionary of abstraction string per cluster. """ abstraction = {} for cluster_id, nodes in clusters.iteritems(): data = [] for node_id in nodes: data.append(graph.node[node_id]['preprocessed_event']) abstraction[cluster_id] = ClusterAbstraction.lcs(data) return abstraction @staticmethod def lcs(data): """Get longest common substring from multiple string. Parameters ---------- data : list[str] List of string to be processed. Returns ------- substr : str A single string as longest common substring. """ substr = '' if len(data) > 1 and len(data[0]) > 0: for i in range(len(data[0])): for j in range(len(data[0]) - i + 1): if j > len(substr) and all(data[0][i:i + j] in x for x in data): substr = data[0][i:i + j] return substr
mit
Python
ac61b2f99f91a274572e96be8f0136871288f1bb
update timer so it can measure elapsed time repeatedly
adaptive-learning/proso-apps,adaptive-learning/proso-apps,adaptive-learning/proso-apps
proso/util.py
proso/util.py
import re
import importlib
import time


_timers = {}


def timer(name):
    now = time.time()
    diff = None
    if name in _timers:
        diff = now - _timers[name]
    _timers[name] = now
    return diff


def instantiate(classname, *args, **kwargs):
    matched = re.match('(.*)\.(\w+)', classname)
    if matched is None:
        raise Exception('can instantiate only class with packages: %s' % classname)
    module = importlib.import_module(matched.groups()[0])
    return getattr(module, matched.groups()[1])(*args, **kwargs)
import re
import importlib
import time


_timers = {}


def timer(name):
    now = time.clock()
    if name in _timers:
        diff = now - _timers[name]
        return diff
    _timers[name] = now


def instantiate(classname, *args, **kwargs):
    matched = re.match('(.*)\.(\w+)', classname)
    if matched is None:
        raise Exception('can instantiate only class with packages: %s' % classname)
    module = importlib.import_module(matched.groups()[0])
    return getattr(module, matched.groups()[1])(*args, **kwargs)
mit
Python
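Usage sketch for the reworked helper above: because the timestamp is now stored on every call, the same name can be measured again and again; the first call primes the timer and returns None, each later call returns the seconds since the previous one.

import time

_timers = {}

def timer(name):
    now = time.time()
    diff = None
    if name in _timers:
        diff = now - _timers[name]
    _timers[name] = now
    return diff

timer('step')            # primes the timer, returns None
time.sleep(0.1)
print(timer('step'))     # ~0.1
time.sleep(0.2)
print(timer('step'))     # ~0.2 -- the old version could return this only once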
ce1350bb42028ad29356af275ab5b90257ccf0cb
fix import
Abbe98/yr-py
yr/__init__.py
yr/__init__.py
from .yr import YR
from yr import YR
mit
Python
1cfc885597f14282245c68179922e27e3974a26f
use environment var for file location
SouthAfricaDigitalScience/gmp-deploy,SouthAfricaDigitalScience/gmp-deploy
publish-ci.py
publish-ci.py
import requests import json import os # import tarfile # def make_tarfile(output_filename, source_dir): # with tarfile.open(output_filename, "w:gz") as tar: # tar.add(source_dir, arcname=os.path.basename(source_dir)) uri = 'https://zenodo.org/api/deposit/depositions' access_token = os.environ['ZENODO_API_KEY'] headers = {"Content-Type": "application/json"} # login response = requests.get(uri, params={'access_token': access_token }) # get env # data will be sent as a parameter to the request data = { 'filename': os.environ['TARBALL'] } # TODO - load from file metadata = { 'metadata': { 'upload_type': 'software', 'publication_type': 'softwaredocumentation', 'title': 'GMP build for CODE-RADE CI phase', 'creators': [ { 'name': 'Bruce Becker', 'affiliation': 'EGI Foundation', 'orcid': '0000-0002-6607-7145' } ], 'description': 'See the README', 'access_right': 'open', 'license': 'Apache-2.0', 'prereserve_doi': 'true', 'communities': 'code-rade' } } # check if json is present if os.path.isfile('zenodo.json'): print("file is there") # Check that DOI has been registered and that url is valid with open('zenodo.json') as deposition: zenodo = json.load(deposition) id = zenodo['id'] print 'id is ',id # Check that this is the right ID else: # deposit the file print("no deposition yet") # create deposition create = requests.post(uri, params={'access_token': access_token}, json={}, headers=headers) create.json() with open('zenodo.json', 'w') as deposition: json.dump(create.json(), deposition) id = create.json['id'] # files is an array of files to be sent as parameters to the request files = {'file': open(os.environ['TARBALL'], 'rb')} deposit = requests.post(uri + '/%s/files' % id, params={'access_token': access_token}, data=data, files=files) print(deposit.json()) # update with metadata meta = requests.put(uri + '/%s' % id, params={'access_token': access_token}, data=json.dumps(metadata), headers=headers) print(meta.json())
import requests import json import os # import tarfile # def make_tarfile(output_filename, source_dir): # with tarfile.open(output_filename, "w:gz") as tar: # tar.add(source_dir, arcname=os.path.basename(source_dir)) uri = 'https://zenodo.org/api/deposit/depositions' access_token = os.environ['ZENODO_API_KEY'] headers = {"Content-Type": "application/json"} # login response = requests.get(uri, params={'access_token': access_token }) # get env # data will be sent as a parameter to the request data = { 'filename': '/data/artefacts/gmp-6.1.0-generic-x86_64-centos6.tar.gz' } # TODO - load from file metadata = { 'metadata': { 'upload_type': 'software', 'publication_type': 'softwaredocumentation', 'title': 'GMP build for CODE-RADE CI phase', 'creators': [ { 'name': 'Bruce Becker', 'affiliation': 'EGI Foundation', 'orcid': '0000-0002-6607-7145' } ], 'description': 'See the README', 'access_right': 'open', 'license': 'Apache-2.0', 'prereserve_doi': 'true', 'communities': 'code-rade' } } # check if json is present if os.path.isfile('zenodo.json'): print("file is there") # Check that DOI has been registered and that url is valid with open('zenodo.json') as deposition: zenodo = json.load(deposition) id = zenodo['id'] print 'id is ',id # Check that this is the right ID else: # deposit the file print("no deposition yet") # create deposition create = requests.post(uri, params={'access_token': access_token}, json={}, headers=headers) create.json() with open('zenodo.json', 'w') as deposition: json.dump(create.json(), deposition) id = create.json['id'] # files is an array of files to be sent as parameters to the request files = {'file': open(os.environ['TARBALL'], 'rb')} deposit = requests.post(uri + '/%s/files' % id, params={'access_token': access_token}, data=data, files=files) print(deposit.json()) # update with metadata meta = requests.put(uri + '/%s' % id, params={'access_token': access_token}, data=json.dumps(metadata), headers=headers) print(meta.json())
apache-2.0
Python
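The upload call in the script above follows the standard requests multipart pattern: the files mapping becomes the multipart body and data rides along as ordinary form fields. A bare-bones sketch with placeholder URL, token, and file name:

import requests

url = 'https://zenodo.org/api/deposit/depositions/123/files'  # 123 is a placeholder id
with open('artefact.tar.gz', 'rb') as fh:
    r = requests.post(url,
                      params={'access_token': 'YOUR-TOKEN'},
                      data={'filename': 'artefact.tar.gz'},
                      files={'file': fh})
print(r.status_code, r.json())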
99b65f7308a4b5719f5cf2e15200767af6780775
deploy keynote images
pytexas/PyTexasBackend,pytexas/PyTexasBackend,pytexas/PyTexasBackend,pytexas/PyTexasBackend
pytx/files.py
pytx/files.py
import os from django.conf import settings JS_HEAD = [] JS = [ # 'raven.min.js', # 'plugins/vue.min.js', # 'showdown.min.js', 'pytexas.js', ] CSS = [ 'vuetify.min.css', 'global.css', 'pytexas.css', ] IMAGES = [ 'img/atx.svg', 'img/banner80.png', 'img/icon.svg', 'img/icons/about.svg', 'img/icons/blog.svg', 'img/icons/chat.svg', 'img/icons/talks.svg', 'img/icons/community.svg', 'img/icons/sponsors.svg', 'img/icons/venue.svg', 'img/icons/external.svg', 'img/icons/external-white.svg', 'img/icons/background.png', 'img/social/about.me.png', 'img/social/facebook.png', 'img/social/github.png', 'img/social/google.png', 'img/social/linkedin.png', 'img/social/twitter.png', 'img/social/website.png', ] FONTS = [ 'Roboto-Regular.woff2', 'Roboto-Bold.woff2', 'Roboto-Slab-Regular.woff2', 'Roboto-Slab-Bold.woff2', 'MaterialIcons-Regular.woff2', ] MD = [] MD_PATH = settings.FRONTEND_MD for root, dirs, files in os.walk(MD_PATH): for f in files: path = os.path.join(root, f) path = path.replace(MD_PATH, '') path = path[1:] MD.append(path) def tpl_files(): tpls = [] base_dir = settings.FRONTEND_TEMPLATES for root, dirs, files in os.walk(base_dir): for file in files: if file.endswith('.html'): fullpath = os.path.join(root, file) relpath = fullpath.replace(base_dir + '/', '') relpath = relpath.replace('/', '-') relpath = relpath[:-5] with open(fullpath, 'r') as fh: tpls.append({'path': relpath, 'content': fh.read()}) return tpls if settings.DEBUG: for i, f in enumerate(JS): if '.min.' in f: JS[i] = f.replace('.min.', '.')
import os from django.conf import settings JS_HEAD = [] JS = [ # 'raven.min.js', # 'plugins/vue.min.js', # 'showdown.min.js', 'pytexas.js', ] CSS = [ 'vuetify.min.css', 'global.css', 'pytexas.css', ] IMAGES = [ 'img/atx.svg', 'img/banner80.png', 'img/icon.svg', 'img/icons/about.svg', 'img/icons/blog.svg', 'img/icons/chat.svg', 'img/icons/talks.svg', 'img/icons/community.svg', 'img/icons/sponsors.svg', 'img/icons/venue.svg', 'img/icons/external.svg', 'img/icons/external-white.svg', 'img/icons/background.png', 'img/social/about.me.png', 'img/social/facebook.png', 'img/social/github.png', 'img/social/google.png', 'img/social/linkedin.png', 'img/social/twitter.png', 'img/social/website.png', 'img/apl/library1.png', 'img/apl/library2.png', 'img/keynote/emily.jpg', 'img/keynote/adrienne.jpg', ] FONTS = [ 'Roboto-Regular.woff2', 'Roboto-Bold.woff2', 'Roboto-Slab-Regular.woff2', 'Roboto-Slab-Bold.woff2', 'MaterialIcons-Regular.woff2', ] MD = [] MD_PATH = settings.FRONTEND_MD for root, dirs, files in os.walk(MD_PATH): for f in files: path = os.path.join(root, f) path = path.replace(MD_PATH, '') path = path[1:] MD.append(path) def tpl_files(): tpls = [] base_dir = settings.FRONTEND_TEMPLATES for root, dirs, files in os.walk(base_dir): for file in files: if file.endswith('.html'): fullpath = os.path.join(root, file) relpath = fullpath.replace(base_dir + '/', '') relpath = relpath.replace('/', '-') relpath = relpath[:-5] with open(fullpath, 'r') as fh: tpls.append({'path': relpath, 'content': fh.read()}) return tpls if settings.DEBUG: for i, f in enumerate(JS): if '.min.' in f: JS[i] = f.replace('.min.', '.')
mit
Python
e9314b02c482314efeb7e36ecf3f6613f9a99adb
fix fetch loop blocking the server
hainesc/daochain,DaoCloud/daochain,hainesc/daochain,DaoCloud/dao-chain,DaoCloud/dao-chain,Revolution1/daochain,Revolution1/daochain,DaoCloud/daochain,DaoCloud/daochain,DaoCloud/daochain,hainesc/daochain,DaoCloud/dao-chain,Revolution1/daochain,DaoCloud/dao-chain,hainesc/daochain,Revolution1/daochain
app/server.py
app/server.py
import logging import os import sys import requests from flask import Flask from flask import send_file, send_from_directory from api import load_api from blockchain import web3_client from settings import SOURCE_ROOT from storage import Cache log = logging.getLogger(__name__) console_handler = logging.StreamHandler(sys.stderr) def setup_logging(): root_logger = logging.getLogger() root_logger.addHandler(console_handler) root_logger.setLevel(logging.DEBUG) # Disable requests logging logging.getLogger("requests").propagate = False def setup_routes(app): @app.route('/') def index(): return send_file(os.path.join(SOURCE_ROOT, 'static', 'index.html')) @app.route('/<path:path>') def static_files(path): return send_from_directory(os.path.join(SOURCE_ROOT, 'static'), path) @app.after_request def after_request(response): response.headers.add('Access-Control-Allow-Origin', '*') response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization') response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE') return response def fetch_nodes(): from time import sleep from threading import Thread def fetch_loop(): while True: try: nodes = requests.get('http://blockchain.daocloud.io/nodes.json').json() w3 = web3_client() peers = w3.admin.peers for n in nodes: w3.admin.addPeer(n) if not len(peers) == len(nodes): log.info('fetched nodes: %s' % ', '.join(nodes)) sleep(60) except Exception: log.error('Fail to fetch nodes.json') sleep(5) t = Thread(target=fetch_loop) t.setDaemon(True) t.start() def create_app(name=None): setup_logging() app = Flask(name or 'app') app.config.root_path = os.path.dirname(os.path.abspath(__file__)) app.config.from_pyfile('settings.py') Cache.init() load_api(app) setup_routes(app) fetch_nodes() return app if __name__ == '__main__': app = create_app() app.run('0.0.0.0', 8000, True, use_reloader=True)
import logging import os import sys import requests from flask import Flask from flask import send_file, send_from_directory from gevent import sleep, spawn from api import load_api from blockchain import web3_client from settings import SOURCE_ROOT from storage import Cache log = logging.getLogger(__name__) console_handler = logging.StreamHandler(sys.stderr) def setup_logging(): root_logger = logging.getLogger() root_logger.addHandler(console_handler) root_logger.setLevel(logging.DEBUG) # Disable requests logging logging.getLogger("requests").propagate = False def setup_routes(app): @app.route('/') def index(): return send_file(os.path.join(SOURCE_ROOT, 'static', 'index.html')) @app.route('/<path:path>') def static_files(path): return send_from_directory(os.path.join(SOURCE_ROOT, 'static'), path) @app.after_request def after_request(response): response.headers.add('Access-Control-Allow-Origin', '*') response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization') response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE') return response def fetch_nodes(): while True: try: nodes = requests.get('http://blockchain.daocloud.io/nodes.json').json() w3 = web3_client() for n in nodes: w3.admin.addPeer(n) log.info('fetched nodes: %s' % ', '.join(nodes)) sleep(30) except Exception: log.error('Fail to fetch nodes.json') sleep(10) def create_app(name=None): setup_logging() app = Flask(name or 'app') app.config.root_path = os.path.dirname(os.path.abspath(__file__)) app.config.from_pyfile('settings.py') Cache.init() load_api(app) setup_routes(app) spawn(fetch_nodes()) return app if __name__ == '__main__': app = create_app() app.run('0.0.0.0', 8000, True, use_reloader=True)
apache-2.0
Python
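Two things changed in the record above: the old gevent version accidentally invoked the function (spawn(fetch_nodes()) passes fetch_nodes' return value, so the infinite loop ran inside create_app and blocked startup), and the replacement moves the loop into a daemon thread. The generic shape of that pattern, with a placeholder fetch():

import time
from threading import Thread

def fetch():
    print("polling ...")  # stand-in for the real work

def fetch_loop(interval=1):
    while True:
        try:
            fetch()
            time.sleep(interval)
        except Exception:
            time.sleep(5)  # brief back-off on failure

t = Thread(target=fetch_loop)   # note: the function object, not fetch_loop()
t.setDaemon(True)               # lets the process exit without joining
t.start()
time.sleep(2.5)                 # main thread continues; the loop polls in the background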
69f24cd7a1936fb7dc4cfb03e3e97997332f633e
add portforward_get method
dreamhost/akanda-horizon,dreamhost/akanda-horizon
akanda/horizon/client.py
akanda/horizon/client.py
import requests


def portforward_get(request):
    headers = {
        "User-Agent" : "python-quantumclient",
        "Content-Type" : "application/json",
        "Accept" : "application/json",
        "X-Auth-Token" : request.user.token.id
    }
    r = requests.get('http://0.0.0.0/v2.0/dhportforward.json',
                     headers=headers)
    r.json
apache-2.0
Python
b37814280dc06dbf8aefec4490f6b73a47f05c1a
Simplify python3 unicode fixer and make it replace all occurrences of __unicode__ with __str__.
live-clones/pybtex
custom_fixers/fix_alt_unicode.py
custom_fixers/fix_alt_unicode.py
# Taken from jinja2. Thanks, Armin Ronacher.
# See also http://lucumr.pocoo.org/2010/2/11/porting-to-python-3-a-guide

from lib2to3 import fixer_base


class FixAltUnicode(fixer_base.BaseFix):
    PATTERN = "'__unicode__'"

    def transform(self, node, results):
        new = node.clone()
        new.value = '__str__'
        return new
# Taken from jinja2. Thanks, Armin Ronacher.
# See also http://lucumr.pocoo.org/2010/2/11/porting-to-python-3-a-guide

from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, BlankLine


class FixAltUnicode(fixer_base.BaseFix):
    PATTERN = """
    func=funcdef< 'def' name='__unicode__'
                  parameters< '(' NAME ')' > any+ >
    """

    def transform(self, node, results):
        name = results['name']
        name.replace(Name('__str__', prefix=name.prefix))
mit
Python
8f4918a63e312309e835c3a9fc0513ddd6b4bbc1
test restore resnet
MohammadChavosh/VQA
restore_resnet.py
restore_resnet.py
__author__ = 'Mohammad'

import tensorflow as tf

sess = tf.Session()

#First let's load meta graph and restore weights
saver = tf.train.import_meta_graph('data/tensorflow-resnet-pretrained-20160509/ResNet-L152.meta')
saver.restore(sess, 'data/tensorflow-resnet-pretrained-20160509/ResNet-L152.ckpt')

for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale5'):
    print i.name  # i.name if you want just a name

# # Access saved Variables directly
# print(sess.run('bias:0'))
# # This will print 2, which is the value of bias that we saved
#
# # Now, let's access and create placeholders variables and
# # create feed-dict to feed new data
#
graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name("scale5/x")
# w2 = graph.get_tensor_by_name("w2:0")
# feed_dict ={w1:13.0,w2:17.0}
#
# #Now, access the op that you want to run.
# op_to_restore = graph.get_tensor_by_name("op_to_restore:0")
#
# print sess.run(op_to_restore,feed_dict)
# #This will print 60 which is calculated
__author__ = 'Mohammad'

import tensorflow as tf

sess = tf.Session()

#First let's load meta graph and restore weights
saver = tf.train.import_meta_graph('data/tensorflow-resnet-pretrained-20160509/ResNet-L152.meta')
saver.restore(sess, 'data/tensorflow-resnet-pretrained-20160509/ResNet-L152.ckpt')

for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale5'):
    print i.name  # i.name if you want just a name

# # Access saved Variables directly
# print(sess.run('bias:0'))
# # This will print 2, which is the value of bias that we saved
#
# # Now, let's access and create placeholders variables and
# # create feed-dict to feed new data
#
# graph = tf.get_default_graph()
# w1 = graph.get_tensor_by_name("w1:0")
# w2 = graph.get_tensor_by_name("w2:0")
# feed_dict ={w1:13.0,w2:17.0}
#
# #Now, access the op that you want to run.
# op_to_restore = graph.get_tensor_by_name("op_to_restore:0")
#
# print sess.run(op_to_restore,feed_dict)
# #This will print 60 which is calculated
apache-2.0
Python
5632447a202ef3a83e5b96d11cbbc653fafac99b
Use os.getlogin to get login user name.
j717273419/ibus,Keruspe/ibus,ibus/ibus,luoxsbupt/ibus,ueno/ibus,luoxsbupt/ibus,fujiwarat/ibus,j717273419/ibus,ueno/ibus,ibus/ibus-cros,ibus/ibus,fujiwarat/ibus,phuang/ibus,j717273419/ibus,ueno/ibus,Keruspe/ibus,ueno/ibus,j717273419/ibus,ueno/ibus,luoxsbupt/ibus,ibus/ibus-cros,ibus/ibus,luoxsbupt/ibus,phuang/ibus,luoxsbupt/ibus,Keruspe/ibus,ibus/ibus,phuang/ibus,ibus/ibus-cros,ibus/ibus-cros,phuang/ibus,Keruspe/ibus,fujiwarat/ibus,fujiwarat/ibus
ibus/common.py
ibus/common.py
# vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2008 Huang Peng <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA  02111-1307  USA

__all__ = (
    "IBUS_ADDR",
    "IBUS_IFACE",
    "IBUS_NAME",
    "IBUS_PATH",
    "IBUS_CONFIG_IFACE",
    "IBUS_ENGINE_FACTORY_IFACE",
    "IBUS_ENGINE_IFACE",
    "IBUS_PANEL_IFACE",
    "default_reply_handler",
    "default_error_handler",
    "DEFAULT_ASYNC_HANDLERS"
)

import os
import sys

display = os.environ["DISPLAY"]
if "." not in display:
    display += ".0"

IBUS_ADDR = "unix:path=/tmp/ibus-%s/ibus-%s" % (os.getlogin(), display.replace(":", "-"))
# IBUS_ADDR = "tcp:host=localhost,port=7799"

IBUS_IFACE = "org.freedesktop.IBus"
IBUS_PATH = "/org/freedesktop/IBus"
IBUS_NAME = "org.freedesktop.IBus"

IBUS_CONFIG_IFACE = "org.freedesktop.IBus.Config"
IBUS_ENGINE_FACTORY_IFACE = "org.freedesktop.IBus.EngineFactory"
IBUS_ENGINE_IFACE = "org.freedesktop.IBus.Engine"
IBUS_PANEL_IFACE = "org.freedesktop.IBus.Panel"

def default_reply_handler( *args):
    pass

def default_error_handler(e):
    print >> sys.stderr, e

DEFAULT_ASYNC_HANDLERS = {
    "reply_handler" : default_reply_handler,
    "error_handler" : default_error_handler
}
# vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2008 Huang Peng <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA  02111-1307  USA

__all__ = (
    "IBUS_ADDR",
    "IBUS_IFACE",
    "IBUS_NAME",
    "IBUS_PATH",
    "IBUS_CONFIG_IFACE",
    "IBUS_ENGINE_FACTORY_IFACE",
    "IBUS_ENGINE_IFACE",
    "IBUS_PANEL_IFACE",
    "default_reply_handler",
    "default_error_handler",
    "DEFAULT_ASYNC_HANDLERS"
)

import os
import sys
import getpass

display = os.environ["DISPLAY"]
if "." not in display:
    display += ".0"

IBUS_ADDR = "unix:path=/tmp/ibus-%s/ibus-%s" % (getpass.getuser(), display.replace(":", "-"))
# IBUS_ADDR = "tcp:host=localhost,port=7799"

IBUS_IFACE = "org.freedesktop.IBus"
IBUS_PATH = "/org/freedesktop/IBus"
IBUS_NAME = "org.freedesktop.IBus"

IBUS_CONFIG_IFACE = "org.freedesktop.IBus.Config"
IBUS_ENGINE_FACTORY_IFACE = "org.freedesktop.IBus.EngineFactory"
IBUS_ENGINE_IFACE = "org.freedesktop.IBus.Engine"
IBUS_PANEL_IFACE = "org.freedesktop.IBus.Panel"

def default_reply_handler( *args):
    pass

def default_error_handler(e):
    print >> sys.stderr, e

DEFAULT_ASYNC_HANDLERS = {
    "reply_handler" : default_reply_handler,
    "error_handler" : default_error_handler
}
lgpl-2.1
Python
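For reference on the swap above, the two lookups answer slightly different questions (output depends on the session): os.getlogin reads the name of the user logged in on the process's controlling terminal and raises OSError when there is none, while getpass.getuser checks the LOGNAME, USER, LNAME and USERNAME environment variables before falling back to the password database.

import os
import getpass

print(getpass.getuser())  # environment-based, works without a terminal
print(os.getlogin())      # terminal-based, may raise OSError under daemons/cron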
eb4cda636a0b0ceb5312b161e97ae5f8376c9f8e
Change biolookup test to work around service bug
johnbachman/indra,bgyori/indra,johnbachman/indra,bgyori/indra,bgyori/indra,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/indra
indra/tests/test_biolookup_client.py
indra/tests/test_biolookup_client.py
from indra.databases import biolookup_client


def test_lookup_curie():
    curie = 'pubchem.compound:40976'
    res = biolookup_client.lookup_curie(curie)
    assert res['name'] == '(17R)-13-ethyl-17-ethynyl-17-hydroxy-11-' \
        'methylidene-2,6,7,8,9,10,12,14,15,16-decahydro-1H-' \
        'cyclopenta[a]phenanthren-3-one', res


def test_lookup():
    res = biolookup_client.lookup('HGNC', '1097')
    assert res['name'] == 'BRAF', res


def test_get_name():
    res = biolookup_client.get_name('CHEBI', 'CHEBI:408174')
    assert res == 'arformoterol', res
from indra.databases import biolookup_client


def test_lookup_curie():
    curie = 'pubchem.compound:40976'
    res = biolookup_client.lookup_curie(curie)
    assert res['name'] == '(17R)-13-ethyl-17-ethynyl-17-hydroxy-11-' \
        'methylidene-2,6,7,8,9,10,12,14,15,16-decahydro-1H-' \
        'cyclopenta[a]phenanthren-3-one', res


def test_lookup():
    res = biolookup_client.lookup('FPLX', 'ERK')
    assert res['name'] == 'ERK', res


def test_get_name():
    res = biolookup_client.get_name('CHEBI', 'CHEBI:408174')
    assert res == 'arformoterol', res
bsd-2-clause
Python
7fd2060f2241bcff6849d570406dc057b9c7f8d1
Fix the message string
taedori81/satchless,fusionbox/satchless,fusionbox/satchless,fusionbox/satchless
satchless/cart/views.py
satchless/cart/views.py
# -*- coding: utf-8 -*-
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST

from . import models
from . import forms


def cart(request, typ, form_class=forms.EditCartItemForm):
    cart = models.Cart.objects.get_or_create_from_request(request, typ)
    cart_item_forms = []
    for item in cart.items.all():
        form = form_class(data=request.POST or None, instance=item,
                          prefix='%s-%i'%(typ, item.id))
        if request.method == 'POST' and form.is_valid():
            messages.success(request, _("Cart contents were updated successfully."))
            form.save()
            return redirect(request.get_full_path())
        cart_item_forms.append(form)
    templates = [
        'satchless/cart/%s/view.html' % typ,
        'satchless/cart/view.html'
    ]
    return TemplateResponse(request, templates, {
        'cart': cart,
        'cart_item_forms': cart_item_forms,
    })


@require_POST
def remove_item(request, typ, item_pk):
    cart = models.Cart.objects.get_or_create_from_request(request, typ)
    item = get_object_or_404(cart.items, pk=item_pk)
    cart.set_quantity(item.variant, 0)
    return redirect('satchless-cart-view', typ=typ)
# -*- coding: utf-8 -*- from django.contrib import messages from django.shortcuts import get_object_or_404, redirect from django.template.response import TemplateResponse from django.utils.translation import ugettext from django.views.decorators.http import require_POST from . import models from . import forms def cart(request, typ, form_class=forms.EditCartItemForm): cart = models.Cart.objects.get_or_create_from_request(request, typ) cart_item_forms = [] for item in cart.items.all(): form = form_class(data=request.POST or None, instance=item, prefix='%s-%i'%(typ, item.id)) if request.method == 'POST' and form.is_valid(): messages.success(request, ugettext("Cart's content updated successfully.")) form.save() return redirect(request.get_full_path()) cart_item_forms.append(form) templates = [ 'satchless/cart/%s/view.html' % typ, 'satchless/cart/view.html' ] return TemplateResponse(request, templates, { 'cart': cart, 'cart_item_forms': cart_item_forms, }) @require_POST def remove_item(request, typ, item_pk): cart = models.Cart.objects.get_or_create_from_request(request, typ) item = get_object_or_404(cart.items, pk=item_pk) cart.set_quantity(item.variant, 0) return redirect('satchless-cart-view', typ=typ)
bsd-3-clause
Python
80691fa6d517b39a6656a2afc0635f485fd49974
add dependencies (#18406)
LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack
var/spack/repos/builtin/packages/gconf/package.py
var/spack/repos/builtin/packages/gconf/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Gconf(AutotoolsPackage): """GConf is a system for storing application preferences.""" homepage = "https://projects.gnome.org/gconf/" url = "http://ftp.gnome.org/pub/gnome/sources/GConf/3.2/GConf-3.2.6.tar.xz" version('3.2.6', sha256='1912b91803ab09a5eed34d364bf09fe3a2a9c96751fde03a4e0cfa51a04d784c') depends_on('pkgconfig', type='build') depends_on('[email protected]:') depends_on('libxml2') depends_on('dbus') depends_on('dbus-glib') depends_on('orbit2') depends_on('perl-xml-parser', type=('build', 'run'))
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Gconf(AutotoolsPackage): """GConf is a system for storing application preferences.""" homepage = "https://projects.gnome.org/gconf/" url = "http://ftp.gnome.org/pub/gnome/sources/GConf/3.2/GConf-3.2.6.tar.xz" version('3.2.6', sha256='1912b91803ab09a5eed34d364bf09fe3a2a9c96751fde03a4e0cfa51a04d784c') depends_on('[email protected]:') depends_on('libxml2') # TODO: add missing dependencies # gio-2.0 >= 2.31.0 # gthread-2.0 # gmodule-2.0 >= 2.7.0 # gobject-2.0 >= 2.7.0 # dbus-1 >= 1.0.0 # dbus-glib-1 >= 0.74
lgpl-2.1
Python
bd0960cda8a66b843035935c7caa9f20b38b4d0d
Add 0.16.0 and address test suite issues (#27604)
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
var/spack/repos/builtin/packages/gpgme/package.py
var/spack/repos/builtin/packages/gpgme/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Gpgme(AutotoolsPackage): """GPGME is the standard library to access GnuPG functions from programming languages.""" homepage = "https://www.gnupg.org/software/gpgme/index.html" url = "https://www.gnupg.org/ftp/gcrypt/gpgme/gpgme-1.16.0.tar.bz2" executables = ['^gpgme-config$'] version('1.16.0', sha256='6c8cc4aedb10d5d4c905894ba1d850544619ee765606ac43df7405865de29ed0') version('1.12.0', sha256='b4dc951c3743a60e2e120a77892e9e864fb936b2e58e7c77e8581f4d050e8cd8') # https://dev.gnupg.org/T5509 - New test t-edit-sign test crashes with GCC 11.1.0 patch( 'https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gpgme.git;a=commitdiff_plain;h=81a33ea5e1b86d586b956e893a5b25c4cd41c969;hp=e8e055e682f8994d62012574e1c8d862ca72a35d', sha256='b934e3cb0b3408ad27990d97b594c89801a4748294e2eb5804a455a312821411', when='@1.16.0', ) depends_on('gnupg', type='build') depends_on('libgpg-error', type='build') depends_on('libassuan', type='build') @classmethod def determine_version(cls, exe): return Executable(exe)('--version', output=str, error=str).rstrip() def configure_args(self): """Fix the build when incompatible Qt libraries are installed on the host""" return ['--enable-languages=cpp'] def setup_build_environment(self, env): """Build tests create a public keyring in ~/.gnupg if $HOME is not redirected""" if self.run_tests: env.set('HOME', self.build_directory) env.prepend_path('LD_LIBRARY_PATH', self.spec['libgpg-error'].prefix.lib) @property def make_tests(self): """Use the Makefile's tests variable to control if the build tests shall run""" return 'tests=tests' if self.run_tests else 'tests=' def build(self, spec, prefix): make(self.make_tests) def install(self, spec, prefix): make(self.make_tests, 'install')
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Gpgme(AutotoolsPackage): """GPGME is the standard library to access GnuPG functions from programming languages.""" homepage = "https://www.gnupg.org/software/gpgme/index.html" url = "https://www.gnupg.org/ftp/gcrypt/gpgme/gpgme-1.12.0.tar.bz2" executables = ['^gpgme-config$'] version('1.12.0', sha256='b4dc951c3743a60e2e120a77892e9e864fb936b2e58e7c77e8581f4d050e8cd8') depends_on('gnupg', type='build') depends_on('libgpg-error', type='build') depends_on('libassuan', type='build') @classmethod def determine_version(cls, exe): return Executable(exe)('--version', output=str, error=str).rstrip()
lgpl-2.1
Python