Dataset columns (in row order): commit, subject, repos, old_file, new_file, new_contents, old_contents, license, lang
a2a4a8e4636051fa84a5cfbaf7f4ff796c59171a
Add build.parent to api response
wfxiang08/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes,bowlofstew/changes,dropbox/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes
changes/api/serializer/models/build.py
changes/api/serializer/models/build.py
new_contents:

from changes.api.serializer import Serializer, register
from changes.constants import Result, Status
from changes.models.build import Build


@register(Build)
class BuildSerializer(Serializer):
    def serialize(self, instance):
        # TODO(dcramer): this shouldnt be calculated at runtime
        last_5_builds = list(Build.query.filter_by(
            result=Result.passed,
            status=Status.finished,
            project=instance.project,
        ).order_by(Build.date_finished.desc())[:3])
        if last_5_builds:
            avg_build_time = sum(
                b.duration for b in last_5_builds
                if b.duration
            ) / len(last_5_builds)
        else:
            avg_build_time = None

        data = instance.data or {}
        backend_details = data.get('backend')
        if backend_details:
            external = {
                'link': backend_details['uri'],
                'label': backend_details['label'],
            }
        else:
            external = None

        if instance.parent_id:
            parent = {
                'id': instance.parent_id.hex,
                'link': '/builds/%s/' % (instance.parent_id.hex,),
            }
        else:
            parent = None

        return {
            'id': instance.id.hex,
            'name': instance.label,
            'result': instance.result,
            'status': instance.status,
            'project': instance.project,
            'cause': instance.cause,
            'author': instance.author,
            'parent_revision': {
                'sha': instance.parent_revision_sha,
            },
            'parent': parent,
            'message': instance.message,
            'duration': instance.duration,
            'estimatedDuration': avg_build_time,
            'link': '/builds/%s/' % (instance.id.hex,),
            'external': external,
            'dateCreated': instance.date_created.isoformat(),
            'dateModified': instance.date_modified.isoformat() if instance.date_modified else None,
            'dateStarted': instance.date_started.isoformat() if instance.date_started else None,
            'dateFinished': instance.date_finished.isoformat() if instance.date_finished else None,
        }

old_contents:

from changes.api.serializer import Serializer, register
from changes.constants import Result, Status
from changes.models.build import Build


@register(Build)
class BuildSerializer(Serializer):
    def serialize(self, instance):
        # TODO(dcramer): this shouldnt be calculated at runtime
        last_5_builds = list(Build.query.filter_by(
            result=Result.passed,
            status=Status.finished,
            project=instance.project,
        ).order_by(Build.date_finished.desc())[:3])
        if last_5_builds:
            avg_build_time = sum(
                b.duration for b in last_5_builds
                if b.duration
            ) / len(last_5_builds)
        else:
            avg_build_time = None

        data = instance.data or {}
        backend_details = data.get('backend')
        if backend_details:
            external = {
                'link': backend_details['uri'],
                'label': backend_details['label'],
            }
        else:
            external = None

        return {
            'id': instance.id.hex,
            'name': instance.label,
            'result': instance.result,
            'status': instance.status,
            'project': instance.project,
            'cause': instance.cause,
            'author': instance.author,
            'parent_revision': {
                'sha': instance.parent_revision_sha,
            },
            'message': instance.message,
            'duration': instance.duration,
            'estimatedDuration': avg_build_time,
            'link': '/builds/%s/' % (instance.id.hex,),
            'external': external,
            'dateCreated': instance.date_created.isoformat(),
            'dateModified': instance.date_modified.isoformat() if instance.date_modified else None,
            'dateStarted': instance.date_started.isoformat() if instance.date_started else None,
            'dateFinished': instance.date_finished.isoformat() if instance.date_finished else None,
        }
apache-2.0
Python
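Note on this change: the new 'parent' key mirrors the build's own 'link' formatting. A minimal sketch of the added shape, assuming parent_id is a standard uuid.UUID (values hypothetical):

import uuid

parent_id = uuid.uuid4()  # stand-in for instance.parent_id
parent = {
    'id': parent_id.hex,
    'link': '/builds/%s/' % (parent_id.hex,),
}
# e.g. {'id': '9f1c24...', 'link': '/builds/9f1c24.../'}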
2413a2042745a00b5a220a753aa46177065f3793
bump version to 0.0.3
Carreau/nose_warnings_filters
nose_warnings_filters/__init__.py
nose_warnings_filters/__init__.py
""" Nose plugin to add warnings filters (turn them into error) using nose.cfg file. """ __version__ = '0.0.3' from nose.plugins import Plugin import warnings import sys if sys.version_info < (3,): import builtins else: builtins = __builtins__ class WarningFilter(Plugin): def options(self, parser, env): """ Add options to command line. """ super(WarningFilter, self).options(parser, env) parser.add_option("--warningfilters", default=None, help="Treat warnings that occur WITHIN tests as errors.") def configure(self, options, conf): """ Configure plugin. """ for opt in options.warningfilters.split( '\n'): vs = [s.strip() for s in opt.split('|')] vs[2] = getattr(builtins, vs[2]) warnings.filterwarnings(*vs) super(WarningFilter, self).configure(options, conf) def prepareTestRunner(self, runner): """ Treat warnings as errors. """ return WarningFilterRunner(runner) class WarningFilterRunner(object): def __init__(self, runner): self.runner=runner def run(self, test): return self.runner.run(test)
""" Nose plugin to add warnings filters (turn them into error) using nose.cfg file. """ __version__ = '0.0.2' from nose.plugins import Plugin import warnings import sys if sys.version_info < (3,): import builtins else: builtins = __builtins__ class WarningFilter(Plugin): def options(self, parser, env): """ Add options to command line. """ super(WarningFilter, self).options(parser, env) parser.add_option("--warningfilters", default=None, help="Treat warnings that occur WITHIN tests as errors.") def configure(self, options, conf): """ Configure plugin. """ for opt in options.warningfilters.split( '\n'): vs = [s.strip() for s in opt.split('|')] vs[2] = getattr(builtins, vs[2]) warnings.filterwarnings(*vs) super(WarningFilter, self).configure(options, conf) def prepareTestRunner(self, runner): """ Treat warnings as errors. """ return WarningFilterRunner(runner) class WarningFilterRunner(object): def __init__(self, runner): self.runner=runner def run(self, test): return self.runner.run(test)
mit
Python
f8685d8ca3d4d18ca5895d765185993ed2d5bcd7
Fix citizen subscription to report: DatabaseError: current transaction is aborted, commands ignored until end of transaction block
IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet
django_fixmystreet/fixmystreet/views/reports/subscribers.py
django_fixmystreet/fixmystreet/views/reports/subscribers.py
new_contents:

from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.db import IntegrityError

from django_fixmystreet.fixmystreet.models import FMSUser
from django_fixmystreet.fixmystreet.models import Report, ReportSubscription


def create(request, report_id):
    report = get_object_or_404(Report, id=report_id)

    # CREATE USER CITIZEN IF NECESSARY
    try:
        user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
    except FMSUser.DoesNotExist:
        # Add information about the citizen connected if it does not exist
        user = FMSUser.objects.create(
            username=request.REQUEST.get('citizen_email'),
            email=request.REQUEST.get('citizen_email'),
            first_name='ANONYMOUS',
            last_name='ANONYMOUS',
            agent=False,
            contractor=False,
            manager=False,
            leader=False)

    # VERIFY THAT A SUBSCRIPTION DOES NOT ALREADY EXIST
    if not ReportSubscription.objects.filter(subscriber=user, report=report).exists():
        subscriber = ReportSubscription(subscriber=user, report=report)
        subscriber.save()
        messages.add_message(request, messages.SUCCESS, _("You have subscribed from updates successfully"))

    return HttpResponseRedirect(report.get_absolute_url())


def remove(request, report_id):
    report = get_object_or_404(Report, id=report_id)
    try:
        user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
    except FMSUser.DoesNotExist:
        HttpResponseRedirect(report.get_absolute_url())

    # VERIFY THAT A SUBSCRIPTION DOES NOT ALREADY EXIST
    try:
        subscription = ReportSubscription.objects.get(subscriber=user, report=report)
        subscription.delete()
        messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))
    except ReportSubscription.DoesNotExist:
        # Do nothing. A subscription for this user already exists...
        messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))

    return HttpResponseRedirect(report.get_absolute_url())

old_contents:

from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.db import IntegrityError

from django_fixmystreet.fixmystreet.models import FMSUser
from django_fixmystreet.fixmystreet.models import Report, ReportSubscription


def create(request, report_id):
    report = get_object_or_404(Report, id=report_id)

    # CREATE USER CITIZEN IF NECESSARY
    try:
        user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
    except FMSUser.DoesNotExist:
        # Add information about the citizen connected if it does not exist
        user = FMSUser.objects.create(
            username=request.REQUEST.get('citizen_email'),
            email=request.REQUEST.get('citizen_email'),
            first_name='ANONYMOUS',
            last_name='ANONYMOUS',
            agent=False,
            contractor=False,
            manager=False,
            leader=False)

    # VERIFY THAT A SUBSCRIPTION DOES NOT ALREADY EXIST
    try:
        subscriber = ReportSubscription(subscriber=user, report=report)
        subscriber.save()
        messages.add_message(request, messages.SUCCESS, _("You have subscribed from updates successfully"))
    except IntegrityError:
        # Do nothing. A subscription for this user already exists...
        messages.add_message(request, messages.SUCCESS, _("You have subscribed from updates successfully"))

    return HttpResponseRedirect(report.get_absolute_url())


def remove(request, report_id):
    report = get_object_or_404(Report, id=report_id)
    try:
        user = FMSUser.objects.get(email=request.REQUEST.get('citizen_email'))
    except FMSUser.DoesNotExist:
        HttpResponseRedirect(report.get_absolute_url())

    # VERIFY THAT A SUBSCRIPTION DOES NOT ALREADY EXIST
    try:
        subscription = ReportSubscription.objects.get(subscriber=user, report=report)
        subscription.delete()
        messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))
    except ReportSubscription.DoesNotExist:
        # Do nothing. A subscription for this user already exists...
        messages.add_message(request, messages.SUCCESS, _("You have unsubscribed from updates successfully"))

    return HttpResponseRedirect(report.get_absolute_url())
agpl-3.0
Python
9a87f83c7060b66f7f95f2823db11b5e86a4fd67
fix #210
cnbeining/you-get,shanyimin/you-get,candlewill/you-get,dream1986/you-get,chares-zhang/you-get,qzane/you-get,FelixYin66/you-get,xyuanmu/you-get,lilydjwg/you-get,pastebt/you-get,smart-techs/you-get,pitatensai/you-get,XiWenRen/you-get,forin-xyz/you-get,Red54/you-get,rain1988/you-get,flwh/you-get,jindaxia/you-get,cnbeining/you-get,kzganesan/you-get,xyuanmu/you-get,runningwolf666/you-get,specter4mjy/you-get,j4s0nh4ck/you-get,zmwangx/you-get,power12317/you-get,zmwangx/you-get,linhua55/you-get,qzane/you-get,smart-techs/you-get,lilydjwg/you-get,linhua55/you-get,tigerface/you-get,CzBiX/you-get,fffonion/you-get
src/you_get/downloader/dailymotion.py
src/you_get/downloader/dailymotion.py
new_contents:

#!/usr/bin/env python

__all__ = ['dailymotion_download']

from ..common import *

def dailymotion_download(url, output_dir = '.', merge = True, info_only = False):
    """Downloads Dailymotion videos by URL.
    """

    id = match1(url, r'/video/([^\?]+)')
    embed_url = 'http://www.dailymotion.com/embed/video/%s' % id
    html = get_content(embed_url)

    info = json.loads(match1(html, r'var\s*info\s*=\s*({.+}),\n'))

    title = info['title']

    for quality in ['stream_h264_hd1080_url', 'stream_h264_hd_url', 'stream_h264_hq_url', 'stream_h264_url', 'stream_h264_ld_url']:
        real_url = info[quality]
        if real_url:
            break

    type, ext, size = url_info(real_url)

    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge = merge)

site_info = "Dailymotion.com"
download = dailymotion_download
download_playlist = playlist_not_supported('dailymotion')

old_contents:

#!/usr/bin/env python

__all__ = ['dailymotion_download']

from ..common import *

def dailymotion_download(url, output_dir = '.', merge = True, info_only = False):
    html = get_html(url)
    html = parse.unquote(html).replace('\/', '/')

    title = r1(r'meta property="og:title" content="([^"]+)"', html)
    title = escape_file_path(title)

    for quality in ['hd720URL', 'hqURL', 'sdURL']:
        real_url = r1(r',\"' + quality + '\"\:\"([^\"]+?)\",', html)
        if real_url:
            break

    type, ext, size = url_info(real_url)

    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge = merge)

site_info = "Dailymotion.com"
download = dailymotion_download
download_playlist = playlist_not_supported('dailymotion')
mit
Python
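Note on this change: the fix stops regex-scraping the watch page and instead reads the 'var info = {...}' JSON blob from the embed page, then walks the stream URLs in descending quality order. The extraction step in isolation (the sample HTML below is made up):

import json
import re

html = 'var info = {"title": "demo", "stream_h264_hq_url": "http://example.com/v.mp4"},\n'
info = json.loads(re.search(r'var\s*info\s*=\s*({.+}),\n', html).group(1))
print(info['title'])  # -> demo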
9358060c648c0ee71498f173dcbf6fc839ba6ff8
Update expected release date
CodeForPhilly/chime,CodeForPhilly/chime,CodeForPhilly/chime
src/penn_chime/constants.py
src/penn_chime/constants.py
"""Constants.""" from datetime import date """ This reflects a date from which previously-run reports will no longer match current results, indicating when users should re-run their reports """ CHANGE_DATE = date(year=2020, month=4, day=8) VERSION = 'v1.1.3' DATE_FORMAT = "%b, %d" # see https://strftime.org DOCS_URL = "https://code-for-philly.gitbook.io/chime" EPSILON = 1.0e-7 FLOAT_INPUT_MIN = 0.0001 FLOAT_INPUT_STEP = 0.1
"""Constants.""" from datetime import date """ This reflects a date from which previously-run reports will no longer match current results, indicating when users should re-run their reports """ CHANGE_DATE = date(year=2020, month=4, day=6) VERSION = 'v1.1.3' DATE_FORMAT = "%b, %d" # see https://strftime.org DOCS_URL = "https://code-for-philly.gitbook.io/chime" EPSILON = 1.0e-7 FLOAT_INPUT_MIN = 0.0001 FLOAT_INPUT_STEP = 0.1
mit
Python
2de30c0acdbcc2560ee7c9c472df956441cb2bab
use better filterType
lilydjwg/nvchecker
nvchecker_source/vsmarketplace.py
nvchecker_source/vsmarketplace.py
new_contents:

# MIT licensed
# Copyright (c) 2013-2021 Th3Whit3Wolf <[email protected]>, et al.

from nvchecker.api import (
    VersionResult, Entry, AsyncCache, KeyManager,
    TemporaryError, session, GetVersionError,
)

API_URL = 'https://marketplace.visualstudio.com/_apis/public/gallery/extensionquery'

HEADERS = {
    'Accept': 'application/json;api-version=6.1-preview.1',
    'Content-Type': 'application/json'
}

async def get_version(name: str, conf: Entry, *, cache: AsyncCache, **kwargs):
    name = conf.get('vsmarketplace') or name

    q = {
        'filters': [
            {
                'criteria': [
                    {
                        'filterType': 8,
                        'value': 'Microsoft.VisualStudio.Code'
                    },
                    {
                        'filterType': 7,
                        'value': name
                    },
                    {
                        'filterType': 12,
                        'value': '4096'
                    }
                ],
                'pageNumber': 1,
                'pageSize': 2,
                'sortBy': 0,
                'sortOrder': 0
            }
        ],
        'assetTypes': [],
        'flags': 946
    }

    res = await session.post(
        API_URL,
        headers = HEADERS,
        json = q,
    )
    j = res.json()

    version = j['results'][0]['extensions'][0]['versions'][0]['version']
    return version

old_contents:

# MIT licensed
# Copyright (c) 2013-2021 Th3Whit3Wolf <[email protected]>, et al.

from nvchecker.api import (
    VersionResult, Entry, AsyncCache, KeyManager,
    TemporaryError, session, GetVersionError,
)

API_URL = 'https://marketplace.visualstudio.com/_apis/public/gallery/extensionquery'

HEADERS = {
    'Accept': 'application/json;api-version=6.1-preview.1',
    'Content-Type': 'application/json'
}

async def get_version(name: str, conf: Entry, *, cache: AsyncCache, **kwargs):
    name = conf.get('vsmarketplace') or name

    q = {
        'filters': [
            {
                'criteria': [
                    {
                        'filterType': 8,
                        'value': 'Microsoft.VisualStudio.Code'
                    },
                    {
                        'filterType': 10,
                        'value': name
                    },
                    {
                        'filterType': 12,
                        'value': '4096'
                    }
                ],
                'pageNumber': 1,
                'pageSize': 2,
                'sortBy': 0,
                'sortOrder': 0
            }
        ],
        'assetTypes': [],
        'flags': 946
    }

    res = await session.post(
        API_URL,
        headers = HEADERS,
        json = q,
    )
    j = res.json()

    version = j['results'][0]['extensions'][0]['versions'][0]['version']
    return version

old_contents note: the only difference is filterType 10 in the second criterion.
mit
Python
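Note on this change: the extensionquery endpoint is not formally documented, but community references assign these meanings to the filter types used here, which explains the 10-to-7 swap (treat the mapping as an assumption, not official API documentation):

# Commonly cited filterType values for this endpoint (unofficial):
FILTER_TYPES = {
    7: 'ExtensionName',       # exact publisher.extension match (the fix)
    8: 'InstallationTarget',  # e.g. Microsoft.VisualStudio.Code
    10: 'SearchText',         # fuzzy free-text search (the old, looser choice)
    12: 'ExcludeWithFlags',
}

A fuzzy SearchText query can return a different extension first, so matching the exact name is the safer filter for a version checker.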
355372ff51a84c0a6d7d86c0ef1fb12def341436
Add the score to Engine.chat return values
carrotflakes/invada
invada/engine.py
invada/engine.py
new_contents:

# -*- coding: utf-8 -*-


class Engine:

    def __init__(self, response_pairs, knowledge={}):
        self.response_pairs = response_pairs
        self.knowledge = knowledge

    def chat(self, user_utterance, context):
        best_score = 0
        best_response_pair = None
        best_captured = {}

        for response_pair in self.response_pairs:
            captured = response_pair.match(user_utterance, self.knowledge)
            if captured is None:
                continue

            score = response_pair.score(captured, context, self.knowledge)
            if best_score < score:
                best_score, best_response_pair, best_captured = score, response_pair, captured

        response, new_context = best_response_pair.generate(best_captured, context, self.knowledge)
        return response, new_context, best_score

old_contents:

# -*- coding: utf-8 -*-


class Engine:

    def __init__(self, response_pairs, knowledge={}):
        self.response_pairs = response_pairs
        self.knowledge = knowledge

    def chat(self, user_utterance, context):
        best_score = 0
        best_response_pair = None
        best_captured = {}

        for response_pair in self.response_pairs:
            captured = response_pair.match(user_utterance, self.knowledge)
            if captured is None:
                continue

            score = response_pair.score(captured, context, self.knowledge)
            if best_score < score:
                best_score, best_response_pair, best_captured = score, response_pair, captured

        return best_response_pair.generate(best_captured, context, self.knowledge)
mit
Python
ef29e402c58751a938cb11cee480ac4f4e31aef5
Add warning
mkusz/invoke,pfmoore/invoke,pyinvoke/invoke,tyewang/invoke,frol/invoke,kejbaly2/invoke,singingwolfboy/invoke,sophacles/invoke,frol/invoke,mkusz/invoke,pyinvoke/invoke,kejbaly2/invoke,pfmoore/invoke,mattrobenolt/invoke,mattrobenolt/invoke
invoke/config.py
invoke/config.py
new_contents:

from .vendor.etcaetera.config import Config as EtcConfig
from .vendor.etcaetera.adapter import File


class Config(object):
    """
    Invoke's primary configuration handling class.

    See :doc:`/concepts/configuration` for details on the configuration system
    this class implements, including the :ref:`configuration hierarchy
    <config-hierarchy>`.

    Lightly wraps ``etcaetera.config.Config``, allowing for another level of
    configurability (re: which files are loaded and in what order) as well as
    convenient access to configuration values, which may be accessed using
    dict syntax::

        config['foo']

    or attribute syntax::

        config.foo

    .. warning::
        Any "real" attributes (methods, etc) on `Config` take precedence over
        settings values - so if you e.g. have a top level setting named
        ``load``, you *must* use dict syntax to access it.

    Nesting works the same way - dict config values are transparently turned
    into objects which honor both the dictionary protocol and the
    attribute-access method::

        config['foo']['bar']
        config.foo.bar
    """

    def __init__(self):
        """
        Creates a new config object, but does not load any configuration data.

        .. note::
            To load configuration data, call `~.Config.load` after
            initialization.

        For convenience, keyword arguments not listed below will be
        interpreted as top-level configuration keys, so one may say e.g.::

            c = Config(my_setting='my_value')
            print(c['my_setting']) # => 'my_value'

        :param str global_prefix:
            Path & partial filename for the global config file location.
            Should include everything but the dot & file extension.

            The final result (including extension) will be turned into a
            fully qualified file path and have system-appropriate expansion
            performed (tildes and so forth).

            Default: ``/etc/invoke`` (e.g. ``/etc/invoke.yaml`` or
            ``/etc/invoke.json``).

        :param str user_prefix:
            Like ``global_prefix`` but for the per-user config file.

            Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
        """
        pass

    def load(self):
        """
        Performs loading and merging of all config sources.

        See :ref:`config-hierarchy` for details on load order and file
        locations.
        """
        pass

old_contents: identical in every respect except that the ``.. warning::`` block (real attributes taking precedence over settings values) is absent from the class docstring.
bsd-2-clause
Python
aa459c2db7f1995fda486ef80c30b541ff1895d8
Remove unnecessary params
yshalenyk/openprocurement.ocds.export,yshalenyk/openprocurement.ocds.export,yshalenyk/ocds.export
ocds/databridge/contrib/client.py
ocds/databridge/contrib/client.py
new_contents:

import requests
import requests.adapters
from gevent.pool import Pool
import logging

logger = logging.getLogger(__name__)


class APIClient(object):

    def __init__(self, api_key, api_host, api_version, **options):
        self.base_url = "{}/api/{}".format(api_host, api_version)
        self.session = requests.Session()
        self.session.auth = (api_key, '')
        self.session.headers = {"Accept": "applicaiton/json",
                                "Content-type": "application/json"}
        resourse = options.get('resourse', 'tenders')
        self.resourse_url = "{}/{}".format(self.base_url, resourse)
        APIAdapter = requests.adapters.HTTPAdapter(max_retries=5,
                                                   pool_connections=50,
                                                   pool_maxsize=30)
        self.session.mount(self.resourse_url, APIAdapter)
        # retrieve cookie
        self.session.head("{}/{}".format(self.base_url, 'spore'))
        self.pool = Pool(10)

    def get_tenders(self, params=None):
        if not params:
            params = {'feed': 'chages'}
        resp = self.session.get(self.resourse_url, params=params)
        if resp.ok:
            return resp.json()
        else:
            resp.raise_for_status()

    def get_tender(self, tender_id):
        resp = self.session.get(
            "{}/{}".format(self.resourse_url, tender_id)
        )
        if resp.ok:
            return resp.json()['data']
        else:
            resp.raise_for_status()

    def fetch(self, tender_ids):
        resp = self.pool.map(self.get_tender, [t['id'] for t in tender_ids])
        return [r for r in resp if r]


def get_retreive_clients(api_key, api_host, api_version):
    forward = APIClient(api_key, api_host, api_version)
    backward = APIClient(api_key, api_host, api_version)
    origin_cookie = forward.session.cookies
    backward.session.cookies = origin_cookie
    return origin_cookie, forward, backward

old_contents: identical except that get_tender took an unused ``params=None`` argument and passed ``params=params`` to session.get, and fetch returned ``[r for r in resp]`` without filtering out falsy results.
apache-2.0
Python
48cb3e901917c598294c5431c66efe6ed56e465a
set DEBUG to true
zhemao/speakeasy,zhemao/speakeasy
wsgi/settings.py
wsgi/settings.py
new_contents:

import os

MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')

PRIV_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'

DEBUG = True

old_contents:

import os

MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')

PRIV_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'
bsd-2-clause
Python
99f45d201b3513096bf8ebe7c877c836d8e6611a
Add logging to web client
sema/reliable-email
clients/web/rewebclient/rewebclient.py
clients/web/rewebclient/rewebclient.py
new_contents:

from flask import Flask, request, render_template, flash, redirect, url_for
from reclient.client import ReClient, ReClientException
import os
import logging

DEBUG = False
SECRET_KEY = 'CHANGE ME'

app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('REWEBCLIENT_SETTINGS', silent=True)

app.config['RE_FRONTEND_URL'] = app.config.get('RE_FRONTEND_URL', None)
if app.config['RE_FRONTEND_URL'] is None:
    app.config['RE_FRONTEND_URL'] = os.getenv('RE_FRONTEND_URL')

if app.config['RE_FRONTEND_URL'] is None:
    raise RuntimeError("RE_FRONTEND_URL environment variable must be set and point to a reliable-email web frontend!")

# Logging
if app.config.get('LOG', None) is not None:
    file_handler = logging.FileHandler(app.config['LOG'])
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)
    app.logger.setLevel(logging.DEBUG)

client = ReClient(app.config['RE_FRONTEND_URL'])


@app.route('/', methods=['GET', 'POST'])
def index():
    # We could use something like WTForms here, but I'll just keep it simple.
    # I'm ignoring all kinds of i'llformed user input, and let the web frontend handle the small amount of validation
    if request.method == 'POST':
        try:
            client.submit(
                request.form.get('subject', ''),
                request.form.get('body', ''),
                request.form.get('to_email', ''),
                request.form.get('to_name', '')
            )
            flash(u'Frontend returned a OK, job submitted!')
        except ReClientException, ex:
            flash(u'Job failed submission: %s' % ex.message)

        redirect(url_for('index'))

    return render_template('index.html')

if __name__ == '__main__':
    app.run()

old_contents: identical except that the ``import logging`` line and the ``# Logging`` block are absent, and the frontend URL was resolved in one line as ``app.config['RE_FRONTEND_URL'] = app.config.get('RE_FRONTEND_URL', None) or os.getenv('RE_FRONTEND_URL')``.
mit
Python
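Note on this change: with the new block, pointing REWEBCLIENT_SETTINGS at a settings file that defines LOG mirrors the app's debug output to that file. A sketch of such a settings file (both values below are hypothetical):

# contents of the file referenced by REWEBCLIENT_SETTINGS:
LOG = '/var/log/rewebclient.log'
RE_FRONTEND_URL = 'http://localhost:5000/'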
bffb0c7fb099039afb444cfc641ae7b1978c59f8
Exit main script when no observations found
jorisvandenbossche/ircelsos
ircelsos/main.py
ircelsos/main.py
new_contents:

# -*- coding: utf-8 -*-
"""
Created on Wed Apr 07 23:11:39 2015

@author: Joris Van den Bossche
"""
from __future__ import print_function


def main():
    import argparse
    parser = argparse.ArgumentParser(
        prog='ircelsos',
        description='Download air quality data from the SOS of IRCEL - CELINE.')

    parser.add_argument('pollutant', help='The pollutant')
    parser.add_argument('--station', '-s', nargs=1,
                        help='Station number. If no provided, use all available'
                             ' stations for that pollutant')
    parser.add_argument('--period', '-p', type=str, nargs=2,
                        help='Period of the measurements given as "start stop"')

    args = parser.parse_args()

    from .query_ircelsos import query_ircelsos
    from .sosparser import get_observations, parse_observation

    print("SOS of IRCEL - CELINE")
    print("Downloading ...")

    pollutant = args.pollutant
    if args.station:
        station = args.station[0]
    else:
        station = None

    response = query_ircelsos(pollutant, station, args.period[0], args.period[1])
    observations = get_observations(response)

    if not observations:
        print('No observations found')
        import sys
        sys.exit()

    for obs in observations:
        st_info, raw_data = parse_observation(obs)

        filename = '{0}_{1}.csv'.format(pollutant, st_info['name'])
        print("Writing file '{}'".format(filename))

        with open(filename, 'w') as f:
            f.writelines(raw_data.replace(';', '\n'))

old_contents: identical except that the no-observations branch only printed 'No observations found' and fell through to the (empty) loop, without the ``import sys`` / ``sys.exit()`` lines.
bsd-2-clause
Python
e85e1021ae20ebecb344c592f60f2ad6607a1db1
refactor: rename variables for clarity
pybuilder/pybuilder,esc/pybuilder,arcivanov/pybuilder,locolupo/pybuilder,locolupo/pybuilder,TimYi/pybuilder,elkingtonmcb/pybuilder,arcivanov/pybuilder,pybuilder/pybuilder,Danielweber7624/pybuilder,TimYi/pybuilder,elkingtonmcb/pybuilder,paolodedios/pybuilder,Designist/pybuilder,Danielweber7624/pybuilder,alex-dow/pybuilder,pybuilder/pybuilder,paolodedios/pybuilder,paolodedios/pybuilder,alex-dow/pybuilder,onesfreedom/pybuilder,onesfreedom/pybuilder,Designist/pybuilder,arcivanov/pybuilder,esc/pybuilder,esc/pybuilder
src/main/python/pybuilder/plugins/filter_resources_plugin.py
src/main/python/pybuilder/plugins/filter_resources_plugin.py
new_contents:

# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import string

from pybuilder.core import init, after, use_plugin
from pybuilder.utils import apply_on_files, read_file, write_file

use_plugin("core")


@init
def init_filter_resources_plugin(project):
    project.set_property_if_unset("filter_resources_target", "$dir_target")
    project.set_property_if_unset("filter_resources_glob", [])


@after("package", only_once=True)
def filter_resources(project, logger):
    globs = project.get_mandatory_property("filter_resources_glob")
    if not globs:
        logger.warn("No resources to filter configured. Consider removing plugin.")
        return

    target = project.expand_path("$filter_resources_target")
    logger.info("Filter resources matching %s in %s", " ".join(globs), target)

    project_dict_wrapper = ProjectDictWrapper(project)

    apply_on_files(target, filter_resource, globs, project_dict_wrapper, logger)


def filter_resource(absolute_file_name, relative_file_name, dictionary, logger):
    logger.debug("Filtering resource %s", absolute_file_name)
    content = "".join(read_file(absolute_file_name))
    filtered = string.Template(content).safe_substitute(dictionary)
    write_file(absolute_file_name, filtered)


class ProjectDictWrapper(object):
    def __init__(self, project):
        self.project = project

    def __getitem__(self, key):
        fallback_when_no_substitution_found = "${%s}" % key
        project_property_or_fallback = self.project.get_property(key, fallback_when_no_substitution_found)
        return getattr(self.project, key, project_property_or_fallback)

old_contents: identical except that ``__getitem__`` used the shorter names, i.e.

    def __getitem__(self, key):
        default_value = "${%s}" % key
        fallback_value = self.project.get_property(key, default_value)
        return getattr(self.project, key, fallback_value)
apache-2.0
Python
ab49b0be58975156f96bd5340da8d06f5b8626a5
Change to batch_size = 64
tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples,tensorflow/examples
tensorflow_examples/models/nmt_with_attention/distributed_test.py
tensorflow_examples/models/nmt_with_attention/distributed_test.py
new_contents:

# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed nmt_with_attention."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import tensorflow as tf  # TF2
from tensorflow_examples.models.nmt_with_attention import distributed_train
from tensorflow_examples.models.nmt_with_attention import utils

assert tf.__version__.startswith('2')


class NmtDistributedBenchmark(tf.test.Benchmark):

  def __init__(self, output_dir=None, **kwargs):
    self.output_dir = output_dir

  def benchmark_one_epoch_1_gpu(self):
    kwargs = utils.get_common_kwargs()
    kwargs.update({'enable_function': False})
    self._run_and_report_benchmark(**kwargs)

  def benchmark_one_epoch_1_gpu_function(self):
    kwargs = utils.get_common_kwargs()
    self._run_and_report_benchmark(**kwargs)

  def benchmark_ten_epochs_2_gpus(self):
    kwargs = utils.get_common_kwargs()
    kwargs.update({'epochs': 10, 'num_gpu': 2, 'batch_size': 64})
    self._run_and_report_benchmark(**kwargs)

  def _run_and_report_benchmark(self, **kwargs):
    start_time_sec = time.time()
    train_loss, test_loss = distributed_train.main(**kwargs)
    wall_time_sec = time.time() - start_time_sec

    extras = {'train_loss': train_loss, 'test_loss': test_loss}
    self.report_benchmark(wall_time=wall_time_sec, extras=extras)


if __name__ == '__main__':
  tf.test.main()

old_contents: identical except that benchmark_ten_epochs_2_gpus used ``'batch_size': 128``.
apache-2.0
Python
93ad5396bb1d574c86a6b3323199e75fe3bb34f4
implement protection for non existing directories
morgenst/PyAnalysisTools,morgenst/PyAnalysisTools,morgenst/PyAnalysisTools
PyAnalysisTools/base/ShellUtils.py
PyAnalysisTools/base/ShellUtils.py
new_contents:

import shutil
import os
import subprocess


def make_dirs(path):
    path = os.path.expanduser(path)
    if os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except OSError as e:
        raise OSError


def resolve_path_from_symbolic_links(symbolic_link, relative_path):
    def is_symbolic_link(path):
        return os.path.islink(path)
    if symbolic_link is None or relative_path is None:
        return relative_path
    if os.path.isabs(relative_path):
        return relative_path
    if not symbolic_link.endswith("/"):
        symbolic_link += "/"
    top_level_dir = symbolic_link.split("/")
    for n in range(1, len(top_level_dir)):
        if is_symbolic_link("/".join(top_level_dir[:-n])):
            return os.path.abspath(os.path.join(symbolic_link, relative_path))
    return relative_path


def move(src, dest):
    try:
        shutil.move(src, dest)
    except IOError:
        raise


def copy(src, dest):
    try:
        shutil.copy(src, dest)
    except:
        raise


def remove_directory(path, safe=False):
    if not os.path.exists(path):
        return
    if safe:
        try:
            os.removedirs(path)
        except OSError:
            raise
    else:
        try:
            shutil.rmtree(path)
        except OSError as e:
            raise e


def source(script_name):
    pipe = subprocess.Popen(". %s; env" % script_name, stdout=subprocess.PIPE, shell=True)
    output = pipe.communicate()[0]
    output = filter(lambda l: len(l.split("=")) == 2, output.splitlines())
    env = dict((line.split("=", 1) for line in output))
    os.environ.update(env)

old_contents: identical except that remove_directory lacked the leading ``if not os.path.exists(path): return`` guard.
mit
Python
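Note on this change: the added guard makes remove_directory a silent no-op for missing paths in both modes, where previously both raised OSError:

remove_directory('/tmp/does-not-exist')             # now returns quietly
remove_directory('/tmp/does-not-exist', safe=True)  # also returns quietly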
632f70d64bac45365974db834a3a6ddcb16e13ad
Add GuardianModelMixin in users/models.py
watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder
feder/users/models.py
feder/users/models.py
new_contents:

# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from django.contrib.auth.models import AbstractUser
from django.utils.encoding import python_2_unicode_compatible
from guardian.mixins import GuardianUserMixin


@python_2_unicode_compatible
class User(GuardianUserMixin, AbstractUser):

    def __str__(self):
        return self.username

old_contents:

# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from django.contrib.auth.models import AbstractUser
from django.utils.encoding import python_2_unicode_compatible
# from django.db import models
# from django.utils.translation import ugettext_lazy as _


@python_2_unicode_compatible
class User(AbstractUser):

    def __str__(self):
        return self.username
mit
Python
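Note on this change: GuardianUserMixin wires the user model into django-guardian's object-level permissions. A sketch of what that enables (the 'change_report' codename and the report object are hypothetical):

from guardian.shortcuts import assign_perm

assign_perm('change_report', user, report)  # grant a per-object permission
user.has_perm('change_report', report)      # -> True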
81b2519f575d35d2f1b735bcaef1901539ee06fa
refactor mgmt cmd update-toplist to use just CouchDB
gpodder/mygpo,gpodder/mygpo,gpodder/mygpo,gpodder/mygpo
mygpo/directory/management/commands/update-toplist.py
mygpo/directory/management/commands/update-toplist.py
new_contents:

from datetime import datetime

from django.core.management.base import BaseCommand

from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict


class Command(BaseCommand):

    def handle(self, *args, **options):

        # couchdbkit doesn't preserve microseconds
        started = datetime.utcnow().replace(microsecond=0)

        podcasts = Podcast.all_podcasts()
        total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows

        for n, podcast in enumerate(podcasts):
            subscriber_count = self.get_subscriber_count(podcast.get_id())
            self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
            progress(n, total)

    @repeat_on_conflict(['podcast'])
    def update(self, podcast, started, subscriber_count):

        # We've already updated this podcast
        if started in [e.timestamp for e in podcast.subscribers]:
            return

        data = SubscriberData(
            timestamp=started,
            subscriber_count=max(0, subscriber_count),
        )

        podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
        podcast.save()

    @staticmethod
    def get_subscriber_count(podcast_id):
        db = PodcastUserState.get_db()
        x = db.view('users/subscriptions_by_podcast',
                    startkey=[podcast_id, None],
                    endkey=[podcast_id, {}],
                    reduce=True,
                    group=True,
                    group_level=2,
                    )
        return x.count()

old_contents:

from datetime import datetime

from django.core.management.base import BaseCommand

from couchdbkit import ResourceConflict

from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress, multi_request_view
from mygpo.decorators import repeat_on_conflict


class Command(BaseCommand):

    def handle(self, *args, **options):

        started = datetime.now()

        entries = multi_request_view(Podcast, 'core/podcasts_by_oldid', include_docs=True)
        total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows

        for n, entry in enumerate(entries):
            subscriber_count = self.get_subscriber_count(entry.get_id())
            self.update(entry=entry, started=started, subscriber_count=subscriber_count)
            progress(n, total)

    @repeat_on_conflict(['entry'])
    def update(self, entry, started, subscriber_count):
        data = SubscriberData(
            timestamp=started,
            subscriber_count=max(0, subscriber_count),
        )
        entry.subscribers.append(data)
        entry.save()

    @staticmethod
    def get_subscriber_count(podcast_id):
        db = PodcastUserState.get_db()
        x = db.view('users/subscriptions_by_podcast',
                    startkey=[podcast_id, None],
                    endkey=[podcast_id, {}],
                    )
        return x.count()
agpl-3.0
Python
48e09e446943b695cc7208bc2a7cad7e53437957
Bump to 0.1.1 since I apparently pushed 0.1.0 at some point =/
bitprophet/botox
botox/__init__.py
botox/__init__.py
__version__ = "0.1.1"
__version__ = "0.1.0"
bsd-2-clause
Python
68b1b9d824da9225b8b568348a56d5770195d8f8
Fix method with classmethod
edx/edx-ora2,edx/edx-ora2,EDUlib/edx-ora2,edx/edx-ora2,edx/edx-ora2,EDUlib/edx-ora2,EDUlib/edx-ora2,EDUlib/edx-ora2
openassessment/xblock/openassesment_template_mixin.py
openassessment/xblock/openassesment_template_mixin.py
new_contents:

class OpenAssessmentTemplatesMixin(object):
    """
    This helps to get templates for different type of assessment
    that is offered.
    """

    @classmethod
    def templates(cls):
        """
        Returns a list of dictionary field: value objects that describe possible templates.
        VALID_ASSESSMENT_TYPES needs to be declared as a class variable to use it.
        """
        templates = []
        for assesment_type in cls.VALID_ASSESSMENT_TYPES:
            template_id = assesment_type
            display_name = cls.VALID_ASSESSMENT_TYPES_DISPLAY_NAMES.get(assesment_type)
            template = cls._create_template_dict(template_id, display_name)
            templates.append(template)
        return templates

    @classmethod
    def _create_template_dict(cls, template_id, display_name):
        """
        Returns a template dictionary which can be used with Studio API
        """
        return {
            "template_id": template_id,
            "metadata": {
                "display_name": display_name,
            }
        }

old_contents: identical except that ``_create_template_dict`` lacked the ``@classmethod`` decorator (while still taking ``cls`` as its first parameter).
agpl-3.0
Python
69582dd80518ccc29fc8de9cf5bff54caf62468b
Truncate to exact length
BuildingLink/sentry,jokey2k/sentry,argonemyth/sentry,hongliang5623/sentry,1tush/sentry,songyi199111/sentry,BuildingLink/sentry,zenefits/sentry,felixbuenemann/sentry,mvaled/sentry,drcapulet/sentry,boneyao/sentry,kevinlondon/sentry,jean/sentry,beni55/sentry,gg7/sentry,zenefits/sentry,hongliang5623/sentry,kevinastone/sentry,looker/sentry,zenefits/sentry,mvaled/sentry,felixbuenemann/sentry,gencer/sentry,wong2/sentry,felixbuenemann/sentry,kevinlondon/sentry,JamesMura/sentry,ifduyue/sentry,imankulov/sentry,JackDanger/sentry,pauloschilling/sentry,TedaLIEz/sentry,zenefits/sentry,nicholasserra/sentry,pauloschilling/sentry,argonemyth/sentry,wujuguang/sentry,wong2/sentry,looker/sentry,JamesMura/sentry,alexm92/sentry,camilonova/sentry,NickPresta/sentry,NickPresta/sentry,pauloschilling/sentry,gencer/sentry,beeftornado/sentry,nicholasserra/sentry,JamesMura/sentry,mvaled/sentry,fotinakis/sentry,BuildingLink/sentry,daevaorn/sentry,ewdurbin/sentry,alexm92/sentry,beeftornado/sentry,fotinakis/sentry,ifduyue/sentry,SilentCircle/sentry,songyi199111/sentry,ifduyue/sentry,1tush/sentry,rdio/sentry,jean/sentry,NickPresta/sentry,mvaled/sentry,wujuguang/sentry,fotinakis/sentry,JTCunning/sentry,rdio/sentry,jokey2k/sentry,alexm92/sentry,TedaLIEz/sentry,jean/sentry,fuziontech/sentry,beni55/sentry,rdio/sentry,JamesMura/sentry,Natim/sentry,daevaorn/sentry,JackDanger/sentry,drcapulet/sentry,camilonova/sentry,looker/sentry,imankulov/sentry,looker/sentry,Kryz/sentry,mvaled/sentry,ngonzalvez/sentry,wong2/sentry,korealerts1/sentry,gg7/sentry,zenefits/sentry,BuildingLink/sentry,imankulov/sentry,korealerts1/sentry,rdio/sentry,argonemyth/sentry,korealerts1/sentry,fuziontech/sentry,jean/sentry,NickPresta/sentry,camilonova/sentry,TedaLIEz/sentry,daevaorn/sentry,vperron/sentry,hongliang5623/sentry,ifduyue/sentry,mvaled/sentry,mitsuhiko/sentry,BuildingLink/sentry,daevaorn/sentry,ifduyue/sentry,ngonzalvez/sentry,wujuguang/sentry,1tush/sentry,SilentCircle/sentry,SilentCircle/sentry,vperron/sentry,kevinastone/sentry,JTCunning/sentry,ngonzalvez/sentry,SilentCircle/sentry,Kryz/sentry,ewdurbin/sentry,BayanGroup/sentry,jokey2k/sentry,JamesMura/sentry,kevinastone/sentry,llonchj/sentry,llonchj/sentry,Natim/sentry,ewdurbin/sentry,gg7/sentry,fotinakis/sentry,fuziontech/sentry,looker/sentry,BayanGroup/sentry,llonchj/sentry,Natim/sentry,beeftornado/sentry,songyi199111/sentry,Kryz/sentry,boneyao/sentry,beni55/sentry,gencer/sentry,boneyao/sentry,JackDanger/sentry,BayanGroup/sentry,vperron/sentry,jean/sentry,drcapulet/sentry,kevinlondon/sentry,JTCunning/sentry,nicholasserra/sentry,gencer/sentry,mitsuhiko/sentry,gencer/sentry
src/sentry/utils/strings.py
src/sentry/utils/strings.py
""" sentry.utils.strings ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import base64 import zlib def truncatechars(value, arg): """ Truncates a string after a certain number of chars. Argument: Number of chars to truncate after. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. if len(value) > length: return value[:length - 3] + '...' return value def compress(value): return base64.b64encode(zlib.compress(value)) def decompress(value): return zlib.decompress(base64.b64decode(value)) def gunzip(value): return zlib.decompress(value, 16 + zlib.MAX_WBITS)
""" sentry.utils.strings ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import base64 import zlib def truncatechars(value, arg): """ Truncates a string after a certain number of chars. Argument: Number of chars to truncate after. """ try: length = int(arg) except ValueError: # Invalid literal for int(). return value # Fail silently. if len(value) > length: return value[:length] + '...' return value def compress(value): return base64.b64encode(zlib.compress(value)) def decompress(value): return zlib.decompress(base64.b64decode(value)) def gunzip(value): return zlib.decompress(value, 16 + zlib.MAX_WBITS)
bsd-3-clause
Python
a8fb92840ff487c61564175efbf637fec538b480
Add signup view to fix error
stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten
features/gestalten/urls.py
features/gestalten/urls.py
new_contents:

from allauth.socialaccount import views as socialaccount_views
from allauth.socialaccount.providers.facebook import views as facebook_views
from django.conf.urls import url

from . import views

urlpatterns = [
    url(
        r'^stadt/gestalten/$',
        views.List.as_view(),
        name='gestalten'),
    url(
        r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/$',
        views.Update.as_view(),
        name='gestalt-update'),
    url(
        r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/avatar/$',
        views.UpdateAvatar.as_view(),
        name='gestalt-avatar-update'),
    url(
        r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/background/$',
        views.UpdateBackground.as_view(),
        name='gestalt-background-update'),
    url(
        r'^stadt/login/$',
        views.Login.as_view(),
        name='login'),
    url(r'^stadt/login/cancelled/$', socialaccount_views.login_cancelled,
        name='socialaccount_login_cancelled'),
    url(r'^stadt/login/error/$', socialaccount_views.login_error,
        name='socialaccount_login_error'),
    url(r'^stadt/login/signup/$', socialaccount_views.signup,
        name='socialaccount_signup'),
    url(r'^stadt/login/facebook/$', facebook_views.oauth2_login,
        name='facebook_login'),
    url(r'^stadt/login/facebook/callback/$', facebook_views.oauth2_callback,
        name='facebook_callback'),
    url(r'^stadt/login/facebook/token/$', facebook_views.login_by_token,
        name='facebook_login_by_token'),
]

old_contents: identical except that the ``socialaccount_signup`` URL pattern (``r'^stadt/login/signup/$'``) is absent.
agpl-3.0
Python
1d52996a88eb5aed643fe61ee959bd88373401b3
Throw a linebreak in there upon completion
jhaals/filebutler-upload
filebutler_upload/utils.py
filebutler_upload/utils.py
from datetime import datetime, timedelta import sys class ProgressBar(object): def __init__(self, filename, fmt): self.filename = filename self.fmt = fmt self.progress = 0 self.total = 0 self.time_started = datetime.now() self.time_updated = self.time_started def __call__(self, current, total): self.progress = current self.total = total final_update = current == total if datetime.now() - self.time_updated > timedelta(seconds=0.5) or final_update: output = self.fmt.format( filename=self.filename, percent=self.get_percent(), speed=self.get_mbps() ) sys.stdout.write('\r' + output) if final_update: sys.stdout.write('\n') sys.stdout.flush() self.time_updated = datetime.now() def get_percent(self): return self.progress / float(self.total) def get_mbps(self): time_delta = datetime.now() - self.time_started if not time_delta.seconds: return 0 return self.progress * 8 / float(time_delta.seconds) / 1000 / 1000
from datetime import datetime, timedelta
import sys


class ProgressBar(object):
    def __init__(self, filename, fmt):
        self.filename = filename
        self.fmt = fmt

        self.progress = 0
        self.total = 0

        self.time_started = datetime.now()
        self.time_updated = self.time_started

    def __call__(self, current, total):
        self.progress = current
        self.total = total

        if datetime.now() - self.time_updated > timedelta(seconds=0.5):
            output = self.fmt.format(
                filename=self.filename,
                percent=self.get_percent(),
                speed=self.get_mbps()
            )
            sys.stdout.write('\r' + output)
            sys.stdout.flush()

            self.time_updated = datetime.now()

    def get_percent(self):
        return self.progress / float(self.total)

    def get_mbps(self):
        time_delta = datetime.now() - self.time_started
        if not time_delta.seconds:
            return 0
        return self.progress * 8 / float(time_delta.seconds) / 1000 / 1000
bsd-3-clause
Python
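The new final_update flag above forces one last refresh and a trailing newline once current == total, so the shell prompt no longer lands on top of the progress line. A minimal driver sketch for the callback (the format string and the simulated transfer are illustrative assumptions, not part of the commit):

# Illustrative driver for the ProgressBar callback above.
bar = ProgressBar('example.bin', '{filename}: {percent:.0%} at {speed:.2f} Mbps')
total = 1000000
for sent in range(0, total + 1, 100000):
    bar(sent, total)  # the last call hits current == total and emits the linebreak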
07f96a22afe2d010809d03077d9cdd5ecb43d017
Update data source unique name migration to support another name of constraint
akariv/redash,chriszs/redash,jmvasquez/redashtest,pubnative/redash,akariv/redash,amino-data/redash,ninneko/redash,ninneko/redash,denisov-vlad/redash,EverlyWell/redash,ninneko/redash,amino-data/redash,44px/redash,moritz9/redash,guaguadev/redash,moritz9/redash,guaguadev/redash,chriszs/redash,pubnative/redash,44px/redash,rockwotj/redash,44px/redash,vishesh92/redash,guaguadev/redash,getredash/redash,EverlyWell/redash,M32Media/redash,ninneko/redash,stefanseifert/redash,alexanderlz/redash,useabode/redash,pubnative/redash,useabode/redash,denisov-vlad/redash,ninneko/redash,denisov-vlad/redash,M32Media/redash,getredash/redash,hudl/redash,alexanderlz/redash,chriszs/redash,guaguadev/redash,hudl/redash,denisov-vlad/redash,M32Media/redash,chriszs/redash,rockwotj/redash,EverlyWell/redash,moritz9/redash,imsally/redash,vishesh92/redash,moritz9/redash,M32Media/redash,imsally/redash,jmvasquez/redashtest,stefanseifert/redash,amino-data/redash,crowdworks/redash,alexanderlz/redash,hudl/redash,easytaxibr/redash,getredash/redash,crowdworks/redash,guaguadev/redash,denisov-vlad/redash,crowdworks/redash,akariv/redash,jmvasquez/redashtest,EverlyWell/redash,akariv/redash,imsally/redash,rockwotj/redash,jmvasquez/redashtest,easytaxibr/redash,imsally/redash,easytaxibr/redash,stefanseifert/redash,pubnative/redash,pubnative/redash,akariv/redash,stefanseifert/redash,vishesh92/redash,amino-data/redash,getredash/redash,hudl/redash,jmvasquez/redashtest,useabode/redash,rockwotj/redash,44px/redash,stefanseifert/redash,crowdworks/redash,vishesh92/redash,useabode/redash,alexanderlz/redash,easytaxibr/redash,easytaxibr/redash,getredash/redash
migrations/0020_change_ds_name_to_non_uniqe.py
migrations/0020_change_ds_name_to_non_uniqe.py
from redash.models import db
import peewee
from playhouse.migrate import PostgresqlMigrator, migrate

if __name__ == '__main__':
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        # Change the uniqueness constraint on data source name to be (org, name):
        success = False
        for constraint in ['unique_name', 'data_sources_name']:
            try:
                db.database.execute_sql("ALTER TABLE data_sources DROP CONSTRAINT {}".format(constraint))
                success = True
                break
            except peewee.ProgrammingError:
                db.close_db(None)

        if not success:
            print "Failed removing uniqueness constraint on data source name."
            print "Please verify its name in the schema, update the migration and run again."
            exit()

        migrate(
            migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
        )

    db.close_db(None)
from redash.models import db
from playhouse.migrate import PostgresqlMigrator, migrate

if __name__ == '__main__':
    migrator = PostgresqlMigrator(db.database)

    with db.database.transaction():
        # Change the uniqueness constraint on data source name to be (org, name):
        db.database.execute_sql("ALTER TABLE data_sources DROP CONSTRAINT unique_name")

        migrate(
            migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
        )

    db.close_db(None)
bsd-2-clause
Python
bc5621afa044a486ef7514e1654224102b3cfd54
Rename chunk list
WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder,WycliffeAssociates/translationRecorder
RecordingApp/app/src/scripts/get_chunks.py
RecordingApp/app/src/scripts/get_chunks.py
""" Script to generate a json file containing book name, number of chapters, number of chunks """ import json import urllib.request import re RESULT_JSON_NAME = "chunks.json" with open("catalog.json") as file: DATA = json.load(file) OUTPUT = [] #skip obs for now, loop over all books for x in range(1, 67): #gives book name and order (the books are stored out of order in the json) slug = DATA[x]["slug"] sort = DATA[x]["sort"] #Get languages.json url_lang_cat = DATA[x]["lang_catalog"] response_lang_cat = urllib.request.urlopen(url_lang_cat) lang_catalog = json.loads(response_lang_cat.read().decode('utf-8')) name = lang_catalog[0]["project"]["name"] #Get resources.json #0 is for udb, are chunks the same for both? url_res = lang_catalog[0]["res_catalog"] response_res = urllib.request.urlopen(url_res) res_cat = json.loads(response_res.read().decode('utf-8')) #Get the usfm file url_usfm = res_cat[0]["usfm"] response_usfm = urllib.request.urlopen(url_usfm) usfm_data = response_usfm.read().decode('utf-8') lines = usfm_data.splitlines() #keep a count of \c and \s5 tags (chapter and chunk respectively) chapter = 0 num_chunks = 0 chapters_in_book = [] for line in lines: chunk_match = re.search(r'\\s5', line) #add to the number of chunks seen so far if chunk_match: num_chunks += 1 #on a new chapter, append the number of chunks tallied and reset the count chapter_match = re.search(r'\\c', line) if chapter_match: chapters_in_book.append(num_chunks) num_chunks = 0 chapter += 1 #append the last chapter chapters_in_book.append(num_chunks+1) #Account for the off by one introduced from chunks coming before chapters chunk_list_fixed = [] length = len(chapters_in_book)-1 #eliminate chapter "0" for i in range(length): chunk_list_fixed.append(chapters_in_book[i+1]) #create a dictionary to store the book's data book = {} book['slug'] = slug book['name'] = name book['sort'] = sort book['chapters'] = len(chunk_list_fixed) book['chunks'] = chunk_list_fixed #add to the list of books OUTPUT.append(book) break # DEBUG -- only process one book for testing #output all book data to a json file with open(RESULT_JSON_NAME, 'w') as outfile: json.dump(OUTPUT, outfile)
""" Script to generate a json file containing book name, number of chapters, number of chunks """ import json import urllib.request import re RESULT_JSON_NAME = "chunks.json" with open("catalog.json") as file: DATA = json.load(file) OUTPUT = [] #skip obs for now, loop over all books for x in range(1, 67): #gives book name and order (the books are stored out of order in the json) slug = DATA[x]["slug"] sort = DATA[x]["sort"] #Get languages.json url_lang_cat = DATA[x]["lang_catalog"] response_lang_cat = urllib.request.urlopen(url_lang_cat) lang_catalog = json.loads(response_lang_cat.read().decode('utf-8')) name = lang_catalog[0]["project"]["name"] #Get resources.json #0 is for udb, are chunks the same for both? url_res = lang_catalog[0]["res_catalog"] response_res = urllib.request.urlopen(url_res) res_cat = json.loads(response_res.read().decode('utf-8')) #Get the usfm file url_usfm = res_cat[0]["usfm"] response_usfm = urllib.request.urlopen(url_usfm) usfm_data = response_usfm.read().decode('utf-8') lines = usfm_data.splitlines() #keep a count of \c and \s5 tags (chapter and chunk respectively) chapter = 0 num_chunks = 0 chunk_list = [] for line in lines: chunk_match = re.search(r'\\s5', line) #add to the number of chunks seen so far if chunk_match: num_chunks += 1 #on a new chapter, append the number of chunks tallied and reset the count chapter_match = re.search(r'\\c', line) if chapter_match: chunk_list.append(num_chunks) num_chunks = 0 chapter += 1 #append the last chapter chunk_list.append(num_chunks+1) #Account for the off by one introduced from chunks coming before chapters chunk_list_fixed = [] length = len(chunk_list)-1 #eliminate chapter "0" for i in range(length): chunk_list_fixed.append(chunk_list[i+1]) #create a dictionary to store the book's data book = {} book['slug'] = slug book['name'] = name book['sort'] = sort book['chapters'] = len(chunk_list_fixed) book['chunks'] = chunk_list_fixed #add to the list of books OUTPUT.append(book) #output all book data to a json file with open(RESULT_JSON_NAME, 'w') as outfile: json.dump(OUTPUT, outfile)
mit
Python
d949c21c4b0a54a9a697a07bf12e22a98dc59ff1
Add `attach` method so we can wrap apps like WSGI middleware
bradwright/flask-mustachejs,bradwright/flask-mustachejs,bradleywright/flask-mustachejs,bradleywright/flask-mustachejs
flask_mustache/__init__.py
flask_mustache/__init__.py
# flask-mustache Flask plugin
import os

from jinja2 import Template

from flask import current_app, Blueprint

__all__ = ('FlaskMustache',)

mustache_app = Blueprint('mustache', __name__, static_folder='static')


class FlaskMustache(object):
    "Wrapper to inject Mustache stuff into Flask"

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.register_blueprint(mustache_app)

        # set up global `mustache` function
        app.jinja_env.globals['mustache'] = mustache

        # attach context processor with template content
        app.context_processor(mustache_templates)

    @staticmethod
    def attach(app):
        "This is written so it can work like WSGI middleware"
        # noop
        _ = FlaskMustache(app)
        return app


# context processor
def mustache_templates():
    "Returns the content of all Mustache templates in the Jinja environment"
    # short circuit development
    if current_app.debug:
        return {}

    # get all the templates this env knows about
    all_templates = current_app.jinja_loader.list_templates()
    mustache_templates = {}
    for template_name in all_templates:
        # TODO: make this configurable
        # we only want a specific extension
        if template_name.endswith('mustache'):
            # throw away everything except the file content
            template, _, _ = \
                current_app.jinja_loader.get_source(current_app.jinja_env, template_name)
            mustache_templates[template_name] = template

    # now we need to render the templates
    template_string = """{% if mustache_templates %}
    {% for template_name, content in mustache_templates.items() %}
        <script type="text/x-mustache-template" id="{{ template_name|replace('/', '-') }}" charset="utf-8">
            {{ content|e }}
        </script>
    {% endfor %}
{% endif %}"""
    context = {
        'mustache_templates': mustache_templates
    }

    # returns the full HTML, ready to use in JavaScript
    return {'mustache_templates': Template(template_string).render(context)}


# template helper function
def mustache(template, **kwargs):
    """Usage:

    {{ mustache('path/to/whatever.mustache', key=value, key1=value1.. keyn=valuen) }}

    This uses the regular Jinja2 loader to find the templates, so your
    *.mustache files will need to be available in that path.
    """
    template, _, _ = current_app.jinja_loader.get_source(current_app.jinja_env, template)
    return pystache.render(template, kwargs, encoding='utf-8')
# flask-mustache Flask plugin
import os

from jinja2 import Template

from flask import current_app, Blueprint

__all__ = ('FlaskMustache',)

mustache_app = Blueprint('mustache', __name__, static_folder='static')


class FlaskMustache(object):
    "Wrapper to inject Mustache stuff into Flask"

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.register_blueprint(mustache_app)

        # set up global `mustache` function
        app.jinja_env.globals['mustache'] = mustache

        # attach context processor with template content
        app.context_processor(mustache_templates)


# context processor
def mustache_templates():
    "Returns the content of all Mustache templates in the Jinja environment"
    # short circuit development
    if current_app.debug:
        return {}

    # get all the templates this env knows about
    all_templates = current_app.jinja_loader.list_templates()
    mustache_templates = {}
    for template_name in all_templates:
        # TODO: make this configurable
        # we only want a specific extension
        if template_name.endswith('mustache'):
            # throw away everything except the file content
            template, _, _ = \
                current_app.jinja_loader.get_source(current_app.jinja_env, template_name)
            mustache_templates[template_name] = template

    # now we need to render the templates
    template_string = """{% if mustache_templates %}
    {% for template_name, content in mustache_templates.items() %}
        <script type="text/x-mustache-template" id="{{ template_name|replace('/', '-') }}" charset="utf-8">
            {{ content|e }}
        </script>
    {% endfor %}
{% endif %}"""
    context = {
        'mustache_templates': mustache_templates
    }

    # returns the full HTML, ready to use in JavaScript
    return {'mustache_templates': Template(template_string).render(context)}


# template helper function
def mustache(template, **kwargs):
    """Usage:

    {{ mustache('path/to/whatever.mustache', key=value, key1=value1.. keyn=valuen) }}

    This uses the regular Jinja2 loader to find the templates, so your
    *.mustache files will need to be available in that path.
    """
    template, _, _ = current_app.jinja_loader.get_source(current_app.jinja_env, template)
    return pystache.render(template, kwargs, encoding='utf-8')
bsd-3-clause
Python
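The attach staticmethod added in this commit mirrors WSGI-middleware style: construct the extension for its side effects and hand the app straight back. A hypothetical usage sketch (the import path is assumed from the file name flask_mustache/__init__.py):

# Hypothetical usage of the new attach() helper; any Flask app works here.
from flask import Flask
from flask_mustache import FlaskMustache

app = Flask(__name__)
app = FlaskMustache.attach(app)  # registers the blueprint and `mustache` global, returns app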
c1c5fbdc2d7cda67668df38d91a2becf546fa852
Update transform config in development
alphagov/backdrop,alphagov/backdrop,alphagov/backdrop
backdrop/transformers/config/development.py
backdrop/transformers/config/development.py
TRANSFORMER_AMQP_URL = 'amqp://transformer:notarealpw@localhost:5672/%2Ftransformations'

STAGECRAFT_URL = 'http://localhost:3103'
STAGECRAFT_OAUTH_TOKEN = 'development-oauth-access-token'

BACKDROP_READ_URL = 'http://backdrop-read.dev.gov.uk/data'
BACKDROP_WRITE_URL = 'http://backdrop-write.dev.gov.uk/data'
TRANSFORMER_AMQP_URL = 'amqp://transformer:notarealpw@localhost:5672/%2Ftransformations'

STAGECRAFT_URL = 'http://localhost:3204'
STAGECRAFT_OAUTH_TOKEN = 'development-oauth-access-token'

BACKDROP_READ_URL = 'http://localhost:3038/data'
BACKDROP_WRITE_URL = 'http://localhost:3039/data'
mit
Python
13c74e663dd511f53e6c0b1bb37b5baa12bba016
add tokens for fco transaction buckets
alphagov/backdrop,alphagov/backdrop,alphagov/backdrop
backdrop/write/config/development_tokens.py
backdrop/write/config/development_tokens.py
TOKENS = {
    '_foo_bucket': '_foo_bucket-bearer-token',
    'bucket': 'bucket-bearer-token',
    'foo': 'foo-bearer-token',
    'foo_bucket': 'foo_bucket-bearer-token',
    'licensing': 'licensing-bearer-token',
    'licensing_journey': 'licensing_journey-bearer-token',
    'pay_legalisation_post_journey': 'pay_legalisation_post_journey-bearer-token',
    'pay_legalisation_drop_off_journey': 'pay_legalisation_drop_off_journey-bearer-token',
    'pay_register_birth_abroad_journey': 'pay_register_birth_abroad_journey-bearer-token',
    'pay_register_death_abroad_journey': 'pay_register_death_abroad_journey-bearer-token',
    'pay_foreign_marriage_certificates_journey': 'pay_foreign_marriage_certificates_journey-bearer-token',
    'deposit_foreign_marriage_journey': 'deposit_foreign_marriage_journey-bearer-token'
}
TOKENS = {
    '_foo_bucket': '_foo_bucket-bearer-token',
    'bucket': 'bucket-bearer-token',
    'foo': 'foo-bearer-token',
    'foo_bucket': 'foo_bucket-bearer-token',
    'licensing': 'licensing-bearer-token',
    'licensing_journey': 'licensing_journey-bearer-token'
}
mit
Python
7b27423bef813befe1bb9dd5cb14843d847bff42
Fix mailhog settings
vintasoftware/django-react-boilerplate,vintasoftware/django-react-boilerplate,vintasoftware/django-react-boilerplate,vintasoftware/django-react-boilerplate
backend/project_name/settings/local_base.py
backend/project_name/settings/local_base.py
from .base import *  # noqa

DEBUG = True

HOST = "http://localhost:8000"

SECRET_KEY = "secret"

DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": base_dir_join("db.sqlite3"),}
}

STATIC_ROOT = base_dir_join("staticfiles")
STATIC_URL = "/static/"

MEDIA_ROOT = base_dir_join("mediafiles")
MEDIA_URL = "/media/"

DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"

AUTH_PASSWORD_VALIDATORS = []  # allow easy passwords only on local

# Celery
CELERY_TASK_ALWAYS_EAGER = True
CELERY_TASK_EAGER_PROPAGATES = True

# Email settings for mailhog
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = 'mailhog'
EMAIL_PORT = 1025

# Logging
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"standard": {"format": "%(levelname)-8s [%(asctime)s] %(name)s: %(message)s"},},
    "handlers": {
        "console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "standard",},
    },
    "loggers": {
        "": {"handlers": ["console"], "level": "INFO"},
        "celery": {"handlers": ["console"], "level": "INFO"},
    },
}

JS_REVERSE_JS_MINIFY = False
from .base import *  # noqa

DEBUG = True

HOST = "http://localhost:8000"

SECRET_KEY = "secret"

DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": base_dir_join("db.sqlite3"),}
}

STATIC_ROOT = base_dir_join("staticfiles")
STATIC_URL = "/static/"

MEDIA_ROOT = base_dir_join("mediafiles")
MEDIA_URL = "/media/"

DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"

AUTH_PASSWORD_VALIDATORS = []  # allow easy passwords only on local

# Celery
CELERY_TASK_ALWAYS_EAGER = True
CELERY_TASK_EAGER_PROPAGATES = True

# Email
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = config("EMAIL_HOST")
EMAIL_HOST_USER = config("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True

# Logging
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"standard": {"format": "%(levelname)-8s [%(asctime)s] %(name)s: %(message)s"},},
    "handlers": {
        "console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "standard",},
    },
    "loggers": {
        "": {"handlers": ["console"], "level": "INFO"},
        "celery": {"handlers": ["console"], "level": "INFO"},
    },
}

JS_REVERSE_JS_MINIFY = False
mit
Python
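With the new settings, Django's SMTP backend talks to a MailHog container reachable at hostname mailhog on MailHog's default SMTP port 1025, which accepts mail with no credentials or TLS; that is why the EMAIL_HOST_USER, EMAIL_HOST_PASSWORD and EMAIL_USE_TLS lines were dropped. A rough smoke-test sketch, assuming a configured Django context and illustrative addresses:

# Sends through mailhog:1025; the message then appears in MailHog's web UI.
from django.core.mail import send_mail
send_mail('Hello', 'Local dev mail', 'dev@example.com', ['someone@example.com'])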
bce815a12a3ce18d23644c08beda5f97271e559e
update token
datawire/forge,sipplified/forge,datawire/forge,sipplified/forge,sipplified/forge,datawire/forge
forge/tests/test_github.py
forge/tests/test_github.py
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time, os

from forge.tasks import TaskError
from forge.github import Github
from .common import mktree
from tempfile import mkdtemp
from shutil import rmtree

# github will deactivate this token if it detects it in our source, so
# we obfuscate it slightly
numbers = [48, 49, 51, 99, 99, 101, 52, 51, 48, 53, 54, 100, 57, 56, 97, 50,
           55, 97, 54, 53, 55, 55, 49, 48, 49, 55, 48, 54, 55, 102, 100, 48,
           102, 57, 49, 51, 97, 48, 102, 51]
token = "".join(chr(c) for c in numbers)

def test_list():
    gh = Github(token)
    repos = gh.list("forgeorg")
    assert repos == [(u'forgeorg/foo', u'https://github.com/forgeorg/foo.git')]

def test_pull():
    gh = Github(token)
    repos = gh.list("forgeorg")
    name, url = repos[0]
    output = mkdtemp()
    gh.pull(url, os.path.join(output, name))
    assert os.path.exists(os.path.join(output, name, "README.md"))
    rmtree(output)

def test_exists():
    gh = Github(token)
    assert gh.exists("https://github.com/forgeorg/foo.git")
    assert not gh.exists("https://github.com/forgeorg/nosuchrepo.git")
    unauth_gh = Github(None)
    try:
        unauth_gh.exists("https://github.com/forgeorg/nosuchrepo.git")
        assert False
    except TaskError, e:
        assert "Authentication failed" in str(e)

def test_clone():
    gh = Github(token)
    output = mkdtemp()
    gh.clone("https://github.com/forgeorg/foo.git", os.path.join(output, 'foo'))
    assert os.path.exists(os.path.join(output, 'foo', "README.md"))
    rmtree(output)
# Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time, os

from forge.tasks import TaskError
from forge.github import Github
from .common import mktree
from tempfile import mkdtemp
from shutil import rmtree

token = "8c91e6c758b16e7b5d7f0676d3475f9fa33693dd"

def test_list():
    gh = Github(token)
    repos = gh.list("forgeorg")
    assert repos == [(u'forgeorg/foo', u'https://github.com/forgeorg/foo.git')]

def test_pull():
    gh = Github(token)
    repos = gh.list("forgeorg")
    name, url = repos[0]
    output = mkdtemp()
    gh.pull(url, os.path.join(output, name))
    assert os.path.exists(os.path.join(output, name, "README.md"))
    rmtree(output)

def test_exists():
    gh = Github(token)
    assert gh.exists("https://github.com/forgeorg/foo.git")
    assert not gh.exists("https://github.com/forgeorg/nosuchrepo.git")
    unauth_gh = Github(None)
    try:
        unauth_gh.exists("https://github.com/forgeorg/nosuchrepo.git")
        assert False
    except TaskError, e:
        assert "Authentication failed" in str(e)

def test_clone():
    gh = Github(token)
    output = mkdtemp()
    gh.clone("https://github.com/forgeorg/foo.git", os.path.join(output, 'foo'))
    assert os.path.exists(os.path.join(output, 'foo', "README.md"))
    rmtree(output)
apache-2.0
Python
307e0c4bbd7e76c9a8becf39df539413fef20e60
Add line magic %cpp
gbitzes/root,CristinaCristescu/root,abhinavmoudgil95/root,CristinaCristescu/root,lgiommi/root,davidlt/root,mattkretz/root,krafczyk/root,gganis/root,satyarth934/root,zzxuanyuan/root,agarciamontoro/root,vukasinmilosevic/root,lgiommi/root,BerserkerTroll/root,davidlt/root,veprbl/root,sirinath/root,gbitzes/root,nilqed/root,buuck/root,mhuwiler/rootauto,evgeny-boger/root,veprbl/root,sirinath/root,perovic/root,veprbl/root,thomaskeck/root,beniz/root,agarciamontoro/root,Y--/root,zzxuanyuan/root,mkret2/root,esakellari/root,buuck/root,gbitzes/root,buuck/root,mattkretz/root,abhinavmoudgil95/root,davidlt/root,jrtomps/root,gganis/root,esakellari/root,arch1tect0r/root,pspe/root,zzxuanyuan/root-compressor-dummy,mhuwiler/rootauto,krafczyk/root,perovic/root,perovic/root,beniz/root,thomaskeck/root,jrtomps/root,zzxuanyuan/root-compressor-dummy,simonpf/root,karies/root,vukasinmilosevic/root,sawenzel/root,perovic/root,pspe/root,buuck/root,gbitzes/root,krafczyk/root,Duraznos/root,gbitzes/root,beniz/root,arch1tect0r/root,bbockelm/root,vukasinmilosevic/root,lgiommi/root,evgeny-boger/root,karies/root,vukasinmilosevic/root,perovic/root,sawenzel/root,bbockelm/root,root-mirror/root,mkret2/root,jrtomps/root,CristinaCristescu/root,zzxuanyuan/root-compressor-dummy,krafczyk/root,sawenzel/root,CristinaCristescu/root,sirinath/root,root-mirror/root,pspe/root,mhuwiler/rootauto,pspe/root,vukasinmilosevic/root,gbitzes/root,veprbl/root,CristinaCristescu/root,beniz/root,esakellari/root,agarciamontoro/root,davidlt/root,gganis/root,olifre/root,jrtomps/root,esakellari/root,abhinavmoudgil95/root,root-mirror/root,sawenzel/root,vukasinmilosevic/root,simonpf/root,zzxuanyuan/root-compressor-dummy,gbitzes/root,BerserkerTroll/root,simonpf/root,Y--/root,mkret2/root,Duraznos/root,georgtroska/root,sirinath/root,mhuwiler/rootauto,root-mirror/root,pspe/root,vukasinmilosevic/root,simonpf/root,nilqed/root,sirinath/root,evgeny-boger/root,karies/root,lgiommi/root,sawenzel/root,agarciamontoro/root,perovic/root,zzxuanyuan/root,zzxuanyuan/root-compressor-dummy,BerserkerTroll/root,davidlt/root,thomaskeck/root,arch1tect0r/root,mhuwiler/rootauto,mattkretz/root,satyarth934/root,Y--/root,zzxuanyuan/root-compressor-dummy,sirinath/root,jrtomps/root,evgeny-boger/root,Duraznos/root,gganis/root,Y--/root,davidlt/root,zzxuanyuan/root-compressor-dummy,perovic/root,simonpf/root,Duraznos/root,abhinavmoudgil95/root,arch1tect0r/root,sirinath/root,arch1tect0r/root,thomaskeck/root,mkret2/root,karies/root,georgtroska/root,olifre/root,zzxuanyuan/root-compressor-dummy,satyarth934/root,veprbl/root,zzxuanyuan/root,vukasinmilosevic/root,arch1tect0r/root,georgtroska/root,mkret2/root,jrtomps/root,georgtroska/root,georgtroska/root,zzxuanyuan/root,thomaskeck/root,pspe/root,georgtroska/root,georgtroska/root,mhuwiler/rootauto,krafczyk/root,pspe/root,lgiommi/root,krafczyk/root,zzxuanyuan/root,sawenzel/root,simonpf/root,satyarth934/root,lgiommi/root,olifre/root,veprbl/root,mattkretz/root,lgiommi/root,beniz/root,agarciamontoro/root,abhinavmoudgil95/root,lgiommi/root,georgtroska/root,agarciamontoro/root,veprbl/root,BerserkerTroll/root,lgiommi/root,evgeny-boger/root,georgtroska/root,mhuwiler/rootauto,mkret2/root,davidlt/root,bbockelm/root,pspe/root,CristinaCristescu/root,root-mirror/root,vukasinmilosevic/root,karies/root,mkret2/root,vukasinmilosevic/root,gganis/root,root-mirror/root,beniz/root,abhinavmoudgil95/root,BerserkerTroll/root,abhinavmoudgil95/root,sawenzel/root,abhinavmoudgil95/root,davidlt/root,olifre/root,sirinath/root,esakellari/root,karies/root,buuck/root,arch1tect0r/root,lgiommi/root,Y--/root,sawenzel/root,georgtroska/root,buuck/root,BerserkerTroll/root,gbitzes/root,veprbl/root,BerserkerTroll/root,agarciamontoro/root,bbockelm/root,Duraznos/root,jrtomps/root,CristinaCristescu/root,davidlt/root,CristinaCristescu/root,satyarth934/root,mattkretz/root,davidlt/root,satyarth934/root,gganis/root,esakellari/root,davidlt/root,lgiommi/root,nilqed/root,buuck/root,perovic/root,nilqed/root,abhinavmoudgil95/root,zzxuanyuan/root,Duraznos/root,satyarth934/root,gganis/root,Duraznos/root,mattkretz/root,mkret2/root,CristinaCristescu/root,olifre/root,zzxuanyuan/root,georgtroska/root,agarciamontoro/root,arch1tect0r/root,gganis/root,evgeny-boger/root,simonpf/root,satyarth934/root,Y--/root,sawenzel/root,jrtomps/root,mkret2/root,CristinaCristescu/root,sawenzel/root,krafczyk/root,karies/root,perovic/root,vukasinmilosevic/root,pspe/root,karies/root,olifre/root,BerserkerTroll/root,sirinath/root,mattkretz/root,evgeny-boger/root,esakellari/root,karies/root,mattkretz/root,perovic/root,gbitzes/root,mhuwiler/rootauto,mhuwiler/rootauto,mattkretz/root,gbitzes/root,agarciamontoro/root,root-mirror/root,evgeny-boger/root,bbockelm/root,bbockelm/root,zzxuanyuan/root,thomaskeck/root,Y--/root,agarciamontoro/root,thomaskeck/root,pspe/root,mattkretz/root,krafczyk/root,bbockelm/root,olifre/root,jrtomps/root,satyarth934/root,gganis/root,gganis/root,jrtomps/root,zzxuanyuan/root,krafczyk/root,buuck/root,Y--/root,root-mirror/root,veprbl/root,nilqed/root,georgtroska/root,olifre/root,root-mirror/root,nilqed/root,bbockelm/root,jrtomps/root,esakellari/root,sirinath/root,nilqed/root,evgeny-boger/root,simonpf/root,olifre/root,olifre/root,esakellari/root,zzxuanyuan/root-compressor-dummy,Y--/root,buuck/root,veprbl/root,beniz/root,root-mirror/root,beniz/root,satyarth934/root,krafczyk/root,zzxuanyuan/root-compressor-dummy,pspe/root,vukasinmilosevic/root,Duraznos/root,arch1tect0r/root,beniz/root,bbockelm/root,sawenzel/root,buuck/root,karies/root,karies/root,perovic/root,abhinavmoudgil95/root,beniz/root,Duraznos/root,zzxuanyuan/root,veprbl/root,nilqed/root,arch1tect0r/root,evgeny-boger/root,simonpf/root,arch1tect0r/root,mhuwiler/rootauto,pspe/root,BerserkerTroll/root,olifre/root,abhinavmoudgil95/root,bbockelm/root,BerserkerTroll/root,simonpf/root,zzxuanyuan/root,mattkretz/root,agarciamontoro/root,satyarth934/root,thomaskeck/root,esakellari/root,bbockelm/root,simonpf/root,evgeny-boger/root,krafczyk/root,Duraznos/root,buuck/root,nilqed/root,root-mirror/root,thomaskeck/root,gbitzes/root,nilqed/root
bindings/pyroot/ROOTaaS/iPyROOT/cppmagic.py
bindings/pyroot/ROOTaaS/iPyROOT/cppmagic.py
import IPython.core.magic as ipym
import ROOT
import utils

@ipym.magics_class
class CppMagics(ipym.Magics):
    @ipym.line_cell_magic
    def cpp(self, line, cell=None):
        """Inject into root."""
        if cell is None:
            # this is a line magic
            utils.processCppCode(line)
        else:
            utils.processCppCode(cell)

def load_ipython_extension(ipython):
    ipython.register_magics(CppMagics)
import IPython.core.magic as ipym
import ROOT
import utils

@ipym.magics_class
class CppMagics(ipym.Magics):
    @ipym.cell_magic
    def cpp(self, line, cell=None):
        """Inject into root."""
        if cell:
            utils.processCppCode(cell)

def load_ipython_extension(ipython):
    ipython.register_magics(CppMagics)
lgpl-2.1
Python
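Swapping the decorator from cell_magic to line_cell_magic is what makes %cpp work as both a line magic (cell is None, so `line` is forwarded) and a cell magic (`cell` carries the body). An illustrative IPython session, assuming the module is loadable as an extension named cppmagic:

# line form: the text after %cpp is handed to utils.processCppCode(line)
%load_ext cppmagic
%cpp int answer = 42;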
082cc2590f7b263e37fe214e3c4e6fc86039327a
correct pyunit
madmax983/h2o-3,spennihana/h2o-3,weaver-viii/h2o-3,h2oai/h2o-3,junwucs/h2o-3,weaver-viii/h2o-3,h2oai/h2o-dev,datachand/h2o-3,PawarPawan/h2o-v3,PawarPawan/h2o-v3,kyoren/https-github.com-h2oai-h2o-3,pchmieli/h2o-3,brightchen/h2o-3,spennihana/h2o-3,mrgloom/h2o-3,michalkurka/h2o-3,nilbody/h2o-3,mrgloom/h2o-3,datachand/h2o-3,printedheart/h2o-3,datachand/h2o-3,mrgloom/h2o-3,h2oai/h2o-dev,YzPaul3/h2o-3,bospetersen/h2o-3,pchmieli/h2o-3,tarasane/h2o-3,madmax983/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,madmax983/h2o-3,bospetersen/h2o-3,datachand/h2o-3,junwucs/h2o-3,tarasane/h2o-3,junwucs/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,madmax983/h2o-3,brightchen/h2o-3,weaver-viii/h2o-3,madmax983/h2o-3,datachand/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,printedheart/h2o-3,printedheart/h2o-3,h2oai/h2o-3,kyoren/https-github.com-h2oai-h2o-3,YzPaul3/h2o-3,pchmieli/h2o-3,pchmieli/h2o-3,madmax983/h2o-3,tarasane/h2o-3,datachand/h2o-3,mrgloom/h2o-3,PawarPawan/h2o-v3,nilbody/h2o-3,michalkurka/h2o-3,junwucs/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,YzPaul3/h2o-3,printedheart/h2o-3,tarasane/h2o-3,weaver-viii/h2o-3,pchmieli/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,kyoren/https-github.com-h2oai-h2o-3,PawarPawan/h2o-v3,PawarPawan/h2o-v3,bospetersen/h2o-3,mathemage/h2o-3,mathemage/h2o-3,pchmieli/h2o-3,weaver-viii/h2o-3,YzPaul3/h2o-3,nilbody/h2o-3,mrgloom/h2o-3,mrgloom/h2o-3,nilbody/h2o-3,nilbody/h2o-3,bospetersen/h2o-3,mrgloom/h2o-3,spennihana/h2o-3,PawarPawan/h2o-v3,spennihana/h2o-3,brightchen/h2o-3,michalkurka/h2o-3,junwucs/h2o-3,jangorecki/h2o-3,nilbody/h2o-3,h2oai/h2o-dev,kyoren/https-github.com-h2oai-h2o-3,h2oai/h2o-dev,weaver-viii/h2o-3,PawarPawan/h2o-v3,mathemage/h2o-3,YzPaul3/h2o-3,kyoren/https-github.com-h2oai-h2o-3,tarasane/h2o-3,h2oai/h2o-3,brightchen/h2o-3,tarasane/h2o-3,h2oai/h2o-3,kyoren/https-github.com-h2oai-h2o-3,mathemage/h2o-3,spennihana/h2o-3,YzPaul3/h2o-3,junwucs/h2o-3,datachand/h2o-3,bospetersen/h2o-3,h2oai/h2o-3,h2oai/h2o-3,brightchen/h2o-3,pchmieli/h2o-3,printedheart/h2o-3,printedheart/h2o-3,jangorecki/h2o-3,brightchen/h2o-3,kyoren/https-github.com-h2oai-h2o-3,YzPaul3/h2o-3,jangorecki/h2o-3,nilbody/h2o-3,jangorecki/h2o-3,junwucs/h2o-3,tarasane/h2o-3,bospetersen/h2o-3,brightchen/h2o-3,bospetersen/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,madmax983/h2o-3,printedheart/h2o-3,mathemage/h2o-3,weaver-viii/h2o-3,jangorecki/h2o-3
h2o-py/tests/testdir_algos/deeplearning/pyunit_tweedie_weightsDeeplearning.py
h2o-py/tests/testdir_algos/deeplearning/pyunit_tweedie_weightsDeeplearning.py
import sys
sys.path.insert(1, "../../../")
import h2o

def tweedie_weights(ip,port):

    data = h2o.import_frame(h2o.locate("smalldata/glm_test/cancar_logIn.csv"))
    data["C1M3"] = (data["Class"] == 1 and data["Merit"] == 3).asfactor()
    data["C3M3"] = (data["Class"] == 3 and data["Merit"] == 3).asfactor()
    data["C4M3"] = (data["Class"] == 4 and data["Merit"] == 3).asfactor()
    data["C1M2"] = (data["Class"] == 1 and data["Merit"] == 2).asfactor()
    data["Merit"] = data["Merit"].asfactor()
    data["Class"] = data["Class"].asfactor()

    loss = data["Cost"] / data["Insured"]
    loss.setName(0,"Loss")
    cancar = loss.cbind(data)

    # Without weights
    myX = ["Merit","Class","C1M3","C4M3"]
    dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
                          train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
                          force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
                          score_validation_samples = 0)

    mean_residual_deviance = dl.mean_residual_deviance()

    # With weights
    dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
                          train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
                          force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
                          score_validation_samples = 0,weights_column = "Insured",training_frame = cancar)

if __name__ == "__main__":
    h2o.run_test(sys.argv, tweedie_weights)
import sys
sys.path.insert(1, "../../../")
import h2o

#def tweedie_weights(ip,port):
h2o.init()

data = h2o.import_frame(h2o.locate("smalldata/glm_test/cancar_logIn.csv"))
data["C1M3"] = (data["Class"] == 1 and data["Merit"] == 3).asfactor()
data["C3M3"] = (data["Class"] == 3 and data["Merit"] == 3).asfactor()
data["C4M3"] = (data["Class"] == 4 and data["Merit"] == 3).asfactor()
data["C1M2"] = (data["Class"] == 1 and data["Merit"] == 2).asfactor()
data["Merit"] = data["Merit"].asfactor()
data["Class"] = data["Class"].asfactor()

loss = data["Cost"] / data["Insured"]
loss.setName(0,"Loss")
cancar = loss.cbind(data)

# Without weights
myX = ["Merit","Class","C1M3","C4M3"]
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
                      train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
                      force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
                      score_validation_samples = 0)

mean_residual_deviance = dl.mean_residual_deviance()

# With weights
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
                      train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
                      force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
                      score_validation_samples = 0,weights_column = "Insured",training_frame = cancar)

if __name__ == "__main__":
    h2o.run_test(sys.argv, tweedie_weights)
apache-2.0
Python
169dda227f85f77ac52a4295e8fb7acd1b3184f5
Make byte-separator mandatory in MAC addresses
yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti
core/observables/mac_address.py
core/observables/mac_address.py
from __future__ import unicode_literals

import re

from core.observables import Observable


class MacAddress(Observable):
    regex = r'(?P<search>(([0-9A-Fa-f]{1,2}[.:-]){5,7}([0-9A-Fa-f]{1,2})))'

    exclude_fields = Observable.exclude_fields
    DISPLAY_FIELDS = Observable.DISPLAY_FIELDS

    @classmethod
    def is_valid(cls, match):
        value = match.group('search')
        return len(value) > 0

    def normalize(self):
        value = re.sub(r'[.:\-]', '', self.value).upper()
        self.value = ':'.join(
            value[i:i + 2] for i in xrange(0, len(value), 2)
        )
from __future__ import unicode_literals

import re

from core.observables import Observable


class MacAddress(Observable):
    regex = r'(?P<search>(([0-9A-Fa-f]{1,2}[.:-]?){5,7}([0-9A-Fa-f]{1,2})))'

    exclude_fields = Observable.exclude_fields
    DISPLAY_FIELDS = Observable.DISPLAY_FIELDS

    @classmethod
    def is_valid(cls, match):
        value = match.group('search')
        return len(value) > 0

    def normalize(self):
        self.value = re.sub(r'[.:\-]', '', self.value)
        self.value = self.value.upper()
        self.value = \
            ':'.join([self.value[i:i + 2] for i in range(0, len(self.value), 2)])
apache-2.0
Python
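After this change the regex requires a separator after each byte, so a bare 12-digit hex run is no longer tagged as a MAC address. A quick standalone sketch of the tightened pattern (the test values are illustrative):

# Illustrative behaviour of the tightened regex above.
import re

regex = r'(?P<search>(([0-9A-Fa-f]{1,2}[.:-]){5,7}([0-9A-Fa-f]{1,2})))'
assert re.search(regex, '00-1b-63-84-45-e6')        # separated form still matches
assert re.search(regex, '001b638445e6') is None     # bare hex run is now rejected
# normalize() would canonicalize the first value to '00:1B:63:84:45:E6'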
a28f8fe4427c12c2523b16903325d0362b53123e
Drop version dependency
bsmr-misc-forks/letsencrypt,jtl999/certbot,wteiken/letsencrypt,dietsche/letsencrypt,jtl999/certbot,mitnk/letsencrypt,brentdax/letsencrypt,stweil/letsencrypt,thanatos/lets-encrypt-preview,bsmr-misc-forks/letsencrypt,dietsche/letsencrypt,jsha/letsencrypt,kuba/letsencrypt,DavidGarciaCat/letsencrypt,mitnk/letsencrypt,TheBoegl/letsencrypt,letsencrypt/letsencrypt,TheBoegl/letsencrypt,wteiken/letsencrypt,thanatos/lets-encrypt-preview,jsha/letsencrypt,VladimirTyrin/letsencrypt,twstrike/le_for_patching,stweil/letsencrypt,lmcro/letsencrypt,VladimirTyrin/letsencrypt,twstrike/le_for_patching,DavidGarciaCat/letsencrypt,brentdax/letsencrypt,kuba/letsencrypt,lmcro/letsencrypt,letsencrypt/letsencrypt
acme/setup.py
acme/setup.py
import sys

from setuptools import setup
from setuptools import find_packages


version = '0.2.0.dev0'

install_requires = [
    # load_pem_private/public_key (>=0.6)
    # rsa_recover_prime_factors (>=0.8)
    'cryptography>=0.8',
    'ndg-httpsclient',  # urllib3 InsecurePlatformWarning (#304)
    'pyasn1',  # urllib3 InsecurePlatformWarning (#304)
    # Connection.set_tlsext_host_name (>=0.13)
    'PyOpenSSL>=0.13',
    'pyrfc3339',
    'pytz',
    'requests',
    'setuptools',  # pkg_resources
    'six',
    'werkzeug',
]

# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
    install_requires.extend([
        # only some distros recognize stdlib argparse as already satisfying
        'argparse',
        'mock<1.1.0',
    ])
else:
    install_requires.append('mock')

docs_extras = [
    'Sphinx>=1.0',  # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
    'sphinxcontrib-programoutput',
]

testing_extras = [
    'nose',
    'tox',
]

setup(
    name='acme',
    version=version,
    description='ACME protocol implementation in Python',
    url='https://github.com/letsencrypt/letsencrypt',
    author="Let's Encrypt Project",
    author_email='[email protected]',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
        'testing': testing_extras,
    },
    entry_points={
        'console_scripts': [
            'jws = acme.jose.jws:CLI.run',
        ],
    },
    test_suite='acme',
)
import sys

from setuptools import setup
from setuptools import find_packages


version = '0.2.0.dev0'

install_requires = [
    # load_pem_private/public_key (>=0.6)
    # rsa_recover_prime_factors (>=0.8)
    'cryptography>=0.8',
    'ndg-httpsclient',  # urllib3 InsecurePlatformWarning (#304)
    'pyasn1',  # urllib3 InsecurePlatformWarning (#304)
    # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15)
    'PyOpenSSL>=0.15',
    'pyrfc3339',
    'pytz',
    'requests',
    'setuptools',  # pkg_resources
    'six',
    'werkzeug',
]

# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
    install_requires.extend([
        # only some distros recognize stdlib argparse as already satisfying
        'argparse',
        'mock<1.1.0',
    ])
else:
    install_requires.append('mock')

docs_extras = [
    'Sphinx>=1.0',  # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
    'sphinxcontrib-programoutput',
]

testing_extras = [
    'nose',
    'tox',
]

setup(
    name='acme',
    version=version,
    description='ACME protocol implementation in Python',
    url='https://github.com/letsencrypt/letsencrypt',
    author="Let's Encrypt Project",
    author_email='[email protected]',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
        'testing': testing_extras,
    },
    entry_points={
        'console_scripts': [
            'jws = acme.jose.jws:CLI.run',
        ],
    },
    test_suite='acme',
)
apache-2.0
Python
b7c1c4bca84031cdb115d38d30e86cded02f1fdd
Expand scope for pylint: disable=not-supported-yet.
deepmind/acme,deepmind/acme
acme/types.py
acme/types.py
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Common types used throughout Acme."""

from typing import Any, Callable, Iterable, Mapping, NamedTuple, Union
from acme import specs

# Define types for nested arrays and tensors.
# TODO(b/144758674): Replace these with recursive type definitions.
NestedArray = Any
NestedTensor = Any

# pytype: disable=not-supported-yet
NestedSpec = Union[
    specs.Array,
    Iterable['NestedSpec'],
    Mapping[Any, 'NestedSpec'],
]
# pytype: enable=not-supported-yet

# TODO(b/144763593): Replace all instances of nest with the tensor/array types.
Nest = Union[NestedArray, NestedTensor, NestedSpec]

TensorTransformation = Callable[[NestedTensor], NestedTensor]
TensorValuedCallable = Callable[..., NestedTensor]


class Transition(NamedTuple):
  """Container for a transition."""
  observation: NestedArray
  action: NestedArray
  reward: NestedArray
  discount: NestedArray
  next_observation: NestedArray
  extras: NestedArray = ()
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Common types used throughout Acme."""

from typing import Any, Callable, Iterable, Mapping, NamedTuple, Union
from acme import specs

# Define types for nested arrays and tensors.
# TODO(b/144758674): Replace these with recursive type definitions.
NestedArray = Any
NestedTensor = Any
NestedSpec = Union[
    specs.Array,
    Iterable['NestedSpec'],
    Mapping[Any, 'NestedSpec'],  # pytype: disable=not-supported-yet
]

# TODO(b/144763593): Replace all instances of nest with the tensor/array types.
Nest = Union[NestedArray, NestedTensor, NestedSpec]

TensorTransformation = Callable[[NestedTensor], NestedTensor]
TensorValuedCallable = Callable[..., NestedTensor]


class Transition(NamedTuple):
  """Container for a transition."""
  observation: NestedArray
  action: NestedArray
  reward: NestedArray
  discount: NestedArray
  next_observation: NestedArray
  extras: NestedArray = ()
apache-2.0
Python
e7a632718f379fb1ede70d1086f55279e4251e11
fix geotag access - not an obj
spring-week-topos/cinder-week,spring-week-topos/cinder-week
cinder/scheduler/filters/geo_tags_filter.py
cinder/scheduler/filters/geo_tags_filter.py
# Copyright (c) 2014 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder import db
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters

LOG = logging.getLogger(__name__)


class GeoTagsFilter(filters.BaseHostFilter):
    """GeoTags Filter."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host has sufficient capacity."""
        #(licostan): Add geotag data to the host_state instead of
        #querying it...
        #TODO: add scheduler hints to cinder.
        metadata_hints = filter_properties.get('metadata') or {}
        gt_hints = metadata_hints.get('geo_tags', None)
        context = filter_properties['context']
        geo_tag = db.geo_tag_get_by_node_name(context, host_state.host)
        if not geo_tag:
            LOG.info('NO GEO TAG FOUND FOR %s' % host_state.host)
            return True

        #do other geotags check here based on gt-hints
        if geo_tag['valid_invalid'].lower() == 'valid':
            LOG.info('GEO TAG FOUND FOR %s' % host_state.host)
            return True

        LOG.info('GEO TAG INVALID FOR %s' % host_state.host)
        return False
# Copyright (c) 2014 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder import db
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters

LOG = logging.getLogger(__name__)


class GeoTagsFilter(filters.BaseHostFilter):
    """GeoTags Filter."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host has sufficient capacity."""
        #(licostan): Add geotag data to the host_state instead of
        #querying it...
        #TODO: add scheduler hints to cinder.
        metadata_hints = filter_properties.get('metadata') or {}
        gt_hints = metadata_hints.get('geo_tags', None)
        context = filter_properties['context']
        geo_tag = db.geo_tag_get_by_node_name(context, host_state.host)
        if not geo_tag:
            LOG.info('NO GEO TAG FOUND FOR %s' % host_state.host)
            return True

        #do other geotags check here based on gt-hints
        if geo_tag.valid_invalid.lower() == 'valid':
            LOG.info('GEO TAG FOUND FOR %s' % host_state.host)
            return True

        LOG.info('GEO TAG INVALID FOR %s' % host_state.host)
        return False
apache-2.0
Python
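The one-line fix swaps attribute access for subscripting because, as the subject "not an obj" suggests, the row returned by db.geo_tag_get_by_node_name behaves like a mapping rather than an object with attributes. A trivial illustration with a plain dict standing in for the row:

# Stand-in for the geo tag record (hypothetical value).
row = {'valid_invalid': 'Valid'}
assert row['valid_invalid'].lower() == 'valid'
# row.valid_invalid would raise AttributeError on a mapping like this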
16ab5dcf1f6e52f89435adccdfa7021ce24e29a8
fix formatting via make fix
HazyResearch/metal,HazyResearch/metal
tests/metal/contrib/test_baselines.py
tests/metal/contrib/test_baselines.py
import numpy as np
import torch

from metal.end_model import SparseLogisticRegression


def test_sparselogreg(self):
    """Confirm sparse logreg can overfit, works on padded data"""
    F = 1000  # total number of possible features
    N = 50  # number of data points
    S = [10, 100]  # range of features per data point

    X = np.zeros((N, S[1]))
    for i in range(N):
        Si = np.random.randint(S[0], S[1])
        X[i, :Si] = np.random.randint(F, size=(1, Si))
    X = torch.from_numpy(X).long()
    Y = torch.from_numpy(np.random.randint(1, 3, size=(N,)))

    em = SparseLogisticRegression(
        seed=1, input_dim=F, padding_idx=0, verbose=False
    )
    em.train_model((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005)
    self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0)
    score = em.score((X, Y), verbose=False)
    self.assertGreater(score, 0.95)
import numpy as np
import torch

from metal.end_model import SparseLogisticRegression


def test_sparselogreg(self):
    """Confirm sparse logreg can overfit, works on padded data"""
    F = 1000  # total number of possible features
    N = 50  # number of data points
    S = [10, 100]  # range of features per data point

    X = np.zeros((N, S[1]))
    for i in range(N):
        Si = np.random.randint(S[0], S[1])
        X[i, :Si] = np.random.randint(F, size=(1, Si))
    X = torch.from_numpy(X).long()
    Y = torch.from_numpy(np.random.randint(1, 3, size=(N,)))

    em = SparseLogisticRegression(
        seed=1, input_dim=F, padding_idx=0, verbose=False
    )
    em.train_model((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005)
    self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0)
    score = em.score((X, Y), verbose=False)
    self.assertGreater(score, 0.95)
apache-2.0
Python
0341c38dff42ae5e86353c6d53c2d30aabca555e
update py-jupyter-client and new setuptools dependency (#13425)
iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack
var/spack/repos/builtin/packages/py-jupyter-client/package.py
var/spack/repos/builtin/packages/py-jupyter-client/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyJupyterClient(PythonPackage):
    """Jupyter protocol client APIs"""

    homepage = "https://github.com/jupyter/jupyter_client"
    url = "https://github.com/jupyter/jupyter_client/archive/4.4.0.tar.gz"

    version('5.3.4', sha256='2af6f0e0e4d88009b11103490bea0bfb405c1c470e226c2b7b17c10e5dda9734')
    version('4.4.0', sha256='2fda7fe1af35f0b4a77c4a2fd4ee38ac3666ed7f4d92a5b6ff8aaf764c38e199')
    version('4.3.0', sha256='90b6ea3ced910ed94c5d558373490a81b33c672d877c1ffdc76b281e3216f1f6')
    version('4.2.2', sha256='bf3e8ea4c44f07dbe2991e41031f6dab242734be424f4d40b72cc58a12c7d2ca')
    version('4.2.1', sha256='547d443fb38ea667b468a6625ac374d476f8ac90fe17c3e35d75cab3cb8d40ba')
    version('4.2.0', sha256='00eab54615fb10f1e508d8e7a952fbeeb2a82cd67b17582bd61be51a08a61d89')
    version('4.1.1', sha256='ca6f3f66d5dc1e9bca81696ae607a93d652210c3ee9385a7c31c067d5ba88e6e')
    version('4.1.0', sha256='ecf76a159381ec9880fd2c31388c6983b1d855f92f0292cf0667a90dd63f51c0')
    version('4.0.0', sha256='33b15abb1307d8d3716b0d3b5d07aa22fdfbbf65a9f1aedf478a274a6adc11c0')

    depends_on('[email protected]:2.8,3.3:', type=('build', 'run'))
    depends_on('[email protected]:2.8,3.5:', type=('build', 'run'), when='@5:')
    depends_on('py-traitlets', type=('build', 'run'))
    depends_on('py-jupyter-core', type=('build', 'run'))
    depends_on('py-pyzmq@13:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'), when='@5:')
    depends_on('[email protected]:', type=('build', 'run'), when='@5:')
    depends_on('py-setuptools', type='build', when='@5:')
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyJupyterClient(PythonPackage):
    """Jupyter protocol client APIs"""

    homepage = "https://github.com/jupyter/jupyter_client"
    url = "https://github.com/jupyter/jupyter_client/archive/4.4.0.tar.gz"

    version('4.4.0', sha256='2fda7fe1af35f0b4a77c4a2fd4ee38ac3666ed7f4d92a5b6ff8aaf764c38e199')
    version('4.3.0', sha256='90b6ea3ced910ed94c5d558373490a81b33c672d877c1ffdc76b281e3216f1f6')
    version('4.2.2', sha256='bf3e8ea4c44f07dbe2991e41031f6dab242734be424f4d40b72cc58a12c7d2ca')
    version('4.2.1', sha256='547d443fb38ea667b468a6625ac374d476f8ac90fe17c3e35d75cab3cb8d40ba')
    version('4.2.0', sha256='00eab54615fb10f1e508d8e7a952fbeeb2a82cd67b17582bd61be51a08a61d89')
    version('4.1.1', sha256='ca6f3f66d5dc1e9bca81696ae607a93d652210c3ee9385a7c31c067d5ba88e6e')
    version('4.1.0', sha256='ecf76a159381ec9880fd2c31388c6983b1d855f92f0292cf0667a90dd63f51c0')
    version('4.0.0', sha256='33b15abb1307d8d3716b0d3b5d07aa22fdfbbf65a9f1aedf478a274a6adc11c0')

    depends_on('[email protected]:2.8,3.3:')
    depends_on('py-traitlets', type=('build', 'run'))
    depends_on('py-jupyter-core', type=('build', 'run'))
    depends_on('py-pyzmq@13:', type=('build', 'run'))
lgpl-2.1
Python
bbe835c8aa561d8db58e116f0e55a5b19c4f9ca4
Fix sitemap memory consumption during generation
FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares
firecares/sitemaps.py
firecares/sitemaps.py
from django.contrib import sitemaps
from firecares.firestation.models import FireDepartment
from django.db.models import Max
from django.core.urlresolvers import reverse


class BaseSitemap(sitemaps.Sitemap):
    protocol = 'https'

    def items(self):
        return ['media', 'models_performance_score', 'models_community_risk',
                'safe_grades', 'login', 'contact_us', 'firedepartment_list']

    def priority(self, item):
        return 1

    def location(self, item):
        return reverse(item)


class DepartmentsSitemap(sitemaps.Sitemap):
    protocol = 'https'
    max_population = 1

    def items(self):
        queryset = FireDepartment.objects.filter(archived=False).only('population', 'featured', 'name')
        self.max_population = queryset.aggregate(Max('population'))['population__max']
        return queryset

    def location(self, item):
        return item.get_absolute_url()

    def priority(self, item):
        if item.featured is True:
            return 1
        if item.population is None:
            return 0

        # adding a bit to the total so featured items are always above others
        priority = item.population / float(self.max_population + 0.1)
        return priority

    def lastmod(self, item):
        return item.modified
from django.contrib import sitemaps
from firecares.firestation.models import FireDepartment
from django.db.models import Max
from django.core.urlresolvers import reverse


class BaseSitemap(sitemaps.Sitemap):
    protocol = 'https'

    def items(self):
        return ['media', 'models_performance_score', 'models_community_risk',
                'safe_grades', 'login', 'contact_us', 'firedepartment_list']

    def priority(self, item):
        return 1

    def location(self, item):
        return reverse(item)


class DepartmentsSitemap(sitemaps.Sitemap):
    protocol = 'https'
    max_population = 1

    def items(self):
        queryset = FireDepartment.objects.filter(archived=False)
        self.max_population = queryset.aggregate(Max('population'))['population__max']
        return queryset

    def location(self, item):
        return item.get_absolute_url()

    def priority(self, item):
        if item.featured is True:
            return 1
        if item.population is None:
            return 0

        # adding a bit to the total so featured items are always above others
        priority = item.population / float(self.max_population + 0.1)
        return priority

    def lastmod(self, item):
        return item.modified
mit
Python
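The memory saving comes from only(): Django then selects just the named columns up front and defers the rest of each FireDepartment row, so sitemap generation no longer materializes full rows. A rough sketch of the effect (field names taken from the queryset above; note that touching any deferred field later costs one extra query per object):

# Narrow SELECT: only these columns are loaded eagerly.
lean = FireDepartment.objects.filter(archived=False).only('population', 'featured', 'name')
for dept in lean:
    dept.population  # already loaded, no extra query
    # dept.modified would trigger a deferred-field query for this row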
9c7d1deba7dbde9285e49cb2966b1d242ac8ddc2
Use sphinxapi if available
matchbox/flask-sphinxsearch
flask_sphinxsearch.py
flask_sphinxsearch.py
try:
    import sphinxapi as sphinxsearch
except ImportError:
    import sphinxsearch

from flask import current_app

# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
    from flask import _app_ctx_stack as stack
except ImportError:
    from flask import _request_ctx_stack as stack


class Sphinx(object):
    """
    Simple wrapper around the `SphinxClient` object.

    Usage:

        from flask.ext.sphinxsearch import Sphinx
        from myapp import app

        sphinx = Sphinx(myapp)
        print sphinx.client.Query("query")
    """

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.config.setdefault('SPHINX_HOST', 'localhost')
        app.config.setdefault('SPHINX_PORT', 3312)

    def connect(self):
        client = sphinxsearch.SphinxClient()
        client.SetServer(
            current_app.config['SPHINX_HOST'],
            current_app.config['SPHINX_PORT'])
        return client

    @property
    def client(self):
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'sphinxclient'):
                ctx.sphinxclient = self.connect()
            return ctx.sphinxclient


# set constants on the Sphinx object, for ease of use
for key in dir(sphinxsearch):
    if key == key.upper():
        setattr(Sphinx, key, getattr(sphinxsearch, key))
import sphinxsearch

from flask import current_app

# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
    from flask import _app_ctx_stack as stack
except ImportError:
    from flask import _request_ctx_stack as stack


class Sphinx(object):
    """
    Simple wrapper around the `SphinxClient` object.

    Usage:

        from flask.ext.sphinxsearch import Sphinx
        from myapp import app

        sphinx = Sphinx(myapp)
        print sphinx.client.Query("query")
    """

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.config.setdefault('SPHINX_HOST', 'localhost')
        app.config.setdefault('SPHINX_PORT', 3312)

    def connect(self):
        client = sphinxsearch.SphinxClient()
        client.SetServer(
            current_app.config['SPHINX_HOST'],
            current_app.config['SPHINX_PORT'])
        return client

    @property
    def client(self):
        ctx = stack.top
        if ctx is not None:
            if not hasattr(ctx, 'sphinxclient'):
                ctx.sphinxclient = self.connect()
            return ctx.sphinxclient


# set constants on the Sphinx object, for ease of use
for key in dir(sphinxsearch):
    if key == key.upper():
        setattr(Sphinx, key, getattr(sphinxsearch, key))
apache-2.0
Python
e959f849550fe4cfd2f2230c149a9bc0cb01bfe4
bump version
mpdavis/python-jose
jose/__init__.py
jose/__init__.py
__version__ = "2.0.1" __author__ = 'Michael Davis' __license__ = 'MIT' __copyright__ = 'Copyright 2016 Michael Davis' from .exceptions import JOSEError from .exceptions import JWSError from .exceptions import ExpiredSignatureError from .exceptions import JWTError
__version__ = "2.0.0" __author__ = 'Michael Davis' __license__ = 'MIT' __copyright__ = 'Copyright 2016 Michael Davis' from .exceptions import JOSEError from .exceptions import JWSError from .exceptions import ExpiredSignatureError from .exceptions import JWTError
mit
Python
7f38e297dcfc9a664af092f48a9dc596f5f6c27b
Fix PermissionError: [Errno 13] Permission denied on Windows
person142/scipy,perimosocordiae/scipy,mdhaber/scipy,jjhelmus/scipy,apbard/scipy,perimosocordiae/scipy,grlee77/scipy,tylerjereddy/scipy,rgommers/scipy,anntzer/scipy,pizzathief/scipy,ilayn/scipy,scipy/scipy,vigna/scipy,aarchiba/scipy,Eric89GXL/scipy,zerothi/scipy,vigna/scipy,jakevdp/scipy,josephcslater/scipy,Eric89GXL/scipy,endolith/scipy,vigna/scipy,jor-/scipy,aeklant/scipy,jamestwebber/scipy,rgommers/scipy,endolith/scipy,tylerjereddy/scipy,dominicelse/scipy,rgommers/scipy,gfyoung/scipy,jamestwebber/scipy,pbrod/scipy,matthew-brett/scipy,mdhaber/scipy,rgommers/scipy,befelix/scipy,anntzer/scipy,scipy/scipy,grlee77/scipy,vigna/scipy,anntzer/scipy,perimosocordiae/scipy,WarrenWeckesser/scipy,WarrenWeckesser/scipy,nmayorov/scipy,e-q/scipy,pbrod/scipy,tylerjereddy/scipy,Stefan-Endres/scipy,Eric89GXL/scipy,andyfaff/scipy,befelix/scipy,gertingold/scipy,arokem/scipy,apbard/scipy,e-q/scipy,perimosocordiae/scipy,person142/scipy,josephcslater/scipy,e-q/scipy,ilayn/scipy,scipy/scipy,nmayorov/scipy,jjhelmus/scipy,grlee77/scipy,Stefan-Endres/scipy,lhilt/scipy,andyfaff/scipy,jakevdp/scipy,Eric89GXL/scipy,josephcslater/scipy,anntzer/scipy,ilayn/scipy,person142/scipy,grlee77/scipy,gertingold/scipy,apbard/scipy,endolith/scipy,grlee77/scipy,dominicelse/scipy,perimosocordiae/scipy,WarrenWeckesser/scipy,arokem/scipy,e-q/scipy,jjhelmus/scipy,person142/scipy,e-q/scipy,gfyoung/scipy,perimosocordiae/scipy,jamestwebber/scipy,aeklant/scipy,Stefan-Endres/scipy,jor-/scipy,dominicelse/scipy,WarrenWeckesser/scipy,Eric89GXL/scipy,anntzer/scipy,aeklant/scipy,nmayorov/scipy,pbrod/scipy,gfyoung/scipy,gfyoung/scipy,Stefan-Endres/scipy,WarrenWeckesser/scipy,zerothi/scipy,aarchiba/scipy,Stefan-Endres/scipy,zerothi/scipy,gertingold/scipy,josephcslater/scipy,befelix/scipy,pizzathief/scipy,Eric89GXL/scipy,zerothi/scipy,anntzer/scipy,befelix/scipy,gertingold/scipy,ilayn/scipy,dominicelse/scipy,jamestwebber/scipy,Stefan-Endres/scipy,lhilt/scipy,aarchiba/scipy,andyfaff/scipy,lhilt/scipy,endolith/scipy,jamestwebber/scipy,matthew-brett/scipy,josephcslater/scipy,vigna/scipy,andyfaff/scipy,gfyoung/scipy,aeklant/scipy,matthew-brett/scipy,arokem/scipy,tylerjereddy/scipy,pbrod/scipy,matthew-brett/scipy,rgommers/scipy,befelix/scipy,scipy/scipy,pbrod/scipy,person142/scipy,pizzathief/scipy,ilayn/scipy,matthew-brett/scipy,aarchiba/scipy,zerothi/scipy,apbard/scipy,andyfaff/scipy,gertingold/scipy,apbard/scipy,dominicelse/scipy,jor-/scipy,jakevdp/scipy,jakevdp/scipy,nmayorov/scipy,arokem/scipy,pizzathief/scipy,lhilt/scipy,tylerjereddy/scipy,mdhaber/scipy,scipy/scipy,pbrod/scipy,lhilt/scipy,scipy/scipy,andyfaff/scipy,jjhelmus/scipy,arokem/scipy,mdhaber/scipy,aeklant/scipy,WarrenWeckesser/scipy,jakevdp/scipy,endolith/scipy,pizzathief/scipy,jor-/scipy,zerothi/scipy,nmayorov/scipy,mdhaber/scipy,jor-/scipy,endolith/scipy,mdhaber/scipy,jjhelmus/scipy,ilayn/scipy,aarchiba/scipy
scipy/sparse/tests/test_matrix_io.py
scipy/sparse/tests/test_matrix_io.py
import os
import numpy as np
import tempfile

from numpy.testing import assert_array_almost_equal, run_module_suite, assert_
from scipy.sparse import csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix, save_npz, load_npz


def _save_and_load(matrix):
    fd, tmpfile = tempfile.mkstemp(suffix='.npz')
    os.close(fd)
    try:
        save_npz(tmpfile, matrix)
        loaded_matrix = load_npz(tmpfile)
    finally:
        os.remove(tmpfile)
    return loaded_matrix


def _check_save_and_load(dense_matrix):
    for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
        matrix = matrix_class(dense_matrix)
        loaded_matrix = _save_and_load(matrix)
        assert_(type(loaded_matrix) is matrix_class)
        assert_(loaded_matrix.shape == dense_matrix.shape)
        assert_(loaded_matrix.dtype == dense_matrix.dtype)
        assert_array_almost_equal(loaded_matrix.toarray(), dense_matrix)


def test_save_and_load_random():
    N = 10
    np.random.seed(0)
    dense_matrix = np.random.random((N, N))
    dense_matrix[dense_matrix > 0.7] = 0
    _check_save_and_load(dense_matrix)


def test_save_and_load_empty():
    dense_matrix = np.zeros((4,6))
    _check_save_and_load(dense_matrix)


def test_save_and_load_one_entry():
    dense_matrix = np.zeros((4,6))
    dense_matrix[1,2] = 1
    _check_save_and_load(dense_matrix)


if __name__ == "__main__":
    run_module_suite()
import numpy as np
import tempfile

from numpy.testing import assert_array_almost_equal, run_module_suite, assert_
from scipy.sparse import csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix, save_npz, load_npz


def _save_and_load(matrix):
    with tempfile.NamedTemporaryFile(suffix='.npz') as file:
        file = file.name
        save_npz(file, matrix)
        loaded_matrix = load_npz(file)
    return loaded_matrix


def _check_save_and_load(dense_matrix):
    for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
        matrix = matrix_class(dense_matrix)
        loaded_matrix = _save_and_load(matrix)
        assert_(type(loaded_matrix) is matrix_class)
        assert_(loaded_matrix.shape == dense_matrix.shape)
        assert_(loaded_matrix.dtype == dense_matrix.dtype)
        assert_array_almost_equal(loaded_matrix.toarray(), dense_matrix)


def test_save_and_load_random():
    N = 10
    np.random.seed(0)
    dense_matrix = np.random.random((N, N))
    dense_matrix[dense_matrix > 0.7] = 0
    _check_save_and_load(dense_matrix)


def test_save_and_load_empty():
    dense_matrix = np.zeros((4,6))
    _check_save_and_load(dense_matrix)


def test_save_and_load_one_entry():
    dense_matrix = np.zeros((4,6))
    dense_matrix[1,2] = 1
    _check_save_and_load(dense_matrix)


if __name__ == "__main__":
    run_module_suite()
bsd-3-clause
Python
b1277cd79102a30a894e370ab15773e6d86569ec
fix n/a causing issues for OT0010 ingest, sigh
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
scripts/ingestors/other/parse0010.py
scripts/ingestors/other/parse0010.py
"""ISU Agronomy Hall Vantage Pro 2 OT0010""" from __future__ import print_function import datetime import re import os import sys import pytz from pyiem.datatypes import speed, temperature, humidity from pyiem.observation import Observation from pyiem.meteorology import dewpoint from pyiem.util import get_dbconn def main(): """Go Main Go""" iemaccess = get_dbconn('iem') cursor = iemaccess.cursor() valid = datetime.datetime.utcnow() valid = valid.replace(tzinfo=pytz.utc) valid = valid.astimezone(pytz.timezone("America/Chicago")) fn = valid.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/text/ot/ot0010.dat") if not os.path.isfile(fn): sys.exit(0) lines = open(fn, "r").readlines() lastline = lines[-1].strip() tokens = re.split(r"[\s+]+", lastline) if len(tokens) != 20: return tparts = re.split(":", tokens[3]) valid = valid.replace(hour=int(tparts[0]), minute=int(tparts[1]), second=0, microsecond=0) iem = Observation("OT0010", "OT", valid) iem.data['tmpf'] = float(tokens[4]) iem.data['max_tmpf'] = float(tokens[5]) iem.data['min_tmpf'] = float(tokens[6]) iem.data['relh'] = int(tokens[7]) iem.data['dwpf'] = dewpoint(temperature(iem.data['tmpf'], 'F'), humidity(iem.data['relh'], '%')).value("F") iem.data['sknt'] = speed(float(tokens[8]), 'mph').value('KT') iem.data['drct'] = int(tokens[9]) iem.data['max_sknt'] = speed(float(tokens[10]), 'mph').value('KT') iem.data['alti'] = float(tokens[12]) iem.data['pday'] = float(tokens[13]) iem.data['srad'] = None if tokens[18] == 'n/a' else float(tokens[18]) iem.save(cursor) cursor.close() iemaccess.commit() if __name__ == '__main__': main()
"""ISU Agronomy Hall Vantage Pro 2 OT0010""" from __future__ import print_function import datetime import re import os import sys import pytz from pyiem.datatypes import speed, temperature, humidity from pyiem.observation import Observation from pyiem.meteorology import dewpoint from pyiem.util import get_dbconn def main(): """Go Main Go""" iemaccess = get_dbconn('iem') cursor = iemaccess.cursor() valid = datetime.datetime.utcnow() valid = valid.replace(tzinfo=pytz.utc) valid = valid.astimezone(pytz.timezone("America/Chicago")) fn = valid.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/text/ot/ot0010.dat") if not os.path.isfile(fn): sys.exit(0) lines = open(fn, "r").readlines() lastline = lines[-1].strip() tokens = re.split(r"[\s+]+", lastline) if len(tokens) != 20: return tparts = re.split(":", tokens[3]) valid = valid.replace(hour=int(tparts[0]), minute=int(tparts[1]), second=0, microsecond=0) iem = Observation("OT0010", "OT", valid) iem.data['tmpf'] = float(tokens[4]) iem.data['max_tmpf'] = float(tokens[5]) iem.data['min_tmpf'] = float(tokens[6]) iem.data['relh'] = int(tokens[7]) iem.data['dwpf'] = dewpoint(temperature(iem.data['tmpf'], 'F'), humidity(iem.data['relh'], '%')).value("F") iem.data['sknt'] = speed(float(tokens[8]), 'mph').value('KT') iem.data['drct'] = int(tokens[9]) iem.data['max_sknt'] = speed(float(tokens[10]), 'mph').value('KT') iem.data['alti'] = float(tokens[12]) iem.data['pday'] = float(tokens[13]) iem.data['srad'] = float(tokens[18]) iem.save(cursor) cursor.close() iemaccess.commit() if __name__ == '__main__': main()
mit
Python
c7a79f81734f360a232b2f91630872ad56a1ffa4
clean up audio init
algorithmic-music-exploration/amen,algorithmic-music-exploration/amen
amen/audio.py
amen/audio.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import librosa

from amen.timing_list import TimingList


class Audio(object):
    """
    Audio object: should wrap the output from libRosa.
    """

    def __init__(self, file_path, convert_to_mono=False, sample_rate=22050):
        """
        Opens a file path, loads it with librosa.
        """
        self.file_path = file_path
        y, sr = librosa.load(file_path, mono=convert_to_mono, sr=sample_rate)
        self.sample_rate = float(sr)
        self.raw_samples = y
        self.num_channels = y.ndim
        self.duration = librosa.get_duration(y=y, sr=sr)
        self.timings = self.create_timings()

    def create_timings(self):
        timings = {}
        timings['beats'] = TimingList('beats', self.get_beats(), self)
        return timings

    def get_beats(self):
        y_mono = librosa.to_mono(self.raw_samples)
        tempo, beat_frames = librosa.beat.beat_track(
            y=y_mono, sr=self.sample_rate, trim=False)

        # convert frames to times
        beat_times = librosa.frames_to_time(beat_frames, sr=self.sample_rate)

        # make the list of (start, duration)s that TimingList expects
        starts_durs = []
        for i, start in enumerate(beat_times[:-1]):
            starts_durs.append((start, beat_times[i+1] - start))
        # now get the last one
        starts_durs.append((beat_times[-1], self.duration - beat_times[-1]))

        return starts_durs
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import librosa

from amen.timing_list import TimingList


class Audio(object):
    """
    Audio object: should wrap the output from libRosa.
    """

    def __init__(self, file_path, convert_to_mono=False, sample_rate=22050):
        """
        Opens a file path, loads it with librosa.
        """
        self.file_path = file_path
        y, sr = librosa.load(file_path, mono=convert_to_mono, sr=sample_rate)
        self.sample_rate = float(sr)
        self.raw_samples = y
        if convert_to_mono:
            self.num_channels = 1
        else:
            self.num_channels = 2
        self.duration = len(self.raw_samples) / self.sample_rate
        self.timings = self.create_timings()

    def create_timings(self):
        timings = {}
        timings['beats'] = TimingList('beats', self.get_beats(), self)
        return timings

    def get_beats(self):
        y_mono = librosa.to_mono(self.raw_samples)
        tempo, beat_frames = librosa.beat.beat_track(
            y=y_mono, sr=self.sample_rate, trim=False)

        # convert frames to times
        beat_times = librosa.frames_to_time(beat_frames, sr=self.sample_rate)

        # make the list of (start, duration)s that TimingList expects
        starts_durs = []
        for i, start in enumerate(beat_times[:-1]):
            starts_durs.append((start, beat_times[i+1] - start))
        # now get the last one
        starts_durs.append((beat_times[-1], self.duration - beat_times[-1]))

        return starts_durs
bsd-2-clause
Python
1c56aeb3d96dbb26da62203d690b4ff49b4b5c0e
bump version to 0.5.2
briney/abstar
abstar/version.py
abstar/version.py
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
__version__ = '0.5.2'
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
__version__ = '0.5.1'
mit
Python
609cffb674ba0494bbe450d8ce7839168a3d5a0a
remove unnecessary code from forms
mupi/timtec,mupi/escolamupi,mupi/tecsaladeaula,GustavoVS/timtec,virgilio/timtec,mupi/tecsaladeaula,mupi/timtec,virgilio/timtec,AllanNozomu/tecsaladeaula,mupi/escolamupi,hacklabr/timtec,hacklabr/timtec,AllanNozomu/tecsaladeaula,mupi/tecsaladeaula,virgilio/timtec,hacklabr/timtec,GustavoVS/timtec,hacklabr/timtec,GustavoVS/timtec,mupi/tecsaladeaula,AllanNozomu/tecsaladeaula,GustavoVS/timtec,AllanNozomu/tecsaladeaula,mupi/timtec,mupi/timtec,virgilio/timtec
accounts/forms.py
accounts/forms.py
# -*- coding: utf-8 -*-
from django.contrib.auth import get_user_model
from django import forms
from django.utils.translation import ugettext_lazy as _

User = get_user_model()


class ProfileEditForm(forms.ModelForm):
    email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
    password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
    password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name', 'picture',
                  'occupation', 'city', 'site', 'biography',)

    def clean_username(self):
        return self.instance.username

    def clean_password2(self):
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        if self.cleaned_data['password1']:
            self.instance.set_password(self.cleaned_data['password1'])
        return super(ProfileEditForm, self).save(commit=commit)
# -*- coding: utf-8 -*-
try:
    from django.contrib.auth import get_user_model
except ImportError:
    from django.contrib.auth.models import User
else:
    User = get_user_model()

from django import forms
from django.utils.translation import ugettext_lazy as _


class ProfileEditForm(forms.ModelForm):
    email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
    password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
    password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name', 'picture',
                  'occupation', 'city', 'site', 'biography',)

    def clean_username(self):
        return self.instance.username

    def clean_password2(self):
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        if self.cleaned_data['password1']:
            self.instance.set_password(self.cleaned_data['password1'])
        return super(ProfileEditForm, self).save(commit=commit)
agpl-3.0
Python
67be76a3d65fa846c8888ef5415ec3df5ef9ab87
Add test for expired tokens
randomic/aniauth-tdd,randomic/aniauth-tdd
accounts/tests.py
accounts/tests.py
"""accounts app unittests """ import base64 from time import sleep from django.contrib.auth import get_user_model from django.test import TestCase from accounts.token import LoginTokenGenerator TEST_EMAIL = '[email protected]' class WelcomePageTest(TestCase): """Tests relating to the welcome_page view. """ def test_uses_welcome_template(self): """The root url should response with the welcome page template. """ response = self.client.get('/') self.assertTemplateUsed(response, 'accounts/welcome.html') class UserModelTest(TestCase): """Tests for passwordless user model. """ def test_user_valid_with_only_email(self): """Should not raise if the user model is happy with email only. """ user = get_user_model()(email=TEST_EMAIL) user.full_clean() def test_users_are_authenticated(self): """User objects should be authenticated for views/templates. """ user = get_user_model()() self.assertTrue(user.is_authenticated()) class TokenGeneratorTest(TestCase): """Tests for login token model. """ def setUp(self): self.generator = LoginTokenGenerator() def test_unique_tokens_generated(self): """Tokens generated one second apart should differ. """ token1 = self.generator.create_token(TEST_EMAIL) sleep(1) token2 = self.generator.create_token(TEST_EMAIL) self.assertNotEqual(token1, token2) def test_email_recovered_from_token(self): """A consumed token should yield the original email address. """ token = self.generator.create_token(TEST_EMAIL) email = self.generator.consume_token(token) self.assertEqual(email, TEST_EMAIL) def test_modified_token_fails(self): """A modified token returns None instead of an email. """ token = self.generator.create_token(TEST_EMAIL) # Modify the email address which is 'signed'. split_token = base64.urlsafe_b64decode( token.encode() ).decode().split('@') split_token[0] = 'maliciousvisitor' malicious_token = base64.urlsafe_b64encode( '@'.join(split_token).encode() ).decode() self.assertIsNone(self.generator.consume_token(malicious_token)) def test_expired_token_fails(self): """A token which has expired returns None instead of an email. """ token = self.generator.create_token(TEST_EMAIL) sleep(1) # Ensure the token is more than 0 seconds old. email = self.generator.consume_token(token, 0) self.assertIsNone(email)
"""accounts app unittests """ import base64 from time import sleep from django.contrib.auth import get_user_model from django.test import TestCase from accounts.token import LoginTokenGenerator TEST_EMAIL = '[email protected]' class WelcomePageTest(TestCase): """Tests relating to the welcome_page view. """ def test_uses_welcome_template(self): """The root url should response with the welcome page template. """ response = self.client.get('/') self.assertTemplateUsed(response, 'accounts/welcome.html') class UserModelTest(TestCase): """Tests for passwordless user model. """ def test_user_valid_with_only_email(self): """Should not raise if the user model is happy with email only. """ user = get_user_model()(email=TEST_EMAIL) user.full_clean() def test_users_are_authenticated(self): """User objects should be authenticated for views/templates. """ user = get_user_model()() self.assertTrue(user.is_authenticated()) class TokenGeneratorTest(TestCase): """Tests for login token model. """ def setUp(self): self.generator = LoginTokenGenerator() def test_unique_tokens_generated(self): """Tokens generated one second apart should differ. """ token1 = self.generator.create_token(TEST_EMAIL) sleep(1) token2 = self.generator.create_token(TEST_EMAIL) self.assertNotEqual(token1, token2) def test_email_recovered_from_token(self): """A consumed token should yield the original email address. """ token = self.generator.create_token(TEST_EMAIL) email = self.generator.consume_token(token) self.assertEqual(email, TEST_EMAIL) def test_modified_token_fails(self): """A modified token returns None instead of an email. """ token = self.generator.create_token(TEST_EMAIL) split_token = base64.urlsafe_b64decode( token.encode() ).decode().split('@') split_token[0] = 'maliciousvisitor' malicious_token = base64.urlsafe_b64encode( '@'.join(split_token).encode() ).decode() self.assertIsNone(self.generator.consume_token(malicious_token))
mit
Python
5a4f05cb0f3a00a2d4faf828bd7850085c302541
Implement functionality to delete logs created by digital justice users
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py
cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py
import sys

from django.core.management.base import BaseCommand
from dateutil.relativedelta import relativedelta

from legalaid.models import Case
from cla_eventlog.models import Log
from cla_butler.tasks import DeleteOldData


class FindAndDeleteCasesUsingCreationTime(DeleteOldData):
    def get_eligible_cases(self):
        two_years = self.now - relativedelta(years=2)

        return Case.objects.filter(created__lte=two_years).exclude(log__created__gte=two_years)

    def get_digital_justice_user_logs(self):
        return Log.objects.filter(created_by__email__endswith="digital.justice.gov.uk")


class Command(BaseCommand):
    help = """
        Use cases:
        1. Find or delete cases that are 2 years old or over that were not deleted prior to the task command being fixed
        2. Delete logs created by users with a @digital.justice.gov.uk email
    """

    def handle_test_command(self, args, cases):
        digital_justice_user_logs = self.instance.get_digital_justice_user_logs()
        if args[0] == "delete":
            self.instance.run()
        elif args[0] == "delete-logs":
            self.instance._delete_objects(digital_justice_user_logs)

    def handle_terminal_command(self, args, cases):
        digital_justice_user_logs = self.instance.get_digital_justice_user_logs()
        if args[0] == "delete":
            if len(args) > 1 and args[1] == "no-input":
                self.instance.run()
            else:
                answer = raw_input(
                    "Number of cases that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
                        cases.count()
                    )
                )
                if answer == "Yes":
                    self.instance.run()
        elif args[0] == "delete-logs":
            answer = raw_input(
                "Number of digital justice user logs that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
                    digital_justice_user_logs.count()
                )
            )
            if answer == "Yes":
                self.instance._delete_objects(digital_justice_user_logs)

    def handle(self, *args, **kwargs):
        self.instance = FindAndDeleteCasesUsingCreationTime()
        cases = self.instance.get_eligible_cases()
        django_command = sys.argv[1]

        if django_command == "test":  # If command is run in test
            if args:
                self.handle_test_command(args, cases)
            else:
                return cases
        else:  # If command is run in terminal
            if args:
                self.handle_terminal_command(args, cases)
            else:
                print("Number of cases to be deleted: " + str(cases.count()))
import sys

from django.core.management.base import BaseCommand
from dateutil.relativedelta import relativedelta

from legalaid.models import Case
from cla_butler.tasks import DeleteOldData


class FindAndDeleteCasesUsingCreationTime(DeleteOldData):
    def get_eligible_cases(self):
        two_years = self.now - relativedelta(years=2)

        return Case.objects.filter(created__lte=two_years).exclude(log__created__gte=two_years)


class Command(BaseCommand):
    help = (
        "Find or delete cases that are 2 years old or over that were not deleted prior to the task command being fixed"
    )

    def handle(self, *args, **kwargs):
        instance = FindAndDeleteCasesUsingCreationTime()
        cases = instance.get_eligible_cases()
        django_command = sys.argv[1]

        if django_command == "test":  # If command is run in test
            if args and args[0] == "delete":
                instance.run()
            else:
                return cases
        else:  # If command is run in terminal
            if args and args[0] == "delete":
                if len(args) > 1 and args[1] == "no-input":
                    instance.run()
                else:
                    answer = raw_input(
                        "Number of cases that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
                            cases.count()
                        )
                    )
                    if answer == "Yes":
                        instance.run()
            else:
                print("Number of cases to be deleted: " + str(cases.count()))
mit
Python
b9c953cffd0c9961c22c0c671648f5e5a3e4426c
Update server
colmcoughlan/alchemy-server
alchemy_server.py
alchemy_server.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017

@author: colm
"""

from flask import Flask, jsonify
import os
from models import Charity, Logo, Description
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']


@app.route("/gci")
def gci():
    global session
    query = session.query(Charity, Description.description, Logo.logo_url, Logo.has_face)\
        .join(Logo, Charity.name == Logo.name)\
        .join(Description, Charity.name == Description.name)
    charities = pd.read_sql(query.statement, con=session.bind, index_col = 'name')
    charities = charities[charities['has_face'] == False]
    charities.drop('has_face', axis=1)
    query = session.query(Charity.category).distinct()
    categories = pd.read_sql(query.statement, con = session.bind)
    categories = categories[~categories['category'].str.contains(',')]
    payload = {'categories':categories.values.tolist(), 'charities':charities.to_dict('index')}
    return jsonify(payload)


if __name__ == "__main__":
    db = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'])
    Session = sessionmaker(bind=db)
    session = Session()
    app.run(host='0.0.0.0')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017

@author: colm
"""

from flask import Flask, jsonify
import os
from models import Charity, Logo, Description
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']


@app.route("/gci")
def gci():
    global session
    query = session.query(Charity)\
        .leftjoin(Logo, Charity.name == Logo.name)\
        .leftjoin(Description, Charity.name == Description.name)
    charities = pd.read_sql(query.statment, session.bind)
    query = session.query(Charity.category).distinct()
    categories = pd.read_sql(query.statment, session.bind)
    payload = {'categories':categories.values(), 'charities':charities.to_dict('index')}
    return jsonify(payload)


if __name__ == "__main__":
    db = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'])
    Session = sessionmaker(bind=db)
    session = Session()
    app.run(host='0.0.0.0')
    print('test')
mit
Python
434e459059bba2a1e52e953813caae532a3cb16b
Update test_consume_4
lifan0127/2016-pycon-tutorial-project
test_wordcount.py
test_wordcount.py
import os.path
import tempfile

import wordcount_lib


def _make_testfile(filename, data):
    "Make a temp file containing the given data; return full path to file."
    tempdir = tempfile.mkdtemp(prefix='wordcounttest_')
    testfile = os.path.join(tempdir, filename)

    with open(testfile, 'wt') as fp:
        fp.write(data)

    return testfile


def test_consume_1():
    # do a basic test of the consume function.
    testfile = _make_testfile('sometext.txt', 'a b cc\nddd')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 10
    assert words == 4
    assert lines == 2


def test_consume_2():
    # do another basic test of the consume function.
    testfile = _make_testfile('sometext.txt', 'a\nb\ncc\nddd\ne')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 12  # includes whitespace in char count
    assert words == 5
    assert lines == 5


def test_consume_3():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c ')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 7  # includes whitespace in char count
    assert words == 3
    assert lines == 1


def test_consume_4():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c d e')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 10  # includes whitespace in char count
    assert words == 5
    assert lines == 1
import os.path
import tempfile

import wordcount_lib


def _make_testfile(filename, data):
    "Make a temp file containing the given data; return full path to file."
    tempdir = tempfile.mkdtemp(prefix='wordcounttest_')
    testfile = os.path.join(tempdir, filename)

    with open(testfile, 'wt') as fp:
        fp.write(data)

    return testfile


def test_consume_1():
    # do a basic test of the consume function.
    testfile = _make_testfile('sometext.txt', 'a b cc\nddd')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 10
    assert words == 4
    assert lines == 2


def test_consume_2():
    # do another basic test of the consume function.
    testfile = _make_testfile('sometext.txt', 'a\nb\ncc\nddd\ne')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 12  # includes whitespace in char count
    assert words == 5
    assert lines == 5


def test_consume_3():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c ')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 7  # includes whitespace in char count
    assert words == 3
    assert lines == 1


def test_consume_4():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c d e')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 9  # includes whitespace in char count
    assert words == 5
    assert lines == 1
bsd-3-clause
Python
abd2ad6098cb0bc827a8bebf12f21f1131dc83fa
Change version number
flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost
fluxghost/__init__.py
fluxghost/__init__.py
__version__ = "0.8.1" DEBUG = False
__version__ = "0.8.0" DEBUG = False
agpl-3.0
Python
52eed6f6d771045b2c06a941db17665785e90b23
return an error exit code if tests failed
tomkralidis/pywps,jonas-eberle/pywps,geopython/pywps,ricardogsilva/PyWPS,SiggyF/pywps-4,ldesousa/PyWPS,doclements/pywps-4,jachym/PyWPS,bird-house/PyWPS
tests/__init__.py
tests/__init__.py
import sys
import unittest

import parse
import extent


def load_tests():
    return unittest.TestSuite([parse.load_tests(), extent.load_tests()])


if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(load_tests())
    if not result.wasSuccessful():
        sys.exit(1)
import unittest

import parse
import extent


def load_tests():
    return unittest.TestSuite([parse.load_tests(), extent.load_tests()])


if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(load_tests())
mit
Python
bb94d126ae9ff86efc00cfbda5f3fff375490e16
Add missing import to tests/__init__.py.
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
tests/__init__.py
tests/__init__.py
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.

from alchemy_db_utils_unittest import *
from black_box_tests import *
from bulk_insert_unittest import *
from cache_gc_unittest import *
from db_loader_unittest import *
from db_loader_unittest import *
from deterministic_hazard_unittest import *
from deterministic_risk_unittest import *
from geo_unittest import *
from handlers_unittest import *
from hazard_classical_unittest import *
from hazard_nrml_unittest import *
from hazard_unittest import *
from input_risk_unittest import *
from java_unittest import *
from job_unittest import *
from kvs_unittest import *
from logs_unittest import *
from loss_map_output_unittest import *
from loss_output_unittest import *
from output_hazard_unittest import *
from output_risk_unittest import *
from output_unittest import *
from output_writers_unittest import *
from parser_exposure_portfolio_unittest import *
from parser_hazard_curve_unittest import *
from parser_hazard_map_unittest import *
from parser_vulnerability_model_unittest import *
from probabilistic_unittest import *
from producer_unittest import *
from risk_job_unittest import *
from risk_parser_unittest import *
from risk_unittest import *
from schema_unittest import *
from shapes_unittest import *
from tools_dbmaint_unittest import *
from utils_general_unittest import *
from utils_tasks_unittest import *
from utils_version_unittest import *
from validator_unittest import *

import glob
import os
import sys

for path in glob.glob(os.path.join(os.path.dirname(__file__), '*test*.py')):
    test = os.path.splitext(os.path.basename(path))[0]
    module = 'tests.' + test
    if module not in sys.modules:
        print >>sys.stderr, "Potential missing import of " + module
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.

from alchemy_db_utils_unittest import *
from black_box_tests import *
from bulk_insert_unittest import *
from cache_gc_unittest import *
from db_loader_unittest import *
from db_loader_unittest import *
from deterministic_hazard_unittest import *
from deterministic_risk_unittest import *
from geo_unittest import *
from handlers_unittest import *
from hazard_classical_unittest import *
from hazard_nrml_unittest import *
from hazard_unittest import *
from java_unittest import *
from job_unittest import *
from kvs_unittest import *
from logs_unittest import *
from loss_map_output_unittest import *
from loss_output_unittest import *
from output_hazard_unittest import *
from output_risk_unittest import *
from output_unittest import *
from output_writers_unittest import *
from parser_exposure_portfolio_unittest import *
from parser_hazard_curve_unittest import *
from parser_hazard_map_unittest import *
from parser_vulnerability_model_unittest import *
from probabilistic_unittest import *
from producer_unittest import *
from risk_job_unittest import *
from risk_parser_unittest import *
from risk_unittest import *
from schema_unittest import *
from shapes_unittest import *
from tools_dbmaint_unittest import *
from utils_general_unittest import *
from utils_tasks_unittest import *
from utils_version_unittest import *
from validator_unittest import *

import glob
import os
import sys

for path in glob.glob(os.path.join(os.path.dirname(__file__), '*test*.py')):
    test = os.path.splitext(os.path.basename(path))[0]
    module = 'tests.' + test
    if module not in sys.modules:
        print >>sys.stderr, "Potential missing import of " + module
agpl-3.0
Python
4bafa90acca39a3d3fa5df0303d885c810244700
Add URL
bowen0701/algorithms_data_structures
lc034_find_first_and_last_position_of_element_in_sorted_array.py
lc034_find_first_and_last_position_of_element_in_sorted_array.py
"""Leetcode 34. Find First and Last Position of Element in Sorted Array Medium URL: https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value. Your algorithm's runtime complexity must be in the order of O(log n). If the target is not found in the array, return [-1, -1]. Example 1: Input: nums = [5,7,7,8,8,10], target = 8 Output: [3,4] Example 2: Input: nums = [5,7,7,8,8,10], target = 6 Output: [-1,-1] """ class Solution(object): def searchRange(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] Time complexity: O(logn), where n is the length of nums. Space complexity: O(1). """ # Apply to 2 binary searches to update result [-1, -1]. res = [-1, -1] if not nums: return res # Apply the 1st binary search to search target's left position. first, last = 0, len(nums) - 1 while first < last: mid = first + (last - first) // 2 if nums[mid] < target: first = mid + 1 else: last = mid if nums[first] != target: return res else: res[0] = first # Apply the 2nd binary search to search target's right position. last = len(nums) - 1 while first < last: # Make mid biased to the right. mid = first + (last - first) // 2 + 1 if nums[mid] > target: last = mid - 1 else: first = mid res[1] = last return res def main(): # Ans: [3,4] nums = [5,7,7,8,8,10] target = 8 print Solution().searchRange(nums, target) # Ans: [-1,-1] nums = [5,7,7,8,8,10] target = 6 print Solution().searchRange(nums, target) if __name__ == '__main__': main()
"""Leetcode 34. Find First and Last Position of Element in Sorted Array Medium Given an array of integers nums sorted in ascending order, find the starting and ending position of a given target value. Your algorithm's runtime complexity must be in the order of O(log n). If the target is not found in the array, return [-1, -1]. Example 1: Input: nums = [5,7,7,8,8,10], target = 8 Output: [3,4] Example 2: Input: nums = [5,7,7,8,8,10], target = 6 Output: [-1,-1] """ class Solution(object): def searchRange(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] Time complexity: O(logn), where n is the length of nums. Space complexity: O(1). """ # Apply to 2 binary searches to update result [-1, -1]. res = [-1, -1] if not nums: return res # Apply the 1st binary search to search target's left position. first, last = 0, len(nums) - 1 while first < last: mid = first + (last - first) // 2 if nums[mid] < target: first = mid + 1 else: last = mid if nums[first] != target: return res else: res[0] = first # Apply the 2nd binary search to search target's right position. last = len(nums) - 1 while first < last: # Make mid biased to the right. mid = first + (last - first) // 2 + 1 if nums[mid] > target: last = mid - 1 else: first = mid res[1] = last return res def main(): # Ans: [3,4] nums = [5,7,7,8,8,10] target = 8 print Solution().searchRange(nums, target) # Ans: [-1,-1] nums = [5,7,7,8,8,10] target = 6 print Solution().searchRange(nums, target) if __name__ == '__main__': main()
bsd-2-clause
Python
49c64731fab1de1fc08b61a70190930b829d70d3
Remove import for random
markoshorro/gem5,markoshorro/gem5,briancoutinho0905/2dsampling,briancoutinho0905/2dsampling,Weil0ng/gem5,rallylee/gem5,rjschof/gem5,powerjg/gem5-ci-test,qizenguf/MLC-STT,austinharris/gem5-riscv,zlfben/gem5,rallylee/gem5,samueldotj/TeeRISC-Simulator,sobercoder/gem5,yb-kim/gemV,HwisooSo/gemV-update,gedare/gem5,SanchayanMaity/gem5,KuroeKurose/gem5,zlfben/gem5,joerocklin/gem5,SanchayanMaity/gem5,markoshorro/gem5,austinharris/gem5-riscv,KuroeKurose/gem5,rallylee/gem5,gem5/gem5,aclifton/cpeg853-gem5,yb-kim/gemV,joerocklin/gem5,briancoutinho0905/2dsampling,joerocklin/gem5,aclifton/cpeg853-gem5,samueldotj/TeeRISC-Simulator,zlfben/gem5,HwisooSo/gemV-update,gedare/gem5,qizenguf/MLC-STT,aclifton/cpeg853-gem5,TUD-OS/gem5-dtu,kaiyuanl/gem5,joerocklin/gem5,briancoutinho0905/2dsampling,rjschof/gem5,cancro7/gem5,sobercoder/gem5,rjschof/gem5,joerocklin/gem5,SanchayanMaity/gem5,KuroeKurose/gem5,zlfben/gem5,gem5/gem5,yb-kim/gemV,TUD-OS/gem5-dtu,joerocklin/gem5,kaiyuanl/gem5,Weil0ng/gem5,powerjg/gem5-ci-test,cancro7/gem5,gedare/gem5,rallylee/gem5,HwisooSo/gemV-update,powerjg/gem5-ci-test,qizenguf/MLC-STT,powerjg/gem5-ci-test,kaiyuanl/gem5,samueldotj/TeeRISC-Simulator,gem5/gem5,HwisooSo/gemV-update,powerjg/gem5-ci-test,rallylee/gem5,samueldotj/TeeRISC-Simulator,zlfben/gem5,markoshorro/gem5,samueldotj/TeeRISC-Simulator,gem5/gem5,austinharris/gem5-riscv,SanchayanMaity/gem5,TUD-OS/gem5-dtu,kaiyuanl/gem5,gedare/gem5,markoshorro/gem5,kaiyuanl/gem5,qizenguf/MLC-STT,rallylee/gem5,cancro7/gem5,KuroeKurose/gem5,qizenguf/MLC-STT,qizenguf/MLC-STT,cancro7/gem5,aclifton/cpeg853-gem5,briancoutinho0905/2dsampling,gem5/gem5,austinharris/gem5-riscv,sobercoder/gem5,gedare/gem5,rjschof/gem5,TUD-OS/gem5-dtu,markoshorro/gem5,HwisooSo/gemV-update,Weil0ng/gem5,powerjg/gem5-ci-test,joerocklin/gem5,SanchayanMaity/gem5,cancro7/gem5,kaiyuanl/gem5,zlfben/gem5,yb-kim/gemV,SanchayanMaity/gem5,yb-kim/gemV,samueldotj/TeeRISC-Simulator,kaiyuanl/gem5,aclifton/cpeg853-gem5,sobercoder/gem5,gem5/gem5,TUD-OS/gem5-dtu,HwisooSo/gemV-update,TUD-OS/gem5-dtu,SanchayanMaity/gem5,rjschof/gem5,gedare/gem5,austinharris/gem5-riscv,sobercoder/gem5,Weil0ng/gem5,markoshorro/gem5,zlfben/gem5,cancro7/gem5,rjschof/gem5,yb-kim/gemV,austinharris/gem5-riscv,briancoutinho0905/2dsampling,aclifton/cpeg853-gem5,Weil0ng/gem5,rallylee/gem5,qizenguf/MLC-STT,rjschof/gem5,sobercoder/gem5,KuroeKurose/gem5,HwisooSo/gemV-update,aclifton/cpeg853-gem5,samueldotj/TeeRISC-Simulator,austinharris/gem5-riscv,KuroeKurose/gem5,KuroeKurose/gem5,yb-kim/gemV,yb-kim/gemV,gem5/gem5,Weil0ng/gem5,TUD-OS/gem5-dtu,Weil0ng/gem5,joerocklin/gem5,briancoutinho0905/2dsampling,cancro7/gem5,powerjg/gem5-ci-test,gedare/gem5,sobercoder/gem5
src/python/m5/internal/__init__.py
src/python/m5/internal/__init__.py
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert

import core
import debug
import event
import stats
import trace
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert

import core
import debug
import event
import random
import stats
import trace
bsd-3-clause
Python
01c88b514c64f001fc7824a30b8609a425d646ef
Set defaults for CI and DETERMINISTIC_TESTS. (#653)
untitaker/vdirsyncer,untitaker/vdirsyncer,untitaker/vdirsyncer
tests/conftest.py
tests/conftest.py
# -*- coding: utf-8 -*-
'''
    General-purpose fixtures for vdirsyncer's testsuite.
'''
import logging
import os

import click_log

from hypothesis import HealthCheck, Verbosity, settings

import pytest


@pytest.fixture(autouse=True)
def setup_logging():
    click_log.basic_config('vdirsyncer').setLevel(logging.DEBUG)


try:
    import pytest_benchmark
except ImportError:
    @pytest.fixture
    def benchmark():
        return lambda x: x()
else:
    del pytest_benchmark


settings.suppress_health_check = [HealthCheck.too_slow]

settings.register_profile("ci", settings(
    max_examples=1000,
    verbosity=Verbosity.verbose,
))
settings.register_profile("deterministic", settings(
    derandomize=True,
))

if os.environ.get('DETERMINISTIC_TESTS', 'false').lower() == 'true':
    settings.load_profile("deterministic")
elif os.environ.get('CI', 'false').lower() == 'true':
    settings.load_profile("ci")
# -*- coding: utf-8 -*-
'''
    General-purpose fixtures for vdirsyncer's testsuite.
'''
import logging
import os

import click_log

from hypothesis import HealthCheck, Verbosity, settings

import pytest


@pytest.fixture(autouse=True)
def setup_logging():
    click_log.basic_config('vdirsyncer').setLevel(logging.DEBUG)


try:
    import pytest_benchmark
except ImportError:
    @pytest.fixture
    def benchmark():
        return lambda x: x()
else:
    del pytest_benchmark


settings.suppress_health_check = [HealthCheck.too_slow]

settings.register_profile("ci", settings(
    max_examples=1000,
    verbosity=Verbosity.verbose,
))
settings.register_profile("deterministic", settings(
    derandomize=True,
))

if os.environ['DETERMINISTIC_TESTS'].lower() == 'true':
    settings.load_profile("deterministic")
elif os.environ['CI'].lower() == 'true':
    settings.load_profile("ci")
mit
Python
88bba8a6145f67fd65e4062123db295601c92000
Fix lint errors
alexwlchan/hot-chocolate,alexwlchan/hot-chocolate
tests/conftest.py
tests/conftest.py
# -*- encoding: utf-8

import os

from hotchocolate import Site


# TODO: Tidy this up, and don't duplicate code from cli.py
curdir = os.path.abspath(os.curdir)
os.chdir('tests/examplesite')
site = Site.from_folder('content')
site.build()
os.chdir(curdir)
# -*- encoding: utf-8

import os

from hotchocolate import Site
import hotchocolate.cli as hcli


# TODO: Tidy this up, and don't duplicate code from cli.py
curdir = os.path.abspath(os.curdir)
os.chdir('tests/examplesite')
site = Site.from_folder('content')
site.build()
os.chdir(curdir)
mit
Python
8dc79a0a1b99d1742ae297db7da26a0404e5ec33
Fix pep8
billvsme/videoSpider
tests/conftest.py
tests/conftest.py
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from models import Base
from config import create_new_sqla
from helpers import get_video_douban_ids

test_database_url = 'sqlite:///test.db'


@pytest.fixture(scope='session')
def session(request):
    sqla = create_new_sqla(test_database_url, echo=False)
    session = sqla['session']
    engine = sqla['engine']
    Base.metadata.create_all(engine)

    def teardown():
        Base.metadata.drop_all(engine)

    request.addfinalizer(teardown)

    return session


@pytest.fixture
def douban_movie_ids():
    return list(get_video_douban_ids())
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from models import Base
from config import create_new_sqla
from helpers import get_video_douban_ids

test_database_url = 'sqlite:///test.db'


@pytest.fixture(scope='session')
def session(request):
    sqla = create_new_sqla(test_database_url, echo=False)
    session = sqla['session']
    engine = sqla['engine']
    Base.metadata.create_all(engine)

    def teardown():
        Base.metadata.drop_all(engine)

    request.addfinalizer(teardown)

    return session


@pytest.fixture
def douban_movie_ids():
    return list(get_video_douban_ids())
mit
Python
423ec9d9b38be990ab7dca027877e1c12f3d07fe
add in django-registration update media url
cewing/cfpydev-imagr
imagr_site/settings.py
imagr_site/settings.py
""" Django settings for imagr_site project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_0)ionh8p(-xw=uh-3_8un)^xo+=&obsad&lhohn-d93j(p!21' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] AUTH_USER_MODEL = 'imagr_users.ImagrUser' # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'imagr_users', 'imagr_images', 'south', 'registration', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'imagr_site.urls' WSGI_APPLICATION = 'imagr_site.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR + "/media/"
""" Django settings for imagr_site project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_0)ionh8p(-xw=uh-3_8un)^xo+=&obsad&lhohn-d93j(p!21' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] AUTH_USER_MODEL = 'imagr_users.ImagrUser' # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'imagr_users', 'imagr_images', 'south', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'imagr_site.urls' WSGI_APPLICATION = 'imagr_site.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = 'http://localhost:8000/media/' MEDIA_ROOT = BASE_DIR + "/media/"
mit
Python
ac754a6a711edc9b3628499ae18e74892efd7f98
Add recording interaction print statements
julianghionoiu/tdl-client-python,julianghionoiu/tdl-client-python
src/tdl/runner/recording_system.py
src/tdl/runner/recording_system.py
import unirest

RECORDING_SYSTEM_ENDPOINT = "http://localhost:41375"


class RecordingEvent:
    def __init__(self):
        pass

    ROUND_START = 'new'
    ROUND_SOLUTION_DEPLOY = 'deploy'
    ROUND_COMPLETED = 'done'


class RecordingSystem:
    def __init__(self, recording_required):
        self._recording_required = recording_required

    def is_recording_system_ok(self):
        return RecordingSystem.is_running() if self._recording_required else True

    @staticmethod
    def is_running():
        try:
            response = unirest.get("{}/status".format(RECORDING_SYSTEM_ENDPOINT))
            if response.code == 200 and response.body.startswith("OK"):
                return True
        except Exception as e:
            print("Could not reach recording system: {}".format(str(e)))

        return False

    def notify_event(self, round_id, event_name):
        print('Notify round "{}", event "{}"'.format(round_id, event_name))
        self._send_post("/notify", round_id + "/" + event_name)

    def tell_to_stop(self):
        print('Stopping recording system')
        self._send_post("/stop", "")

    def _send_post(self, endpoint, body):
        if not self.is_recording_system_ok():
            return

        try:
            response = unirest.post("{}{}".format(RECORDING_SYSTEM_ENDPOINT, endpoint), params=body)
            if response.code != 200:
                print("Recording system returned code: {}".format(response.code))
                return

            if not response.body.startswith("ACK"):
                print("Recording system returned body: {}".format(response.body))
        except Exception as e:
            print("Could not reach recording system: {}".format(str(e)))

    def on_new_round(self, round_id):
        self.notify_event(round_id, RecordingEvent.ROUND_START)
import unirest

RECORDING_SYSTEM_ENDPOINT = "http://localhost:41375"


class RecordingEvent:
    def __init__(self):
        pass

    ROUND_START = 'new'
    ROUND_SOLUTION_DEPLOY = 'deploy'
    ROUND_COMPLETED = 'done'


class RecordingSystem:
    def __init__(self, recording_required):
        self._recording_required = recording_required

    def is_recording_system_ok(self):
        return RecordingSystem.is_running() if self._recording_required else True

    @staticmethod
    def is_running():
        try:
            response = unirest.get("{}/status".format(RECORDING_SYSTEM_ENDPOINT))
            if response.code == 200 and response.body.startswith("OK"):
                return True
        except Exception as e:
            print("Could not reach recording system: {}".format(str(e)))

        return False

    def notify_event(self, round_id, event_name):
        self._send_post("/notify", round_id + "/" + event_name)

    def tell_to_stop(self):
        self._send_post("/stop", "")

    def _send_post(self, endpoint, body):
        if not self.is_recording_system_ok():
            return

        try:
            response = unirest.post("{}{}".format(RECORDING_SYSTEM_ENDPOINT, endpoint), params=body)
            if response.code != 200:
                print("Recording system returned code: {}".format(response.code))
                return

            if not response.body.startswith("ACK"):
                print("Recording system returned body: {}".format(response.body))
        except Exception as e:
            print("Could not reach recording system: {}".format(str(e)))

    def on_new_round(self, round_id):
        self.notify_event(round_id, RecordingEvent.ROUND_START)
apache-2.0
Python
0b3247c23d37c372d3f3984391b976fa904d00c6
bump to v1.4.0 (#5975)
mfherbst/spack,skosukhin/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,EmreAtes/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,mfherbst/spack,iulian787/spack,krafczyk/spack,lgarren/spack,lgarren/spack,LLNL/spack,iulian787/spack,lgarren/spack,EmreAtes/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,tmerrick1/spack,lgarren/spack,iulian787/spack,tmerrick1/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,LLNL/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,matthiasdiener/spack,skosukhin/spack,iulian787/spack,matthiasdiener/spack,lgarren/spack,LLNL/spack,matthiasdiener/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,skosukhin/spack,EmreAtes/spack,krafczyk/spack
var/spack/repos/builtin/packages/miniamr/package.py
var/spack/repos/builtin/packages/miniamr/package.py
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Miniamr(MakefilePackage): """Proxy Application. 3D stencil calculation with Adaptive Mesh Refinement (AMR) """ homepage = "https://mantevo.org" url = "https://github.com/Mantevo/miniAMR/archive/v1.4.tar.gz" tags = ['proxy-app', 'ecp-proxy-app'] version('1.4.0', '3aab0247047a94e343709cf2e51cc46e') variant('mpi', default=True, description='Build with MPI support') depends_on('mpi', when="+mpi") @property def build_targets(self): targets = [] if '+mpi' in self.spec: targets.append('CC={0}'.format(self.spec['mpi'].mpicc)) targets.append('LD={0}'.format(self.spec['mpi'].mpicc)) targets.append('LDLIBS=-lm') else: targets.append('CC={0}'.format(self.compiler.cc)) targets.append('LD={0}'.format(self.compiler.cc)) targets.append('--directory=ref') return targets def install(self, spec, prefix): # Manual installation mkdir(prefix.bin) mkdir(prefix.doc) install('ref/ma.x', prefix.bin) # Install Support Documents install('ref/README', prefix.doc)
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Miniamr(MakefilePackage): """Proxy Application. 3D stencil calculation with Adaptive Mesh Refinement (AMR) """ homepage = "https://mantevo.org" url = "http://mantevo.org/downloads/releaseTarballs/miniapps/MiniAMR/miniAMR_1.0_all.tgz" tags = ['proxy-app', 'ecp-proxy-app'] version('1.0', '812e5aaaab99689a4e9381a3bbd718a6') variant('mpi', default=True, description='Build with MPI support') depends_on('mpi', when="+mpi") @property def build_targets(self): targets = [] if '+mpi' in self.spec: targets.append('CC={0}'.format(self.spec['mpi'].mpicc)) targets.append('LDLIBS=-lm') targets.append('--file=Makefile.mpi') targets.append('--directory=miniAMR_ref') else: targets.append('--file=Makefile.serial') targets.append('--directory=miniAMR_serial') return targets def install(self, spec, prefix): # Manual installation mkdir(prefix.bin) mkdir(prefix.doc) if '+mpi' in spec: install('miniAMR_ref/miniAMR.x', prefix.bin) else: install('miniAMR_serial/miniAMR.x', prefix.bin) # Install Support Documents install('miniAMR_ref/README', prefix.doc)
lgpl-2.1
Python
779393e6c18539c97ff3bdaeb471253170645bc2
Update group.py
enovance/numeter,enovance/numeter,redhat-cip/numeter,redhat-cip/numeter,enovance/numeter,redhat-cip/numeter,enovance/numeter,redhat-cip/numeter
web-app/numeter_webapp/configuration/forms/group.py
web-app/numeter_webapp/configuration/forms/group.py
""" Group Form module. """ from django import forms from django.utils.translation import ugettext_lazy as _ from core.models import Group class Group_Form(forms.ModelForm): """Simple Group Form""" class Meta: model = Group widgets = { 'name': forms.TextInput({'placeholder':_('Name'), 'class':'span', 'ng-model': 'tabIndex.form.name'}), } def get_submit_url(self): """Return url matching with creation or updating.""" if self.instance.id: return self.instance.get_rest_detail_url() else: return self.instance.get_rest_list_url() def get_submit_method(self): """Return method matching with creation or updating.""" if self.instance.id: return 'PATCH' else: return 'POST'
""" Group Form module. """ from django import forms from django.utils.translation import ugettext_lazy as _ from djangular.forms.angular_model import NgModelFormMixin from core.models import Group class Group_Form(forms.ModelForm): """Simple Group Form""" class Meta: model = Group widgets = { 'name': forms.TextInput({'placeholder':_('Name'), 'class':'span', 'ng-model': 'tabIndex.form.name'}), } def get_submit_url(self): """Return url matching with creation or updating.""" if self.instance.id: return self.instance.get_rest_detail_url() else: return self.instance.get_rest_list_url() def get_submit_method(self): """Return method matching with creation or updating.""" if self.instance.id: return 'PATCH' else: return 'POST'
agpl-3.0
Python
7ca6dd5cd84222845db331afd97fc2f314999cff
fix yaspin.compat module docstring
pavdmyt/yaspin
yaspin/compat.py
yaspin/compat.py
# -*- coding: utf-8 -*- """ yaspin.compat ~~~~~~~~~~~~~ Compatibility layer. """ import sys PY2 = sys.version_info[0] == 2 if PY2: builtin_str = str bytes = str str = unicode # noqa def iteritems(dct): return dct.iteritems() else: builtin_str = str bytes = bytes str = str def iteritems(dct): return dct.items()
# -*- coding: utf-8 -*- """ tests.compat ~~~~~~~~~~~~~ Compatibility layer. """ import sys PY2 = sys.version_info[0] == 2 if PY2: builtin_str = str bytes = str str = unicode # noqa def iteritems(dct): return dct.iteritems() else: builtin_str = str bytes = bytes str = str def iteritems(dct): return dct.items()
mit
Python
b9fc0685b3adb05a5049cfa9b68676e00878d48a
Add .fillna(0)
yiori-s/fit_instagram_gender,yiori-s/fit_instagram_gender
instagram_collector.py
instagram_collector.py
import sys from settings import instgram_access_token from api import InstagramAPI, Alchemy import pandas as pd def following_users(api, user_name): instgram_user_id = api.user_id(user_name=user_name) following_users = api.follows_list(user_id=instgram_user_id) return following_users def userinfo_list(api, following_users): userinfo_list = [] for user in following_users: entries = api.media_list(user["user_id"]) for entry in entries: tag_list = Alchemy.tag_list(image_url=entry['url']) if tag_list is None: return userinfo_list entry.update({'tag_list': tag_list}) tags = [entry['tag_list'] for entry in entries] df = pd.DataFrame(tags).fillna(0) user_summery = df.sum() user_summery = user_summery.to_dict() user.update(user_summery) userinfo_list.append(user) return userinfo_list if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if len(argvs) != 2: print('Usage: # python %s INSTAGRAM_USER_NAME' % argvs[0]) quit() instgram_user_name = argvs[1] api = InstagramAPI(access_token=instgram_access_token) following_users = following_users(api, instgram_user_name) following_users = following_users[0:40] userinfo_list = userinfo_list(api, following_users) users_df = pd.DataFrame(userinfo_list).fillna(0) users_df.to_csv("user_tags.csv") # for following_user in following_users: # # entries = api.media_list(user_name=following_user) # # for entry in entries: # # image_url = entry["url"] # # tag_list = Alchemy.tag_list(image_url=image_url) # # entry.update({"tag_list": tag_list}) # # print(entry) # # print(entries) print(userinfo_list)
import sys from settings import instgram_access_token from api import InstagramAPI, Alchemy import pandas as pd def following_users(api, user_name): instgram_user_id = api.user_id(user_name=user_name) following_users = api.follows_list(user_id=instgram_user_id) return following_users def userinfo_list(api, following_users): userinfo_list = [] for user in following_users: entries = api.media_list(user["user_id"]) for entry in entries: tag_list = Alchemy.tag_list(image_url=entry['url']) if tag_list is None: return userinfo_list entry.update({'tag_list': tag_list}) tags = [entry['tag_list'] for entry in entries] df = pd.DataFrame(tags).fillna(0) user_summery = df.sum() user_summery = user_summery.to_dict() user.update(user_summery) userinfo_list.append(user) return userinfo_list if __name__ == '__main__': argvs = sys.argv argc = len(argvs) if len(argvs) != 2: print('Usage: # python %s INSTAGRAM_USER_NAME' % argvs[0]) quit() instgram_user_name = argvs[1] api = InstagramAPI(access_token=instgram_access_token) following_users = following_users(api, instgram_user_name) following_users = following_users[0:40] userinfo_list = userinfo_list(api, following_users) users_df = pd.DataFrame(userinfo_list) users_df.to_csv("user_tags.csv") # for following_user in following_users: # # entries = api.media_list(user_name=following_user) # # for entry in entries: # # image_url = entry["url"] # # tag_list = Alchemy.tag_list(image_url=image_url) # # entry.update({"tag_list": tag_list}) # # print(entry) # # print(entries) print(userinfo_list)
mit
Python
84cdde09d574d2a52446bd751445747407733b22
Remove print statement
django-oscar/django-oscar-accounts,django-oscar/django-oscar-accounts
tests/settings.py
tests/settings.py
import uuid import os.path from django.conf import global_settings, settings from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps from oscar.defaults import * # noqa from accounts import TEMPLATE_DIR as ACCOUNTS_TEMPLATE_DIR DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', } } STATICFILES_FINDERS=( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) SECRET_KEY = str(uuid.uuid4()) INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.staticfiles', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.flatpages', 'accounts', 'compressor', 'widget_tweaks', ] + get_core_apps() MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'oscar.apps.basket.middleware.BasketMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( 'django.core.context_processors.request', 'oscar.apps.search.context_processors.search_form', 'oscar.apps.promotions.context_processors.promotions', 'oscar.apps.checkout.context_processors.checkout', 'oscar.core.context_processors.metadata', ) DEBUG=False HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine' } } ROOT_URLCONF = 'tests.urls' TEMPLATE_DIRS = ( OSCAR_MAIN_TEMPLATE_DIR, os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'), ACCOUNTS_TEMPLATE_DIR, # Include sandbox templates as they patch from templates that # are in Oscar 0.4 but not 0.3 'sandbox/templates', ) STATIC_URL='/static/' COMPRESS_ROOT='' COMPRESS_ENABLED=False SITE_ID=1 ACCOUNTS_UNIT_NAME='Giftcard' NOSE_ARGS=['--nocapture'] USE_TZ=True DDF_FILL_NULLABLE_FIELDS=False ACCOUNTS_DEFERRED_INCOME_ACCOUNT_TYPES=('Test accounts',)
import uuid import os.path from django.conf import global_settings, settings from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps from oscar.defaults import * # noqa from accounts import TEMPLATE_DIR as ACCOUNTS_TEMPLATE_DIR DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', } } STATICFILES_FINDERS=( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) SECRET_KEY = str(uuid.uuid4()) INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.staticfiles', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.flatpages', 'accounts', 'compressor', 'widget_tweaks', ] + get_core_apps() MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'oscar.apps.basket.middleware.BasketMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( 'django.core.context_processors.request', 'oscar.apps.search.context_processors.search_form', 'oscar.apps.promotions.context_processors.promotions', 'oscar.apps.checkout.context_processors.checkout', 'oscar.core.context_processors.metadata', ) DEBUG=False HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine' } } ROOT_URLCONF = 'tests.urls' TEMPLATE_DIRS = ( OSCAR_MAIN_TEMPLATE_DIR, os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'), ACCOUNTS_TEMPLATE_DIR, # Include sandbox templates as they patch from templates that # are in Oscar 0.4 but not 0.3 'sandbox/templates', ) print TEMPLATE_DIRS STATIC_URL='/static/' COMPRESS_ROOT='' COMPRESS_ENABLED=False SITE_ID=1 ACCOUNTS_UNIT_NAME='Giftcard' NOSE_ARGS=['--nocapture'] USE_TZ=True DDF_FILL_NULLABLE_FIELDS=False ACCOUNTS_DEFERRED_INCOME_ACCOUNT_TYPES=('Test accounts',)
bsd-3-clause
Python
26116bb984f7a970c67bcdc01ff026a3fc5f0905
create secondary parses
riptano/cdm
tests/test_ddl.py
tests/test_ddl.py
from pytest import fixture from cdm.ddl import parse_line, create_vertex, create_vertex_index,\ CreateVertex, \ CreateEdge, CreateProperty, CreateIndex, CreateGraph def test_create_graph(): s = "CREATE GRAPH jon" parsed = parse_line(s) assert isinstance(parsed, CreateGraph) assert "system.createGraph('jon').build()" in str(parsed) def test_create_vertex_label(): cmd = "CREATE vertex movie" result = create_vertex.parseString(cmd)[0] assert isinstance(result, CreateVertex) result = parse_line(cmd) assert isinstance(result, CreateVertex) assert result.label == "movie" assert "buildVertexLabel" in str(result) assert "movie" in str(result) result2 = parse_line("CREATE vertex label movie") assert isinstance(result, CreateVertex) def test_create_edge_label(): result = parse_line("CREATE edge rated") assert isinstance(result, CreateEdge) assert result.label == "rated" result2 = parse_line("CREATE edge label rated") assert isinstance(result2, CreateEdge) def test_create_property(): result = parse_line("CREATE PROPERTY name text") assert isinstance(result, CreateProperty) result = parse_line("CREATE PROPERTY name TEXT") assert isinstance(result, CreateProperty) """ graph.schema().vertexLabel("ip").buildVertexIndex("ipById").materialized().byPropertyKey("id").add() Secondary graph.schema().vertexLabel("ip").buildVertexIndex("ipByCountry").secondary().byPropertyKey("country").add() Search graph.schema().vertexLabel("swid").buildVertexIndex("search").search().byPropertyKey("dob").add() """ def test_create_index_fulltext(): s = "CREATE materialized INDEX movie_title_idx ON VERTEX movie(title )" result = create_vertex_index.parseString(s) s = "CREATE secondary INDEX movie_title_idx ON VERTEX movie(title )" result = create_vertex_index.parseString(s) # result = parse_line() # assert isinstance(result, CreateIndex) # # def test_create_index_materialize(): # result = parse_line("CREATE INDEX movie_title_idx ON movie(title) SEARCH"); # result = parse_line("CREATE INDEX user_id_idx ON movie(user_id) MATERIALIZED")
from pytest import fixture from cdm.ddl import parse_line, create_vertex, create_vertex_index,\ CreateVertex, \ CreateEdge, CreateProperty, CreateIndex, CreateGraph def test_create_graph(): s = "CREATE GRAPH jon" parsed = parse_line(s) assert isinstance(parsed, CreateGraph) assert "system.createGraph('jon').build()" in str(parsed) def test_create_vertex_label(): cmd = "CREATE vertex movie" result = create_vertex.parseString(cmd)[0] assert isinstance(result, CreateVertex) result = parse_line(cmd) assert isinstance(result, CreateVertex) assert result.label == "movie" assert "buildVertexLabel" in str(result) assert "movie" in str(result) result2 = parse_line("CREATE vertex label movie") assert isinstance(result, CreateVertex) def test_create_edge_label(): result = parse_line("CREATE edge rated") assert isinstance(result, CreateEdge) assert result.label == "rated" result2 = parse_line("CREATE edge label rated") assert isinstance(result2, CreateEdge) def test_create_property(): result = parse_line("CREATE PROPERTY name text") assert isinstance(result, CreateProperty) result = parse_line("CREATE PROPERTY name TEXT") assert isinstance(result, CreateProperty) """ graph.schema().vertexLabel("ip").buildVertexIndex("ipById").materialized().byPropertyKey("id").add() Secondary graph.schema().vertexLabel("ip").buildVertexIndex("ipByCountry").secondary().byPropertyKey("country").add() Search graph.schema().vertexLabel("swid").buildVertexIndex("search").search().byPropertyKey("dob").add() """ def test_create_index_fulltext(): s = "CREATE materialized INDEX movie_title_idx ON VERTEX movie(title )" result = create_vertex_index.parseString(s) # result = parse_line() # assert isinstance(result, CreateIndex) # # def test_create_index_materialize(): # result = parse_line("CREATE INDEX movie_title_idx ON movie(title) SEARCH"); # result = parse_line("CREATE INDEX user_id_idx ON movie(user_id) MATERIALIZED")
apache-2.0
Python
581eb398360cff5de1488fa06890195c808f8d10
fix make requests test
jadbin/xpaw
tests/test_run.py
tests/test_run.py
# coding=utf-8 from os.path import join import pytest from xpaw.spider import Spider from xpaw.cmdline import main from xpaw.run import run_crawler, run_spider, make_requests from xpaw.http import HttpRequest, HttpResponse from xpaw.errors import ClientError, HttpError def test_run_crawler(tmpdir): proj_name = 'test_run_crawler' proj_dir = join(str(tmpdir), proj_name) main(argv=['xpaw', 'init', proj_dir]) run_crawler(proj_dir, log_level='DEBUG') def test_run_crawler_bad_config(tmpdir, capsys): proj_dir = join(str(tmpdir)) config_file = join(proj_dir, 'config.py') with open(config_file, 'w') as f: f.write('bad config') with pytest.raises(SyntaxError): run_crawler(proj_dir, log_level='DEBUG') _, _ = capsys.readouterr() def test_failed_to_create_cluster(tmpdir, capsys): proj_dir = join(str(tmpdir)) with pytest.raises(Exception): run_crawler(proj_dir, log_level='DEBUG') _, _ = capsys.readouterr() class DummySpider(Spider): def start_requests(self): pass def parse(self, response): pass def test_run_spider(): run_spider(DummySpider, log_level='DEBUG') def test_make_requests(): requests = [None, 'http://unknonw', 'http://python.org/', HttpRequest('http://python.org'), 'http://httpbin.org/status/404'] results = make_requests(requests, log_level='DEBUG') assert len(results) == len(requests) assert results[0] is None assert isinstance(results[1], ClientError) assert isinstance(results[2], HttpResponse) and results[2].status == 200 assert isinstance(results[3], HttpResponse) and results[3].status == 200 assert isinstance(results[4], HttpError) and results[4].response.status == 404
# coding=utf-8 from os.path import join import pytest from xpaw.spider import Spider from xpaw.cmdline import main from xpaw.run import run_crawler, run_spider, make_requests from xpaw.http import HttpRequest, HttpResponse from xpaw.errors import ClientError, HttpError def test_run_crawler(tmpdir): proj_name = 'test_run_crawler' proj_dir = join(str(tmpdir), proj_name) main(argv=['xpaw', 'init', proj_dir]) run_crawler(proj_dir, log_level='DEBUG') def test_run_crawler_bad_config(tmpdir, capsys): proj_dir = join(str(tmpdir)) config_file = join(proj_dir, 'config.py') with open(config_file, 'w') as f: f.write('bad config') with pytest.raises(SyntaxError): run_crawler(proj_dir, log_level='DEBUG') _, _ = capsys.readouterr() def test_failed_to_create_cluster(tmpdir, capsys): proj_dir = join(str(tmpdir)) with pytest.raises(Exception): run_crawler(proj_dir, log_level='DEBUG') _, _ = capsys.readouterr() class DummySpider(Spider): def start_requests(self): pass def parse(self, response): pass def test_run_spider(): run_spider(DummySpider, log_level='DEBUG') def test_make_requests(): requests = [None, 'http://localhost:8080', 'http://python.org/', HttpRequest('http://python.org'), 'http://httpbin.org/status/404'] results = make_requests(requests, log_level='DEBUG') assert len(results) == len(requests) assert results[0] is None assert isinstance(results[1], ClientError) assert isinstance(results[2], HttpResponse) and results[2].status == 200 assert isinstance(results[3], HttpResponse) and results[3].status == 200 assert isinstance(results[4], HttpError) and results[4].response.status == 404
apache-2.0
Python
40c8bec919f2e04befb021d51706f39793eb77a2
Fix typo
tobiajo/tfyarn
tfyarn/factory.py
tfyarn/factory.py
from __future__ import print_function from tfyarn.clusterspecgen_client import ClusterSpecGenClient import os import socket import tensorflow import time def createClusterSpec(job_name, task_index, application_id=None, container_id=None, am_address=None): if application_id is None: application_id = os.environ['APPLICATION_ID'] if container_id is None: container_id = os.environ['CONTAINER_ID'] if am_address is None: am_address = os.environ['AM_ADDRESS'] client = ClusterSpecGenClient(am_address) host = socket.gethostname() s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) port = s.getsockname()[1] client.register_container(application_id, container_id, host, port, job_name, task_index) while True: time.sleep(0.2) cluster_spec_list = client.get_cluster_spec() if cluster_spec_list is None: print(container_id + ': createTrainServer: clusterSpec: None') pass elif len(cluster_spec_list) == 0: print(container_id + ': createTrainServer: clusterSpec: (empty)') pass else: break workers = [] pses = [] last_worker_task_index = -1 last_ps_task_index = -1 for container in cluster_spec_list: if container.jobName == 'worker': assert container.taskIndex == last_worker_task_index + 1 last_worker_task_index = container.taskIndex workers.append(container.ip + ':' + str(container.port)) elif container.jobName == 'ps': assert container.taskIndex == last_ps_task_index + 1 last_ps_task_index = container.taskIndex pses.append(container.ip + ':' + str(container.port)) cluster_spec_map = {'worker': workers, 'ps': pses} print(container_id + ': createTrainServer: clusterSpec: ', end='') print(cluster_spec_map) s.close() return tensorflow.train.ClusterSpec(cluster_spec_map)
from __future__ import print_function from tfyarn.clusterspecgen_client import ClusterSpecGenClient import os import socket import tensorflow import time def createClusterSpec(job_name, task_index, application_id=None, container_id=None, am_address=None): if application_id is None: application_id = os.environ['APPLICATION_ID'] if container_id is None: container_id = os.environ['CONTAINER_ID'] if am_address is None: am_address = os.environ['AM_ADDRESS'] client = ClusterSpecGenClient(am_address) host = socket.gethostname() s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) port = s.getsockname()[1] client.register_container(application_id, container_id, host, port, job_name, task_index) while True: time.sleep(0.2) cluster_spec_list = client.get_cluster_spec() if cluster_spec_list is None: print(container_id + ': createTrainServer: clusterSpec: None') pass elif len(cluster_spec_list) == 0: print(container_id + ': createTrainServer: clusterSpec: (empty)') pass else: break workers = [] pses = [] last_worker_task_id = -1 last_ps_task_id = -1 for container in cluster_spec_list: if container.jobName == 'worker': assert container.taskIndex == last_worker_task_id + 1 last_worker_task_id = container.taskIndex workers.append(container.ip + ':' + str(container.port)) elif container.jobName == 'ps': assert container.taskIndex == last_ps_task_id + 1 last_ps_task_id = container.taskIndex pses.append(container.ip + ':' + str(container.port)) cluster_spec_map = {'worker': workers, 'ps': pses} print(container_id + ': createTrainServer: clusterSpec: ', end='') print(cluster_spec_map) s.close() return tensorflow.train.ClusterSpec(cluster_spec_map)
apache-2.0
Python
4663589ae44437344ec88dc96dc2ca9bdf55b581
add metric AUC
wepe/tgboost,wepe/tgboost
tgboost/metric.py
tgboost/metric.py
import numpy as np def accuracy(preds, labels): return np.mean(labels == preds.round()) def error(preds, labels): return 1.0 - accuracy(preds,labels) def mean_square_error(preds, labels): return np.mean(np.square(preds - labels)) def mean_absolute_error(preds, labels): return np.mean(np.abs(preds - labels)) def tied_rank(x): sorted_x = sorted(zip(x,range(len(x)))) r = [0 for k in x] cur_val = sorted_x[0][0] last_rank = 0 for i in range(len(sorted_x)): if cur_val != sorted_x[i][0]: cur_val = sorted_x[i][0] for j in range(last_rank, i): r[sorted_x[j][1]] = float(last_rank+1+i)/2.0 last_rank = i if i==len(sorted_x)-1: for j in range(last_rank, i+1): r[sorted_x[j][1]] = float(last_rank+i+2)/2.0 return r # the auc code is from https://github.com/benhamner/Metrics, thanks benhamner def auc(posterior, actual): r = tied_rank(posterior) num_positive = len([0 for x in actual if x==1]) num_negative = len(actual)-num_positive sum_positive = sum([r[i] for i in range(len(r)) if actual[i]==1]) auc = ((sum_positive - num_positive*(num_positive+1)/2.0) / (num_negative*num_positive)) return auc metrics = {"acc": accuracy, "error": error, "mse": mean_square_error, "mae": mean_absolute_error, "auc": auc} def get_metric(eval_metric): return metrics[eval_metric]
import numpy as np def accuracy(preds, labels): return np.mean(labels == preds.round()) def error(preds, labels): return 1.0 - accuracy(preds,labels) def mean_square_error(preds, labels): return np.mean(np.square(preds - labels)) def mean_absolute_error(preds, labels): return np.mean(np.abs(preds - labels)) metrics = {"acc": accuracy, "error": error, "mse": mean_square_error, "mae": mean_absolute_error} def get_metric(eval_metric): return metrics[eval_metric]
mit
Python
00140b48d7473c0f6738e5bc7894370baee9ef30
Remove debugging
pwyf/data-quality-tester,pwyf/data-quality-tester,pwyf/data-quality-tester,pwyf/data-quality-tester
IATISimpleTester/lib/helpers.py
IATISimpleTester/lib/helpers.py
from collections import defaultdict import re from lxml import etree from IATISimpleTester import app # given an expression list and the name of an expression, # select it, def select_expression(expression_list, expression_name, default_expression_name=None): expression_dicts = {x["id"]: x for x in expression_list} if expression_name not in expression_dicts: expression_name = default_expression_name return expression_name, expression_dicts.get(expression_name) def slugify(inp): return inp.lower().replace(' ', '-') def pprint(explanation): explanation = explanation.strip().capitalize().replace('\n', '<br>') + '.' return re.sub(r'`([^`]*)`', r'<code>\1</code>', explanation)
from collections import defaultdict import re from lxml import etree from IATISimpleTester import app # given an expression list and the name of an expression, # select it, def select_expression(expression_list, expression_name, default_expression_name=None): expression_dicts = {x["id"]: x for x in expression_list} if expression_name not in expression_dicts: expression_name = default_expression_name return expression_name, expression_dicts.get(expression_name) def slugify(inp): return inp.lower().replace(' ', '-') def pprint(explanation): print(explanation) explanation = explanation.strip().capitalize().replace('\n', '<br>') + '.' return re.sub(r'`([^`]*)`', r'<code>\1</code>', explanation)
mit
Python
303a8c149c30d4dd1d9c833c6716d5ab0da88e04
Change version number to 1.2.
isrusin/cbcalc,isrusin/cbcalc
cbclib/version.py
cbclib/version.py
"""a cbclib version storage module.""" version_tuple = (1, 2, 0) full_version = "%d.%d.%d" % version_tuple
"""a cbclib version storage module.""" version_tuple = (1, 1, 1) full_version = "%d.%d.%d" % version_tuple
mit
Python
994b50c3856e01d3cec712515efe11c0f286781e
Remove deprecated alias
ipython/ipywidgets,jupyter-widgets/ipywidgets,ipython/ipywidgets,ipython/ipywidgets,ipython/ipywidgets,SylvainCorlay/ipywidgets,SylvainCorlay/ipywidgets,jupyter-widgets/ipywidgets,SylvainCorlay/ipywidgets,ipython/ipywidgets,jupyter-widgets/ipywidgets,jupyter-widgets/ipywidgets,SylvainCorlay/ipywidgets
ipywidgets/__init__.py
ipywidgets/__init__.py
# Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Interactive widgets for the Jupyter notebook. Provide simple interactive controls in the notebook. Each Widget corresponds to an object in Python and Javascript, with controls on the page. To put a Widget on the page, you can display it with Jupyter's display machinery:: from ipywidgets import IntSlider slider = IntSlider(min=1, max=10) display(slider) Moving the slider will change the value. Most Widgets have a current value, accessible as a `value` attribute. """ import os from IPython import get_ipython from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__ from .widgets import * from traitlets import link, dlink def load_ipython_extension(ip): """Set up Jupyter to work with widgets""" if not hasattr(ip, 'kernel'): return register_comm_target(ip.kernel) def register_comm_target(kernel=None): """Register the jupyter.widget comm target""" if kernel is None: kernel = get_ipython().kernel kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened) def _handle_ipython(): """Register with the comm target at import if running in Jupyter""" ip = get_ipython() if ip is None: return load_ipython_extension(ip) _handle_ipython()
# Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Interactive widgets for the Jupyter notebook. Provide simple interactive controls in the notebook. Each Widget corresponds to an object in Python and Javascript, with controls on the page. To put a Widget on the page, you can display it with Jupyter's display machinery:: from ipywidgets import IntSlider slider = IntSlider(min=1, max=10) display(slider) Moving the slider will change the value. Most Widgets have a current value, accessible as a `value` attribute. """ import os from IPython import get_ipython from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__ from .widgets import * from traitlets import link, dlink def load_ipython_extension(ip): """Set up Jupyter to work with widgets""" if not hasattr(ip, 'kernel'): return register_comm_target(ip.kernel) def register_comm_target(kernel=None): """Register the jupyter.widget comm target""" if kernel is None: kernel = get_ipython().kernel kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened) # deprecated alias handle_kernel = register_comm_target def _handle_ipython(): """Register with the comm target at import if running in Jupyter""" ip = get_ipython() if ip is None: return load_ipython_extension(ip) _handle_ipython()
bsd-3-clause
Python
8d5d8cc8d61596a62513039d79abb57f274333ef
Set version as 0.9.0
Alignak-monitoring-contrib/alignakbackend-api-client,Alignak-monitoring-contrib/alignak-backend-client,Alignak-monitoring-contrib/alignakbackend-api-client,Alignak-monitoring-contrib/alignak-backend-client
alignak_backend_client/__init__.py
alignak_backend_client/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Alignak REST backend client library This module is a Python library used for the REST API of the Alignak backend """ # Application version and manifest VERSION = (0, 9, 0) __application__ = u"Alignak Backend client" __short_version__ = '.'.join((str(each) for each in VERSION[:2])) __version__ = '.'.join((str(each) for each in VERSION[:4])) __author__ = u"Alignak team" __author_email__ = u"[email protected]" __copyright__ = u"(c) 2015-2017 - %s" % __author__ __license__ = u"GNU Affero General Public License, version 3" __description__ = u"Alignak backend client library" __releasenotes__ = u"""Alignak backend client library""" __git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend-client" __doc_url__ = "http://alignak-backend-client.readthedocs.org" __classifiers__ = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Natural Language :: English', 'Programming Language :: Python', 'Topic :: System :: Monitoring', 'Topic :: System :: Systems Administration' ] # Application manifest manifest = { 'name': __application__, 'version': __version__, 'author': __author__, 'description': __description__, 'copyright': __copyright__, 'license': __license__, 'release': __releasenotes__, 'doc': __doc_url__ }
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Alignak REST backend client library This module is a Python library used for the REST API of the Alignak backend """ # Application version and manifest VERSION = (0, 7, 0) __application__ = u"Alignak Backend client" __short_version__ = '.'.join((str(each) for each in VERSION[:2])) __version__ = '.'.join((str(each) for each in VERSION[:4])) __author__ = u"Alignak team" __author_email__ = u"[email protected]" __copyright__ = u"(c) 2015-2017 - %s" % __author__ __license__ = u"GNU Affero General Public License, version 3" __description__ = u"Alignak backend client library" __releasenotes__ = u"""Alignak backend client library""" __git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend-client" __doc_url__ = "http://alignak-backend-client.readthedocs.org" __classifiers__ = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)', 'Natural Language :: English', 'Programming Language :: Python', 'Topic :: System :: Monitoring', 'Topic :: System :: Systems Administration' ] # Application manifest manifest = { 'name': __application__, 'version': __version__, 'author': __author__, 'description': __description__, 'copyright': __copyright__, 'license': __license__, 'release': __releasenotes__, 'doc': __doc_url__ }
agpl-3.0
Python
2c42b84a5ffd7ce42295488271781c08ab372bd3
add website_multi_company_portal to demo addons
it-projects-llc/website-addons,it-projects-llc/website-addons,it-projects-llc/website-addons
website_multi_company/__manifest__.py
website_multi_company/__manifest__.py
# -*- coding: utf-8 -*- { "name": """Real Multi Website""", "summary": """Yes, you can set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!""", "category": "eCommerce", "live_test_url": "http://apps.it-projects.info/shop/product/website-multi-company?version=10.0", "images": ['images/website_multi_company_main.png'], "version": "1.2.0", "application": False, "author": "IT-Projects LLC, Ivan Yelizariev", "support": "[email protected]", "website": "https://twitter.com/yelizariev", "license": "LGPL-3", "price": 400.00, "currency": "EUR", "depends": [ "website", "website_multi_theme", "ir_config_parameter_multi_company", ], "external_dependencies": {"python": [], "bin": []}, "data": [ "views/website_views.xml", "views/website_templates.xml", "views/website_menu_views.xml", "views/website_theme_views.xml", "views/res_config_views.xml", ], "qweb": [ ], "demo": [ # "data/website_demo.xml", ], "post_load": "post_load", "pre_init_hook": None, "post_init_hook": None, "auto_install": False, "installable": True, "demo_title": "Real Multi Website", "demo_addons": [ "website_multi_company_sale", "website_multi_company_portal", ], "demo_addons_hidden": [ "website_multi_company_demo", ], "demo_url": "website-multi-company", "demo_summary": "The module allows to set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!", "demo_images": [ "images/website_multi_company_main.png", ] }
# -*- coding: utf-8 -*- { "name": """Real Multi Website""", "summary": """Yes, you can set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!""", "category": "eCommerce", "live_test_url": "http://apps.it-projects.info/shop/product/website-multi-company?version=10.0", "images": ['images/website_multi_company_main.png'], "version": "1.2.0", "application": False, "author": "IT-Projects LLC, Ivan Yelizariev", "support": "[email protected]", "website": "https://twitter.com/yelizariev", "license": "LGPL-3", "price": 400.00, "currency": "EUR", "depends": [ "website", "website_multi_theme", "ir_config_parameter_multi_company", ], "external_dependencies": {"python": [], "bin": []}, "data": [ "views/website_views.xml", "views/website_templates.xml", "views/website_menu_views.xml", "views/website_theme_views.xml", "views/res_config_views.xml", ], "qweb": [ ], "demo": [ # "data/website_demo.xml", ], "post_load": "post_load", "pre_init_hook": None, "post_init_hook": None, "auto_install": False, "installable": True, "demo_title": "Real Multi Website", "demo_addons": [ "website_multi_company_sale", ], "demo_addons_hidden": [ "website_multi_company_demo", ], "demo_url": "website-multi-company", "demo_summary": "The module allows to set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!", "demo_images": [ "images/website_multi_company_main.png", ] }
mit
Python
b671d67aaf80df9297213973659c59a4ebd72e08
test file changed
DiCarloLab-Delft/PycQED_py3,DiCarloLab-Delft/PycQED_py3,DiCarloLab-Delft/PycQED_py3
pycqed/tests/analysis_v2/test_Two_state_T1_analysis.py
pycqed/tests/analysis_v2/test_Two_state_T1_analysis.py
import unittest import pycqed as pq import os from pycqed.analysis_v2 import measurement_analysis as ma from pycqed.analysis_v2 import Two_state_T1_analysis as Ta class Test_efT1_analysis(unittest.TestCase): @classmethod def setUpClass(self): self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data') ma.a_tools.datadir = self.datadir def test_efT1_analysis(self): # bind the analysis instance so its fit results can be inspected a = Ta.efT1_analysis( t_start='20180606_144110', auto=True, close_figs=False) a.fit_res['fit_res_P0'].params['tau1'].value
import unittest import pycqed as pq import os from pycqed.analysis_v2 import measurement_analysis as ma from pycqed.analysis_v2 import Two_state_T1_analysis as Ta class Test_efT1_analysis(unittest.TestCase): @classmethod def setUpClass(self): self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data') ma.a_tools.datadir = self.datadir def test_efT1_analysis(self): Ta.efT1_analysis( t_start='20180606_144110', auto=True, close_figs=False)
mit
Python
196b9547b4dbcbfbf4891c7fd3ea3b9944018430
Revert "Revert "Added script for cron job to load surveys to database.""
paepcke/json_to_relation,paepcke/json_to_relation,paepcke/json_to_relation,paepcke/json_to_relation
scripts/cronRefreshEdxQualtrics.py
scripts/cronRefreshEdxQualtrics.py
from surveyextractor import QualtricsExtractor import getopt import sys ### Script for scheduling regular EdxQualtrics updates ### Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r" # Append directory for dependencies to PYTHONPATH sys.path.append("/home/dataman/Code/qualtrics_etl/src/qualtrics_etl/") qe = QualtricsExtractor() opts, args = getopt.getopt(sys.argv[1:], 'amsr', ['--reset', '--loadmeta', '--loadsurveys', '--loadresponses']) for opt, arg in opts: if opt in ('-a', '--reset'): qe.resetMetadata() qe.resetSurveys() qe.resetResponses() elif opt in ('-m', '--loadmeta'): qe.loadSurveyMetadata() elif opt in ('-s', '--loadsurvey'): qe.resetSurveys() qe.loadSurveyData() elif opt in ('-r', '--loadresponses'): qe.loadResponseData()
from surveyextractor import QualtricsExtractor import getopt, sys # Script for scheduling regular EdxQualtrics updates # Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r" qe = QualtricsExtractor() opts, args = getopt.getopt(sys.argv[1:], 'amsr', ['--reset', '--loadmeta', '--loadsurveys', '--loadresponses']) for opt, arg in opts: if opt in ('-a', '--reset'): qe.resetMetadata() qe.resetSurveys() qe.resetResponses() elif opt in ('-m', '--loadmeta'): qe.loadSurveyMetadata() elif opt in ('-s', '--loadsurvey'): qe.resetSurveys() qe.loadSurveyData() elif opt in ('-r', '--loadresponses'): qe.loadResponseData()
bsd-3-clause
Python
f1111b6d7eb387e7287497c1853addd003a81f39
Add a length limit
thomasleese/chatterbox
chatterbox/irc.py
chatterbox/irc.py
import time import random import irc.bot class Bot(irc.bot.SingleServerIRCBot): def __init__(self, generator, channels, nickname, server, port=6667): super().__init__([(server, port)], nickname, nickname) self.generator = generator self.channels_to_join = channels self.nick = nickname def on_nicknameinuse(self, c, e): self.nick = c.get_nickname() + '_' c.nick(self.nick) def on_welcome(self, c, e): for channel in self.channels_to_join: c.join(channel) def on_privmsg(self, c, e): sentence = self.generator.generate_sentence()[:450] time.sleep((random.random() + 1) * 0.015 * len(sentence)) c.privmsg(e.source.nick, sentence) def on_pubmsg(self, c, e): if self.nick in e.arguments[0]: sentence = self.generator.generate_sentence()[:450] time.sleep((random.random() + 1) * 0.015 * len(sentence)) c.privmsg(e.target, sentence)
import time import random import irc.bot class Bot(irc.bot.SingleServerIRCBot): def __init__(self, generator, channels, nickname, server, port=6667): super().__init__([(server, port)], nickname, nickname) self.generator = generator self.channels_to_join = channels self.nick = nickname def on_nicknameinuse(self, c, e): self.nick = c.get_nickname() + '_' c.nick(self.nick) def on_welcome(self, c, e): for channel in self.channels_to_join: c.join(channel) def on_privmsg(self, c, e): sentence = self.generator.generate_sentence() time.sleep((random.random() + 1) * 0.015 * len(sentence)) c.privmsg(e.source.nick, sentence) def on_pubmsg(self, c, e): if self.nick in e.arguments[0]: sentence = self.generator.generate_sentence() time.sleep((random.random() + 1) * 0.015 * len(sentence)) c.privmsg(e.target, sentence)
mit
Python
6f03120a57d40491e7d8245e10989a3e03b9481d
Set up task list for cook robot
swanndri/ROS-Healthcare-Simulator,swanndri/ROS-Healthcare-Simulator
se306/src/package1/scripts/cook.py
se306/src/package1/scripts/cook.py
#!/usr/bin/env python import roslib import rospy import std_msgs.msg import navigation from std_msgs.msg import String class Cook(navigation.Navigation): ''' When a message is passed out from the scheduler, determine whether it is relevant to this object. If so, take the neccessary action ''' def process_event(self, action_msg): message = str(action_msg).split("data: ")[1] if ('Cook.cook_' in message): self.task_list.append(message) def perform_task(self, task): self.status = "active" if task =="Cook.cook_": self.navigate.current_path = list(self.cook_path) self.navigate.target_coordinate = self.navigate.current_path.pop(0) def __init__(self): self.rate = rospy.Rate(20) self.task_list = [] self.status = "idle" # Create a navigation object which will be used to manage all the calls # relating to movement. Passed the robot's name so that the publisher # and subscribers for it's navigation can be set up. #Eventually we will make this input a variable instead of hardcoded self.navigate = navigation.Navigation("robot_2") rospy.Subscriber("scheduler", String, self.process_event) while not rospy.is_shutdown(): self.navigate.movement_publisher.publish(self.navigate.move_cmd) if (len(self.navigate.target_coordinate) == 0): self.status = "idle" if (len(self.task_list) > 0 and self.status == "idle"): self.perform_task(self.task_list.pop(0)) self.rate.sleep() if __name__ == '__main__': rospy.init_node('cook_robot') cook = Cook()
#!/usr/bin/env python import roslib import rospy import std_msgs.msg import navigation from std_msgs.msg import String class Cook(navigation.Navigation): ''' When a message is passed out from the scheduler, determine whether it is relevant to this object. If so, take the neccessary action ''' def process_event(self, action_msg): message = str(action_msg).split("data: ")[1] if ('Cook.cook_' in message): self.navigate.current_path = list(self.cook_path) self.navigate.target_coordinate = self.navigate.current_path.pop(0) def __init__(self): self.rate = rospy.Rate(20) # Create a navigation object which will be used to manage all the calls # relating to movement. Passed the robot's name so that the publisher # and subscribers for it's navigation can be set up. #Eventually we will make this input a variable instead of hardcoded self.navigate = navigation.Navigation("robot_2") rospy.Subscriber("scheduler", String, self.process_event) while not rospy.is_shutdown(): self.navigate.movement_publisher.publish(self.navigate.move_cmd) self.rate.sleep() if __name__ == '__main__': rospy.init_node('cook_robot') cook = Cook()
mit
Python
da55338b1bfc82bd303a3003fef881ceb3605b28
Make views time-centric, not date-centric
mjschultz/django-tracking2,bruth/django-tracking2,bruth/django-tracking2
tracking/views.py
tracking/views.py
import logging from datetime import timedelta from django import forms from django.shortcuts import render from django.contrib.auth.decorators import permission_required from django.utils.timezone import now from tracking.models import Visitor, Pageview from tracking.settings import TRACK_PAGEVIEWS log = logging.getLogger(__file__) # tracking wants to accept more formats than default, here they are input_formats = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%Y-%m', # '2006-10' '%Y', # '2006' ] class DashboardForm(forms.Form): start_time = forms.DateTimeField( required=False, input_formats=input_formats) end_time = forms.DateTimeField( required=False, input_formats=input_formats) @permission_required('tracking.view_visitor') def dashboard(request): "Counts, aggregations and more!" end_time = now() start_time = end_time - timedelta(days=1) defaults = {'start_time': start_time, 'end_time': end_time} form = DashboardForm(data=request.GET or defaults) if form.is_valid(): start_time = form.cleaned_data['start_time'] end_time = form.cleaned_data['end_time'] # determine when tracking began try: track_start_time = Visitor.objects.earliest('start_time').start_time except Visitor.DoesNotExist: track_start_time = now() # If the start_date is before tracking began, warn about incomplete data warn_incomplete = (start_time < track_start_time) # queries now take `datetime` objects directly user_stats = Visitor.objects.user_stats(start_time, end_time) visitor_stats = Visitor.objects.stats(start_time, end_time) if TRACK_PAGEVIEWS: pageview_stats = Pageview.objects.stats(start_time, end_time) else: pageview_stats = None context = { 'form': form, 'track_start_time': track_start_time, 'warn_incomplete': warn_incomplete, 'user_stats': user_stats, 'visitor_stats': visitor_stats, 'pageview_stats': pageview_stats, } return render(request, 'tracking/dashboard.html', context)
import logging from datetime import timedelta from django import forms from django.shortcuts import render from django.contrib.auth.decorators import permission_required from django.utils.timezone import now from tracking.models import Visitor, Pageview from tracking.settings import TRACK_PAGEVIEWS log = logging.getLogger(__file__) # tracking wants to accept more formats than default, here they are input_formats = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%Y-%m', # '2006-10' '%Y', # '2006' ] class DashboardForm(forms.Form): start_time = forms.DateTimeField( required=False, input_formats=input_formats) end_time = forms.DateTimeField( required=False, input_formats=input_formats) @permission_required('tracking.view_visitor') def dashboard(request): "Counts, aggregations and more!" end_time = now() start_time = end_time - timedelta(days=1) defaults = {'start_time': start_time, 'end_time': end_time} form = DashboardForm(data=request.GET or defaults) if form.is_valid(): start_time = form.cleaned_data['start_time'] end_time = form.cleaned_data['end_time'] # determine when tracking began try: track_start_time = Visitor.objects.earliest('start_time').start_time except Visitor.DoesNotExist: track_start_time = now() # If the start_date is before tracking began, warn about incomplete data warn_incomplete = (start_time < track_start_time) # queries take `date` objects (for now) start_date = start_time.date() end_date = end_time.date() user_stats = Visitor.objects.user_stats(start_date, end_date) visitor_stats = Visitor.objects.stats(start_date, end_date) if TRACK_PAGEVIEWS: pageview_stats = Pageview.objects.stats(start_date, end_date) else: pageview_stats = None context = { 'form': form, 'track_start_time': track_start_time, 'warn_incomplete': warn_incomplete, 'user_stats': user_stats, 'visitor_stats': visitor_stats, 'pageview_stats': pageview_stats, } return render(request, 'tracking/dashboard.html', context)
bsd-2-clause
Python
d19b72f42801dde328ae1e1d935c5df3a5797d4e
update manage.py for refactored appstate/config modules
omniscale/gbi-client,omniscale/gbi-client,omniscale/gbi-client,omniscale/gbi-client
app/manage.py
app/manage.py
import os import sys import scriptine from scriptine.shell import sh from geobox.web import create_app def babel_init_lang_command(lang): "Initialize new language." sh('pybabel init -i geobox/web/translations/messages.pot -d geobox/web/translations -l %s' % (lang,)) def babel_refresh_command(): "Extract messages and update translation files." # get directory of all extension that also use translations import wtforms wtforms_dir = os.path.dirname(wtforms.__file__) extensions = ' '.join([wtforms_dir]) sh('pybabel extract -F babel.cfg -k lazy_gettext -k _l -o geobox/web/translations/messages.pot geobox/web geobox/model geobox/lib ' + extensions) sh('pybabel update -i geobox/web/translations/messages.pot -d geobox/web/translations') def babel_compile_command(): "Compile translations." sh('pybabel compile -d geobox/web/translations') def fixtures_command(): from geobox.appstate import GeoBoxState from geobox.model.fixtures import add_fixtures app_state = GeoBoxState.initialize() if os.path.exists(app_state.db_filename): os.remove(app_state.db_filename) app_state = GeoBoxState.initialize() session = app_state.user_db_session() add_fixtures(session) session.commit() def init_db_command(): from geobox.appstate import GeoBoxState from geobox.model.fixtures import add_fixtures app_state = GeoBoxState.initialize() if os.path.exists(app_state.db_filename): os.remove(app_state.db_filename) app_state = GeoBoxState.initialize() session = app_state.user_db_session() session.commit() def webserver_command(config='./geobox.ini'): from geobox.appstate import GeoBoxState from geobox.defaults import GeoBoxConfig config = GeoBoxConfig.from_file(config) if not config: sys.exit(1) app_state = GeoBoxState(config) app = create_app(app_state) # scriptine removed sub-command from argv, # but Flask reloader needs complete sys.argv sys.argv[1:1] = ['webserver'] app.run(port=config.get('web', 'port')) if __name__ == '__main__': scriptine.run()
import os import sys import scriptine from scriptine.shell import sh from geobox.web import create_app def babel_init_lang_command(lang): "Initialize new language." sh('pybabel init -i geobox/web/translations/messages.pot -d geobox/web/translations -l %s' % (lang,)) def babel_refresh_command(): "Extract messages and update translation files." # get directory of all extension that also use translations import wtforms wtforms_dir = os.path.dirname(wtforms.__file__) extensions = ' '.join([wtforms_dir]) sh('pybabel extract -F babel.cfg -k lazy_gettext -k _l -o geobox/web/translations/messages.pot geobox/web geobox/model geobox/lib ' + extensions) sh('pybabel update -i geobox/web/translations/messages.pot -d geobox/web/translations') def babel_compile_command(): "Compile translations." sh('pybabel compile -d geobox/web/translations') def fixtures_command(): from geobox.config import GeoBoxState from geobox.model.fixtures import add_fixtures app_state = GeoBoxState.initialize() if os.path.exists(app_state.db_filename): os.remove(app_state.db_filename) app_state = GeoBoxState.initialize() session = app_state.user_db_session() add_fixtures(session) session.commit() def init_db_command(): from geobox.config import GeoBoxState from geobox.model.fixtures import add_fixtures app_state = GeoBoxState.initialize() if os.path.exists(app_state.db_filename): os.remove(app_state.db_filename) app_state = GeoBoxState.initialize() session = app_state.user_db_session() session.commit() def webserver_command(config='./geobox.ini'): from geobox.config import GeoBoxConfig, GeoBoxState config = GeoBoxConfig.from_file(config) if not config: sys.exit(1) app_state = GeoBoxState(config) app = create_app(app_state) # scriptine removed sub-command from argv, # but Flask reloader needs complete sys.argv sys.argv[1:1] = ['webserver'] app.run(port=config.get('web', 'port')) if __name__ == '__main__': scriptine.run()
apache-2.0
Python
0b77e09ac16006d1baa6a5f4093b51c1a13863e9
Add as_dict method to Digit model
starcalibre/MNIST3D,starcalibre/MNIST3D,starcalibre/MNIST3D
app/models.py
app/models.py
from app import db class Digit(db.Model): __tablename__ = 'digits' id = db.Column(db.INTEGER, primary_key=True) label = db.Column(db.INTEGER) tsne_x = db.Column(db.REAL) tsne_y = db.Column(db.REAL) tsne_z = db.Column(db.REAL) array = db.Column(db.String) def __repr__(self): return '<Digit %d %d>' % (self.id, self.label) def as_dict(self, fields=None): if not fields: return {c.name: getattr(self, c.name) for c in self.__table__.columns} else: return {c: getattr(self, c) for c in fields}
from app import db class Digit(db.Model): id = db.Column(db.INTEGER, primary_key=True) label = db.Column(db.INTEGER) tsne_x = db.Column(db.REAL) tsne_y = db.Column(db.REAL) tsne_z = db.Column(db.REAL) array = db.Column(db.String) image = db.Column(db.BLOB) def __repr__(self): return '<Digit %d %d>' % (self.id, self.label)
mit
Python
73f8895ae00f3d076c73bc49a03b870abb2a30cc
Fix typo
gmkou/FikaNote,gmkou/FikaNote,gmkou/FikaNote
app/models.py
app/models.py
from django.db import models import mongoengine from mongoengine import Document, EmbeddedDocument from mongoengine.fields import * import os # Create your models here. class Greeting(models.Model): when = models.DateTimeField('date created', auto_now_add=True) USER = os.getenv('DATABASE_USER') PASSWORD = os.getenv('DATABASE_PASSWORD') MONGODB_URI = "mongodb+srv://" + USER + ":" + PASSWORD + "@fikanotedb.ltkpy.mongodb.net/fikanotedb?retryWrites=true&w=majority" mongoengine.connect('fikanotedb', host=MONGODB_URI) class Shownote(EmbeddedDocument): url = URLField() title = StringField() date = DateTimeField() class FikanoteDB(Document): title = StringField() number = IntField() person = ListField(StringField()) agenda = StringField() date = DateTimeField() shownotes = ListField(EmbeddedDocumentField(Shownote)) meta = {'collection': 'fikanotedb'} class AgendaDB(Document): url = URLField() title = StringField() date = DateTimeField() meta = {'collection': 'agendadb'}
from django.db import models import mongoengine from mongoengine import Document, EmbeddedDocument from mongoengine.fields import * import os # Create your models here. class Greeting(models.Model): when = models.DateTimeField('date created', auto_now_add=True) USER = os.getenv('DATABASE_USER') PASWORD = os.getenv('DATABASE_PASSWORD') MONGODB_URI = "mongodb+srv://{}:{}@fikanotedb.ltkpy.mongodb.net/fikanotedb?retryWrites=true&w=majority".format(USER, PASWORD) mongoengine.connect('fikanotedb', host=MONGODB_URI) class Shownote(EmbeddedDocument): url = URLField() title = StringField() date = DateTimeField() class FikanoteDB(Document): title = StringField() number = IntField() person = ListField(StringField()) agenda = StringField() date = DateTimeField() shownotes = ListField(EmbeddedDocumentField(Shownote)) meta = {'collection': 'fikanotedb'} class AgendaDB(Document): url = URLField() title = StringField() date = DateTimeField() meta = {'collection': 'agendadb'}
mit
Python
69d9a36eb9d4536d9999395016759ec0ba23ad82
Fix playlist preview function
cgwire/zou
zou/app/services/playlists_service.py
zou/app/services/playlists_service.py
from zou.app.models.playlist import Playlist
from zou.app.models.preview_file import PreviewFile

from zou.app.utils import fields
from zou.app.services import shots_service, tasks_service
from zou.app.services.exception import PlaylistNotFoundException


def all_playlists_for_project(project_id):
    return fields.serialize_value(Playlist.get_all_by(project_id=project_id))


def get_playlist_with_preview_file_revisions(playlist_id):
    playlist = Playlist.get(playlist_id)

    if playlist is None:
        raise PlaylistNotFoundException()

    playlist_dict = playlist.serialize()

    if playlist_dict["shots"] is None:
        playlist_dict["shots"] = []

    for shot in playlist_dict["shots"]:
        shot["preview_files"] = get_preview_files_for_shot(
            shot["shot_id"]
        )
    return playlist_dict


def get_preview_files_for_shot(shot_id):
    tasks = tasks_service.get_tasks_for_shot(shot_id)
    previews = {}
    for task in tasks:
        preview_files = PreviewFile.query \
            .filter_by(task_id=task["id"]) \
            .order_by(PreviewFile.revision.desc()) \
            .all()
        task_type_id = task["task_type_id"]

        if len(preview_files) > 0:
            previews[task_type_id] = [
                {
                    "id": str(preview_file.id),
                    "revision": preview_file.revision
                } for preview_file in preview_files
            ]

    # Do not add too much field to avoid building too big responses
    return previews
from zou.app.models.playlist import Playlist
from zou.app.models.preview_file import PreviewFile

from zou.app.utils import fields
from zou.app.services import shots_service, tasks_service
from zou.app.services.exception import PlaylistNotFoundException


def all_playlists_for_project(project_id):
    return fields.serialize_value(Playlist.get_all_by(project_id=project_id))


def get_playlist_with_preview_file_revisions(playlist_id):
    playlist = Playlist.get(playlist_id)

    if playlist is None:
        raise PlaylistNotFoundException()

    playlist_dict = playlist.serialize()

    if playlist_dict["shots"] is None:
        playlist_dict["shots"] = []

    for shot in playlist_dict["shots"]:
        shot["preview_files"] = shots_service.get_preview_files_for_shot(
            shot["shot_id"]
        )
    return playlist_dict


def get_preview_files_for_shot(shot_id):
    tasks = tasks_service.get_tasks_for_shot(shot_id)
    previews = {}
    for task in tasks:
        preview_files = PreviewFile.query \
            .filter_by(task_id=task["id"]) \
            .order_by(PreviewFile.revision.desc()) \
            .all()
        task_type_id = task["task_type_id"]

        if len(preview_files) > 0:
            previews[task_type_id] = [
                {
                    "id": str(preview_file.id),
                    "revision": preview_file.revision
                } for preview_file in preview_files
            ]

    # Do not add too much field to avoid building too big responses
    return previews
agpl-3.0
Python
75f28330cd5cf0eea2ec99d8c3f9bf53de18d46c
correct typo
arenaoftitans/arena-of-titans-api,arenaoftitans/arena-of-titans-api
aot/config.py
aot/config.py
import logging

import toml

from os.path import exists


class Config:
    CONF_FILE_TEMPLATE = 'config/config.{type}.toml'

    def __init__(self):
        self._config = None

    def __getitem__(self, key):
        if self._config is None:
            raise RuntimeError(
                'Configuration is not loaded. '
                'Call load_config(type) before trying to use the configuration',
            )
        else:
            return self._config[key]

    def load_config(self, type, version='latest'):
        config_path = self.CONF_FILE_TEMPLATE.format(type=type)
        if type == 'dev' and not exists(config_path):
            docker_config_file = self.CONF_FILE_TEMPLATE.format(type='docker')
            logging.info(f'Note: {config_path} not found, using {docker_config_file}')
            config_path = docker_config_file

        with open(config_path, 'r') as config_file:
            self._config = toml.load(config_file)

        self._set_version_in_socket_name('api', version)
        self._set_version_in_socket_name('cache', version)

    def _set_version_in_socket_name(self, section_name, version):
        socket = self._config[section_name].get('socket', None)
        if socket:
            socket = socket.format(version=version)
            self._config[section_name]['socket'] = socket


config = Config()
import logging

import toml

from os.path import exists


class Config:
    CONF_FILE_TEMPLATE = 'config/config.{type}.toml'

    def __init__(self):
        self._config = None

    def __getitem__(self, key):
        if self._config is None:
            raise RuntimeError(
                'Configuration is not loaded. '
                'Call load_config(type) before trying to use the coniguration',
            )
        else:
            return self._config[key]

    def load_config(self, type, version='latest'):
        config_path = self.CONF_FILE_TEMPLATE.format(type=type)
        if type == 'dev' and not exists(config_path):
            docker_config_file = self.CONF_FILE_TEMPLATE.format(type='docker')
            logging.info(f'Note: {config_path} not found, using {docker_config_file}')
            config_path = docker_config_file

        with open(config_path, 'r') as config_file:
            self._config = toml.load(config_file)

        self._set_version_in_socket_name('api', version)
        self._set_version_in_socket_name('cache', version)

    def _set_version_in_socket_name(self, section_name, version):
        socket = self._config[section_name].get('socket', None)
        if socket:
            socket = socket.format(version=version)
            self._config[section_name]['socket'] = socket


config = Config()
agpl-3.0
Python
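The Config class above is a lazily loaded singleton: indexing it before load_config raises rather than silently returning nothing. A short usage sketch (the import path is assumed from the file location aot/config.py):

# Hedged sketch: 'dev' resolves to config/config.dev.toml,
# falling back to config/config.docker.toml when the file is missing.
from aot.config import config

config.load_config('dev', version='latest')
api_socket = config['api']['socket']  # RuntimeError if load_config was skipped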
fa6c7b32284bc4159e95b7bc339dab7517b2c255
add sql example
tangramor/Airnow,tangramor/Airnow,tangramor/Airnow
client/ReadAir.py
client/ReadAir.py
# -*- coding: utf-8 -*-

import serial, time, MySQLdb, re
from socketIO_client import SocketIO, LoggingNamespace

'''
SQL to create database:

CREATE DATABASE IF NOT EXISTS `airnow` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;
'''

'''
SQL to create table:

CREATE TABLE IF NOT EXISTS `air_logs` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
  `pm25` float NOT NULL,
  `aqi` int(11) NOT NULL,
  `time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''

# open mysql connection
conn=MySQLdb.connect(host="localhost",user="airnow",passwd="password",db="airnow",charset="utf8")

sql = "INSERT INTO air_logs(`pm25`,`aqi`,`time`) VALUES(%s,%s,NOW())"

t = serial.Serial("com4", 2400)  # serial port and baudrate

i = 0

with SocketIO('localhost', 8000, LoggingNamespace) as socketIO:  # connect socket.io server
    while True:
        i = i + 1
        str = t.readline()  # read from serial port
        socketIO.emit('airnow', str)  # raise event to socket.io server

        # record data to mysql
        if i == 30:  # about 30 seconds insert 1 record to database
            i = 0  # reset counter
            cursor = conn.cursor()
            vals = re.split('[:; ]', str)  # the str gotten from serial port is: "PM2.5:11.53; AQI:15;"
            param = (vals[1], vals[4])  # put PM2.5 value and AQI value to param
            n = cursor.execute(sql, param)  # execute the sql query
            cursor.execute("commit")
            #print str  #Debug
            cursor.close()

# close mysql connection
conn.close()
# -*- coding: utf-8 -*-

import serial, time, MySQLdb, re
from socketIO_client import SocketIO, LoggingNamespace

# open a mysql connection
conn=MySQLdb.connect(host="localhost",user="airnow",passwd="password",db="airnow",charset="utf8")

'''
SQL to create table:

CREATE TABLE IF NOT EXISTS `air_logs` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
  `pm25` float NOT NULL,
  `aqi` int(11) NOT NULL,
  `time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00'
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''

sql = "INSERT INTO air_logs(`pm25`,`aqi`,`time`) VALUES(%s,%s,NOW())"

t = serial.Serial("com4", 2400)  # serial port and baudrate

i = 0

with SocketIO('localhost', 8000, LoggingNamespace) as socketIO:  # connect socket.io server
    while True:
        i = i + 1
        str = t.readline()  # read from serial port
        socketIO.emit('airnow', str)  # raise event to socket.io server

        # record data to mysql
        if i == 30:  # about 30 seconds insert 1 record to database
            i = 0  # reset counter
            cursor = conn.cursor()
            vals = re.split('[:; ]', str)  # the str gotten from serial port is: "PM2.5:11.53; AQI:15;"
            param = (vals[1], vals[4])  # put PM2.5 value and AQI value to param
            n = cursor.execute(sql, param)  # execute the sql query
            cursor.execute("commit")
            #print str  #Debug
            cursor.close()

# close mysql connection
conn.close()
mit
Python
2f2861f153d0ba0d088ffe95b196b4154b59ce31
Replace constants with literal value.
ssadedin/seqr,ssadedin/seqr,macarthur-lab/seqr,macarthur-lab/seqr,macarthur-lab/seqr,ssadedin/seqr,macarthur-lab/seqr,ssadedin/seqr,macarthur-lab/seqr,ssadedin/seqr
seqr/management/commands/check_bam_cram_paths_tests.py
seqr/management/commands/check_bam_cram_paths_tests.py
import mock
from io import BytesIO

from django.core.management import call_command
from django.test import TestCase


class CheckBamCramPathsTest(TestCase):
    fixtures = ['users', '1kg_project']

    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_normal_command(self, mock_validate_path):
        mock_validate_path.return_value = ""
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual('Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n', out.getvalue())

    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_exception_command(self, mock_validate_path):
        mock_validate_path.side_effect = Exception('Error accessing "/readviz/NA19675.cram"')
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual('Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n', out.getvalue())
import mock
from io import BytesIO

from django.core.management import call_command
from django.test import TestCase

EXPECTED_EXCEPTION_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n'
EXPECTED_NORMAL_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n'


class CheckBamCramPathsTest(TestCase):
    fixtures = ['users', '1kg_project']

    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_normal_command(self, mock_validate_path):
        mock_validate_path.return_value = ""
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual(EXPECTED_NORMAL_MSG, out.getvalue())

    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_exception_command(self, mock_validate_path):
        mock_validate_path.side_effect = Exception('Error accessing "/readviz/NA19675.cram"')
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual(EXPECTED_EXCEPTION_MSG, out.getvalue())
agpl-3.0
Python
a6ac3a7a0955fab9cce1d2866a064ff6d4943dd0
bump version to 0.1.2
dpranke/pyjson5
json5/version.py
json5/version.py
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = '0.1.2'
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = '0.1.1'
apache-2.0
Python
8ba62b47d2d94eb56122f9061b8309e06cc62cdd
add .get()
kibitzr/kibitzr,kibitzr/kibitzr
kibitzr/stash.py
kibitzr/stash.py
import contextlib
import logging

logger = logging.getLogger(__name__)


class Stash(object):

    FILENAME = 'stash.db'

    @contextlib.contextmanager
    def open(self):
        import shelve
        with contextlib.closing(shelve.open(self.FILENAME)) as db:
            yield db

    def read(self):
        with self.open() as db:
            return dict(db)

    def write(self, data):
        with self.open() as db:
            for key, value in data.items():
                db[key] = value

    @classmethod
    def print_content(cls):
        for key, value in cls().read().items():
            print("{0}: {1}".format(key, value))


class LazyStash(Stash):

    def __init__(self):
        self._stashobj = None

    @property
    def _stash(self):
        if self._stashobj is None:
            self._stashobj = self.read()
        return self._stashobj

    def __getitem__(self, key):
        return self._stash[key]

    def get(self, key, default=None):
        try:
            return self._stash[key]
        except KeyError:
            return default
import contextlib
import logging

logger = logging.getLogger(__name__)


class Stash(object):

    FILENAME = 'stash.db'

    @contextlib.contextmanager
    def open(self):
        import shelve
        with contextlib.closing(shelve.open(self.FILENAME)) as db:
            yield db

    def read(self):
        with self.open() as db:
            return dict(db)

    def write(self, data):
        with self.open() as db:
            for key, value in data.items():
                db[key] = value

    @classmethod
    def print_content(cls):
        for key, value in cls().read().items():
            print("{0}: {1}".format(key, value))


class LazyStash(Stash):

    def __init__(self):
        self._stash = None

    @property
    def stash(self):
        if self._stash is None:
            self._stash = self.read()
        return self._stash

    def __getitem__(self, key):
        return self.stash[key]
mit
Python
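LazyStash now mirrors dict.get semantics on top of a shelve snapshot that is only read on first access. A usage sketch (the keys are hypothetical):

# Hedged sketch: the shelve file is opened lazily, on first lookup.
stash = LazyStash()
last_seen = stash.get('last_seen')        # None when the key is absent
interval = stash.get('interval', 300)     # explicit default instead of KeyError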
e2c92e8b6e8fb10addc73986914014b278598470
Fix docstring in standardnormal example
bees4ever/spotpy,bees4ever/spotpy,bees4ever/spotpy,thouska/spotpy,thouska/spotpy,thouska/spotpy
spotpy/examples/spot_setup_standardnormal.py
spotpy/examples/spot_setup_standardnormal.py
'''
Copyright 2015 by Tobias Houska

This file is part of Statistical Parameter Estimation Tool (SPOTPY).

:author: Tobias Houska

This example implements the Standard Normal function into SPOT.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy

class spot_setup(object):
    def __init__(self,mean=0,std=1):
        self.params = [spotpy.parameter.Uniform('x',-5,5,1.5,3.0)
                       ]
        self.mean=mean
        self.std=std

    def parameters(self):
        return spotpy.parameter.generate(self.params)

    def simulation(self,x):
        simulations= (1.0/(self.std*np.sqrt(2*np.pi)))**((-1.0/2.0)*(((x-self.mean)/self.std)**2))
        return simulations

    def evaluation(self):
        observations = [0]
        return observations

    def objectivefunction(self, simulation,evaluation):
        objectivefunction = -spotpy.objectivefunctions.rmse(evaluation = evaluation,simulation = simulation)
        return objectivefunction
'''
Copyright 2015 by Tobias Houska

This file is part of Statistical Parameter Estimation Tool (SPOTPY).

:author: Tobias Houska

This example implements the Rosenbrock function into SPOT.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy

class spot_setup(object):
    def __init__(self,mean=0,std=1):
        self.params = [spotpy.parameter.Uniform('x',-5,5,1.5,3.0)
                       ]
        self.mean=mean
        self.std=std

    def parameters(self):
        return spotpy.parameter.generate(self.params)

    def simulation(self,x):
        simulations= (1.0/(std*np.sqrt(2*np.pi)))**((-1.0/2.0)*(((x-self.mean)/self.std)**2))
        return simulations

    def evaluation(self):
        observations = [0]
        return observations

    def objectivefunction(self, simulation,evaluation):
        objectivefunction = -spotpy.objectivefunctions.rmse(evaluation = evaluation,simulation = simulation)
        return objectivefunction
mit
Python
d46368024ee89143bca15a2bdf23f8792970cf5c
add property 'external' to menu nodes
theherk/django-theherk-external-urls
menu_external_urls/menu.py
menu_external_urls/menu.py
from menus.base import Modifier
from menus.menu_pool import menu_pool
from menu_external_urls.models import MenuExternalUrl


class MenuExternalUrlMod(Modifier):
    """
    Adds ability to link page to an external URL.
    """
    def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
        if post_cut:
            return nodes
        if breadcrumb:
            return nodes
        for node in nodes:
            try:
                #Load External URL into nodes
                menu_external_url = MenuExternalUrl.objects.get(page=(node.id-1))
                node.url = menu_external_url.menu_external_url
                node.external = True
            except:
                pass
        return nodes

menu_pool.register_modifier(MenuExternalUrlMod)
from menus.base import Modifier
from menus.menu_pool import menu_pool
from menu_external_urls.models import MenuExternalUrl


class MenuExternalUrlMod(Modifier):
    """
    Adds ability to link page to an external URL.
    """
    def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
        if post_cut:
            return nodes
        if breadcrumb:
            return nodes
        for node in nodes:
            try:
                #Load External URL into nodes
                menu_external_url = MenuExternalUrl.objects.get(page=(node.id-1))
                node.url = menu_external_url.menu_external_url
            except:
                pass
        return nodes

menu_pool.register_modifier(MenuExternalUrlMod)
bsd-3-clause
Python
c973385f877d940231deb8d81e929647eadc280a
Use standard env var for DATABASE_URL
crossgovernmentservices/csd-notes,crossgovernmentservices/csd-notes,crossgovernmentservices/csd-notes
app/config.py
app/config.py
# -*- coding: utf-8 -*-
"""
Application configuration
"""

import os
from os.path import dirname, join


# get settings from environment, or credstash if running in AWS
env = os.environ
if env.get('SETTINGS') == 'AWS':
    from lib.aws_env import env


ASSETS_DEBUG = False
DEBUG = bool(env.get('DEBUG', True))
HUMANIZE_USE_UTC = True
MARKDOWN_EXTENSIONS = [
    'markdown.extensions.nl2br',
    'markdown.extensions.sane_lists',
    'markdown.extensions.smart_strong',
    'markdown.extensions.smarty',
]
SECRET_KEY = env.get('SECRET_KEY', os.urandom(24))
SESSION_COOKIE_SECURE = False
SQLALCHEMY_DATABASE_PATH = join(dirname(__file__), '../development.db')
SQLALCHEMY_DATABASE_URI = env.get(
    'DATABASE_URL',
    'sqlite:///{}'.format(SQLALCHEMY_DATABASE_PATH))
SQLALCHEMY_TRACK_MODIFICATIONS = bool(env.get(
    'SQLALCHEMY_TRACK_MODIFICATIONS', False))
TESTING = bool(env.get('TESTING', False))
# -*- coding: utf-8 -*-
"""
Application configuration
"""

import os
from os.path import dirname, join


# get settings from environment, or credstash if running in AWS
env = os.environ
if env.get('SETTINGS') == 'AWS':
    from lib.aws_env import env


ASSETS_DEBUG = False
DEBUG = bool(env.get('DEBUG', True))
HUMANIZE_USE_UTC = True
MARKDOWN_EXTENSIONS = [
    'markdown.extensions.nl2br',
    'markdown.extensions.sane_lists',
    'markdown.extensions.smart_strong',
    'markdown.extensions.smarty',
]
SECRET_KEY = env.get('SECRET_KEY', os.urandom(24))
SESSION_COOKIE_SECURE = False
SQLALCHEMY_DATABASE_PATH = join(dirname(__file__), '../development.db')
SQLALCHEMY_DATABASE_URI = env.get(
    'DATABASE_URI',
    'sqlite:///{}'.format(SQLALCHEMY_DATABASE_PATH))
SQLALCHEMY_TRACK_MODIFICATIONS = bool(env.get(
    'SQLALCHEMY_TRACK_MODIFICATIONS', False))
TESTING = bool(env.get('TESTING', False))
mit
Python
172c7a3ee0c75462f08e726716bf906ad88eadab
add test of plugin registry options
ellisonbg/altair,altair-viz/altair,jakevdp/altair
altair/utils/tests/test_plugin_registry.py
altair/utils/tests/test_plugin_registry.py
from ..plugin_registry import PluginRegistry

from typing import Callable


class TypedCallableRegistry(PluginRegistry[Callable[[int], int]]):
    pass


class GeneralCallableRegistry(PluginRegistry):
    pass


def test_plugin_registry():
    plugins = TypedCallableRegistry()

    assert plugins.names() == []
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == "TypedCallableRegistry(active='', registered=[])"

    plugins.register('new_plugin', lambda x: x ** 2)
    assert plugins.names() == ['new_plugin']
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == ("TypedCallableRegistry(active='', "
                             "registered=['new_plugin'])")

    plugins.enable('new_plugin')
    assert plugins.names() == ['new_plugin']
    assert plugins.active == 'new_plugin'
    assert plugins.get()(3) == 9
    assert repr(plugins) == ("TypedCallableRegistry(active='new_plugin', "
                             "registered=['new_plugin'])")


def test_plugin_registry_extra_options():
    plugins = GeneralCallableRegistry()

    plugins.register('metadata_plugin', lambda x, p=2: x ** p)
    plugins.enable('metadata_plugin')
    assert plugins.get()(3) == 9

    plugins.enable('metadata_plugin', p=3)
    assert plugins.get()(3) == 27
from ..plugin_registry import PluginRegistry


class RegistryTest(PluginRegistry):
    pass


def test_plugin_registry():
    plugins = RegistryTest()

    assert plugins.names() == []
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == "RegistryTest(active='', registered=[])"

    plugins.register('new_plugin', lambda x: x ** 2)
    assert plugins.names() == ['new_plugin']
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == ("RegistryTest(active='', "
                             "registered=['new_plugin'])")

    plugins.enable('new_plugin')
    assert plugins.names() == ['new_plugin']
    assert plugins.active == 'new_plugin'
    assert plugins.get()(3) == 9
    assert repr(plugins) == ("RegistryTest(active='new_plugin', "
                             "registered=['new_plugin'])")
bsd-3-clause
Python
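The new test implies that PluginRegistry.enable forwards keyword arguments to the stored plugin. A minimal sketch of just that enable/get behavior, for illustration only — this is not Altair's actual implementation:

from functools import partial

class MiniRegistry(object):
    # Hedged sketch of the option-forwarding behavior the test relies on.
    def __init__(self):
        self._plugins = {}
        self._options = {}
        self.active = ''

    def register(self, name, value):
        self._plugins[name] = value

    def names(self):
        return sorted(self._plugins)

    def enable(self, name, **options):
        self.active = name
        self._options = options

    def get(self):
        if not self.active:
            return None
        plugin = self._plugins[self.active]
        if self._options:
            return partial(plugin, **self._options)
        return plugin

# Matches the assertions in test_plugin_registry_extra_options:
r = MiniRegistry()
r.register('metadata_plugin', lambda x, p=2: x ** p)
r.enable('metadata_plugin')
assert r.get()(3) == 9
r.enable('metadata_plugin', p=3)
assert r.get()(3) == 27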
ebe5d80075ce818181a154b6ec772a08e335ae4a
fix test name
dalejung/trtools
trtools/core/tests/test_timeseries.py
trtools/core/tests/test_timeseries.py
from unittest import TestCase

import pandas as pd
from pandas.core.groupby import BinGrouper

import trtools.util.testing as tm
import numpy as np
import trtools.core.timeseries as ts

# start on friday, so second day is saturday
df = tm.fake_ohlc(1000000, freq="5min", start="2000-01-07")
# business days and trading hours
df = df.ix[df.index.dayofweek < 5]
df = ts.trading_hours(df)

class TestBinning(TestCase):

    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        pass

    def setUp(self):
        pass

    def test_downsample(self):
        # these should be equivalent
        grouped = df.downsample('D', drop_empty=False)
        test = grouped.mean()
        correct = df.resample('D', how='mean')
        tm.assert_frame_equal(test, correct)

    def test_downsample_drop_empty(self):
        """
        the drop_empty which is the default will not include
        empty groups into the GroupBy.
        """
        grouped = df.downsample('D')
        test = grouped.mean()
        correct = df.resample('D', how='mean').dropna(how='all')
        tm.assert_frame_equal(test, correct)

if __name__ == '__main__':
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'], exit=False)
from unittest import TestCase

import pandas as pd
from pandas.core.groupby import BinGrouper

import trtools.util.testing as tm
import numpy as np
import trtools.core.timeseries as ts

# start on friday, so second day is saturday
df = tm.fake_ohlc(1000000, freq="5min", start="2000-01-07")
# business days and trading hours
df = df.ix[df.index.dayofweek < 5]
df = ts.trading_hours(df)

class TestBinning(TestCase):

    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        pass

    def setUp(self):
        pass

    def downsample(self):
        # these should be equivalent
        grouped = df.downsample('D', drop_empty=False)
        test = grouped.mean()
        correct = df.resample('D', how='mean')
        tm.assert_frame_equal(test, correct)

    def test_downsample_drop_empty(self):
        """
        the drop_empty which is the default will not include
        empty groups into the GroupBy.
        """
        grouped = df.downsample('D')
        test = grouped.mean()
        correct = df.resample('D', how='mean').dropna(how='all')
        tm.assert_frame_equal(test, correct)

if __name__ == '__main__':
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'], exit=False)
mit
Python
f386fce820fb60abfe1b18c141dfd8ce268c5f4f
Update queue_on_list.py (#851)
TheAlgorithms/Python
data_structures/queue/queue_on_list.py
data_structures/queue/queue_on_list.py
"""Queue represented by a python list""" class Queue(): def __init__(self): self.entries = [] self.length = 0 self.front=0 def __str__(self): printed = '<' + str(self.entries)[1:-1] + '>' return printed """Enqueues {@code item} @param item item to enqueue""" def put(self, item): self.entries.append(item) self.length = self.length + 1 """Dequeues {@code item} @requirement: |self.length| > 0 @return dequeued item that was dequeued""" def get(self): self.length = self.length - 1 dequeued = self.entries[self.front] #self.front-=1 #self.entries = self.entries[self.front:] self.entries = self.entries[1:] return dequeued """Rotates the queue {@code rotation} times @param rotation number of times to rotate queue""" def rotate(self, rotation): for i in range(rotation): self.put(self.get()) """Enqueues {@code item} @return item at front of self.entries""" def front(self): return self.entries[0] """Returns the length of this.entries""" def size(self): return self.length
"""Queue represented by a python list""" class Queue(): def __init__(self): self.entries = [] self.length = 0 self.front=0 def __str__(self): printed = '<' + str(self.entries)[1:-1] + '>' return printed """Enqueues {@code item} @param item item to enqueue""" def put(self, item): self.entries.append(item) self.length = self.length + 1 """Dequeues {@code item} @requirement: |self.length| > 0 @return dequeued item that was dequeued""" def get(self): self.length = self.length - 1 dequeued = self.entries[self.front] self.front-=1 self.entries = self.entries[self.front:] return dequeued """Rotates the queue {@code rotation} times @param rotation number of times to rotate queue""" def rotate(self, rotation): for i in range(rotation): self.put(self.get()) """Enqueues {@code item} @return item at front of self.entries""" def front(self): return self.entries[0] """Returns the length of this.entries""" def size(self): return self.length
mit
Python
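The fix replaces the negative-index slice with a plain pop-from-front slice: the old get() decremented self.front to -1, so self.entries[self.front:] kept only the tail of the list and later dequeues returned wrong items. A quick check of the corrected FIFO behavior:

# Hedged sketch demonstrating the fixed get(); avoids the front() method,
# which is shadowed by the self.front attribute set in __init__.
q = Queue()
for item in [1, 2, 3]:
    q.put(item)
assert q.get() == 1
assert q.get() == 2
assert q.size() == 1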
50d3fcb1ad4326a55bb156fd641ce40bf52a9a51
rework router
chronossc/django-ldapdb,UGentPortaal/django-ldapdb,UGentPortaal/django-ldapdb-archived
ldapdb/router.py
ldapdb/router.py
# -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2010, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#     1. Redistributions of source code must retain the above copyright notice,
#        this list of conditions and the following disclaimer.
#
#     2. Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#
#     3. Neither the name of Bolloré telecom nor the names of its contributors
#        may be used to endorse or promote products derived from this software
#        without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

def is_ldap_model(model):
    # FIXME: there is probably a better check than testing 'base_dn'
    return hasattr(model, 'base_dn')


class Router(object):
    """A router to control all database operations on models in the
    myapp application"""
    def db_for_read(self, model, **hints):
        "Point all operations on LDAP models to 'ldap'"
        if is_ldap_model(model):
            return 'ldap'
        return None

    def db_for_write(self, model, **hints):
        "Point all operations on LDAP models to 'ldap'"
        if is_ldap_model(model):
            return 'ldap'
        return None
# -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2010, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#     1. Redistributions of source code must retain the above copyright notice,
#        this list of conditions and the following disclaimer.
#
#     2. Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#
#     3. Neither the name of Bolloré telecom nor the names of its contributors
#        may be used to endorse or promote products derived from this software
#        without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

class Router(object):
    """A router to control all database operations on models in the
    myapp application"""
    def db_for_read(self, model, **hints):
        "Point all operations on LDAP models to 'ldap'"
        from ldapdb.models import Model
        if Model in model.__bases__:
            return 'ldap'
        return None

    def db_for_write(self, model, **hints):
        "Point all operations on LDAP models to 'ldap'"
        from ldapdb.models import Model
        if Model in model.__bases__:
            return 'ldap'
        return None
bsd-2-clause
Python
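A router like this only takes effect once it is listed in the Django settings. A sketch of the wiring — the engine path, host, and credentials below are placeholders and should be checked against the django-ldapdb version in use:

# Hedged sketch of the settings side; all values are placeholders.
DATABASES = {
    'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3'},
    'ldap': {
        'ENGINE': 'ldapdb.backends.ldap',
        'NAME': 'ldap://ldap.example.com/',
        'USER': 'cn=admin,dc=example,dc=com',
        'PASSWORD': 'secret',
    },
}
DATABASE_ROUTERS = ['ldapdb.router.Router']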
9501ab023a51ca6f3e37fcad3c9c9ff04223986b
update version to 0.4
oubiwann/txjsonrpc,oubiwann/txjsonrpc,aborilov/txjsonrpc,aborilov/txjsonrpc,wuan/txjsonrpc,wuan/txjsonrpc
txjsonrpc/meta.py
txjsonrpc/meta.py
display_name = "txJSON-RPC" library_name = "txjsonrpc" version = "0.4" author = "Duncan McGreggor" author_email = "[email protected]" license = "BSD, GPL" url = "http://launchpad.net/%s" % library_name description = "Code for creatig Twisted JSON-RPC servers and clients."
display_name = "txJSON-RPC" library_name = "txjsonrpc" version = "0.3.1" author = "Duncan McGreggor" author_email = "[email protected]" license = "BSD, GPL" url = "http://launchpad.net/%s" % library_name description = "Code for creatig Twisted JSON-RPC servers and clients."
mit
Python
0dc29df1e97b8c5f36320b55c659c8290f021c69
Fix parallelization of number of topics script
NLeSC/cptm,NLeSC/cptm
DilipadTopicModelling/experiment_number_of_topics.py
DilipadTopicModelling/experiment_number_of_topics.py
import logging
import glob
from multiprocessing import Pool

from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler


def run_sampler(corpus, nTopics, nIter, beta, out_dir):
    alpha = 50.0/nTopics
    logger.info('running Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
                'beta: {})'.format(nTopics, nIter, alpha, beta))
    sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
                           beta=beta, beta_o=beta,
                           out_dir=out_dir.format(nTopics))
    sampler._initialize()
    sampler.run()


logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)

files = glob.glob('/home/jvdzwaan/data/dilipad/20112012/gov_opp/*')
out_dir = '/home/jvdzwaan/data/dilipad/res_20112012/{}'

corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.load(out_dir.format('corpus.json'),
#                        topicDict=out_dir.format('topicDict.dict'),
#                        opinionDict=out_dir.format('opinionDict.dict'))

nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)

logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))

pool = Pool(processes=3)
results = [pool.apply_async(run_sampler, args=(corpus, n, nIter, beta, out_dir))
           for n in nTopics]
pool.close()
pool.join()
import logging
import glob
from multiprocessing import Process

from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler


def run_sampler(corpus, nTopics, nIter, beta, out_dir):
    alpha = 50.0/nTopics
    logger.info('running Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
                'beta: {})'.format(nTopics, nIter, alpha, beta))
    sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
                           beta=beta, beta_o=beta,
                           out_dir=out_dir.format(nTopics))
    sampler._initialize()
    sampler.run()


logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)

files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'

corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
#                                  topicDict='{}/topicDict.dict'.format(out_dir),
#                                  opinionDict='{}/opinionDict.dict'.format(out_dir))

nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)

logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))

processes = [Process(target=run_sampler,
                     args=(corpus, n, nIter, beta, out_dir))
             for n in nTopics]

# Run processes
for p in processes:
    p.start()

# Exit the completed processes
for p in processes:
    p.join()
apache-2.0
Python
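The switch from bare Process objects to a Pool is what fixes the parallelization: instead of launching one process per topic count all at once, at most three samplers run concurrently. The scheduling pattern in isolation, with a trivial stand-in task:

# Hedged sketch of the apply_async fan-out used above.
from multiprocessing import Pool

def work(n):
    return n * n

if __name__ == '__main__':
    pool = Pool(processes=3)              # at most 3 workers at a time
    results = [pool.apply_async(work, args=(n,)) for n in range(20, 201, 20)]
    pool.close()                          # no more tasks will be submitted
    pool.join()                           # wait for all workers to finish
    print([r.get() for r in results])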
0f10ec94a7a62968aeafe10c55913e08bb0c7ce6
Fix Bug: Type Error
nday-dev/FbSpider
Scripts/Judge.py
Scripts/Judge.py
#--coding:utf-8--
import re
import json
import chardet

class Judge():
    def __init__(self, SurnameCharacter = 'Surname.Chinese.json', SurnamePinyin = 'Surname.Pinyin.json'):
        # self.SurnameCharacter = json.load(open(SurnameCharacter, 'rb'))
        self.SurnamePinyin = json.load(open(SurnamePinyin, 'rb'))
        self.Extractor = re.compile(r'^([\w]+)[ ]?.*?[ ]?(?:([\w]*)$)')
        self.NotChineseCharacter = re.compile(ur'^[^\u4e00-\u9fa5]*$')

    def SurnameJudge(self, Name):
        Name = Name.decode(chardet.detect(Name).get('encoding', 'utf-8'))
        if self.NotChineseCharacter.search(Name) == None:  # True if Name contains Chinese Characters.
            return True
        Name = Name.lower()
        Surname = self.Extractor.findall(Name)[0]
        for element in Surname:
            try:
                if self.SurnamePinyin[element]:
                    return True
            except KeyError:
                pass
        return False

    def DescriptionJudge(self, Description):
        Description = Description.decode(chardet.detect(Description).get('encoding', 'utf-8'))
        if self.NotChineseCharacter.search(Description) == None:  # True if Description contains Chinese Characters.
            return True
        return False
#--coding:utf-8--
import re
import json
import chardet

class Judge():
    def __init__(self, SurnameCharacter = 'Surname.Chinese.json', SurnamePinyin = 'Surname.Pinyin.json'):
        # self.SurnameCharacter = json.load(open(SurnameCharacter, 'rb'))
        self.SurnamePinyin = json.load(open(SurnamePinyin, 'rb'))
        self.Extractor = re.compile(r'^([\w]+)[ ]?.*?[ ]?(?:([\w]*)$)')
        self.NotChineseCharacter = re.compile(ur'^[^\u4e00-\u9fa5]*$')

    def SurnameJudge(self, Name):
        Name = Name.decode(chardet.detect(Name)['encoding'])
        if self.NotChineseCharacter.search(Name) == None:  # True if Name contains Chinese Characters.
            return True
        Name = Name.lower()
        Surname = self.Extractor.findall(Name)[0]
        for element in Surname:
            try:
                if self.SurnamePinyin[element]:
                    return True
            except KeyError:
                pass
        return False

    def DescriptionJudge(self, Description):
        Description = Description.decode(chardet.detect(Description)['encoding'])
        if self.NotChineseCharacter.search(Description) == None:  # Ture if Description contains Chinese Characters.
            return True
        return False
mit
Python
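The type error being fixed comes from chardet.detect: its 'encoding' entry can be None for undecidable input, and bytes.decode(None) raises TypeError. A hedged sketch of a slightly stronger defensive pattern — using `or` also covers an explicit None value, which dict.get with a default alone does not:

# Hedged sketch; not the FbSpider code itself.
import chardet

def safe_decode(raw, fallback='utf-8'):
    encoding = chardet.detect(raw).get('encoding') or fallback
    return raw.decode(encoding)

print(safe_decode(b'hello'))  # 'hello', detected as ascii or similar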
16a54fc100874159da7212e35361e5c7110a7ab2
Add /start route for expeditions
KanColleTool/kcsrv,KanColleTool/kcsrv,KanColleTool/kcsrv
kancolle/api/expedition.py
kancolle/api/expedition.py
"""Expedition blueprint.""" import datetime from flask import Blueprint, g from flask import request, abort import time import util from db import Expedition, Fleet, Admiral from util import prepare_api_blueprint, svdata api_mission = Blueprint("api_mission", __name__) prepare_api_blueprint(api_mission) @api_mission.route("/start", methods=["GET", "POST"]) def start_mission(): # This is mostly an internal method. # This sets up the fleet for an expedition, sending them out. # First, get the required data from the request. fleet_id = int(request.values.get("api_deck_id")) - 1 expedition_id = int(request.values.get("api_mission")) # There's an extra value, api_mission. # No idea what it does. # Also, api_serial_cid # This is presumably an anti-bot method by DMM. # We don't have these, because we don't have the game source code (and never will) # So we ignore this # Get the expedition requested by the ID. expedition = Expedition.query.filter(Expedition.id == expedition_id).first_or_404() # Get the fleet requested by the ID. try: fleet = g.admiral.fleets[fleet_id] except IndexError: abort(404) return # Set the fleet up. if fleet.expedition is not None: # Nice try. abort(400) return # Set the expedition && time. fleet.expedition = expedition fleet.expedition_completed = time.time() + expedition.time_taken # Internal state updated, now to reflect this state on the rest of the app. return svdata( {"api_complatetime": util. millisecond_timestamp(datetime.datetime.now() + datetime.timedelta(seconds=expedition.time_taken)), "api_complatetime_str": datetime.datetime.fromtimestamp(fleet.expedition_completed / 1000) .strftime('%Y-%m-%d %H:%M:%S') })
"""Expedition blueprint.""" from flask import Blueprint from util import prepare_api_blueprint api_mission = Blueprint("api_mission", __name__) prepare_api_blueprint(api_mission)
mit
Python
49d7ba5c4ddf858129bbdd3dea1c968aff8345c1
Update hackerland_radio_transmitters.py
vin0010/Hackerrank,vin0010/Hackerrank,vin0010/Hackerrank
python/hackerrank/practice/hackerland_radio_transmitters.py
python/hackerrank/practice/hackerland_radio_transmitters.py
n, k = map(int, input().split())
arr = list(map(int, input().split()))
# arr=[1,7,8,15,16,18,19,21,23]
# n=9
# k=2
# sorted_arr = sorted(arr)
sorted_arr = []
coverage = (2 * k)
my_set = set()
for i in arr:
    my_set.add(i)
for i in my_set:
    sorted_arr.append(i)

# 7 2 4 6 5 9 12 11 - input representation of indexes
# 1 2 3 4 5 6 7 8 9 10 11 12 -
# - 2 - 3
# instead of binary search get next big element t
# print(sorted_arr)

def binary_search(l, r, x):
    while l <= r:
        mid = l + (r - l) // 2
        # print(mid, '---', n)
        if mid==0:
            return -2
        if sorted_arr[mid] == x:
            return mid + 1
        elif sorted_arr[mid] < x and sorted_arr[mid + 1] > x:
            return mid + 1
        elif sorted_arr[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return -2

count = 1
# for i in sorted_arr:
index = 0
while index <= n - 1:
    next_range = binary_search(0, len(sorted_arr) - 2, sorted_arr[index] + coverage)
    # print(index, '---', sorted_arr[index], ' -- ', next_range)
    if next_range == -2:
        break
    else:
        index = next_range
        count += 1
print(count)

# while True:
#     # print("current index:{}".format(index))
#     index += coverage
#     count += 1
#     nextrange = get_next_range(index)
#     # print("next range:{}".format(nextrange))
#     if nextrange < 0:
#         # if index < n-1:
#         #     print("coming here")
#         #     count += 1
#         break
# print(count)
n, k = map(int, input().split())
arr = list(map(int, input().split()))
# arr=[1,7,8,15,16,18,19,21,23]
# n=9
# k=2
# sorted_arr = sorted(arr)
sorted_arr = []
coverage = (2 * k)
my_set = set()
for i in arr:
    my_set.add(i)
for i in my_set:
    sorted_arr.append(i)

# 7 2 4 6 5 9 12 11 - input representation of indexes
# 1 2 3 4 5 6 7 8 9 10 11 12 -
# - 2 - 3
# instead of binary search get next big element t
# print(sorted_arr)

def binary_search(l, r, x):
    while l <= r:
        mid = l + (r - l) // 2
        # print(mid, '---', n)
        if mid==0:
            return -2
        if sorted_arr[mid] == x:
            return mid + 1
        elif sorted_arr[mid] < x and sorted_arr[mid + 1] > x:
            return mid + 1
        elif sorted_arr[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return -2

count = 1
# for i in sorted_arr:
index = 0
while index <= n - 1:
    next_range = binary_search(0, len(sorted_arr) - 2, sorted_arr[index] + coverage)
    # print(index, '---', sorted_arr[index], ' -- ', next_range)
    if next_range == -2:
        break
    else:
        index = next_range
        count += 1
print(count)

# while True:
#     # print("current index:{}".format(index))
#     index += coverage
#     count += 1
#     nextrange = get_next_range(index)
#     # print("next range:{}".format(nextrange))
#     if nextrange < 0:
#         # if index < n-1:
#         #     print("coming here")
#         #     count += 1
#         break
# print(count)
apache-2.0
Python
7cee7de43fc77e362cf19a9484f243d66e034f59
Refactor from_json
hanjae/upstream,Storj/upstream
upstream/chunk.py
upstream/chunk.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from upstream.exc import ChunkError


class Chunk(object):
    def __init__(self, filehash=None, decryptkey=None, filename=None,
                 filepath=None):
        """ Stores information about an encryted chunk. Allows for format
        conversions.

        :param filehash: The hash for a file.
        :param decryptkey: The decryption key for a file.
        :param filename: Name of the file(destroyed on encryption).
        :param filepath: Location of the file.
        """
        self.filehash = filehash
        self.decryptkey = decryptkey
        self.filename = filename
        self.filepath = filepath

    def from_uri(self, uri):
        """

        :param uri: URI as a string
        :return:
        """
        try:
            self.filehash, self.decryptkey = str(uri).split("?key=")
        except:
            raise ChunkError("%s not format of <hash>?key=<key>")

    def from_json(self, json_str):
        self.json_str = json_str
        data = json.loads(json_str)
        self.filehash = data['filehash']
        self.decryptkey = data['key']

    # Gets
    def get_uri(self):
        if not self.has_hashes():
            return
        return self.filehash + "?key=" + self.decryptkey

    def get_hashes(self):
        if not self.has_hashes():
            return
        return self.filehash, self.decryptkey

    def get_json(self):
        if not self.has_hashes():
            return
        return json.dumps(
            {
                "key": self.decryptkey,
                "filehash": self.filehash,
            }
        )

    def has_hashes(self):
        return self.filehash and self.decryptkey

    # Extra metadata
    def set_filename(self, filename):
        self.filename = filename

    def set_filepath(self, filepath):
        self.filepath = filepath

    def get_filename(self):
        return self.filename

    def get_filepath(self):
        return self.filepath
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from upstream.exc import ChunkError


class Chunk(object):
    def __init__(self, filehash=None, decryptkey=None, filename=None,
                 filepath=None):
        """ Stores information about an encryted chunk. Allows for format
        conversions.

        :param filehash: The hash for a file.
        :param decryptkey: The decryption key for a file.
        :param filename: Name of the file(destroyed on encryption).
        :param filepath: Location of the file.
        """
        self.filehash = filehash
        self.decryptkey = decryptkey
        self.filename = filename
        self.filepath = filepath

    def from_uri(self, uri):
        """

        :param uri: URI as a string
        :return:
        """
        try:
            self.filehash, self.decryptkey = str(uri).split("?key=")
        except:
            raise ChunkError("%s not format of <hash>?key=<key>")

    def load_json(self, raw):
        self.raw_json = raw
        data = json.loads(raw)
        self.filehash = data['filehash']
        self.decryptkey = data['key']
        return self

    # Gets
    def get_uri(self):
        if not self.has_hashes():
            return
        return self.filehash + "?key=" + self.decryptkey

    def get_hashes(self):
        if not self.has_hashes():
            return
        return self.filehash, self.decryptkey

    def get_json(self):
        if not self.has_hashes():
            return
        return json.dumps(
            {
                "key": self.decryptkey,
                "filehash": self.filehash,
            }
        )

    def has_hashes(self):
        return self.filehash and self.decryptkey

    # Extra metadata
    def set_filename(self, filename):
        self.filename = filename

    def set_filepath(self, filepath):
        self.filepath = filepath

    def get_filename(self):
        return self.filename

    def get_filepath(self):
        return self.filepath
mit
Python
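The rename from load_json to from_json also drops the fluent return self. Round-tripping through get_json shows the symmetric API; the hash and key values below are illustrative:

# Hedged sketch of a JSON round trip with the renamed method.
chunk = Chunk(filehash='abc123', decryptkey='s3cret')
payload = chunk.get_json()

restored = Chunk()
restored.from_json(payload)   # note: no longer returns self
assert restored.get_hashes() == ('abc123', 's3cret')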