Dataset schema (one record per commit; the fields below repeat in this order for every record):

  commit        string, 40 characters (commit hash)
  subject       string, 4 to 1.73k characters (commit message subject)
  repos         string, 5 to 127k characters (comma-separated repository list)
  old_file      string, 2 to 751 characters
  new_file      string, 2 to 751 characters
  new_contents  string, 1 to 8.98k characters
  old_contents  string, 0 to 6.59k characters
  license       string, 13 classes
  lang          string, 23 classes
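A record set with this schema can be loaded with the Hugging Face datasets library. The sketch below is illustrative only: the dataset identifier is a placeholder, not the actual Hub name of this dataset.

from datasets import load_dataset

# Placeholder Hub id; substitute the real dataset name.
ds = load_dataset("some-org/commit-dataset", split="train")

record = ds[0]
print(record["commit"], record["subject"])
print(record["new_file"], record["license"], record["lang"])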
b14de33367ddf82d39ee5fe1671bc2526a5280b6
correct module version
it-projects-llc/pos-addons,it-projects-llc/pos-addons,it-projects-llc/pos-addons
pos_mobile_restaurant/__manifest__.py
pos_mobile_restaurant/__manifest__.py
{ "name": """POS Mobile UI for Waiters""", "summary": """Your Restaurant in the Mobile Version""", "category": "Point of Sale", "live_test_url": "http://apps.it-projects.info/shop/product/pos-mobile-ui?version=11.0", "images": ["images/pos_mobile_restaurant.png"], "version": "11.0.1.3.8", "application": False, "author": "IT-Projects LLC, Dinar Gabbasov", "support": "[email protected]", "website": "https://it-projects.info/team/GabbasovDinar", "license": "LGPL-3", "price": 100.00, "currency": "EUR", "depends": [ "pos_restaurant_base", "pos_mobile", ], "external_dependencies": {"python": [], "bin": []}, "data": [ "views/pos_mobile_restaurant_template.xml", "views/pos_mobile_restaurant_view.xml", ], "qweb": [ "static/src/xml/pos.xml", ], "demo": [ ], "post_load": None, "pre_init_hook": None, "post_init_hook": None, "auto_install": True, "installable": True, }
{ "name": """POS Mobile UI for Waiters""", "summary": """Your Restaurant in the Mobile Version""", "category": "Point of Sale", "live_test_url": "http://apps.it-projects.info/shop/product/pos-mobile-ui?version=11.0", "images": ["images/pos_mobile_restaurant.png"], "version": "10.0.1.3.8", "application": False, "author": "IT-Projects LLC, Dinar Gabbasov", "support": "[email protected]", "website": "https://it-projects.info/team/GabbasovDinar", "license": "LGPL-3", "price": 100.00, "currency": "EUR", "depends": [ "pos_restaurant_base", "pos_mobile", ], "external_dependencies": {"python": [], "bin": []}, "data": [ "views/pos_mobile_restaurant_template.xml", "views/pos_mobile_restaurant_view.xml", ], "qweb": [ "static/src/xml/pos.xml", ], "demo": [ ], "post_load": None, "pre_init_hook": None, "post_init_hook": None, "auto_install": True, "installable": True, }
mit
Python
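Each record stores the complete old and new file contents rather than a patch, so a unified diff can be rebuilt with Python's standard difflib. A minimal sketch, assuming a record is a dict keyed by the schema fields above, with multi-line file contents:

import difflib

def record_diff(record):
    """Reconstruct a unified diff from a record's old and new contents."""
    old = record["old_contents"].splitlines(keepends=True)
    new = record["new_contents"].splitlines(keepends=True)
    return "".join(difflib.unified_diff(
        old, new, fromfile=record["old_file"], tofile=record["new_file"]))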
669fa4443e9e4b551613ac1bb6b69c8818f382fc
Fix tweet format
romansalin/twiboozer
twiboozer.py
twiboozer.py
# -*- encoding: utf-8 -*-
# TODO: move this out and structure it as a package
import os
import datetime
import random
import textwrap

from pymarkovchain import MarkovChain

from twibot import TwiBot


def format_tweet(tweet):
    """Format tweet after generation."""
    max_len = 140
    if len(tweet) > max_len:
        tweet = textwrap.wrap(tweet, max_len - 1)[0]
    if tweet[-1] not in ".?!":
        tweet += get_end_tweet()
    return tweet


def get_end_tweet():
    """Get random punctuation at the end of the sentence."""
    endings = ('.', '!')
    rate = 0.2
    return endings[random.random() < rate]


def train(tweets):
    """Training of model from tweets based on a Markov chain."""
    directory = "db"
    filename = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    if not os.path.exists(directory):
        os.makedirs(directory)
    path = os.path.join(directory, filename)
    model = MarkovChain(path)
    model.generateDatabase("\n".join(tweets).encode("utf-8"))
    return model


def main():
    twibot = TwiBot()
    tweets = twibot.get_timeline(count=300)
    mc = train(tweets)
    tweet = mc.generateString()
    tweet = format_tweet(tweet)
    twibot.post_tweet(tweet)
    print(tweet)


if __name__ == "__main__":
    main()
# -*- encoding: utf-8 -*-
# TODO: move this out and structure it as a package
import os
import datetime
import random
import textwrap

from pymarkovchain import MarkovChain

from twibot import TwiBot


def format_tweet(tweet):
    """Format tweet after generation."""
    if tweet[-1] not in ".?!":
        tweet = "{0}{1}".format(tweet, get_end_tweet())
    max_len = 140
    if len(tweet) > max_len:
        tweet = textwrap.wrap(tweet, max_len)[0]
    return tweet


def get_end_tweet():
    """Get random punctuation at the end of the sentence."""
    endings = ('.', '!')
    rate = 0.2
    return endings[random.random() < rate]


def train(tweets):
    """Training of model from tweets based on a Markov chain."""
    directory = "db"
    filename = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    if not os.path.exists(directory):
        os.makedirs(directory)
    path = os.path.join(directory, filename)
    model = MarkovChain(path)
    model.generateDatabase("\n".join(tweets).encode("utf-8"))
    return model


def main():
    twibot = TwiBot()
    tweets = twibot.get_timeline(count=300)
    mc = train(tweets)
    tweet = mc.generateString()
    tweet = format_tweet(tweet)
    twibot.post_tweet(tweet)
    print(tweet)


if __name__ == "__main__":
    main()
mit
Python
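The fix above reorders truncation and punctuation in format_tweet: the old version appended the terminal character first and wrapped afterwards, so textwrap could cut the punctuation off again; the new version truncates to 139 characters and then appends, keeping the result at 140 characters or fewer with punctuation intact. A standalone illustration, not from the repository:

import textwrap

tweet = "x" * 150  # over the 140-character limit, no terminal punctuation

# Old order: append punctuation, then wrap; the added '.' is cut off again.
old = textwrap.wrap(tweet + ".", 140)[0]

# New order: wrap to 139 first, then append; the punctuation survives.
new = textwrap.wrap(tweet, 139)[0] + "."

print(len(old), old.endswith("."))  # 140 False
print(len(new), new.endswith("."))  # 140 True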
ef1248dc4e150e72b9a347120f73b01909ff7522
remove site requirement in pages app
Signbank/BSL-signbank,Signbank/Auslan-signbank,Signbank/Auslan-signbank,Signbank/Auslan-signbank,Signbank/BSL-signbank,Signbank/Auslan-signbank,Signbank/BSL-signbank,Signbank/BSL-signbank
pages/views.py
pages/views.py
from auslan.pages.models import Page

from django.template import loader, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.core.xheaders import populate_xheaders
from django.utils.safestring import mark_safe

DEFAULT_TEMPLATE = 'pages/default.html'


def page(request, url):
    """
    Flat page view.

    Models: `pages.page`
    Templates: Uses the template defined by the ``template_name`` field,
        or `pages/default.html` if template_name is not defined.
    Context:
        page
            `pages.page` object
    """
    if not url.endswith('/') and settings.APPEND_SLASH:
        return HttpResponseRedirect("%s/" % request.path)
    if not url.startswith('/'):
        url = "/" + url

    # here I've removed the requirement that the page be for this site
    # - this won't work if we ever have more than one site here
    # which isn't planned
    f = get_object_or_404(Page, url__exact=url)

    # If registration is required for accessing this page, and the user isn't
    # logged in, redirect to the login page.
    if f.registration_required and not request.user.is_authenticated():
        from django.contrib.auth.views import redirect_to_login
        return redirect_to_login(request.path)

    # if there is a form var 'playlist' then generate a playlist
    # xml file instead of the page itself
    if request.GET.has_key('playlist'):
        return render_to_response('pages/playlist.xml',
                                  {'page': f},
                                  mimetype='application/xml')

    if f.template_name:
        t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
    else:
        t = loader.get_template(DEFAULT_TEMPLATE)

    # To avoid having to always use the "|safe" filter in flatpage templates,
    # mark the title and content as already safe (since they are raw HTML
    # content in the first place).
    f.title = mark_safe(f.title)
    f.content = mark_safe(f.content)

    c = RequestContext(request, {
        'page': f,
    })
    response = HttpResponse(t.render(c))
    populate_xheaders(request, response, Page, f.id)
    return response
from auslan.pages.models import Page

from django.template import loader, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.core.xheaders import populate_xheaders
from django.utils.safestring import mark_safe

DEFAULT_TEMPLATE = 'pages/default.html'


def page(request, url):
    """
    Flat page view.

    Models: `pages.page`
    Templates: Uses the template defined by the ``template_name`` field,
        or `pages/default.html` if template_name is not defined.
    Context:
        page
            `pages.page` object
    """
    if not url.endswith('/') and settings.APPEND_SLASH:
        return HttpResponseRedirect("%s/" % request.path)
    if not url.startswith('/'):
        url = "/" + url

    f = get_object_or_404(Page, url__exact=url, sites__id__exact=settings.SITE_ID)

    # If registration is required for accessing this page, and the user isn't
    # logged in, redirect to the login page.
    if f.registration_required and not request.user.is_authenticated():
        from django.contrib.auth.views import redirect_to_login
        return redirect_to_login(request.path)

    # if there is a form var 'playlist' then generate a playlist
    # xml file instead of the page itself
    if request.GET.has_key('playlist'):
        return render_to_response('pages/playlist.xml',
                                  {'page': f},
                                  mimetype='application/xml')

    if f.template_name:
        t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
    else:
        t = loader.get_template(DEFAULT_TEMPLATE)

    # To avoid having to always use the "|safe" filter in flatpage templates,
    # mark the title and content as already safe (since they are raw HTML
    # content in the first place).
    f.title = mark_safe(f.title)
    f.content = mark_safe(f.content)

    c = RequestContext(request, {
        'page': f,
    })
    response = HttpResponse(t.render(c))
    populate_xheaders(request, response, Page, f.id)
    return response
bsd-3-clause
Python
3b3418592331059f560bb641704a184d64734fc7
fix evals
ambros-gleixner/rubberband,xmunoz/rubberband,ambros-gleixner/rubberband,xmunoz/rubberband,ambros-gleixner/rubberband,xmunoz/rubberband,xmunoz/rubberband
rubberband/constants.py
rubberband/constants.py
INFINITY_KEYS = ("separating/flowcover/maxslackroot",
                 "separating/flowcover/maxslack",
                 "heuristics/undercover/maxcoversizeconss")
INFINITY_MASK = -1

ZIPPED_SUFFIX = ".gz"
FILES_DIR = "files/"
STATIC_FILES_DIR = FILES_DIR + "static/"
ALL_SOLU = STATIC_FILES_DIR + "all.solu"

IPET_EVALUATIONS = {
    0: {"path": STATIC_FILES_DIR + "eval1.xml", "name": "evaluation1"},
    1: {"path": STATIC_FILES_DIR + "eval2.xml", "name": "evaluation2"},
    2: {"path": STATIC_FILES_DIR + "eval3.xml", "name": "evaluation3"},
    3: {"path": STATIC_FILES_DIR + "eval4.xml", "name": "evaluation4"},
    4: {"path": STATIC_FILES_DIR + "eval5.xml", "name": "evaluation5"},
    5: {"path": STATIC_FILES_DIR + "eval6.xml", "name": "evaluation6"},
    6: {"path": STATIC_FILES_DIR + "eval7.xml", "name": "evaluation7"},
    7: {"path": STATIC_FILES_DIR + "eval8.xml", "name": "evaluation8"},
    8: {"path": STATIC_FILES_DIR + "eval9.xml", "name": "evaluation9"},
    9: {"path": STATIC_FILES_DIR + "eval10.xml", "name": "evaluation10"}
}

NONE_DISPLAY = "--"

EXPORT_DATA_FORMATS = ("gzip", "json", "csv", "raw")
EXPORT_FILE_TYPES = (".out", ".set", ".err", ".meta")

ELASTICSEARCH_INDEX = "solver-results"

FORMAT_DATE = "%Y-%m-%d"
FORMAT_DATETIME_LONG = "%B %d, %Y %H:%M"
FORMAT_DATETIME_SHORT = FORMAT_DATE + " %H:%M"
FORMAT_DATETIME = "%Y-%m-%d %H:%M:%S"
INFINITY_KEYS = ("separating/flowcover/maxslackroot",
                 "separating/flowcover/maxslack",
                 "heuristics/undercover/maxcoversizeconss")
INFINITY_MASK = -1

ZIPPED_SUFFIX = ".gz"
FILES_DIR = "files/"
STATIC_FILES_DIR = FILES_DIR + "static/"
ALL_SOLU = STATIC_FILES_DIR + "all.solu"

IPET_EVALUATIONS = {
    0: {"path": STATIC_FILES_DIR + "eval1.xml", "name": "evaluation1"},
    1: {"path": STATIC_FILES_DIR + "eval2.xml", "name": "evaluation2"},
    2: {"path": STATIC_FILES_DIR + "eval3.xml", "name": "evaluation3"},
    3: {"path": STATIC_FILES_DIR + "eval4.xml", "name": "evaluation4"},
    4: {"path": STATIC_FILES_DIR + "eval5.xml", "name": "evaluation5"},
    5: {"path": STATIC_FILES_DIR + "eval6.xml", "name": "evaluation6"},
    6: {"path": STATIC_FILES_DIR + "eval1.xml", "name": "evaluation7"},
    7: {"path": STATIC_FILES_DIR + "eval2.xml", "name": "evaluation8"},
    8: {"path": STATIC_FILES_DIR + "eval3.xml", "name": "evaluation9"},
    9: {"path": STATIC_FILES_DIR + "eval4.xml", "name": "evaluation10"}
}

NONE_DISPLAY = "--"

EXPORT_DATA_FORMATS = ("gzip", "json", "csv", "raw")
EXPORT_FILE_TYPES = (".out", ".set", ".err", ".meta")

ELASTICSEARCH_INDEX = "solver-results"

FORMAT_DATE = "%Y-%m-%d"
FORMAT_DATETIME_LONG = "%B %d, %Y %H:%M"
FORMAT_DATETIME_SHORT = FORMAT_DATE + " %H:%M"
FORMAT_DATETIME = "%Y-%m-%d %H:%M:%S"
mit
Python
9011b359bdf164994734f8d6890a2d5acb5fa865
Replace joins with list_to_number in 32
cryvate/project-euler,cryvate/project-euler
project_euler/solutions/problem_32.py
project_euler/solutions/problem_32.py
from itertools import permutations

from ..library.base import list_to_number


def solve() -> int:
    pandigital = []

    for permutation in permutations(range(1, 10)):
        result = list_to_number(permutation[:4])

        for i in range(1, 4):
            left = list_to_number(permutation[4:4 + i])
            right = list_to_number(permutation[4 + i:])

            if left * right == result:
                pandigital.append(result)

    return sum(set(pandigital))
from itertools import permutations


def solve() -> int:
    pandigital = []

    for permutation in permutations(range(1, 10)):
        result = int(''.join(str(digit) for digit in permutation[:4]))

        for i in range(1, 4):
            left = int(''.join(str(digit) for digit in permutation[4:4 + i]))
            right = int(''.join(str(digit) for digit in permutation[4 + i:]))

            if left * right == result:
                pandigital.append(result)

    return sum(set(pandigital))
mit
Python
dc333069f4536fdc978d76924b098d10a1a8a50a
Fix error on status for last line in file.
sentience/SublimeSimpleCov,sentience/SublimeSimpleCov
ruby_coverage_status.py
ruby_coverage_status.py
import os
import sublime
import sublime_plugin
import json
import re

from .common.json_coverage_reader import JsonCoverageReader

STATUS_KEY = 'ruby-coverage-status'


class RubyCoverageStatusListener(sublime_plugin.EventListener):
    """Show coverage statistics in status bar."""

    def on_load(self, view):
        self.on_selection_modified(view)

    def on_selection_modified(self, view):
        if 'source.ruby' not in view.scope_name(0):
            return

        self.view = view
        if sublime.load_settings('SublimeRubyCoverage.sublime-settings').get('coverage_status_in_status_bar'):
            sublime.set_timeout_async(self.update_status, 0)
        else:
            self.erase_status()

    def update_status(self):
        view = self.view
        view.set_status(STATUS_KEY, self.get_view_coverage_status())

    def erase_status(self):
        view = self.view
        view.erase_status(STATUS_KEY)

    def get_view_coverage_status(self):
        view = self.view

        filename = view.file_name()
        if not filename:
            self.erase_status()

        r = JsonCoverageReader(filename)
        coverage = r.get_file_coverage(filename) if r else None
        if coverage is None:
            self.erase_status()
            return ''

        line_number = self.get_line_number()
        if line_number is None:
            self.erase_status()

        file_coverage = "File covered {:.1f}% ({}/{})".format(
            coverage['covered_percent'],
            coverage['covered_lines'],
            coverage['lines_of_code']
        )

        line_coverage = coverage['coverage'][line_number] if len(coverage['coverage']) > line_number else None
        if line_coverage is None:
            line_coverage = 'Line not executable'
        elif line_coverage > 0:
            line_coverage = 'Line covered × {}'.format(line_coverage)
        else:
            line_coverage = 'Line not covered'

        return file_coverage + ', ' + line_coverage

    def get_line_number(self):
        view = self.view

        regions = view.sel()
        if len(regions) > 1:
            return

        return view.rowcol(regions[0].a)[0]
import os
import sublime
import sublime_plugin
import json
import re

from .common.json_coverage_reader import JsonCoverageReader

STATUS_KEY = 'ruby-coverage-status'


class RubyCoverageStatusListener(sublime_plugin.EventListener):
    """Show coverage statistics in status bar."""

    def on_load(self, view):
        self.on_selection_modified(view)

    def on_selection_modified(self, view):
        if 'source.ruby' not in view.scope_name(0):
            return

        self.view = view
        if sublime.load_settings('SublimeRubyCoverage.sublime-settings').get('coverage_status_in_status_bar'):
            sublime.set_timeout_async(self.update_status, 0)
        else:
            self.erase_status()

    def update_status(self):
        view = self.view
        view.set_status(STATUS_KEY, self.get_view_coverage_status())

    def erase_status(self):
        view = self.view
        view.erase_status(STATUS_KEY)

    def get_view_coverage_status(self):
        view = self.view

        filename = view.file_name()
        if not filename:
            self.erase_status()

        r = JsonCoverageReader(filename)
        coverage = r.get_file_coverage(filename) if r else None
        if coverage is None:
            self.erase_status()
            return ''

        line_number = self.get_line_number()
        if line_number is None:
            self.erase_status()

        file_coverage = "File covered {:.1f}% ({}/{})".format(
            coverage['covered_percent'],
            coverage['covered_lines'],
            coverage['lines_of_code']
        )

        line_coverage = coverage['coverage'][line_number]
        if line_coverage is None:
            line_coverage = 'Line not executable'
        elif line_coverage > 0:
            line_coverage = 'Line covered × {}'.format(line_coverage)
        else:
            line_coverage = 'Line not covered'

        return file_coverage + ', ' + line_coverage

    def get_line_number(self):
        view = self.view

        regions = view.sel()
        if len(regions) > 1:
            return

        return view.rowcol(regions[0].a)[0]
mit
Python
921bdcc5d6f6ac4be7dfd0015e5b5fd6d06e6486
Raise exception when --debug is specified to main script
wylee/runcommands,wylee/runcommands
runcommands/__main__.py
runcommands/__main__.py
import sys

from .config import RawConfig, RunConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args
from .util import printer


def main(argv=None):
    debug = None
    try:
        all_argv, run_argv, command_argv = partition_argv(argv)
        cli_args = run.parse_args(RawConfig(run=RunConfig()), run_argv)
        run_args = read_run_args(run)
        run_args.update(cli_args)
        debug = run_args.get('debug', run.parameters['debug'].default)
        run.implementation(
            None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
            cli_args=cli_args, **run_args)
    except RunCommandsError as exc:
        if debug or debug is None:
            # User specified --debug OR processing didn't get far enough
            # to determine whether user specified --debug.
            raise
        printer.error(exc, file=sys.stderr)
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
import sys

from .config import RawConfig, RunConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args
from .util import printer


def main(argv=None):
    try:
        all_argv, run_argv, command_argv = partition_argv(argv)
        cli_args = run.parse_args(RawConfig(run=RunConfig()), run_argv)
        run_args = read_run_args(run)
        run_args.update(cli_args)
        run.implementation(
            None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
            cli_args=cli_args, **run_args)
    except RunCommandsError as exc:
        printer.error(exc, file=sys.stderr)
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
mit
Python
5ca0e0683a663271c40d728e5f88ee19a26eca61
Add ProfileSummary to defaults
caffodian/django-devserver,Stackdriver/django-devserver,pjdelport/django-devserver,chriscauley/django-devserver,jimmyye/django-devserver,dcramer/django-devserver,madgeekfiend/django-devserver,mathspace/django-devserver,takeshineshiro/django-devserver,coagulant/django-devserver,bastianh/django-devserver
devserver/settings.py
devserver/settings.py
DEVSERVER_MODULES = (
    'devserver.modules.sql.SQLRealTimeModule',
    'devserver.modules.profile.ProfileSummaryModule',
    # 'devserver.modules.cache.CacheSummaryModule',
)

# This variable gets set to True when we're running the devserver
DEVSERVER_ACTIVE = False
DEVSERVER_MODULES = (
    'devserver.modules.sql.SQLRealTimeModule',
    'devserver.modules.cache.CacheSummaryModule',
)

# This variable gets set to True when we're running the devserver
DEVSERVER_ACTIVE = False
bsd-3-clause
Python
75ac453e873727675ba18e1f45b5bc0cfda26fd7
Increment the version number
bugra/angel-list
angel/__init__.py
angel/__init__.py
__title__ = 'angel'
__version__ = '0.0.2'
__author__ = 'Bugra Akyildiz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Bugra Akyildiz'
__title__ = 'angel'
__version__ = '0.0.1'
__author__ = 'Bugra Akyildiz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Bugra Akyildiz'
mit
Python
c39d6494d1bc27dedb2141970cdd7a51382f0af4
Update version 0.5.0.dev1 -> 0.5.0
oneklc/dimod,oneklc/dimod
dimod/package_info.py
dimod/package_info.py
__version__ = '0.5.0'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'A shared API for binary quadratic model samplers.'
__version__ = '0.5.0.dev1'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'A shared API for binary quadratic model samplers.'
apache-2.0
Python
ff504057223cd71f8ebbb7e7a53dc7982a9422a8
Add stream logger config convenience function
boto/boto3
boto3/__init__.py
boto3/__init__.py
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import logging

from boto3.session import Session


__author__ = 'Amazon Web Services'
__version__ = '0.0.1'


# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None


def setup_default_session(**kwargs):
    """
    Set up a default session, passing through any parameters to the session
    constructor. There is no need to call this unless you wish to pass custom
    parameters, because a default session will be created for you.
    """
    global DEFAULT_SESSION
    DEFAULT_SESSION = Session(**kwargs)


def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
    """
    Add a stream handler for the given name and level to the logging module.
    By default, this logs all boto3 messages to ``stdout``.

        >>> import boto3
        >>> boto3.set_stream_logger('boto3.resources', logging.INFO)

    :type name: string
    :param name: Log name
    :type level: int
    :param level: Logging level, e.g. ``logging.INFO``
    :type format_string: str
    :param format_string: Log message format
    """
    if format_string is None:
        format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"

    logger = logging.getLogger(name)
    logger.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    formatter = logging.Formatter(format_string)
    handler.setFormatter(formatter)
    logger.addHandler(handler)


def _get_default_session():
    """
    Get the default session, creating one if needed.

    :rtype: boto3.session.Sesssion
    :return: The default session
    """
    if DEFAULT_SESSION is None:
        setup_default_session()

    return DEFAULT_SESSION


def client(service):
    """
    Create a low-level service client by name using the default session.

    :type service: string
    :param service: The name of a service, e.g. 's3' or 'ec2'

    :return: Service client instance
    """
    return _get_default_session().client(service)


def resource(service):
    """
    Create a resource service client by name using the default session.

    :type service: string
    :param service: The name of a service, e.g. 's3' or 'ec2'

    :return: Resource client instance
    """
    return _get_default_session().resource(service)


# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
    def emit(self, record):
        pass


logging.getLogger('boto3').addHandler(NullHandler())
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import logging

from boto3.session import Session


__author__ = 'Amazon Web Services'
__version__ = '0.0.1'


# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None


def setup_default_session(**kwargs):
    """
    Set up a default session, passing through any parameters to the session
    constructor. There is no need to call this unless you wish to pass custom
    parameters, because a default session will be created for you.
    """
    global DEFAULT_SESSION
    DEFAULT_SESSION = Session(**kwargs)


def _get_default_session():
    """
    Get the default session, creating one if needed.

    :rtype: boto3.session.Sesssion
    :return: The default session
    """
    if DEFAULT_SESSION is None:
        setup_default_session()

    return DEFAULT_SESSION


def client(service):
    """
    Create a low-level service client by name using the default session.

    :type service: string
    :param service: The name of a service, e.g. 's3' or 'ec2'

    :return: Service client instance
    """
    return _get_default_session().client(service)


def resource(service):
    """
    Create a resource service client by name using the default session.

    :type service: string
    :param service: The name of a service, e.g. 's3' or 'ec2'

    :return: Resource client instance
    """
    return _get_default_session().resource(service)


# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
    def emit(self, record):
        pass


logging.getLogger('boto3').addHandler(NullHandler())
apache-2.0
Python
7b071f3ccacd87f6dcb0e9a570d8ce386dbf7a4f
change FULLNAME to AUTHOR_FULLNAME
bsdlp/burrito.sh,fly/burrito.sh,fly/burrito.sh,bsdlp/burrito.sh
pelicanconf.py
pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

AUTHOR = u'jchen'
AUTHOR_FULLNAME = u'Jon Chen'
SITENAME = u'BURRITO 4 LYFE'
SITEURL = ''

TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'

CSS_FILE = 'style.css'

# theme stuff
THEME = './theme'

# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar']

DISQUS_SITENAME = "voltaireblog"

# gravatar email
AUTHOR_EMAIL = '[email protected]'

# social
TWITTER_USERNAME = 's_jchen'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

DEFAULT_PAGINATION = 10

DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False

DEFAULT_DATE_FORMAT = ('%Y-%m-%d')

# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

AUTHOR = u'jchen'
FULLNAME = u'Jon Chen'
SITENAME = u'BURRITO 4 LYFE'
SITEURL = ''

TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'

CSS_FILE = 'style.css'

# theme stuff
THEME = './theme'

# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar']

DISQUS_SITENAME = "voltaireblog"

# gravatar email
AUTHOR_EMAIL = '[email protected]'

# social
TWITTER_USERNAME = 's_jchen'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

DEFAULT_PAGINATION = 10

DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False

DEFAULT_DATE_FORMAT = ('%Y-%m-%d')

# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
bsd-3-clause
Python
a3c582df681aae77034e2db08999c89866cd6470
Refactor earth mover's distance implementation
davidfoerster/schema-matching
utilities.py
utilities.py
import collections


def each(function, iterable):
    for item in iterable:
        function(item)


def each_unpack(function, iterable):
    for item in iterable:
        function(*item)


def minmax(*args):
    min = None
    max = None
    for x in args:
        if max < x:
            max = x
        if x > min:
            min = x
    return min, max


def map_inplace(function, list, depth=0):
    if depth <= 0:
        list[:] = map(function, list)
    else:
        for item in list:
            map_inplace(function, item, depth - 1)


def count_if(function, iterable):
    count = 0
    for item in iterable:
        if function(item):
            count += 1
    return count


def teemap(iterable, *functions):
    map(lambda item: (f(item) for f in functions), iterable)


class ProbabilityDistribution(collections.defaultdict):
    """"Holds a probability distribution and can compute the distance to other dists"""

    def __init__(self):
        collections.defaultdict.__init__(self, int)

    def get(self, k, d = 0):
        return dict.get(self, k, d)

    def distance_to(self, compare_to):
        return sum(
            (abs(self.get(bin) - compare_to.get(bin))
             for bin in self.viewkeys() | compare_to.viewkeys()),
            0)
import collections


def each(function, iterable):
    for item in iterable:
        function(item)


def each_unpack(function, iterable):
    for item in iterable:
        function(*item)


def minmax(*args):
    min = None
    max = None
    for x in args:
        if max < x:
            max = x
        if x > min:
            min = x
    return min, max


def map_inplace(function, list, depth=0):
    if depth <= 0:
        list[:] = map(function, list)
    else:
        for item in list:
            map_inplace(function, item, depth - 1)


def count_if(function, iterable):
    count = 0
    for item in iterable:
        if function(item):
            count += 1
    return count


def teemap(iterable, *functions):
    map(lambda item: (f(item) for f in functions), iterable)


class ProbabilityDistribution(collections.defaultdict):
    """"Holds a probability distribution and can compute the distance to other dists"""

    def __init__(self):
        collections.defaultdict.__init__(self, int)

    def get(self, k, d = 0):
        return dict.get(self, k, d)

    def distance_to(self, compare_to):
        key_set = self.viewkeys() | compare_to.viewkeys()
        currentEMD = 0
        lastEMD = 0
        totaldistance = 0
        for key in key_set:
            lastEMD = currentEMD
            currentEMD = (self.get(key, 0) + lastEMD) - compare_to.get(key, 0)
            totaldistance += math.fabs(currentEMD)
        return totaldistance
mit
Python
26cb100e7e4782cdc4d7a55f6a096a9da2db2b5c
fix bug 1495003: add search to GraphicsDeviceAdmin
mozilla/socorro,lonnen/socorro,lonnen/socorro,lonnen/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro,lonnen/socorro,mozilla/socorro,mozilla/socorro
webapp-django/crashstats/crashstats/admin.py
webapp-django/crashstats/crashstats/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION

from crashstats.crashstats.models import (
    GraphicsDevice,
    Signature,
)


# Fix the Django Admin User list display so it shows the columns we care about
UserAdmin.list_display = [
    'email',
    'first_name',
    'last_name',
    'is_superuser',
    'is_staff',
    'is_active',
    'date_joined',
    'last_login'
]


ACTION_TO_NAME = {
    ADDITION: 'add',
    CHANGE: 'change',
    DELETION: 'delete'
}


@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
    date_hierarchy = 'action_time'
    list_display = [
        'action_time',
        'user_email',
        'content_type',
        'object_repr',
        'action_name',
        'get_change_message'
    ]

    def user_email(self, obj):
        return obj.user.email

    def action_name(self, obj):
        return ACTION_TO_NAME[obj.action_flag]

    def has_add_permission(self, request):
        return False

    def has_change_permission(self, request, obj=None):
        # FIXME(willkg): If this always returned False, then this modeladmin
        # doesn't show up in the index. However, this means you get a change
        # page that suggests you can change it, but errors out when saving.
        #
        # We can nix this and use has_view_permission when we upgrade to
        # Django 2.1.
        return request.method != 'POST'

    def has_delete_permission(self, request, obj=None):
        return False

    def has_module_permission(self, request):
        return True


@admin.register(GraphicsDevice)
class GraphicsDeviceAdmin(admin.ModelAdmin):
    list_display = [
        'id',
        'vendor_hex',
        'adapter_hex',
        'vendor_name',
        'adapter_name'
    ]
    search_fields = [
        'vendor_hex',
        'adapter_hex',
        'vendor_name',
        'adapter_name'
    ]


@admin.register(Signature)
class Signature(admin.ModelAdmin):
    list_display = [
        'signature',
        'first_build',
        'first_date'
    ]
    search_fields = [
        'signature'
    ]
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION

from crashstats.crashstats.models import (
    GraphicsDevice,
    Signature,
)


# Fix the Django Admin User list display so it shows the columns we care about
UserAdmin.list_display = [
    'email',
    'first_name',
    'last_name',
    'is_superuser',
    'is_staff',
    'is_active',
    'date_joined',
    'last_login'
]


ACTION_TO_NAME = {
    ADDITION: 'add',
    CHANGE: 'change',
    DELETION: 'delete'
}


@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
    date_hierarchy = 'action_time'
    list_display = [
        'action_time',
        'user_email',
        'content_type',
        'object_repr',
        'action_name',
        'get_change_message'
    ]

    def user_email(self, obj):
        return obj.user.email

    def action_name(self, obj):
        return ACTION_TO_NAME[obj.action_flag]

    def has_add_permission(self, request):
        return False

    def has_change_permission(self, request, obj=None):
        # FIXME(willkg): If this always returned False, then this modeladmin
        # doesn't show up in the index. However, this means you get a change
        # page that suggests you can change it, but errors out when saving.
        #
        # We can nix this and use has_view_permission when we upgrade to
        # Django 2.1.
        return request.method != 'POST'

    def has_delete_permission(self, request, obj=None):
        return False

    def has_module_permission(self, request):
        return True


@admin.register(GraphicsDevice)
class GraphicsDeviceAdmin(admin.ModelAdmin):
    list_display = [
        'id',
        'vendor_hex',
        'adapter_hex',
        'vendor_name',
        'adapter_name'
    ]


@admin.register(Signature)
class Signature(admin.ModelAdmin):
    list_display = [
        'signature',
        'first_build',
        'first_date'
    ]
mpl-2.0
Python
0d2079b1dcb97708dc55c32d9e2c1a0f12595875
Replace string substitution with string formatting
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
salt/runners/launchd.py
salt/runners/launchd.py
# -*- coding: utf-8 -*-
'''
Manage launchd plist files
'''

# Import python libs
import os
import sys


def write_launchd_plist(program):
    '''
    Write a launchd plist for managing salt-master or salt-minion

    CLI Example:

    .. code-block:: bash

        salt-run launchd.write_launchd_plist salt-master
    '''
    plist_sample_text = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>org.saltstack.{program}</string>
    <key>ProgramArguments</key>
    <array>
        <string>{python}</string>
        <string>{script}</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
    '''.strip()

    supported_programs = ['salt-master', 'salt-minion']
    if program not in supported_programs:
        sys.stderr.write('Supported programs: {0!r}\n'.format(supported_programs))
        sys.exit(-1)

    sys.stdout.write(
        plist_sample_text.format(
            program=program,
            python=sys.executable,
            script=os.path.join(os.path.dirname(sys.executable), program)
        )
    )
# -*- coding: utf-8 -*-
'''
Manage launchd plist files
'''

# Import python libs
import os
import sys


def write_launchd_plist(program):
    '''
    Write a launchd plist for managing salt-master or salt-minion

    CLI Example:

    .. code-block:: bash

        salt-run launchd.write_launchd_plist salt-master
    '''
    plist_sample_text = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>org.saltstack.{program}</string>
    <key>ProgramArguments</key>
    <array>
        <string>{python}</string>
        <string>{script}</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
    """.strip()

    supported_programs = ['salt-master', 'salt-minion']
    if program not in supported_programs:
        sys.stderr.write("Supported programs: %r\n" % supported_programs)
        sys.exit(-1)

    sys.stdout.write(
        plist_sample_text.format(
            program=program,
            python=sys.executable,
            script=os.path.join(os.path.dirname(sys.executable), program)
        )
    )
apache-2.0
Python
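The commit above swaps printf-style interpolation for str.format. For repr formatting the two forms are equivalent, as this standalone snippet checks:

# '%r' and '{0!r}' both insert repr(value); only the syntax differs.
supported = ['salt-master', 'salt-minion']
old_style = 'Supported programs: %r\n' % supported
new_style = 'Supported programs: {0!r}\n'.format(supported)
assert old_style == new_style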
a2a6b336295e65d29881e83ba45e1758c4582bbb
add available filters
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/apps/reports/standard/users/reports.py
corehq/apps/reports/standard/users/reports.py
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized

from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.reports.util import datespan_from_beginning
from corehq.apps.users.models import UserHistory


class UserHistoryReport(DatespanMixin, GenericTabularReport, ProjectReport):
    slug = 'user_history'
    name = ugettext_lazy("User History")
    section_name = ugettext_lazy("User Management")

    dispatcher = UserManagementReportDispatcher

    # ToDo: Add pending filters
    fields = [
        'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter',
    ]

    description = ugettext_lazy("History of user updates")
    ajax_pagination = True

    sortable = False

    @property
    def default_datespan(self):
        return datespan_from_beginning(self.domain_object, self.timezone)

    @property
    def headers(self):
        # ToDo: Add headers
        h = [
            DataTablesColumn(_("User")),
        ]

        return DataTablesHeader(*h)

    @property
    def total_records(self):
        return self._get_queryset().count()

    @memoized
    def _get_queryset(self):
        # ToDo: add query based on params
        return UserHistory.objects.none()

    @property
    def rows(self):
        records = self._get_queryset().order_by('-changed_at')[
            self.pagination.start:self.pagination.start + self.pagination.count
        ]
        for record in records:
            yield _user_history_row(record)


def _user_history_row(record):
    # ToDo: add render for each row
    return []
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized

from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import ProjectReport
from corehq.apps.users.models import UserHistory


class UserHistoryReport(GenericTabularReport, ProjectReport):
    slug = 'user_history'
    name = ugettext_lazy("User History")
    section_name = ugettext_lazy("User Management")

    dispatcher = UserManagementReportDispatcher

    # ToDo: Add filters
    fields = []

    description = ugettext_lazy("History of user updates")
    ajax_pagination = True

    sortable = False

    @property
    def headers(self):
        # ToDo: Add headers
        h = [
            DataTablesColumn(_("User")),
        ]

        return DataTablesHeader(*h)

    @property
    def total_records(self):
        return self._get_queryset().count()

    @memoized
    def _get_queryset(self):
        # ToDo: add query based on params
        return UserHistory.objects.none()

    @property
    def rows(self):
        records = self._get_queryset().order_by('-changed_at')[
            self.pagination.start:self.pagination.start + self.pagination.count
        ]
        for record in records:
            yield _user_history_row(record)


def _user_history_row(record):
    # ToDo: add render for each row
    return []
bsd-3-clause
Python
e1f6e98d7e3a1840567b1b5e379f87ec1e0aa9dc
add two more views
amdeb/odoo-connector
connector8/__openerp__.py
connector8/__openerp__.py
# -*- coding: utf-8 -*-
{'name': 'Connector8',
 'version': '0.1',
 'author': 'Openerp Connector Core Editors and Amdeb',
 'license': 'AGPL-3',
 'category': 'Generic Modules',
 'description': """
This is a port of OCA connector to Odoo 8.0
""",
 'depends': ['mail'
             ],
 'data': ['security/connector_security.xml',
          'security/ir.model.access.csv',
          'queue/model_view.xml',
          'queue/queue_data.xml',
          'checkpoint/checkpoint_view.xml',
          'connector_menu.xml',
          'setting_view.xml',
          'res_partner_view.xml',
          ],
 'installable': True,
 'application': True,
 }
# -*- coding: utf-8 -*-
{'name': 'Connector8',
 'version': '0.1',
 'author': 'Openerp Connector Core Editors and Amdeb',
 'license': 'AGPL-3',
 'category': 'Generic Modules',
 'description': """
This is a port of OCA connector to Odoo 8.0
""",
 'depends': ['mail'
             ],
 'data': ['security/connector_security.xml',
          'security/ir.model.access.csv',
          'queue/model_view.xml',
          'queue/queue_data.xml',
          'checkpoint/checkpoint_view.xml',
          'res_partner_view.xml',
          ],
 'installable': True,
 'application': True,
 }
agpl-3.0
Python
fe96f6539b40a880e88f7efe8502279cea1de506
update test
qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/apps/accounting/tests/test_model_validation.py
corehq/apps/accounting/tests/test_model_validation.py
from datetime import date

from django.core.exceptions import ValidationError

from corehq.apps.accounting.models import (
    BillingAccount,
    CreditAdjustment,
    Invoice,
    LineItem,
    Subscriber,
    Subscription,
)
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest


class TestCreditAdjustmentValidation(BaseAccountingTest):

    def tearDown(self):
        CreditAdjustment.objects.all().delete()
        LineItem.objects.all().delete()
        Invoice.objects.all().delete()
        generator.delete_all_subscriptions()
        generator.delete_all_accounts()
        super(TestCreditAdjustmentValidation, self).tearDown()

    def test_clean(self):
        account = BillingAccount.objects.create(
            name='Test Account',
            created_by='[email protected]',
            currency=generator.init_default_currency(),
        )
        subscription = Subscription.objects.create(
            account=account,
            date_start=date.today(),
            plan_version=generator.subscribable_plan(),
            subscriber=Subscriber.objects.create(domain='test')
        )
        invoice = Invoice.objects.create(
            date_start=date.today(),
            date_end=date.today(),
            subscription=subscription,
        )
        line_item = LineItem.objects.create(
            invoice=invoice,
        )

        with self.assertRaises(ValidationError):
            try:
                CreditAdjustment(
                    invoice=invoice,
                    line_item=line_item,
                ).save()
            except ValidationError as e:
                self.assertIn('__all__', e.error_dict)
                raise e
from datetime import date

from django.core.exceptions import ValidationError
from django.test import TransactionTestCase

from corehq.apps.accounting.models import (
    BillingAccount,
    CreditAdjustment,
    Invoice,
    LineItem,
    Subscriber,
    Subscription,
)
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest


class TestCreditAdjustmentValidation(BaseAccountingTest):

    def tearDown(self):
        CreditAdjustment.objects.all().delete()
        LineItem.objects.all().delete()
        Invoice.objects.all().delete()
        generator.delete_all_subscriptions()
        generator.delete_all_accounts()
        super(TestCreditAdjustmentValidation, self).tearDown()

    def test_clean(self):
        account = BillingAccount.objects.create(
            currency=generator.init_default_currency(),
        )
        subscription = Subscription.objects.create(
            account=account,
            date_start=date.today(),
            plan_version=generator.subscribable_plan(),
            subscriber=Subscriber.objects.create(domain='test')
        )
        invoice = Invoice.objects.create(
            date_start=date.today(),
            date_end=date.today(),
            subscription=subscription,
        )
        line_item = LineItem.objects.create(
            invoice=invoice,
        )

        with self.assertRaises(ValidationError):
            try:
                CreditAdjustment(
                    invoice=invoice,
                    line_item=line_item,
                ).save()
            except ValidationError as e:
                self.assertIn('__all__', e.error_dict)
                raise e
bsd-3-clause
Python
4f040d1d7730ee611f0c4a6768ecc181c6a43ff7
Fix broken view test for select seats
Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet
karspexet/ticket/tests/test_views.py
karspexet/ticket/tests/test_views.py
# coding: utf-8
from django.shortcuts import reverse
from django.test import TestCase, RequestFactory
from django.utils import timezone

from karspexet.show.models import Show, Production
from karspexet.ticket import views
from karspexet.venue.models import Venue, SeatingGroup

import pytest


class TestHome(TestCase):
    def setUp(self):
        rf = RequestFactory()
        self.request = rf.get(reverse(views.home))
        self.tomorrow = timezone.now() + timezone.timedelta(days=1)

    def test_home_lists_visible_upcoming_shows(self):
        venue = Venue.objects.create(name="Teater 1")
        production = Production.objects.create(name="Uppsättningen")
        yesterday = timezone.now() - timezone.timedelta(days=1)
        show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
        invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
        old_show = Show.objects.create(date=yesterday, production=production, venue=venue)

        response = views.home(self.request)

        shows = response.context_data["upcoming_shows"]
        assert show in shows
        assert old_show not in shows

    def test_home_contains_only_visible_shows(self):
        venue = Venue.objects.create(name="Teater 1")
        production = Production.objects.create(name="Uppsättningen")
        show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
        invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)

        response = views.home(self.request)

        shows = response.context_data["upcoming_shows"]
        assert show in shows
        assert invisible_show not in shows


class TestSelect_seats(TestCase):
    def test_select_seats(self):
        venue = Venue.objects.create(name="Teater 1")
        seatinggroup = SeatingGroup.objects.create(name="prisgrupp 1", venue=venue)
        production = Production.objects.create(name="Uppsättningen")
        show = Show.objects.create(date=timezone.now(), production=production, venue=venue)

        response = self.client.get(reverse(views.select_seats, args=[show.slug]))
        self.assertContains(response, "Köp biljetter för Uppsättningen")
# coding: utf-8
from django.shortcuts import reverse
from django.test import TestCase, RequestFactory
from django.utils import timezone

from karspexet.show.models import Show, Production
from karspexet.ticket import views
from karspexet.venue.models import Venue, SeatingGroup

import pytest


class TestHome(TestCase):
    def setUp(self):
        rf = RequestFactory()
        self.request = rf.get(reverse(views.home))
        self.tomorrow = timezone.now() + timezone.timedelta(days=1)

    def test_home_lists_visible_upcoming_shows(self):
        venue = Venue.objects.create(name="Teater 1")
        production = Production.objects.create(name="Uppsättningen")
        yesterday = timezone.now() - timezone.timedelta(days=1)
        show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
        invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
        old_show = Show.objects.create(date=yesterday, production=production, venue=venue)

        response = views.home(self.request)

        shows = response.context_data["upcoming_shows"]
        assert show in shows
        assert old_show not in shows

    def test_home_contains_only_visible_shows(self):
        venue = Venue.objects.create(name="Teater 1")
        production = Production.objects.create(name="Uppsättningen")
        show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
        invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)

        response = views.home(self.request)

        shows = response.context_data["upcoming_shows"]
        assert show in shows
        assert invisible_show not in shows


class TestSelect_seats(TestCase):
    def test_select_seats(self):
        venue = Venue.objects.create(name="Teater 1")
        seatinggroup = SeatingGroup.objects.create(name="prisgrupp 1", venue=venue)
        production = Production.objects.create(name="Uppsättningen")
        show = Show.objects.create(date=timezone.now(), production=production, venue=venue)

        response = self.client.get(reverse(views.select_seats, args=[show.id]))
        self.assertContains(response, "Köp biljetter för Uppsättningen")
mit
Python
14f0ed32b62e2d00443e99428516a2d17a68bc58
Use COMPLEX_TEST_STRING for testing
d6e/coala,shreyans800755/coala,djkonro/coala,SambitAcharya/coala,sagark123/coala,arafsheikh/coala,svsn2117/coala,Balaji2198/coala,coala/coala,lonewolf07/coala,scottbelden/coala,rresol/coala,refeed/coala,NalinG/coala,Balaji2198/coala,saurabhiiit/coala,andreimacavei/coala,meetmangukiya/coala,karansingh1559/coala,Balaji2198/coala,RJ722/coala,yland/coala,impmihai/coala,scriptnull/coala,RJ722/coala,arush0311/coala,ManjiriBirajdar/coala,jayvdb/coala,meetmangukiya/coala,Nosferatul/coala,scriptnull/coala,kartikeys98/coala,sudheesh001/coala,AbdealiJK/coala,andreimacavei/coala,scriptnull/coala,damngamerz/coala,JohnS-01/coala,meetmangukiya/coala,lonewolf07/coala,FeodorFitsner/coala,NalinG/coala,tushar-rishav/coala,NalinG/coala,rimacone/testing2,saurabhiiit/coala,JohnS-01/coala,rimacone/testing2,nemaniarjun/coala,yashLadha/coala,nemaniarjun/coala,mr-karan/coala,tltuan/coala,jayvdb/coala,kartikeys98/coala,svsn2117/coala,AbdealiJK/coala,sudheesh001/coala,tushar-rishav/coala,sophiavanvalkenburg/coala,shreyans800755/coala,abhiroyg/coala,MattAllmendinger/coala,abhiroyg/coala,karansingh1559/coala,AdeshAtole/coala,FeodorFitsner/coala,impmihai/coala,aptrishu/coala,CruiseDevice/coala,sils1297/coala,andreimacavei/coala,RJ722/coala,JohnS-01/coala,yland/coala,swatilodha/coala,damngamerz/coala,SambitAcharya/coala,dagdaggo/coala,arafsheikh/coala,arush0311/coala,CruiseDevice/coala,Asalle/coala,NalinG/coala,refeed/coala,coala/coala,SambitAcharya/coala,karansingh1559/coala,saurabhiiit/coala,NiklasMM/coala,arjunsinghy96/coala,CruiseDevice/coala,ayushin78/coala,NalinG/coala,tushar-rishav/coala,rimacone/testing2,ayushin78/coala,mr-karan/coala,yashtrivedi96/coala,sudheesh001/coala,Tanmay28/coala,Uran198/coala,NiklasMM/coala,Tanmay28/coala,SambitAcharya/coala,stevemontana1980/coala,yashLadha/coala,Tanmay28/coala,coala-analyzer/coala,vinc456/coala,d6e/coala,dagdaggo/coala,Asnelchristian/coala,scottbelden/coala,SanketDG/coala,lonewolf07/coala,vinc456/coala,MariosPanag/coala,AbdealiJK/coala,yland/coala,rresol/coala,refeed/coala,FeodorFitsner/coala,coala-analyzer/coala,scriptnull/coala,mr-karan/coala,abhiroyg/coala,Tanmay28/coala,yashtrivedi96/coala,sagark123/coala,incorrectusername/coala,Tanmay28/coala,MattAllmendinger/coala,Asalle/coala,sophiavanvalkenburg/coala,swatilodha/coala,sils1297/coala,scottbelden/coala,Nosferatul/coala,AdeshAtole/coala,Tanmay28/coala,impmihai/coala,Nosferatul/coala,Asnelchristian/coala,scriptnull/coala,vinc456/coala,aptrishu/coala,djkonro/coala,arafsheikh/coala,scriptnull/coala,netman92/coala,Tanmay28/coala,arush0311/coala,AdeshAtole/coala,aptrishu/coala,SanketDG/coala,SanketDG/coala,Shade5/coala,coala/coala,swatilodha/coala,SambitAcharya/coala,coala-analyzer/coala,stevemontana1980/coala,NiklasMM/coala,NalinG/coala,rresol/coala,Tanmay28/coala,nemaniarjun/coala,shreyans800755/coala,jayvdb/coala,sils1297/coala,djkonro/coala,netman92/coala,sophiavanvalkenburg/coala,Shade5/coala,kartikeys98/coala,Asnelchristian/coala,Asalle/coala,MariosPanag/coala,ManjiriBirajdar/coala,dagdaggo/coala,Shade5/coala,Uran198/coala,incorrectusername/coala,tltuan/coala,svsn2117/coala,netman92/coala,d6e/coala,NalinG/coala,arjunsinghy96/coala,scriptnull/coala,incorrectusername/coala,SambitAcharya/coala,ManjiriBirajdar/coala,sagark123/coala,tltuan/coala,MattAllmendinger/coala,stevemontana1980/coala,yashtrivedi96/coala,ayushin78/coala,yashLadha/coala,SambitAcharya/coala,Uran198/coala,damngamerz/coala,MariosPanag/coala,arjunsinghy96/coala
coalib/tests/processes/communication/LogMessageTest.py
coalib/tests/processes/communication/LogMessageTest.py
""" This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys sys.path.insert(0, ".") from coalib.misc.i18n import _ from coalib.misc.StringConstants import StringConstants from coalib.processes.communication.LOG_LEVEL import LOG_LEVEL from coalib.processes.communication.LogMessage import LogMessage import unittest class LogMessageTestCase(unittest.TestCase): def setUp(self): self.uut = LogMessage() def test_construction(self): # take a look if defaults are good self.assertEqual(self.uut.log_level, LOG_LEVEL.DEBUG) self.assertEqual(self.uut.message, "") # see that arguments are processed right self.uut = LogMessage(LOG_LEVEL.WARNING, "a msg") self.assertEqual(self.uut.log_level, LOG_LEVEL.WARNING) self.assertEqual(self.uut.message, "a msg") def test_to_str(self): self.uut.message = StringConstants.COMPLEX_TEST_STRING self.uut.log_level = LOG_LEVEL.ERROR self.assertEqual(str(self.uut), "[{}] {}".format(_("ERROR"), StringConstants.COMPLEX_TEST_STRING)) self.uut.log_level = LOG_LEVEL.WARNING self.assertEqual(str(self.uut), "[{}] {}".format(_("WARNING"), StringConstants.COMPLEX_TEST_STRING)) self.uut.log_level = LOG_LEVEL.DEBUG self.assertEqual(str(self.uut), "[{}] {}".format(_("DEBUG"), StringConstants.COMPLEX_TEST_STRING)) if __name__ == '__main__': unittest.main(verbosity=2)
""" This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys sys.path.insert(0, ".") from coalib.misc.i18n import _ from coalib.processes.communication.LOG_LEVEL import LOG_LEVEL from coalib.processes.communication.LogMessage import LogMessage import unittest class LogMessageTestCase(unittest.TestCase): def setUp(self): self.uut = LogMessage() def test_construction(self): # take a look if defaults are good self.assertEqual(self.uut.log_level, LOG_LEVEL.DEBUG) self.assertEqual(self.uut.message, "") # see that arguments are processed right self.uut = LogMessage(LOG_LEVEL.WARNING, "a msg") self.assertEqual(self.uut.log_level, LOG_LEVEL.WARNING) self.assertEqual(self.uut.message, "a msg") def test_to_str(self): self.uut.message = "test message änd umlauts!" self.uut.log_level = LOG_LEVEL.ERROR self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("ERROR"))) self.uut.log_level = LOG_LEVEL.WARNING self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("WARNING"))) self.uut.log_level = LOG_LEVEL.DEBUG self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("DEBUG"))) if __name__ == '__main__': unittest.main(verbosity=2)
agpl-3.0
Python
7cbf46b1c44791b6a1466b08e049b568d32cf2d3
fix soil.tests.test_download_base:TestBlobDownload
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/ex-submodules/soil/tests/test_download_base.py
corehq/ex-submodules/soil/tests/test_download_base.py
from __future__ import absolute_import
from __future__ import unicode_literals
from io import BytesIO
from uuid import uuid4

from django.test import TestCase

from soil import BlobDownload
from soil.util import expose_blob_download

from corehq.blobs.tests.util import new_meta, TemporaryFilesystemBlobDB


class TestBlobDownload(TestCase):
    identifier = 'identifier'

    @classmethod
    def setUpClass(cls):
        super(TestBlobDownload, cls).setUpClass()
        cls.db = TemporaryFilesystemBlobDB()

    @classmethod
    def tearDownClass(cls):
        cls.db.close()
        super(TestBlobDownload, cls).tearDownClass()

    def test_expose_blob_download(self):
        ref = expose_blob_download(
            self.identifier,
            expiry=60,
            content_disposition='text/xml',
        )
        self.db.put(BytesIO(b'content'), meta=new_meta(key=ref.download_id))

        response = BlobDownload.get(ref.download_id).toHttpResponse()
        self.assertEqual(next(response.streaming_content), b'content')

    def test_expose_blob_download_with_legacy_download_id(self):
        self.db.put(BytesIO(b'legacy-blob'), self.identifier)
        ref = BlobDownload(
            self.identifier,
            mimetype='text/plain',
            content_disposition='text/xml',
        )
        ref.download_id = uuid4().hex  # old download id format
        ref.save(60)

        response = BlobDownload.get(ref.download_id).toHttpResponse()
        self.assertEqual(next(response.streaming_content), b'legacy-blob')
from __future__ import absolute_import
from __future__ import unicode_literals
from io import BytesIO
from uuid import uuid4

from django.test import TestCase

from soil import BlobDownload
from soil.util import expose_blob_download

from corehq.blobs.tests.util import new_meta, TemporaryFilesystemBlobDB


class TestBlobDownload(TestCase):
    identifier = 'identifier'

    @classmethod
    def setUpClass(cls):
        super(TestBlobDownload, cls).setUpClass()
        cls.db = TemporaryFilesystemBlobDB()

    @classmethod
    def tearDownClass(cls):
        cls.db.close()
        super(TestBlobDownload, cls).tearDownClass()

    def test_expose_blob_download(self):
        ref = expose_blob_download(
            self.identifier,
            expiry=60,
            content_disposition='text/xml',
        )
        self.db.put(BytesIO(b'content'), meta=new_meta(key=ref.download_id))

        response = BlobDownload.get(ref.download_id).toHttpResponse()
        self.assertEqual(next(response.streaming_content), 'content')

    def test_expose_blob_download_with_legacy_download_id(self):
        self.db.put(BytesIO(b'legacy-blob'), self.identifier)
        ref = BlobDownload(
            self.identifier,
            mimetype='text/plain',
            content_disposition='text/xml',
        )
        ref.download_id = uuid4().hex  # old download id format
        ref.save(60)

        response = BlobDownload.get(ref.download_id).toHttpResponse()
        self.assertEqual(next(response.streaming_content), 'legacy-blob')
bsd-3-clause
Python
c3367eaa7bccf5843abd12a438e14518d533cdbe
Allow API on Windows
platformio/platformio-api
platformio_api/__init__.py
platformio_api/__init__.py
# Copyright 2014-present Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging.config
import os

VERSION = (1, 18, 1)
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"

__author__ = "Ivan Kravets"
__email__ = "[email protected]"

__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"

config = dict(
    SQLALCHEMY_DATABASE_URI=None,
    GITHUB_LOGIN=None,
    GITHUB_PASSWORD=None,
    DL_PIO_DIR=None,
    DL_PIO_URL=None,
    MAX_DLFILE_SIZE=1024 * 1024 * 150,  # 150 Mb
    # Fuzzy search will not be applied to words shorter than the value below
    SOLR_FUZZY_MIN_WORD_LENGTH=3,
    LOGGING=dict(version=1)
)

assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
    config.update(json.load(f))

# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])

# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
try:
    from time import tzset
    tzset()
except ImportError:
    pass
# Copyright 2014-present Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging.config
import os
from time import tzset

VERSION = (1, 18, 1)
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"

__author__ = "Ivan Kravets"
__email__ = "[email protected]"

__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"

config = dict(
    SQLALCHEMY_DATABASE_URI=None,
    GITHUB_LOGIN=None,
    GITHUB_PASSWORD=None,
    DL_PIO_DIR=None,
    DL_PIO_URL=None,
    MAX_DLFILE_SIZE=1024 * 1024 * 150,  # 150 Mb
    # Fuzzy search will not be applied to words shorter than the value below
    SOLR_FUZZY_MIN_WORD_LENGTH=3,
    LOGGING=dict(version=1)
)

assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
    config.update(json.load(f))

# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])

# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
tzset()
apache-2.0
Python
dc743c63c52c7ef0bcab73d7b4fcf8f3f4a54ea6
make median transmittance optional in plot_mean_transmittance.
yishayv/lyacorr,yishayv/lyacorr
plot_mean_transmittance.py
plot_mean_transmittance.py
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot

import common_settings
import mean_transmittance
import median_transmittance

lya_center = 1215.67

settings = common_settings.Settings()

enable_median_transmittance = False


def do_plot():
    m = mean_transmittance.MeanTransmittance.from_file(settings.get_mean_transmittance_npy())
    ar_z, mean = m.get_weighted_mean_with_minimum_count(1)
    # low_pass_mean = m.get_low_pass_mean()[1]

    fig = plt.figure(figsize=(14, 10))
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = ax1.twiny()

    ax1.plot(ar_z, mean)
    # ax1.plot(ar_z, low_pass_mean, color='red')

    if enable_median_transmittance:
        med = median_transmittance.MedianTransmittance.from_file(settings.get_median_transmittance_npy())
        ar_z_med, ar_median = med.get_weighted_median_with_minimum_count(1)
        ar_z_med, ar_unweighted_median = med.get_weighted_median_with_minimum_count(1, weighted=False)
        ax1.plot(ar_z_med, ar_median, color='orange')
        ax1.plot(ar_z_med, ar_unweighted_median, color='green')

    ax1.set_ylabel(r"$\left< f_q(z)/C_q(z) \right> $")
    plt.ylim(0.0, 1.2)

    # add wavelength tick marks on top
    x_lim2 = tuple([lya_center * (1 + z) for z in ax1.get_xlim()])
    ax2.set_xlim(x_lim2)
    plt.axis()

    ax3 = host_subplot(2, 1, 2)
    ax4 = ax3.twinx()
    ax4.set_ylabel(r"$N_{Spectra}$")
    ax3.plot(m.ar_z, m.ar_total_flux, color='blue', label=r"Total flux$\times$ weight")
    ax3.plot(m.ar_z, m.ar_weights, ':', color='green', label='Total weight')
    ax4.plot(m.ar_z, m.ar_count, ':', color='red', label='Spectra count')
    ax3.set_xlim(ax1.get_xlim())
    ax3.set_ylabel(r"$\sum_q f_q(z)/C_q(z)$")
    ax3.set_xlabel(r"$z$")
    ax3.legend(loc='best')
    plt.show()


if __name__ == '__main__':
    do_plot()
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot

import common_settings
import mean_transmittance
import median_transmittance

lya_center = 1215.67

settings = common_settings.Settings()


def do_plot():
    m = mean_transmittance.MeanTransmittance.from_file(settings.get_mean_transmittance_npy())
    med = median_transmittance.MedianTransmittance.from_file(settings.get_median_transmittance_npy())
    ar_z, mean = m.get_weighted_mean_with_minimum_count(1)
    ar_z_med, ar_median = med.get_weighted_median_with_minimum_count(1)
    ar_z_med, ar_unweighted_median = med.get_weighted_median_with_minimum_count(1, weighted=False)
    # low_pass_mean = m.get_low_pass_mean()[1]

    fig = plt.figure(figsize=(14, 10))
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = ax1.twiny()

    ax1.plot(ar_z, mean)
    # ax1.plot(ar_z, low_pass_mean, color='red')
    ax1.plot(ar_z_med, ar_median, color='orange')
    ax1.plot(ar_z_med, ar_unweighted_median, color='green')

    ax1.set_ylabel(r"$\left< f_q(z)/C_q(z) \right> $")
    plt.ylim(0.0, 1.2)

    # add wavelength tick marks on top
    x_lim2 = tuple([lya_center * (1 + z) for z in ax1.get_xlim()])
    ax2.set_xlim(x_lim2)
    plt.axis()

    ax3 = host_subplot(2, 1, 2)
    ax4 = ax3.twinx()
    ax4.set_ylabel(r"$N_{Spectra}$")
    ax3.plot(m.ar_z, m.ar_total_flux, color='blue', label=r"Total flux$\times$ weight")
    ax3.plot(m.ar_z, m.ar_weights, ':', color='green', label='Total weight')
    ax4.plot(m.ar_z, m.ar_count, ':', color='red', label='Spectra count')
    ax3.set_xlim(ax1.get_xlim())
    ax3.set_ylabel(r"$\sum_q f_q(z)/C_q(z)$")
    ax3.set_xlabel(r"$z$")
    ax3.legend(loc='best')
    plt.show()


if __name__ == '__main__':
    do_plot()
mit
Python
4173221d72356fc336be63273a7252c81831fd54
fix datetime_to_string
kirov/exp1403,kirov/ephim
ephim/utils.py
ephim/utils.py
from datetime import datetime
import string


def to_base(num, b, numerals=string.digits + string.ascii_lowercase):
    return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])


def datetime_to_string(dt: datetime):
    delta = dt - datetime.utcfromtimestamp(0)

    ### 0
    # return dt.strftime('%Y-%m-%d %H.%M.%S.')

    ### 1
    # ts = int(dt.timestamp())
    # return '{sign}{ts}'.format(
    #     sign='N' if ts < 0 else 'P',
    #     ts=abs(ts),
    # )

    ### 2
    return '{sign}{days}_{seconds}'.format(
        sign='0' if delta.days < 0 else '',
        days=to_base(abs(delta.days), 36, string.digits + string.ascii_uppercase),
        seconds=str(delta.seconds).zfill(5),
    )

    # return '{sign}{days}_{seconds}'.format(
    #     sign='n' if delta.days < 0 else 'p',
    #     days=to_base(abs(delta.days), 36),
    #     seconds=str(delta.seconds).zfill(5),
    # )
    # return str(dt.strftime('%Y%m%d')) + '_' + str(delta.seconds).zfill(5)
    # return str(int(dt.timestamp()))
    # return to_base(int(dt.timestamp()), 36)
    # return '{days}_{seconds}'.format(
    #     days=to_base(abs(delta.days), 26, string.ascii_uppercase),
    #     seconds=str(delta.seconds).zfill(5),
    # )
    # print(delta.seconds)
    # return '{days}{seconds}'.format(
    #     days=to_base(abs(delta.days), 26, string.ascii_uppercase),
    #     # hours=to_base(delta.seconds // 3600, 26, string.ascii_uppercase),
    #     seconds=str(delta.seconds).zfill(5),
    #     # seconds=dt.strftime('%H%M'),
    # )
    # return to_base(int(dt.timestamp()), 26, string.ascii_lowercase)
from datetime import datetime
import string


def to_base(num, b, numerals=string.digits + string.ascii_lowercase):
    return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])


def datetime_to_string(dt: datetime):
    delta = dt - datetime.fromtimestamp(0)

    ### 0
    # return dt.strftime('%Y-%m-%d %H.%M.%S.')

    ### 1
    # ts = int(dt.timestamp())
    # return '{sign}{ts}'.format(
    #     sign='N' if ts < 0 else 'P',
    #     ts=abs(ts),
    # )

    ### 2
    return '{sign}{days}_{seconds}'.format(
        sign='0' if delta.days < 0 else '',
        days=to_base(abs(delta.days), 36, string.digits + string.ascii_uppercase),
        seconds=str(delta.seconds).zfill(5),
    )

    # return '{sign}{days}_{seconds}'.format(
    #     sign='n' if delta.days < 0 else 'p',
    #     days=to_base(abs(delta.days), 36),
    #     seconds=str(delta.seconds).zfill(5),
    # )
    # return str(dt.strftime('%Y%m%d')) + '_' + str(delta.seconds).zfill(5)
    # return str(int(dt.timestamp()))
    # return to_base(int(dt.timestamp()), 36)
    # return '{days}_{seconds}'.format(
    #     days=to_base(abs(delta.days), 26, string.ascii_uppercase),
    #     seconds=str(delta.seconds).zfill(5),
    # )
    # print(delta.seconds)
    # return '{days}{seconds}'.format(
    #     days=to_base(abs(delta.days), 26, string.ascii_uppercase),
    #     # hours=to_base(delta.seconds // 3600, 26, string.ascii_uppercase),
    #     seconds=str(delta.seconds).zfill(5),
    #     # seconds=dt.strftime('%H%M'),
    # )
    # return to_base(int(dt.timestamp()), 26, string.ascii_lowercase)
mit
Python
58ab8c5ebafad2109b8d8f19c44adbb11fe18c02
Fix broken or_else implementation
udacity/pygow
pygow/maybe.py
pygow/maybe.py
class Just:
    a = None

    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.a == other.a)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return 'Just(%s)' % self.a

    def is_just(self):
        return True

    def map(self, f):
        return Just(f(self.a))

    def flat_map(self, f):
        return f(self.a)

    def get_or_else(self, x):
        return self.a

    def or_else(self, x):
        return self


class Nothing:
    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_just(self):
        return False

    def __str__(self):
        return 'Nothing()'

    def map(self, f):
        return Nothing()

    def flat_map(self, f):
        return Nothing()

    def get_or_else(self, x):
        return x

    def or_else(self, x):
        return x


def get_maybe_env(name):
    from os import getenv
    value = getenv(name)
    if value is None:
        return Nothing()
    else:
        return Just(value)


def non_empty_string(x):
    if len(x.strip()) is 0:
        return Nothing()
    else:
        return Just(x)


def parse_int(x):
    try:
        return Just(int(x))
    except:
        return Nothing()


def maybe(x):
    if x is None:
        return Nothing()
    else:
        return Just(x)
class Just:
    a = None

    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.a == other.a)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return 'Just(%s)' % self.a

    def is_just(self):
        return True

    def map(self, f):
        return Just(f(self.a))

    def flat_map(self, f):
        return f(self.a)

    def or_else(self, x):
        return self

    def get_or_else(self, x):
        return self.a


class Nothing:
    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_just(self):
        return False

    def __str__(self):
        return 'Nothing()'

    def map(self, f):
        return Nothing()

    def flat_map(self, f):
        return Nothing()

    def or_else(self, x):
        return self

    def get_or_else(self, x):
        return x


def get_maybe_env(name):
    from os import getenv
    value = getenv(name)
    if value is None:
        return Nothing()
    else:
        return Just(value)


def non_empty_string(x):
    if len(x.strip()) is 0:
        return Nothing()
    else:
        return Just(x)


def parse_int(x):
    try:
        return Just(int(x))
    except:
        return Nothing()


def maybe(x):
    if x is None:
        return Nothing()
    else:
        return Just(x)
bsd-3-clause
Python
2aca9f77b6f5b8171ec33906a66cd805f57937a0
Fix mistake with previous commit.
kron4eg/django-localeurl,kron4eg/django-localeurl
localeurl/templatetags/localeurl_tags.py
localeurl/templatetags/localeurl_tags.py
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)

from django import template
from django.template import Node, Token, TemplateSyntaxError
from django.template import resolve_variable, defaulttags
from django.template.defaultfilters import stringfilter
from django.conf import settings
from django.utils import translation
import localeurl
from localeurl.utils import is_locale_independent, strip_locale_prefix, \
        get_language

register = template.Library()


def chlocale(path, locale):
    """
    Changes the path's locale prefix if the path is not locale-independent.
    Otherwise removes locale prefix.
    """
    stripped_path = rmlocale(path)
    if not localeurl.PREFIX_DEFAULT_LOCALE and \
            get_language(locale) == get_language(settings.LANGUAGE_CODE):
        return stripped_path
    if is_locale_independent(stripped_path):
        return stripped_path
    else:
        return '/' + get_language(locale) + stripped_path

chlocale = stringfilter(chlocale)
register.filter('chlocale', chlocale)


def rmlocale(url):
    """Removes the locale prefix from the path."""
    return strip_locale_prefix(url)

rmlocale = stringfilter(rmlocale)
register.filter('rmlocale', rmlocale)


def locale_url(parser, token):
    """
    Renders the url for the view with another locale prefix. The syntax is
    like the 'url' tag, only with a locale before the view.

    Examples:
      {% locale_url "de" cal.views.day day %}
      {% locale_url "nl" cal.views.home %}
      {% locale_url "en-gb" cal.views.month month as month_url %}
    """
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments:"
                " the locale and a view" % bits[0])
    urltoken = Token(token.token_type, bits[0] + ' ' + ' '.join(bits[2:]))
    urlnode = defaulttags.url(parser, urltoken)
    return LocaleURLNode(bits[1], urlnode)


class LocaleURLNode(Node):
    def __init__(self, locale, urlnode):
        self.locale = locale
        self.urlnode = urlnode

    def render(self, context):
        locale = resolve_variable(self.locale, context)
        path = self.urlnode.render(context)
        if self.urlnode.asvar:
            self.urlnode.render(context)
            context[self.urlnode.asvar] = chlocale(context[self.urlnode.asvar],
                    locale)
            return ''
        else:
            return chlocale(path, locale)

register.tag('locale_url', locale_url)
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)

from django import template
from django.template import Node, Token, TemplateSyntaxError
from django.template import resolve_variable, defaulttags
from django.template.defaultfilters import stringfilter
from django.conf import settings
from django.utils import translation
import localeurl
from localeurl.utils import strip_locale_prefix, get_language

register = template.Library()


def chlocale(path, locale):
    """
    Changes the path's locale prefix if the path is not locale-independent.
    Otherwise removes locale prefix.
    """
    if not localeurl.PREFIX_DEFAULT_LOCALE and \
            get_language(locale) == get_language(settings.LANGUAGE_CODE):
        return rmlocale(path)
    if is_locale_independent(rmed):
        return rmlocale(path)
    else:
        return '/' + get_language(locale) + rmlocale(path)

chlocale = stringfilter(chlocale)
register.filter('chlocale', chlocale)


def rmlocale(url):
    """Removes the locale prefix from the path."""
    return strip_locale_prefix(url)

rmlocale = stringfilter(rmlocale)
register.filter('rmlocale', rmlocale)


def locale_url(parser, token):
    """
    Renders the url for the view with another locale prefix. The syntax is
    like the 'url' tag, only with a locale before the view.

    Examples:
      {% locale_url "de" cal.views.day day %}
      {% locale_url "nl" cal.views.home %}
      {% locale_url "en-gb" cal.views.month month as month_url %}
    """
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments:"
                " the locale and a view" % bits[0])
    urltoken = Token(token.token_type, bits[0] + ' ' + ' '.join(bits[2:]))
    urlnode = defaulttags.url(parser, urltoken)
    return LocaleURLNode(bits[1], urlnode)


class LocaleURLNode(Node):
    def __init__(self, locale, urlnode):
        self.locale = locale
        self.urlnode = urlnode

    def render(self, context):
        locale = resolve_variable(self.locale, context)
        path = self.urlnode.render(context)
        if self.urlnode.asvar:
            self.urlnode.render(context)
            context[self.urlnode.asvar] = chlocale(context[self.urlnode.asvar],
                    locale)
            return ''
        else:
            return chlocale(path, locale)

register.tag('locale_url', locale_url)
mit
Python
f18d675f2877e8f9356dc64a96bf8fba364cddd3
Add search field to admin Terms.
unt-libraries/django-controlled-vocabularies,unt-libraries/django-controlled-vocabularies
controlled_vocabularies/admin.py
controlled_vocabularies/admin.py
from django.contrib import admin
from django import forms
from controlled_vocabularies.models import Vocabulary, Term, Property


class PropertyInline(admin.TabularInline):
    model = Property
    fk_name = "term_key"
    extra = 1


class VocabularyAdmin(admin.ModelAdmin):
    """ Vocabulary class that determines how comment appears in admin """
    list_display = ('name', 'label', 'order', 'maintainer', 'created', 'modified')
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('name', 'label', 'order', 'maintainer', 'maintainerEmail', 'definition')
        }),
    )


class TermAdmin(admin.ModelAdmin):
    """ Term class that determines how comment appears in admin """
    list_display = ('id', 'name', 'get_vocab', 'label', 'order',)
    search_fields = ['name', 'label']
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('vocab_list', 'name', 'label', 'order')
        }),
    )
    list_filter = ('vocab_list',)
    inlines = [PropertyInline]


class PropertyAdmin(admin.ModelAdmin):
    """ Property class that determines how comment appears in admin """
    list_display = ('property_name', 'get_vocab', 'get_term', 'label',)
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('term_key', 'property_name', 'label')
        }),
    )


def has_spaces(name):
    """ Make sure there are no spaces """
    if ' ' in name:
        raise forms.ValidationError("Spaces are not allowed.")
    else:
        return name


class VocabularyAdminForm(forms.ModelForm):
    """ Vocabulary class to specify how form data is handled in admin """

    class Meta:
        model = Vocabulary
        fields = '__all__'

    def clean_name(self):
        """ Make sure there are no spaces in the name field """
        return has_spaces(self.cleaned_data["name"])


admin.site.register(Vocabulary, VocabularyAdmin)
admin.site.register(Term, TermAdmin)
admin.site.register(Property, PropertyAdmin)
from django.contrib import admin
from django import forms
from controlled_vocabularies.models import Vocabulary, Term, Property


class PropertyInline(admin.TabularInline):
    model = Property
    fk_name = "term_key"
    extra = 1


class VocabularyAdmin(admin.ModelAdmin):
    """ Vocabulary class that determines how comment appears in admin """
    list_display = ('name', 'label', 'order', 'maintainer', 'created', 'modified')
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('name', 'label', 'order', 'maintainer', 'maintainerEmail', 'definition')
        }),
    )


class TermAdmin(admin.ModelAdmin):
    """ Term class that determines how comment appears in admin """
    list_display = ('id', 'name', 'get_vocab', 'label', 'order',)
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('vocab_list', 'name', 'label', 'order')
        }),
    )
    list_filter = ('vocab_list',)
    inlines = [PropertyInline]


class PropertyAdmin(admin.ModelAdmin):
    """ Property class that determines how comment appears in admin """
    list_display = ('property_name', 'get_vocab', 'get_term', 'label',)
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('term_key', 'property_name', 'label')
        }),
    )


def has_spaces(name):
    """ Make sure there are no spaces """
    if ' ' in name:
        raise forms.ValidationError("Spaces are not allowed.")
    else:
        return name


class VocabularyAdminForm(forms.ModelForm):
    """ Vocabulary class to specify how form data is handled in admin """

    class Meta:
        model = Vocabulary
        fields = '__all__'

    def clean_name(self):
        """ Make sure there are no spaces in the name field """
        return has_spaces(self.cleaned_data["name"])


admin.site.register(Vocabulary, VocabularyAdmin)
admin.site.register(Term, TermAdmin)
admin.site.register(Property, PropertyAdmin)
bsd-3-clause
Python
e60ce628029e3100d6f2a8a8f7260e2ed229e6ac
Add helper method to retrieve review count per user in a skeleton
fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID
django/applications/catmaid/control/review.py
django/applications/catmaid/control/review.py
from collections import defaultdict

from catmaid.models import Review

from django.db import connection


def get_treenodes_to_reviews(treenode_ids=None, skeleton_ids=None,
                             umap=lambda r: r):
    """ Returns a dictionary that contains all reviewed nodes of the
    passed <treenode_ids> and/or <skeleton_ids> lists as keys. The
    reviewer user IDs are kept in a list as values. A function can be
    passed to which is executed for every reviewer_id to change the value
    stored result (e.g. to use user names instead of an ID. It defaults
    to the identity and therefore reviewer IDs.
    """
    # Set up filters
    reviews = Review.objects.all()
    if treenode_ids:
        reviews = reviews.filter(treenode_id__in=treenode_ids)
    if skeleton_ids:
        reviews = reviews.filter(skeleton_id__in=skeleton_ids)
    # Only request treenode ID and reviewer ID
    reviews = reviews.values_list('treenode_id', 'reviewer_id')

    # Build dictionary
    treenode_to_reviews = defaultdict(list)
    for tid, rid in reviews:
        treenode_to_reviews[tid].append(umap(rid))

    return treenode_to_reviews


def get_review_count(skeleton_ids):
    """ Returns a dictionary that maps skelton IDs to dictonaries that map
    user_ids to a review count for this particular skeleton.
    """
    # Count nodes that have been reviewed by each user in each partner skeleton
    cursor = connection.cursor()
    cursor.execute('''
    SELECT skeleton_id, reviewer_id, count(skeleton_id)
    FROM review
    WHERE skeleton_id IN (%s)
    GROUP BY reviewer_id, skeleton_id
    ''' % ",".join(str(skid) for skid in skeleton_ids))

    # Build dictionary
    reviews = defaultdict(lambda: defaultdict(int))
    for row in cursor.fetchall():
        reviews[row[0]][row[1]] = row[2]

    return reviews
from collections import defaultdict

from catmaid.models import Review


def get_treenodes_to_reviews(treenode_ids=None, skeleton_ids=None,
                             umap=lambda r: r):
    """ Returns a dictionary that contains all reviewed nodes of the
    passed <treenode_ids> and/or <skeleton_ids> lists as keys. The
    reviewer user IDs are kept in a list as values. A function can be
    passed to which is executed for every reviewer_id to change the value
    stored result (e.g. to use user names instead of an ID. It defaults
    to the identity and therefore reviewer IDs.
    """
    # Set up filters
    reviews = Review.objects.all()
    if treenode_ids:
        reviews = reviews.filter(treenode_id__in=treenode_ids)
    if skeleton_ids:
        reviews = reviews.filter(skeleton_id__in=skeleton_ids)
    # Only request treenode ID and reviewer ID
    reviews = reviews.values_list('treenode_id', 'reviewer_id')

    # Build dictionary
    treenode_to_reviews = defaultdict(list)
    for tid, rid in reviews:
        treenode_to_reviews[tid].append(umap(rid))

    return treenode_to_reviews
agpl-3.0
Python
a36a7a0eb6560156c5be6f0cc5523c17e79591e4
fix import errors
deepchem/deepchem,peastman/deepchem,peastman/deepchem,deepchem/deepchem
deepchem/models/tests/test_normalizing_flow_pytorch.py
deepchem/models/tests/test_normalizing_flow_pytorch.py
""" Test for Pytorch Normalizing Flow model and its transformations """ import pytest import numpy as np import unittest try: import torch from torch.distributions import MultivariateNormal from deepchem.models.torch_models.layers import Affine has_torch = True except: has_torch = False @unittest.skipIf(not has_torch, 'torch is not installed') @pytest.mark.torch def test_Affine(): """ This test should evaluate if the transformation its being applied correctly. When computing the logarithm of the determinant jacobian matrix the result must be zero for any distribution when performing the first forward and inverse pass (initialized). This is the expected behavior since nothing is being learned yet. input shape: (samples, dim) output shape: (samples, dim) """ dim = 2 samples = 96 data = MultivariateNormal(torch.zeros(dim), torch.eye(dim)) tensor = data.sample(torch.Size((samples, dim))) _, log_det_jacobian = Affine(dim).forward(tensor) _, inverse_log_det_jacobian = Affine(dim).inverse(tensor) # The first pass of the transformation should be 0 log_det_jacobian = log_det_jacobian.detach().numpy() inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy() zeros = np.zeros((samples,)) assert np.array_equal(log_det_jacobian, zeros) assert np.array_equal(inverse_log_det_jacobian, zeros)
""" Test for Pytorch Normalizing Flow model and its transformations """ import pytest import numpy as np import unittest try: import torch from torch.distributions import MultivariateNormal from deepchem.models.torch_models.normalizing_flows_pytorch import Affine has_torch = True except: has_torch = False @unittest.skipIf(not has_torch, 'torch is not installed') @pytest.mark.torch def test_Affine(): """ This test should evaluate if the transformation its being applied correctly. When computing the logarithm of the determinant jacobian matrix the result must be zero for any distribution as input when performing the first forward and inverse pass (initialized). This is the expected behavior because nothing is learned yet. input shape: (samples, dim) output shape: (samples, dim) """ dim = 2 samples = 96 data = MultivariateNormal(torch.zeros(dim), torch.eye(dim)) tensor = data.sample(torch.Size((samples, dim))) _, log_det_jacobian = Affine(dim).forward(tensor) _, inverse_log_det_jacobian = Affine(dim).inverse(tensor) # The first pass of the transformation should be 0 log_det_jacobian = log_det_jacobian.detach().numpy() inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy() zeros = np.zeros((samples,)) assert np.array_equal(log_det_jacobian, zeros) assert np.array_equal(inverse_log_det_jacobian, zeros)
mit
Python
3dc54a1c845cf0b99fd0dfc6fd454659895ba888
Fix import.
liberation/django-elasticsearch,sadnoodles/django-elasticsearch,AlexandreProenca/django-elasticsearch,leotsem/django-elasticsearch,alsur/django-elasticsearch
django_elasticsearch/contrib/restframework/__init__.py
django_elasticsearch/contrib/restframework/__init__.py
from rest_framework import VERSION

from django_elasticsearch.contrib.restframework.base import AutoCompletionMixin

if int(VERSION[0]) < 3:
    from django_elasticsearch.contrib.restframework.restframework2 import IndexableModelMixin
    from django_elasticsearch.contrib.restframework.restframework2 import ElasticsearchFilterBackend
else:
    from django_elasticsearch.contrib.restframework.restframework3 import IndexableModelMixin
    from django_elasticsearch.contrib.restframework.restframework3 import ElasticsearchFilterBackend

__all__ = [ElasticsearchFilterBackend, IndexableModelMixin, AutoCompletionMixin]
from rest_framework import VERSION

from django_elasticsearch.contrib.restframework.restframework import AutoCompletionMixin

if int(VERSION[0]) < 3:
    from django_elasticsearch.contrib.restframework.restframework2 import IndexableModelMixin
    from django_elasticsearch.contrib.restframework.restframework2 import ElasticsearchFilterBackend
else:
    from django_elasticsearch.contrib.restframework.restframework3 import IndexableModelMixin
    from django_elasticsearch.contrib.restframework.restframework3 import ElasticsearchFilterBackend

__all__ = [ElasticsearchFilterBackend, IndexableModelMixin, AutoCompletionMixin]
mit
Python
df553c4e0c536f7deaa180076658ba61e3af66b6
Rework parsers to use subparsers
zxiiro/sym
refresh/cli.py
refresh/cli.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2013 Thanh Ha
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import argparse
import sys


def init(args):
    print('init')


def add(args):
    print('add')


def remove(args):
    print('remove')


def verify(args):
    print('verify')


def setup_parser_args(parser, subparsers):
    """Setup the main arguement parser"""
    setup_parser_init(subparsers)
    setup_parser_add(subparsers)
    setup_parser_remove(subparsers)
    setup_parser_verify(subparsers)


def setup_parser_init(subparsers):
    """Setup the init command parser"""
    parser_init = subparsers.add_parser('init', help='Add dotfile for management')
    parser_init.set_defaults(func = init)


def setup_parser_add(subparsers):
    """Setup the add command parser"""
    parser_add = subparsers.add_parser('add', help='Add dotfile for management')
    parser_add.add_argument('source')
    parser_add.add_argument('destination')
    parser_add.set_defaults(func = add)


def setup_parser_remove(subparsers):
    """Setup the remove command parser"""
    parser_remove = subparsers.add_parser('remove', help='Remove dotfile from management')
    parser_remove.add_argument('symlink')
    parser_remove.set_defaults(func = remove)


def setup_parser_verify(subparsers):
    """Setup the verify command parser"""
    parser_verify = subparsers.add_parser('verify', help='Verify dotfiles')
    parser_verify.set_defaults(func = verify)


def parse_args():
    """Initialize the Argument Parser"""
    parser = argparse.ArgumentParser(description='Refresh, dotfiles management tool')
    subparsers = parser.add_subparsers(help='Command List')
    setup_parser_args(parser, subparsers)
    args = parser.parse_args()
    args.func(args)
    print("parse complete")


def main():
    parse_args()


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)

Copyright (c) 2013 Thanh Ha

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''

import argparse


def setup_parser_args(parser):
    '''Add arguments to parse'''
    parser.add_argument('--add', help='Add dotfile for management', action='store_true')
    parser.add_argument('--remove', help='Remove dotfile from management', action='store_true')
    parser.add_argument('--check', help='Check dotfile link', action='store_true')


def parse_args():
    '''Initialize the Argument Parser'''
    parser = argparse.ArgumentParser(description='Refresh, dotfiles management tool')
    setup_parser_args(parser)
    args = parser.parse_args()


def main():
    parse_args()


if __name__ == '__main__':
    main()
mit
Python
89dac8b14610f08b12db0ab6e00b7432b527fd89
Remove trailing whitespace
jongiddy/balcazapy,jongiddy/balcazapy,jongiddy/balcazapy
python/balcaza/activity/local/text.py
python/balcaza/activity/local/text.py
from balcaza.t2types import *
from balcaza.t2activity import BeanshellCode

ByteArrayToString = BeanshellCode(
'''if ((bytes == void) || (bytes == null)) {
    throw new RuntimeException("The 'bytes' parameter must be specified");
}
if (encoding == void) {
    string = new String(bytes);
} else {
    string = new String(bytes, encoding);
}
''',
    inputs = dict(
        bytes = String,
        encoding = Optional[String]
    ),
    outputs = dict(
        string = String
    ),
    defaultInput = 'bytes',
    name = 'ByteArrayToString'
)
from balcaza.t2types import *
from balcaza.t2activity import BeanshellCode

ByteArrayToString = BeanshellCode(
'''if ((bytes == void) || (bytes == null)) {
    throw new RuntimeException("The 'bytes' parameter must be specified");
}
if (encoding == void) {
    string = new String(bytes);
} else {
    string = new String(bytes, encoding);
}
''',
    inputs = dict(
        bytes = String,
        encoding = Optional[String]
    ),
    outputs = dict(
        string = String
    ),
    defaultInput = 'bytes',
    name = 'ByteArrayToString'
)
lgpl-2.1
Python
b324031ee683005be0307e3b323c4709ce3a01eb
Disable those new requirements because pip requires gcc to install them
LibreTime/libretime,LibreTime/libretime,comiconomenclaturist/libretime,Lapotor/libretime,LibreTime/libretime,comiconomenclaturist/libretime,LibreTime/libretime,Lapotor/libretime,Lapotor/libretime,LibreTime/libretime,Lapotor/libretime,Lapotor/libretime,comiconomenclaturist/libretime,comiconomenclaturist/libretime,Lapotor/libretime,comiconomenclaturist/libretime,LibreTime/libretime,comiconomenclaturist/libretime,comiconomenclaturist/libretime
python_apps/airtime_analyzer/setup.py
python_apps/airtime_analyzer/setup.py
from setuptools import setup
from subprocess import call
import sys

# Allows us to avoid installing the upstart init script when deploying airtime_analyzer
# on Airtime Pro:
if '--no-init-script' in sys.argv:
    data_files = []
    sys.argv.remove('--no-init-script')  # super hax
else:
    data_files = [('/etc/init', ['install/upstart/airtime_analyzer.conf'])]

print data_files

setup(name='airtime_analyzer',
      version='0.1',
      description='Airtime Analyzer Worker and File Importer',
      url='http://github.com/sourcefabric/Airtime',
      author='Albert Santoni',
      author_email='[email protected]',
      license='MIT',
      packages=['airtime_analyzer'],
      scripts=['bin/airtime_analyzer'],
      install_requires=[
          'mutagen',
          'pika',
          'python-magic',
          'nose',
          'coverage',
          'mock',
          'python-daemon',
          'requests',
          # These next 3 are required for requests to support SSL with SNI. Learned this the hard way...
          # What sucks is that GCC is required to pip install these.
          #'ndg-httpsclient',
          #'pyasn1',
          #'pyopenssl'
      ],
      zip_safe=False,
      data_files=data_files)

# Reload the initctl config so that "service start airtime_analyzer" works
if data_files:
    print "Reloading initctl configuration"
    call(['initctl', 'reload-configuration'])
    print "Run \"sudo service airtime_analyzer restart\" now."
    # TODO: Should we start the analyzer here or not?
from setuptools import setup
from subprocess import call
import sys

# Allows us to avoid installing the upstart init script when deploying airtime_analyzer
# on Airtime Pro:
if '--no-init-script' in sys.argv:
    data_files = []
    sys.argv.remove('--no-init-script')  # super hax
else:
    data_files = [('/etc/init', ['install/upstart/airtime_analyzer.conf'])]

print data_files

setup(name='airtime_analyzer',
      version='0.1',
      description='Airtime Analyzer Worker and File Importer',
      url='http://github.com/sourcefabric/Airtime',
      author='Albert Santoni',
      author_email='[email protected]',
      license='MIT',
      packages=['airtime_analyzer'],
      scripts=['bin/airtime_analyzer'],
      install_requires=[
          'mutagen',
          'pika',
          'python-magic',
          'nose',
          'coverage',
          'mock',
          'python-daemon',
          'requests',
          # These next 3 are required for requests to support SSL with SNI. This is extremely important. Learned this the hard way...
          'ndg-httpsclient',
          'pyasn1',
          'pyopenssl'
      ],
      zip_safe=False,
      data_files=data_files)

# Reload the initctl config so that "service start airtime_analyzer" works
if data_files:
    print "Reloading initctl configuration"
    call(['initctl', 'reload-configuration'])
    print "Run \"sudo service airtime_analyzer restart\" now."
    # TODO: Should we start the analyzer here or not?
agpl-3.0
Python
2c1811fad85d6bacf8d3fcaf1299994bfc5efb78
Support serializer path instead of "self" keyword
Hipo/drf-extra-fields,Hipo/drf-extra-fields
drf_extra_fields/relations.py
drf_extra_fields/relations.py
from collections import OrderedDict

from django.utils.module_loading import import_string
from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField


class PresentableRelatedFieldMixin(object):
    def __init__(self, **kwargs):
        self.presentation_serializer = kwargs.pop("presentation_serializer", None)
        self.presentation_serializer_kwargs = kwargs.pop(
            "presentation_serializer_kwargs", dict()
        )
        assert self.presentation_serializer is not None, (
            self.__class__.__name__
            + " must provide a `presentation_serializer` argument"
        )
        super(PresentableRelatedFieldMixin, self).__init__(**kwargs)

    def use_pk_only_optimization(self):
        """
        Instead of sending pk only object, return full object. The object already retrieved from db by drf.
        This doesn't cause an extra query.
        It even might save from making an extra query on serializer.to_representation method.
        Related source codes:
        - https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L41
        - https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L132
        """
        return False

    def get_choices(self, cutoff=None):
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}

        if cutoff is not None:
            queryset = queryset[:cutoff]

        return OrderedDict([(item.pk, self.display_value(item)) for item in queryset])

    def to_representation(self, data):
        if isinstance(self.presentation_serializer, str):
            self.presentation_serializer = import_string(self.presentation_serializer)

        return self.presentation_serializer(
            data, context=self.context, **self.presentation_serializer_kwargs
        ).data


class PresentablePrimaryKeyRelatedField(
    PresentableRelatedFieldMixin, PrimaryKeyRelatedField
):
    """
    Override PrimaryKeyRelatedField to represent serializer data instead of a pk field of the object.
    """

    pass


class PresentableSlugRelatedField(PresentableRelatedFieldMixin, SlugRelatedField):
    """
    Override SlugRelatedField to represent serializer data instead of a slug field of the object.
    """

    pass
from collections import OrderedDict

from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField


class PresentableRelatedFieldMixin(object):
    def __init__(self, **kwargs):
        self.presentation_serializer = kwargs.pop("presentation_serializer", None)
        self.presentation_serializer_kwargs = kwargs.pop(
            "presentation_serializer_kwargs", dict()
        )
        assert self.presentation_serializer is not None, (
            self.__class__.__name__
            + " must provide a `presentation_serializer` argument"
        )
        super(PresentableRelatedFieldMixin, self).__init__(**kwargs)

    def use_pk_only_optimization(self):
        """
        Instead of sending pk only object, return full object. The object already retrieved from db by drf.
        This doesn't cause an extra query.
        It even might save from making an extra query on serializer.to_representation method.
        Related source codes:
        - https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L41
        - https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L132
        """
        return False

    def get_choices(self, cutoff=None):
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}

        if cutoff is not None:
            queryset = queryset[:cutoff]

        return OrderedDict([(item.pk, self.display_value(item)) for item in queryset])

    def to_representation(self, data):
        return self.presentation_serializer(
            data, context=self.context, **self.presentation_serializer_kwargs
        ).data

    def bind(self, field_name, parent):
        if self.presentation_serializer == "self":
            self.presentation_serializer = parent.__class__

        super(PresentableRelatedFieldMixin, self).bind(field_name, parent)


class PresentablePrimaryKeyRelatedField(
    PresentableRelatedFieldMixin, PrimaryKeyRelatedField
):
    """
    Override PrimaryKeyRelatedField to represent serializer data instead of a pk field of the object.
    """

    pass


class PresentableSlugRelatedField(PresentableRelatedFieldMixin, SlugRelatedField):
    """
    Override SlugRelatedField to represent serializer data instead of a slug field of the object.
    """

    pass
apache-2.0
Python
461b7c5bf5541fc3a56039d6756262d6b99e8428
Add null count.
jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools,jhanley634/testing-tools
problem/column_explorer/column_explorer.py
problem/column_explorer/column_explorer.py
#! /usr/bin/env python3
# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Systematically finds aggregate stats for a table's columns."""

import sqlalchemy as sa
import uszipcode


def get_zipcode_session():
    return uszipcode.SearchEngine().ses


def get_zipcode_cs():
    """Returns a JDBC connect string for the zipcode database."""
    # typical value: sqlite:////Users/foo/.uszipcode/simple_db.sqlite
    return get_zipcode_session().connection().engine.url


class ColumnExplorer:

    def __init__(self, cs_or_engine):
        self.engine = sa.create_engine(cs_or_engine)

    def report(self, table_name, round_digits=3):
        meta = sa.MetaData(bind=self.engine)
        tbl = sa.Table(table_name, meta, autoload=True)
        cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone()
        print(f'# {table_name}\n{cnt} rows, {len(tbl.c)} columns\n')
        for column in self._get_col_names(tbl):
            print('\n## ' + column)
            for agg in ['min', 'avg', 'max', 'count(distinct ', 'nulls']:
                if '(' not in agg:
                    agg += '('
                select = f'select {agg}{column}) from {table_name}'
                if agg.startswith('nulls'):
                    select = f'select count(*) from {table_name} where {column} is null'
                stat, = self.engine.execute(select).fetchone()
                if agg.startswith('avg'):
                    stat = round(stat, round_digits)
                if agg.startswith('nulls'):
                    pct = round(100 * stat / cnt, round_digits)
                    stat = f'{stat} ({pct} %)'
                print('-', agg.replace('(', ' '), stat)
        print(f'\n{cnt} rows in {table_name}')

    def _get_col_names(self, table):
        for col in table.columns:
            yield str(col).split('.')[1]


if __name__ == '__main__':
    ColumnExplorer(get_zipcode_cs()).report('simple_zipcode')
#! /usr/bin/env python3
# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Systematically finds aggregate stats for a table's columns."""

import sqlalchemy as sa
import uszipcode


def get_zipcode_session():
    return uszipcode.SearchEngine().ses


def get_zipcode_cs():
    """Returns a JDBC connect string for the zipcode database."""
    # typical value: sqlite:////Users/foo/.uszipcode/simple_db.sqlite
    return get_zipcode_session().connection().engine.url


class ColumnExplorer:

    def __init__(self, cs_or_engine):
        self.engine = sa.create_engine(cs_or_engine)

    def report(self, table_name):
        for column in self._get_col_names(table_name):
            print('\n## ' + column)
            for agg in ['min', 'avg', 'max', 'count(distinct ']:
                if '(' not in agg:
                    agg += '('
                select = f'select {agg}{column}) from {table_name}'
                stat, = self.engine.execute(select).fetchone()
                print('-', agg.replace('(', ' '), stat)
        cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone()
        print(f'\n{cnt} rows in {table_name}')

    def _get_col_names(self, table_name):
        meta = sa.MetaData(bind=self.engine)
        tbl = sa.Table(table_name, meta, autoload=True)
        return map(str, tbl.columns)


if __name__ == '__main__':
    ColumnExplorer(get_zipcode_cs()).report('simple_zipcode')
mit
Python
86d088835a88c00af69090b6b7f1bae42ff5c09a
remove monetdb typo
bootandy/sqlalchemy,Cito/sqlalchemy,epa/sqlalchemy,olemis/sqlalchemy,monetate/sqlalchemy,dstufft/sqlalchemy,halfcrazy/sqlalchemy,wfxiang08/sqlalchemy,276361270/sqlalchemy,davidjb/sqlalchemy,alex/sqlalchemy,wujuguang/sqlalchemy,zzzeek/sqlalchemy,Akrog/sqlalchemy,alex/sqlalchemy,Cito/sqlalchemy,davidfraser/sqlalchemy,WinterNis/sqlalchemy,inspirehep/sqlalchemy,epa/sqlalchemy,pdufour/sqlalchemy,j5int/sqlalchemy,alex/sqlalchemy,wfxiang08/sqlalchemy,EvaSDK/sqlalchemy,robin900/sqlalchemy,bootandy/sqlalchemy,bdupharm/sqlalchemy,hsum/sqlalchemy,sqlalchemy/sqlalchemy,davidjb/sqlalchemy,bdupharm/sqlalchemy,sandan/sqlalchemy,hsum/sqlalchemy,wujuguang/sqlalchemy,276361270/sqlalchemy,itkovian/sqlalchemy,j5int/sqlalchemy,elelianghh/sqlalchemy,inspirehep/sqlalchemy,ThiefMaster/sqlalchemy,davidfraser/sqlalchemy,Akrog/sqlalchemy,EvaSDK/sqlalchemy,olemis/sqlalchemy,sandan/sqlalchemy,brianv0/sqlalchemy,graingert/sqlalchemy,elelianghh/sqlalchemy,brianv0/sqlalchemy,graingert/sqlalchemy,halfcrazy/sqlalchemy,dstufft/sqlalchemy,itkovian/sqlalchemy,ThiefMaster/sqlalchemy,robin900/sqlalchemy,monetate/sqlalchemy,WinterNis/sqlalchemy,pdufour/sqlalchemy,Cito/sqlalchemy
lib/sqlalchemy/databases/__init__.py
lib/sqlalchemy/databases/__init__.py
# __init__.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

__all__ = [
    'sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird', 'sybase',
    'access', 'maxdb'
    ]
# __init__.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

__all__ = [
    'sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird', 'sybase',
    'access', 'maxdb', 'monetdb'
    ]
mit
Python
a4f475245c3af8470337fe0c25b136e58189a607
Update griddy to use CoordinatorEntity (#39392)
sander76/home-assistant,toddeye/home-assistant,FreekingDean/home-assistant,jawilson/home-assistant,partofthething/home-assistant,rohitranjan1991/home-assistant,tchellomello/home-assistant,turbokongen/home-assistant,sdague/home-assistant,rohitranjan1991/home-assistant,Danielhiversen/home-assistant,turbokongen/home-assistant,sander76/home-assistant,home-assistant/home-assistant,tboyce021/home-assistant,w1ll1am23/home-assistant,toddeye/home-assistant,balloob/home-assistant,soldag/home-assistant,tboyce1/home-assistant,mezz64/home-assistant,adrienbrault/home-assistant,GenericStudent/home-assistant,tboyce1/home-assistant,nkgilley/home-assistant,tboyce1/home-assistant,kennedyshead/home-assistant,GenericStudent/home-assistant,home-assistant/home-assistant,tboyce021/home-assistant,lukas-hetzenecker/home-assistant,FreekingDean/home-assistant,sdague/home-assistant,jawilson/home-assistant,w1ll1am23/home-assistant,aronsky/home-assistant,balloob/home-assistant,kennedyshead/home-assistant,aronsky/home-assistant,balloob/home-assistant,adrienbrault/home-assistant,Danielhiversen/home-assistant,lukas-hetzenecker/home-assistant,soldag/home-assistant,mezz64/home-assistant,tchellomello/home-assistant,partofthething/home-assistant,tboyce1/home-assistant,rohitranjan1991/home-assistant,nkgilley/home-assistant
homeassistant/components/griddy/sensor.py
homeassistant/components/griddy/sensor.py
"""Support for August sensors.""" import logging from homeassistant.const import ENERGY_KILO_WATT_HOUR from homeassistant.helpers.update_coordinator import CoordinatorEntity from .const import CONF_LOADZONE, DOMAIN _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the August sensors.""" coordinator = hass.data[DOMAIN][config_entry.entry_id] settlement_point = config_entry.data[CONF_LOADZONE] async_add_entities([GriddyPriceSensor(settlement_point, coordinator)], True) class GriddyPriceSensor(CoordinatorEntity): """Representation of an August sensor.""" def __init__(self, settlement_point, coordinator): """Initialize the sensor.""" super().__init__(coordinator) self._settlement_point = settlement_point @property def unit_of_measurement(self): """Return the unit of measurement.""" return f"¢/{ENERGY_KILO_WATT_HOUR}" @property def name(self): """Device Name.""" return f"{self._settlement_point} Price Now" @property def icon(self): """Device Ice.""" return "mdi:currency-usd" @property def unique_id(self): """Device Uniqueid.""" return f"{self._settlement_point}_price_now" @property def state(self): """Get the current price.""" return round(float(self.coordinator.data.now.price_cents_kwh), 4)
"""Support for August sensors.""" import logging from homeassistant.const import ENERGY_KILO_WATT_HOUR from homeassistant.helpers.entity import Entity from .const import CONF_LOADZONE, DOMAIN _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the August sensors.""" coordinator = hass.data[DOMAIN][config_entry.entry_id] settlement_point = config_entry.data[CONF_LOADZONE] async_add_entities([GriddyPriceSensor(settlement_point, coordinator)], True) class GriddyPriceSensor(Entity): """Representation of an August sensor.""" def __init__(self, settlement_point, coordinator): """Initialize the sensor.""" self._coordinator = coordinator self._settlement_point = settlement_point @property def unit_of_measurement(self): """Return the unit of measurement.""" return f"¢/{ENERGY_KILO_WATT_HOUR}" @property def name(self): """Device Name.""" return f"{self._settlement_point} Price Now" @property def icon(self): """Device Ice.""" return "mdi:currency-usd" @property def unique_id(self): """Device Uniqueid.""" return f"{self._settlement_point}_price_now" @property def available(self): """Return True if entity is available.""" return self._coordinator.last_update_success @property def state(self): """Get the current price.""" return round(float(self._coordinator.data.now.price_cents_kwh), 4) @property def should_poll(self): """Return False, updates are controlled via coordinator.""" return False async def async_update(self): """Update the entity. Only used by the generic entity update service. """ await self._coordinator.async_request_refresh() async def async_added_to_hass(self): """Subscribe to updates.""" self.async_on_remove( self._coordinator.async_add_listener(self.async_write_ha_state) )
apache-2.0
Python
331f5b0a951e13f816e752609ac348df272e1b1e
Update conf_template.py
LinkingDataIO/RO2SHARE
docs/conf_template.py
docs/conf_template.py
# Statement for enabling the development environment
DEBUG = True

# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))

# Define the database - we are working with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}

# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2

# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True

# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"

# Secret key for signing cookies
SECRET_KEY = "secret"

ORCID_CLIENT_ID = ""
ORCID_SECRET = ""
ORCID_API_URL = "https://orcid.org/oauth/token"
ORCID_REDIRECT_URL = "http://localhost:4200/login"

GITHUB_CLIENT_ID = ""
GITHUB_SECRET = ""
GITHUB_API_URL = "https://github.com/login/oauth/access_token"
GITHUB_USER_API_URL = "https://api.github.com/user"

SHARE_API_URL = "https://share.osf.io/api/v2/search/creativeworks/_search"

SLIDESHARE_API_URL = "https://www.slideshare.net/api/2/get_slideshows_by_user"
SLIDESHARE_PARAMS = "?api_key={api_key}&ts={ts}&hash={hash}&username_for={username}"
SLIDESHARE_API_KEY = ""
SLIDESHARE_SECRET = ""

OPENAIRE_PUBLICATION_API_URL = "http://api.openaire.eu/search/publications?author={author}"
OPENAIRE_DATASET_API_URL = "http://api.openaire.eu/search/datasets?author={author}"

SPARQL_QUERY_ENDPOINT = "http://localhost:3030/ro2share/sparql"
SPARQL_UPLOAD_ENDPOINT = "http://localhost:3030/ro2share/update"

BASE_URI = 'http://ro2share.org/'

TMP_DIR = '/tmp/'
# Statement for enabling the development environment
DEBUG = True

# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))

# Define the database - we are working with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}

# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2

# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True

# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"

# Secret key for signing cookies
SECRET_KEY = "secret"

ORCID_CLIENT_ID = ""
ORCID_SECRET = ""
ORCID_API_URL = "https://orcid.org/oauth/token"
ORCID_REDIRECT_URL = "http://localhost:4200/login"

GITHUB_CLIENT_ID = ""
GITHUB_SECRET = ""
GITHUB_API_URL = "https://github.com/login/oauth/access_token"
GITHUB_USER_API_URL = "https://api.github.com/user"

SHARE_API_URL = "https://share.osf.io/api/v2/search/creativeworks/_search"

SLIDESHARE_API_URL = "https://www.slideshare.net/api/2/get_slideshows_by_user"
SLIDESHARE_PARAMS = "?api_key={api_key}&ts={ts}&hash={hash}&username_for={username}"
SLIDESHARE_API_KEY = ""
SLIDESHARE_SECRET = ""

OPENAIRE_PUBLICATION_API_URL = "http://api.openaire.eu/search/publications?author={author}"
OPENAIRE_DATASET_API_URL = "http://api.openaire.eu/search/datasets?author={author}"

SPARQL_QUERY_ENDPOINT = "http://localhost:3030/ro2share/sparql"
SPARQL_UPLOAD_ENDPOINT = "http://localhost:3030/ro2share/update"

BASE_URI = 'http://ro2share.org/'

TMP_DIR = 'tmp/'
mit
Python
30db4b3ae377669b3b598c9d4d22b5fbff2082ab
Fix typo on model
bcgov/gwells,bcgov/gwells,bcgov/gwells,bcgov/gwells
app/backend/aquifers/serializers.py
app/backend/aquifers/serializers.py
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from rest_framework import serializers from aquifers.models import Aquifer class AquiferSerializer(serializers.ModelSerializer): """Serialize a aquifer list""" demand_description = serializers.SlugRelatedField(source='demand', read_only=True, slug_field='description') material_description = serializers.SlugRelatedField(source='material', read_only=True, slug_field='description') productivity_description = serializers.SlugRelatedField(source='productivity', read_only=True, slug_field='description') subtype_description = serializers.SlugRelatedField(source='subtype', read_only=True, slug_field='description') vulnerability_description = serializers.SlugRelatedField(source='vulnerability', read_only=True, slug_field='description') quality_concern_description = serializers.SlugRelatedField(source='quality_concern', read_only=True, slug_field='description') class Meta: model = Aquifer fields = ( 'aquifer_id', 'aquifer_name', 'area', 'demand_description', 'demand', 'litho_stratographic_unit', 'location_description', 'mapping_year', 'material_description', 'material', 'productivity_description', 'productivity', 'quality_concern_description', 'quality_concern', 'subtype_description', 'subtype', 'vulnerability_description', 'vulnerability' )
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from rest_framework import serializers from aquifers.models import Aquifer class AquiferSerializer(serializers.ModelSerializer): """Serialize a aquifer list""" demand_description = serializers.SlugRelatedField(source='demand', read_only=True, slug_field='description') material_description = serializers.SlugRelatedField(source='material', read_only=True, slug_field='description') productivity_description = serializers.SlugRelatedField(source='productivity', read_only=True, slug_field='description') subtype_description = serializers.SlugRelatedField(source='subtype', read_only=True, slug_field='description') vulnerability_description = serializers.SlugRelatedField(source='vulnerability', read_only=True, slug_field='description') quality_concern_description = serializers.SlugRelatedField(source='quality_concert', read_only=True, slug_field='description') class Meta: model = Aquifer fields = ( 'aquifer_id', 'aquifer_name', 'area', 'demand_description', 'demand', 'litho_stratographic_unit', 'location_description', 'mapping_year', 'material_description', 'material', 'productivity_description', 'productivity', 'quality_concern_description', 'quality_concern', 'subtype_description', 'subtype', 'vulnerability_description', 'vulnerability' )
apache-2.0
Python
8995d7314bddcf4418a08cb39b2fabbc8704706e
Use conservative defaults for local facebook settings.
Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server
pykeg/src/pykeg/contrib/facebook/models.py
pykeg/src/pykeg/contrib/facebook/models.py
import datetime from django.db import models from django.db.models.signals import post_save from socialregistration import models as sr_models PRIVACY_CHOICES = ( ('EVERYONE', 'Everyone'), ('ALL_FRIENDS', 'Friends'), ('FRIENDS_OF_FRIENDS', 'Friends of Friends'), ('NETWORK_FRIENDS', 'Networks and Friends'), #('CUSTOM', 'Custom permissions'), ) class FacebookSession(models.Model): """Stores the session id for a user.""" profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='session') session_id = models.CharField(max_length=255, blank=False, null=False) updated = models.DateTimeField(default=datetime.datetime.now) @classmethod def get_session(cls, request): if not hasattr(request, 'facebook'): raise ValueError, "no facebook" return None fb = request.facebook if not fb.uid: raise ValueError, "no uid" return None profile = sr_models.FacebookProfile.objects.get(uid=fb.uid) if not profile: raise ValueError, "no profile" return None session, new = FacebookSession.objects.get_or_create(profile=profile) if new or session.session_id != fb.session_key: session.session_id = fb.session_key session.save() def add_permission(self, perm): qs = self.profile.permission_set.filter(permission=perm) if not qs.count(): perm = FacebookPermission(profile=self.profile, permission=perm) perm.save() def rm_permission(self, perm): qs = self.profile.permission_set.filter(permission=perm) if qs.count(): qs.delete() def profile_post_save(sender, instance, **kwargs): """Create default settings on new profile.""" settings, new = FacebookSettings.objects.get_or_create( profile=instance) post_save.connect(profile_post_save, sender=sr_models.FacebookProfile) class FacebookPermission(models.Model): """Records a granted permission.""" profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='permission_set') permission = models.CharField(max_length=255, blank=False, null=False, unique=True) class FacebookSettings(models.Model): profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='settings') # stream.publish stuff # http://wiki.developers.facebook.com/index.php/Stream.publish publish_events = models.BooleanField(default=False, help_text='Post each drink to your wall.') include_link = models.BooleanField(default=False, help_text='Add a link to this kegbot when publishing to wall.') publish_status = models.BooleanField(default=False, help_text='Update status on start of a new drinking session.') privacy = models.CharField(max_length=64, choices=PRIVACY_CHOICES, default='ALL_FRIENDS', help_text='Privacy setting for drink posts.')
import datetime from django.db import models from django.db.models.signals import post_save from socialregistration import models as sr_models PRIVACY_CHOICES = ( ('EVERYONE', 'Everyone'), ('ALL_FRIENDS', 'Friends'), ('FRIENDS_OF_FRIENDS', 'Friends of Friends'), ('NETWORK_FRIENDS', 'Networks and Friends'), #('CUSTOM', 'Custom permissions'), ) class FacebookSession(models.Model): """Stores the session id for a user.""" profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='session') session_id = models.CharField(max_length=255, blank=False, null=False) updated = models.DateTimeField(default=datetime.datetime.now) @classmethod def get_session(cls, request): if not hasattr(request, 'facebook'): raise ValueError, "no facebook" return None fb = request.facebook if not fb.uid: raise ValueError, "no uid" return None profile = sr_models.FacebookProfile.objects.get(uid=fb.uid) if not profile: raise ValueError, "no profile" return None session, new = FacebookSession.objects.get_or_create(profile=profile) if new or session.session_id != fb.session_key: session.session_id = fb.session_key session.save() def add_permission(self, perm): qs = self.profile.permission_set.filter(permission=perm) if not qs.count(): perm = FacebookPermission(profile=self.profile, permission=perm) perm.save() def rm_permission(self, perm): qs = self.profile.permission_set.filter(permission=perm) if qs.count(): qs.delete() def profile_post_save(sender, instance, **kwargs): """Create default settings on new profile.""" settings, new = FacebookSettings.objects.get_or_create( profile=instance) post_save.connect(profile_post_save, sender=sr_models.FacebookProfile) class FacebookPermission(models.Model): """Records a granted permission.""" profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='permission_set') permission = models.CharField(max_length=255, blank=False, null=False, unique=True) class FacebookSettings(models.Model): profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='settings') # stream.publish stuff # http://wiki.developers.facebook.com/index.php/Stream.publish publish_events = models.BooleanField(default=True, help_text='Post each drink to your wall.') include_link = models.BooleanField(default=True, help_text='Add a link to this kegbot when publishing to wall.') publish_status = models.BooleanField(default=False, help_text='Update status on start of a new drinking session.') privacy = models.CharField(max_length=64, choices=PRIVACY_CHOICES, default='ALL_FRIENDS', help_text='Privacy setting for drink posts.')
mit
Python
eff9a7fa2c25739926a8c583c51f30fee66185c9
return plugin name changed at loading
quinoescobar/keystoneauth-oidc-refreshtoken
keystoneauth_oidc_refreshtoken/loading.py
keystoneauth_oidc_refreshtoken/loading.py
# coding=utf-8 # Copyright 2017 JOSÉ JOAQUÍN ESCOBAR GÓMEZ # File: loading.py # Description: # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading from keystoneauth1.loading._plugins.identity import v3 from keystoneauth_oidc_refreshtoken import plugin class OpenIDConnectRefreshToken(v3._OpenIDConnectBase): @property def plugin_class(self): return plugin.OidcRefreshToken def get_options(self): options = super(OpenIDConnectRefreshToken, self).get_options() options.extend([ loading.Opt('refresh_token', required=True, help='OAuth 2.0 Refresh Token') ]) return options
# coding=utf-8 # Copyright 2017 JOSÉ JOAQUÍN ESCOBAR GÓMEZ # File: loading.py # Description: # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading from keystoneauth1.loading._plugins.identity import v3 from keystoneauth_oidc_refreshtoken import plugin class OpenIDConnectRefreshToken(v3._OpenIDConnectBase): @property def plugin_class(self): return plugin.v3oidcrefreshtoken def get_options(self): options = super(OpenIDConnectRefreshToken, self).get_options() options.extend([ loading.Opt('refresh_token', required=True, help='OAuth 2.0 Refresh Token') ]) return options
apache-2.0
Python
b978d2a1f2f9cc9942971a6e252ccd1209a9269b
remove message (#8163)
williamFalcon/pytorch-lightning,williamFalcon/pytorch-lightning
pytorch_lightning/metrics/__init__.py
pytorch_lightning/metrics/__init__.py
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pytorch_lightning.metrics.classification import ( # noqa: F401 Accuracy, AUC, AUROC, AveragePrecision, ConfusionMatrix, F1, FBeta, HammingDistance, IoU, Precision, PrecisionRecallCurve, Recall, ROC, StatScores, ) from pytorch_lightning.metrics.metric import Metric, MetricCollection # noqa: F401 from pytorch_lightning.metrics.regression import ( # noqa: F401 ExplainedVariance, MeanAbsoluteError, MeanSquaredError, MeanSquaredLogError, PSNR, R2Score, SSIM, )
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pytorch_lightning.metrics.classification import ( # noqa: F401 Accuracy, AUC, AUROC, AveragePrecision, ConfusionMatrix, F1, FBeta, HammingDistance, IoU, Precision, PrecisionRecallCurve, Recall, ROC, StatScores, ) from pytorch_lightning.metrics.metric import Metric, MetricCollection # noqa: F401 from pytorch_lightning.metrics.regression import ( # noqa: F401 ExplainedVariance, MeanAbsoluteError, MeanSquaredError, MeanSquaredLogError, PSNR, R2Score, SSIM, ) from pytorch_lightning.utilities import rank_zero_deprecation rank_zero_deprecation( "`pytorch_lightning.metrics.*` module has been renamed to `torchmetrics.*` and split off to its own package" " (https://github.com/PyTorchLightning/metrics) since v1.3 and will be removed in v1.5" )
apache-2.0
Python
d6b9cc4acb4800aa63cc91957c05c75312a081e5
update language_by_size from trunk r9110, add new sq-site
azatoth/pywikipedia
pywikibot/families/wikinews_family.py
pywikibot/families/wikinews_family.py
# -*- coding: utf-8 -*-
from pywikibot import family

__version__ = '$Id$'

# The Wikimedia family that is known as Wikinews

class Family(family.Family):
    def __init__(self):
        family.Family.__init__(self)
        self.name = 'wikinews'

        self.languages_by_size = [
            'sr', 'en', 'pl', 'de', 'fr', 'it', 'es', 'pt', 'zh', 'ja', 'sv',
            'ru', 'ta', 'fi', 'cs', 'he', 'ro', 'bg', 'ar', 'hu', 'sd', 'tr',
            'uk', 'fa', 'ca', 'no', 'sq', 'bs', 'th', 'ko', 'eo',
        ]

        for lang in self.languages_by_size:
            self.langs[lang] = '%s.wikinews.org' % lang

        self.obsolete = {
            'jp': 'ja',
            'nb': 'no',
            'nl': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325
            'zh-tw': 'zh',
            'zh-cn': 'zh'
        }

        # Which languages have a special order for putting interlanguage links,
        # and what order is it? If a language is not in interwiki_putfirst,
        # alphabetical order on language code is used. For languages that are in
        # interwiki_putfirst, interwiki_putfirst is checked first, and
        # languages are put in the order given there. All other languages are put
        # after those, in code-alphabetical order.
        self.interwiki_putfirst = {
            'en': self.alphabetic,
            'fi': self.alphabetic,
            'fr': self.alphabetic,
            'he': ['en'],
            'hu': ['en'],
            'pl': self.alphabetic,
        }

        # Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = ['cs', 'hu',]

        # CentralAuth cross available projects.
        self.cross_projects = [
            'wikipedia', 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource',
            'wikiversity', 'meta', 'mediawiki', 'test', 'incubator', 'commons',
            'species'
        ]

    def code2encoding(self, code):
        return 'utf-8'

    def version(self, code):
        return '1.17wmf1'

    def shared_image_repository(self, code):
        return ('commons', 'commons')
# -*- coding: utf-8 -*-
from pywikibot import family

__version__ = '$Id$'

# The Wikimedia family that is known as Wikinews

class Family(family.Family):
    def __init__(self):
        family.Family.__init__(self)
        self.name = 'wikinews'

        self.languages_by_size = [
            'sr', 'en', 'pl', 'de', 'fr', 'it', 'es', 'pt', 'zh', 'ja', 'sv',
            'ru', 'ta', 'fi', 'cs', 'he', 'ro', 'bg', 'ar', 'hu', 'sd', 'tr',
            'uk', 'ca', 'fa', 'no', 'bs', 'th', 'ko', 'eo',
        ]

        for lang in self.languages_by_size:
            self.langs[lang] = '%s.wikinews.org' % lang

        self.obsolete = {
            'jp': 'ja',
            'nb': 'no',
            'nl': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325
            'zh-tw': 'zh',
            'zh-cn': 'zh'
        }

        # Which languages have a special order for putting interlanguage links,
        # and what order is it? If a language is not in interwiki_putfirst,
        # alphabetical order on language code is used. For languages that are in
        # interwiki_putfirst, interwiki_putfirst is checked first, and
        # languages are put in the order given there. All other languages are put
        # after those, in code-alphabetical order.
        self.interwiki_putfirst = {
            'en': self.alphabetic,
            'fi': self.alphabetic,
            'fr': self.alphabetic,
            'he': ['en'],
            'hu': ['en'],
            'pl': self.alphabetic,
        }

        # Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = ['cs', 'hu',]

        # CentralAuth cross available projects.
        self.cross_projects = [
            'wikipedia', 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource',
            'wikiversity', 'meta', 'mediawiki', 'test', 'incubator', 'commons',
            'species'
        ]

    def code2encoding(self, code):
        return 'utf-8'

    def version(self, code):
        return '1.17wmf1'

    def shared_image_repository(self, code):
        return ('commons', 'commons')
mit
Python
06a851590f32acad0bc1e5b0d87cc4b1148b644c
Add unique index to patient_numbers
renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar
radar/radar/models/patient_numbers.py
radar/radar/models/patient_numbers.py
from sqlalchemy import Column, Integer, ForeignKey, String, Index from sqlalchemy.orm import relationship from radar.database import db from radar.models import MetaModelMixin from radar.models.common import uuid_pk_column, patient_id_column, patient_relationship class PatientNumber(db.Model, MetaModelMixin): __tablename__ = 'patient_numbers' id = uuid_pk_column() patient_id = patient_id_column() patient = patient_relationship('patient_numbers') data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False) data_source = relationship('DataSource') organisation_id = Column(Integer, ForeignKey('organisations.id'), nullable=False) organisation = relationship('Organisation') number = Column(String, nullable=False) # Data source, organisation and number must be unique Index('patient_numbers_data_source_id_organisation_id_number_idx', PatientNumber.data_source_id, PatientNumber.organisation_id, PatientNumber.number, unique=True) Index('patient_numbers_patient_id_idx', PatientNumber.patient_id) Index('patient_numbers_organisation_id_idx', PatientNumber.organisation_id)
from sqlalchemy import Column, Integer, ForeignKey, String, Index from sqlalchemy.orm import relationship from radar.database import db from radar.models import MetaModelMixin from radar.models.common import uuid_pk_column, patient_id_column, patient_relationship class PatientNumber(db.Model, MetaModelMixin): __tablename__ = 'patient_numbers' id = uuid_pk_column() patient_id = patient_id_column() patient = patient_relationship('patient_numbers') data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False) data_source = relationship('DataSource') organisation_id = Column(Integer, ForeignKey('organisations.id'), nullable=False) organisation = relationship('Organisation') number = Column(String, nullable=False) # TODO add unique index on data_source_id, organisation_id, number Index('patient_numbers_patient_id_idx', PatientNumber.patient_id) Index('patient_numbers_organisation_id_idx', PatientNumber.organisation_id)
agpl-3.0
Python
ab2d635f6f52c6cbc6c59d3fa887176852e186ff
Move Ko-Fi notifications to private channels.
Eylesis/Botfriend
KofiFriend_Brain.py
KofiFriend_Brain.py
import traceback
import json
import util_functions
from discord.ext import commands
import discord
import sys
import re
import os
import asyncio
from aiohttp import web
import datetime

botToken = os.environ.get('botToken')


def run_app(app, *, host='0.0.0.0', port=None, shutdown_timeout=60.0,
            ssl_context=None, print=print, backlog=128):
    """Run an app"""
    if port is None:
        if not ssl_context:
            port = 8080
        else:
            port = 8443

    loop = app.loop

    handler = app.make_handler()
    server = loop.create_server(handler, host, port, ssl=ssl_context,
                                backlog=backlog)
    srv, startup_res = loop.run_until_complete(asyncio.gather(server,
                                                              app.startup(),
                                                              loop=loop))

    scheme = 'https' if ssl_context else 'http'
    print("======== Running on {scheme}://{host}:{port}/ ========\n"
          "(Press CTRL+C to quit)".format(
              scheme=scheme, host=host, port=port))


async def tba_handler(request):
    data = await request.post()
    data = json.loads(data['data'])
    print("Accepted request:\n{}".format(data))
    print("{}".format(data))
    embed = discord.Embed(
        title="Ko-Fi Received!",
        url="https://ko-fi.com/eylesis",
        description="{} has sent ${}.".format(data['from_name'], data['amount']))
    embed.set_footer(text="Ko-Fi Notification")
    if data['message'] == "":
        data['message'] = "No Message."
    embed.add_field(name="__Message__", value=data['message'])
    channelids = {'470455397912674305'}
    for channelid in channelids:
        await bot.send_message(bot.get_channel(channelid), embed=embed)
    return web.Response()

bot = commands.Bot(command_prefix='*')
loop = bot.loop
app = web.Application(loop=loop)
app.router.add_post('/endpoint', tba_handler)

if __name__ == "__main__":
    run_app(app, host=os.environ.get('HOST'), port=os.environ.get('PORT'))
    bot.run(botToken)
import traceback
import json
import util_functions
from discord.ext import commands
import discord
import sys
import re
import os
import asyncio
from aiohttp import web
import datetime

botToken = os.environ.get('botToken')


def run_app(app, *, host='0.0.0.0', port=None, shutdown_timeout=60.0,
            ssl_context=None, print=print, backlog=128):
    """Run an app"""
    if port is None:
        if not ssl_context:
            port = 8080
        else:
            port = 8443

    loop = app.loop

    handler = app.make_handler()
    server = loop.create_server(handler, host, port, ssl=ssl_context,
                                backlog=backlog)
    srv, startup_res = loop.run_until_complete(asyncio.gather(server,
                                                              app.startup(),
                                                              loop=loop))

    scheme = 'https' if ssl_context else 'http'
    print("======== Running on {scheme}://{host}:{port}/ ========\n"
          "(Press CTRL+C to quit)".format(
              scheme=scheme, host=host, port=port))


async def tba_handler(request):
    data = await request.post()
    data = json.loads(data['data'])
    print("Accepted request:\n{}".format(data))
    print("{}".format(data))
    embed = discord.Embed(
        title="Crooq's Computer Quest Updated!",
        url="https://ko-fi.com/eylesis",
        description="{} has given ${} to the cause! The donation is appreciated!".format(data['from_name'], data['amount']))
    embed.set_footer(text="Ko-Fi Notification")
    if data['message'] == "":
        data['message'] = "No Message."
    embed.add_field(name="__Message__", value=data['message'])
    channelids = {'470455397912674305', '391157967493267457'}
    for channelid in channelids:
        await bot.send_message(bot.get_channel(channelid), embed=embed)
    return web.Response()

bot = commands.Bot(command_prefix='*')
loop = bot.loop
app = web.Application(loop=loop)
app.router.add_post('/endpoint', tba_handler)

if __name__ == "__main__":
    run_app(app, host=os.environ.get('HOST'), port=os.environ.get('PORT'))
    bot.run(botToken)
mit
Python
b980d69fe3d2da87814a915c6a85ef930d832860
Change simple_blend to simply average the predictions
jvanbrug/netflix,jvanbrug/netflix
scripts/simple_blend.py
scripts/simple_blend.py
import numpy as np import os import sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from utils.data_paths import SUBMISSIONS_DIR_PATH OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta') PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'), os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')] def main(): predictions = get_predictions() write(predictions) def get_predictions(): predictions = np.array([]) for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS): with open(prediction_file_path, 'r') as prediction_file: prediction = np.transpose(np.array([prediction_file.read().split()], dtype=np.float32)) if predictions.size == 0: predictions = prediction else: predictions = np.append(predictions, prediction, axis=1) return np.matrix(predictions) def write(predictions): with open(OUTPUT_FILE_PATH, 'w+') as output_file: for prediction_set in predictions: prediction = np.average(np.ravel(prediction_set)) output_file.write('{}\n'.format(prediction)) if __name__ == '__main__': main()
import numpy as np import os import sys sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) from utils.data_paths import SUBMISSIONS_DIR_PATH OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta') PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'), os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')] PREDICTION_COEFFICIENTS = [0.4, 0.6] def main(): predictions = get_predictions() write(predictions) def get_predictions(): predictions = np.array([]) for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS): with open(prediction_file_path, 'r') as prediction_file: prediction = np.transpose(np.array([prediction_file.read().split()], dtype=np.float32)) if predictions.size == 0: predictions = prediction else: predictions = np.append(predictions, prediction, axis=1) return np.matrix(predictions) def write(predictions): coefficients = np.array(PREDICTION_COEFFICIENTS) with open(OUTPUT_FILE_PATH, 'w+') as output_file: for prediction_set in predictions: prediction = np.dot(np.ravel(prediction_set), coefficients) output_file.write('{}\n'.format(prediction)) if __name__ == '__main__': main()
mit
Python
4657a4fafb1218fe73b76d142c554bd8f347d81f
Make the correct None check
adderall/regulations-site,eregs/regulations-site,willbarton/regulations-site,willbarton/regulations-site,ascott1/regulations-site,grapesmoker/regulations-site,eregs/regulations-site,eregs/regulations-site,jeremiak/regulations-site,adderall/regulations-site,EricSchles/regulations-site,tadhg-ohiggins/regulations-site,ascott1/regulations-site,EricSchles/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,grapesmoker/regulations-site,willbarton/regulations-site,jeremiak/regulations-site,ascott1/regulations-site,adderall/regulations-site,18F/regulations-site,EricSchles/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,ascott1/regulations-site,18F/regulations-site,willbarton/regulations-site,grapesmoker/regulations-site,adderall/regulations-site,jeremiak/regulations-site,jeremiak/regulations-site,tadhg-ohiggins/regulations-site,grapesmoker/regulations-site,EricSchles/regulations-site,18F/regulations-site
regserver/regulations/views/chrome.py
regserver/regulations/views/chrome.py
from django.conf import settings from django.http import Http404 from django.views.generic.base import TemplateView from regulations.generator import generator from regulations.generator.versions import fetch_grouped_history from regulations.views import utils from regulations.views.partial import * from regulations.views.sidebar import SideBarView class ChromeView(TemplateView): """ Base class for views which wish to include chrome. """ template_name = 'chrome.html' def add_extras(self, context): context['env'] = 'source' if settings.DEBUG else 'built' context['GOOGLE_ANALYTICS_SITE'] = settings.GOOGLE_ANALYTICS_SITE context['GOOGLE_ANALYTICS_ID'] = settings.GOOGLE_ANALYTICS_ID return context def get_context_data(self, **kwargs): context = super(ChromeView, self).get_context_data(**kwargs) label_id = context['label_id'] version = context['version'] # Hack solution: pull in full regulation, then the partial # @todo: just query the meta and toc layers part = label_id.split('-')[0] full_tree = generator.get_regulation(part, version) relevant_tree = generator.get_tree_paragraph(label_id, version) if full_tree is None or relevant_tree is None: raise Http404 partial_view = self.partial_class.as_view() response = partial_view( self.request, label_id=label_id, version=version) response.render() context['partial_content'] = response.content sidebar_view = SideBarView.as_view() response = sidebar_view(self.request, label_id=label_id, version=version) response.render() context['sidebar_content'] = response.content appliers = utils.handle_specified_layers( 'toc,meta', part, version, self.partial_class.sectional_links) builder = generate_html(full_tree, appliers) context['tree'] = full_tree self.add_extras(context) context['part'] = part context['history'] = fetch_grouped_history(part) return context class ChromeInterpView(ChromeView): """Interpretation of regtext section/paragraph or appendix with chrome""" partial_class = PartialInterpView class ChromeSectionView(ChromeView): """Regtext section with chrome""" partial_class = PartialSectionView class ChromeParagraphView(ChromeView): """Regtext paragraph with chrome""" partial_class = PartialParagraphView class ChromeRegulationView(ChromeView): """Entire regulation with chrome""" partial_class = PartialRegulationView
from django.conf import settings from django.http import Http404 from django.views.generic.base import TemplateView from regulations.generator import generator from regulations.generator.versions import fetch_grouped_history from regulations.views import utils from regulations.views.partial import * from regulations.views.sidebar import SideBarView class ChromeView(TemplateView): """ Base class for views which wish to include chrome. """ template_name = 'chrome.html' def add_extras(self, context): context['env'] = 'source' if settings.DEBUG else 'built' context['GOOGLE_ANALYTICS_SITE'] = settings.GOOGLE_ANALYTICS_SITE context['GOOGLE_ANALYTICS_ID'] = settings.GOOGLE_ANALYTICS_ID return context def get_context_data(self, **kwargs): context = super(ChromeView, self).get_context_data(**kwargs) label_id = context['label_id'] version = context['version'] # Hack solution: pull in full regulation, then the partial # @todo: just query the meta and toc layers part = label_id.split('-')[0] full_tree = generator.get_regulation(part, version) relevant_tree = generator.get_tree_paragraph(label_id, version) if not full_tree or relevant_tree: raise Http404 partial_view = self.partial_class.as_view() response = partial_view( self.request, label_id=label_id, version=version) response.render() context['partial_content'] = response.content sidebar_view = SideBarView.as_view() response = sidebar_view(self.request, label_id=label_id, version=version) response.render() context['sidebar_content'] = response.content appliers = utils.handle_specified_layers( 'toc,meta', part, version, self.partial_class.sectional_links) builder = generate_html(full_tree, appliers) context['tree'] = full_tree self.add_extras(context) context['part'] = part context['history'] = fetch_grouped_history(part) return context class ChromeInterpView(ChromeView): """Interpretation of regtext section/paragraph or appendix with chrome""" partial_class = PartialInterpView class ChromeSectionView(ChromeView): """Regtext section with chrome""" partial_class = PartialSectionView class ChromeParagraphView(ChromeView): """Regtext paragraph with chrome""" partial_class = PartialParagraphView class ChromeRegulationView(ChromeView): """Entire regulation with chrome""" partial_class = PartialRegulationView
cc0-1.0
Python
aaa6142718827ea6d568eccc75c624598b0bc9c9
Update __init__.py
ralph-group/pymeasure
pymeasure/instruments/thorlabs/__init__.py
pymeasure/instruments/thorlabs/__init__.py
# # This file is part of the PyMeasure package. # # Copyright (c) 2013-2020 PyMeasure Developers # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from .thorlabspm100usb import ThorlabsPM100USB from .thorlabspro8000 import thorlabsPro8000
# # This file is part of the PyMeasure package. # # Copyright (c) 2013-2020 PyMeasure Developers # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from .thorlabspm100usb import ThorlabsPM100USB
mit
Python
1a9581a33efab4bcf7f1b7a6e555fa373d6f0739
Fix repo URL in staging report
kironapublic/vaadin,Peppe/vaadin,shahrzadmn/vaadin,kironapublic/vaadin,shahrzadmn/vaadin,kironapublic/vaadin,Legioth/vaadin,magi42/vaadin,Darsstar/framework,magi42/vaadin,Peppe/vaadin,mstahv/framework,asashour/framework,jdahlstrom/vaadin.react,kironapublic/vaadin,udayinfy/vaadin,sitexa/vaadin,asashour/framework,Darsstar/framework,sitexa/vaadin,magi42/vaadin,Peppe/vaadin,peterl1084/framework,jdahlstrom/vaadin.react,asashour/framework,oalles/vaadin,Legioth/vaadin,Darsstar/framework,peterl1084/framework,Legioth/vaadin,sitexa/vaadin,mstahv/framework,peterl1084/framework,Legioth/vaadin,Peppe/vaadin,jdahlstrom/vaadin.react,udayinfy/vaadin,peterl1084/framework,Darsstar/framework,shahrzadmn/vaadin,synes/vaadin,magi42/vaadin,asashour/framework,synes/vaadin,sitexa/vaadin,mstahv/framework,oalles/vaadin,oalles/vaadin,udayinfy/vaadin,magi42/vaadin,udayinfy/vaadin,shahrzadmn/vaadin,peterl1084/framework,oalles/vaadin,udayinfy/vaadin,shahrzadmn/vaadin,kironapublic/vaadin,Legioth/vaadin,oalles/vaadin,jdahlstrom/vaadin.react,asashour/framework,Darsstar/framework,jdahlstrom/vaadin.react,mstahv/framework,mstahv/framework,synes/vaadin,synes/vaadin,synes/vaadin,Peppe/vaadin,sitexa/vaadin
scripts/GenerateStagingReport.py
scripts/GenerateStagingReport.py
#coding=UTF-8 from BuildArchetypes import archetypes, getDeploymentContext import argparse, cgi parser = argparse.ArgumentParser(description="Build report generator") parser.add_argument("version", type=str, help="Vaadin version that was just built") parser.add_argument("deployUrl", type=str, help="Base url of the deployment server") parser.add_argument("buildResultUrl", type=str, help="URL for the build result page") parser.add_argument("stagingRepo", type=str, help="URL for the staging repository") args = parser.parse_args() content = """<html> <head></head> <body> <table> """ content += "<tr><td>Try archetype demos<ul>" for archetype in archetypes: content += "<li><a href='{url}/{context}'>{demo}</a></li>\n".format(url=args.deployUrl, demo=archetype, context=getDeploymentContext(archetype, args.version)) content += """</ul></td></tr> <tr><td><a href="{repoUrl}">Staging repository</a></td></tr> <tr><td>Eclipse Ivy Settings:<br><pre>""".format(repoUrl=args.stagingRepo) content += cgi.escape(""" <ibiblio name="vaadin-staging" usepoms="true" m2compatible="true" root="{repoUrl}" />""".format(repoUrl=args.stagingRepo)) content += """</pre> </td></tr> <tr><td><a href="https://dev.vaadin.com/milestone/Vaadin {version}">Trac Milestone</a></td></tr> <tr><td><a href="https://dev.vaadin.com/admin/ticket/versions">Add version {version} to Trac</td></tr> <tr><td><a href="{url}">Staging result page (See test results, pin and tag build and dependencies)</a></td></tr> </table> </body> </html>""".format(url=args.buildResultUrl, repoUrl=args.stagingRepo, version=args.version) f = open("result/report.html", 'w') f.write(content)
#coding=UTF-8 from BuildArchetypes import archetypes, getDeploymentContext import argparse, cgi parser = argparse.ArgumentParser(description="Build report generator") parser.add_argument("version", type=str, help="Vaadin version that was just built") parser.add_argument("deployUrl", type=str, help="Base url of the deployment server") parser.add_argument("buildResultUrl", type=str, help="URL for the build result page") parser.add_argument("stagingRepo", type=str, help="URL for the staging repository") args = parser.parse_args() content = """<html> <head></head> <body> <table> """ content += "<tr><td>Try archetype demos<ul>" for archetype in archetypes: content += "<li><a href='{url}/{context}'>{demo}</a></li>\n".format(url=args.deployUrl, demo=archetype, context=getDeploymentContext(archetype, args.version)) content += """</ul></td></tr> <tr><td><a href="{repoUrl}">Staging repository</a></td></tr> <tr><td>Eclipse Ivy Settings:<br><pre>""" content += cgi.escape(""" <ibiblio name="vaadin-staging" usepoms="true" m2compatible="true" root="{repoUrl}" />""".format(repoUrl=args.stagingRepo)) content += """</pre> </td></tr> <tr><td><a href="https://dev.vaadin.com/milestone/Vaadin {version}">Trac Milestone</a></td></tr> <tr><td><a href="https://dev.vaadin.com/admin/ticket/versions">Add version {version} to Trac</td></tr> <tr><td><a href="{url}">Staging result page (See test results, pin and tag build and dependencies)</a></td></tr> </table> </body> </html>""".format(url=args.buildResultUrl, repoUrl=args.stagingRepo, version=args.version) f = open("result/report.html", 'w') f.write(content)
apache-2.0
Python
ceb5f223f2f38969157372b608d03771a9179858
Make threading tests work in environment with restricted maxprocs
ndawe/rootpy,kreczko/rootpy,rootpy/rootpy,ndawe/rootpy,kreczko/rootpy,rootpy/rootpy,rootpy/rootpy,kreczko/rootpy,ndawe/rootpy
rootpy/logger/tests/test_threading.py
rootpy/logger/tests/test_threading.py
from __future__ import division import itertools import os import resource import thread import threading import time from math import ceil from random import random import ROOT import rootpy; log = rootpy.log["rootpy.logger.test.threading"] rootpy.logger.magic.DANGER.enabled = True from .logcheck import EnsureLogContains def optional_fatal(abort=True): msg = "[rootpy.ALWAYSABORT]" if abort else "[rootpy.NEVERABORT]" ROOT.Error("rootpy.logger.test", msg) f = optional_fatal optional_fatal._bytecode = lambda: map(ord, f.func_code.co_code) optional_fatal._ORIG_BYTECODE = optional_fatal._bytecode() optional_fatal._unmodified = lambda: f._bytecode() == f._ORIG_BYTECODE def optional_fatal_bytecode_check(): assert optional_fatal._unmodified(), ( "Detected modified bytecode. This should never happen.") number_of_fatals = itertools.count() total = itertools.count() def maybe_fatal(): try: # Throw exceptions 80% of the time optional_fatal(random() < 0.8) except rootpy.ROOTError: number_of_fatals.next() finally: total.next() optional_fatal_bytecode_check() def randomfatal(should_exit): while not should_exit.is_set(): maybe_fatal() def spareprocs(): """ Compute the maximum number of threads we can start up according to ulimit """ nmax, _ = resource.getrlimit(resource.RLIMIT_NPROC) me = os.geteuid() return nmax - sum(1 for p in os.listdir("/proc") if p.isdigit() and os.stat("/proc/" + p).st_uid == me) def test_multithread_exceptions(): should_exit = threading.Event() sup_logger = log["/ROOT.rootpy.logger.test"] old_level = sup_logger.level # Suppress test warnings sup_logger.setLevel(log.CRITICAL) # Run for 1/4 second or 10s if LONG_TESTS is in the environment length = float(os.environ.get("TEST_TIME", 0.25)) try: threads = [] for i in range(min(100, int(ceil(spareprocs()*0.8)))): t = threading.Thread(target=randomfatal, args=(should_exit,)) try: t.start() threads.append(t) except thread.error: log.warning("Unable to start thread") break assert threads, "Didn't manage to start any threads!" time.sleep(length) should_exit.set() for t in threads: t.join() finally: sup_logger.setLevel(old_level) tot = total.next()-1 fatals = number_of_fatals.next()-1 fmt = "Success raising exceptions in {0} threads: total: {1} (fatals {2:%})" log.debug(fmt.format(len(threads), tot, fatals / tot))
from __future__ import division import itertools import os import threading import time from random import random import rootpy; log = rootpy.log["rootpy.logger.test.threading"] rootpy.logger.magic.DANGER.enabled = True import ROOT from .logcheck import EnsureLogContains def optional_fatal(abort=True): msg = "[rootpy.ALWAYSABORT]" if abort else "[rootpy.NEVERABORT]" ROOT.Error("rootpy.logger.test", msg) f = optional_fatal optional_fatal._bytecode = lambda: map(ord, f.func_code.co_code) optional_fatal._ORIG_BYTECODE = optional_fatal._bytecode() optional_fatal._unmodified = lambda: f._bytecode() == f._ORIG_BYTECODE def optional_fatal_bytecode_check(): assert optional_fatal._unmodified(), ( "Detected modified bytecode. This should never happen.") number_of_fatals = itertools.count() total = itertools.count() def maybe_fatal(): try: # Throw exceptions 80% of the time optional_fatal(random() < 0.8) except rootpy.ROOTError: number_of_fatals.next() finally: total.next() optional_fatal_bytecode_check() def randomfatal(should_exit): while not should_exit.is_set(): maybe_fatal() #@EnsureLogContains("ERROR", "ALWAYSABORT") def test_multithread_exceptions(): should_exit = threading.Event() sup_logger = log["/ROOT.rootpy.logger.test"] old_level = sup_logger.level # Suppress test warnings sup_logger.setLevel(log.CRITICAL) # Run for 1/4 second or 10s if LONG_TESTS is in the environment length = float(os.environ.get("TEST_TIME", 0.25)) try: threads = [] for i in range(100): t = threading.Thread(target=randomfatal, args=(should_exit,)) t.start() threads.append(t) time.sleep(length) should_exit.set() for t in threads: t.join() finally: #sup_logger.setLevel(old_level) pass tot = total.next()-1 fatals = number_of_fatals.next()-1 log.debug("Success raising exceptions: total: {0} (fatals {1:%})".format(tot, fatals / tot))
bsd-3-clause
Python
65ecd399ea82abdafd0a2471193a9c850b50db87
Debug level of logging
emkael/jfrteamy-playoff,emkael/jfrteamy-playoff
playoff.py
playoff.py
import traceback from jfr_playoff.filemanager import PlayoffFileManager from jfr_playoff.generator import PlayoffGenerator from jfr_playoff.settings import PlayoffSettings def main(): interactive = False try: import argparse arg_parser = argparse.ArgumentParser( description='Generate play-off HTML for JFR Teamy tournaments') output_args = arg_parser.add_mutually_exclusive_group() output_args.add_argument('-v', '--verbose', action='store_true', help='display info on STDERR') output_args.add_argument('-vv', '--debug', action='store_true', help='display debug info on STDERR') output_args.add_argument('-q', '--quiet', action='store_true', help='suppress warnings on STDERR') arg_parser.add_argument('config_file', metavar='JSON_FILE', help='path to config JSON file', type=str, nargs='?', default=None) arguments = arg_parser.parse_args() settings = PlayoffSettings(arguments.config_file) interactive = settings.interactive generator = PlayoffGenerator(settings) content = generator.generate_content() file_manager = PlayoffFileManager(settings) file_manager.write_content(content) file_manager.copy_scripts() file_manager.send_files() except SystemExit: interactive = False raise except: print traceback.format_exc() finally: if interactive: raw_input('Press any key to continue...') if __name__ == '__main__': main()
import traceback from jfr_playoff.filemanager import PlayoffFileManager from jfr_playoff.generator import PlayoffGenerator from jfr_playoff.settings import PlayoffSettings def main(): interactive = False try: import argparse arg_parser = argparse.ArgumentParser( description='Generate play-off HTML for JFR Teamy tournaments') output_args = arg_parser.add_mutually_exclusive_group() output_args.add_argument('-v', '--verbose', action='store_true', help='display debug info on STDERR') output_args.add_argument('-q', '--quiet', action='store_true', help='suppress warnings on STDERR') arg_parser.add_argument('config_file', metavar='JSON_FILE', help='path to config JSON file', type=str, nargs='?', default=None) arguments = arg_parser.parse_args() settings = PlayoffSettings(arguments.config_file) interactive = settings.interactive generator = PlayoffGenerator(settings) content = generator.generate_content() file_manager = PlayoffFileManager(settings) file_manager.write_content(content) file_manager.copy_scripts() file_manager.send_files() except SystemExit: interactive = False raise except: print traceback.format_exc() finally: if interactive: raw_input('Press any key to continue...') if __name__ == '__main__': main()
bsd-2-clause
Python
3f80c759c55552dce7d45cf5f84e953ac7863974
add placeholder for more examples
JiscPER/magnificent-octopus,JiscPER/magnificent-octopus,JiscPER/magnificent-octopus
octopus/modules/examples/examples.py
octopus/modules/examples/examples.py
from octopus.core import app from flask import Blueprint, render_template blueprint = Blueprint('examples', __name__) #@blueprint.route("/") #def list_examples(): # return render_template("examples/list.html") @blueprint.route("/ac") def autocomplete(): return render_template("examples/es/autocomplete.html") @blueprint.route("/fact") def fact(): return render_template("examples/sherpafact/proxy.html") @blueprint.route("/clientjs") def clientjs(): pass @blueprint.route("/epmc") def epmc(): pass @blueprint.route("/romeo") def romeo(): # at the moment the romeo endpoint only deals with downloads, which is not very demoable pass
from octopus.core import app from flask import Blueprint, render_template blueprint = Blueprint('examples', __name__) #@blueprint.route("/") #def list_examples(): # return render_template("examples/list.html") @blueprint.route("/ac") def autocomplete(): return render_template("examples/es/autocomplete.html") @blueprint.route("/fact") def fact(): return render_template("examples/sherpafact/proxy.html")
apache-2.0
Python
2668829d114031ba6fa641bb989988368371917b
add program lookup to choice group admin hotfix
ITOO-UrFU/open-programs,ITOO-UrFU/open-programs,ITOO-UrFU/open-programs
open_programs/apps/programs/admin.py
open_programs/apps/programs/admin.py
from django.contrib import admin from reversion.admin import VersionAdmin from ajax_select.admin import AjaxSelectAdmin from ajax_select import make_ajax_form from .models import Program, TrainingTarget, ProgramCompetence, ProgramModules, TargetModules, ChoiceGroup, ChoiceGroupType, LearningPlan @admin.register(Program) class ProgramAdmin(VersionAdmin): list_display = ( 'title', "training_direction", 'chief', "level", 'created', 'updated', 'archived', 'status', ) list_filter = ("level", 'created', 'updated', 'status', 'archived',) filter_horizontal = ("learning_plans", ) @admin.register(TrainingTarget) class TrainingTargetAdmin(VersionAdmin): list_display = ( "title", "number" ) # TODO: "program" list_filter = ( "program", "number" ) @admin.register(ProgramCompetence) class ProgramCompetenceAdmin(VersionAdmin): list_display = ("title", "number", "program") list_filter = ("title", "number") search_fields = ("title", ) @admin.register(ProgramModules) class ProgramModulesAdmin(VersionAdmin): list_display = ("id", "semester", "module", "program", "choice_group", "competence") list_filter = ("program", "semester",) raw_id_fields = ("module", ) @admin.register(TargetModules) class TargetModulesAdmin(VersionAdmin): list_display = ("id", ) # TODO: "choice_group", "program_module", "target" @admin.register(ChoiceGroup) class ChoiceGroupAdmin(VersionAdmin, AjaxSelectAdmin): list_display = ("id", "program", "title", "labor", "choice_group_type", "number") form = make_ajax_form(ChoiceGroup, {'program': 'program'}) @admin.register(ChoiceGroupType) class ChoiceGroupTypeAdmin(VersionAdmin): list_display = ("title", ) @admin.register(LearningPlan) class LearningPlanAdmin(VersionAdmin): list_display = ('uni_displayableTitle', 'uni_number', 'uni_title', 'uni_stage', 'uni_loadTimeType')
from django.contrib import admin from reversion.admin import VersionAdmin from ajax_select.admin import AjaxSelectAdmin from ajax_select import make_ajax_form from .models import Program, TrainingTarget, ProgramCompetence, ProgramModules, TargetModules, ChoiceGroup, ChoiceGroupType, LearningPlan @admin.register(Program) class ProgramAdmin(VersionAdmin): list_display = ( 'title', "training_direction", 'chief', "level", 'created', 'updated', 'archived', 'status', ) list_filter = ("level", 'created', 'updated', 'status', 'archived',) filter_horizontal = ("learning_plans", ) @admin.register(TrainingTarget) class TrainingTargetAdmin(VersionAdmin): list_display = ( "title", "number" ) # TODO: "program" list_filter = ( "program", "number" ) @admin.register(ProgramCompetence) class ProgramCompetenceAdmin(VersionAdmin): list_display = ("title", "number", "program") list_filter = ("title", "number") search_fields = ("title", ) @admin.register(ProgramModules) class ProgramModulesAdmin(VersionAdmin): list_display = ("id", "semester", "module", "program", "choice_group", "competence") list_filter = ("program", "semester",) raw_id_fields = ("module", ) @admin.register(TargetModules) class TargetModulesAdmin(VersionAdmin): list_display = ("id", ) # TODO: "choice_group", "program_module", "target" @admin.register(ChoiceGroup) class ChoiceGroupAdmin(VersionAdmin, AjaxSelectAdmin): list_display = ("id", "program", "title", "labor", "choice_group_type", "number") form = make_ajax_form(Program, {'program': 'program'}) @admin.register(ChoiceGroupType) class ChoiceGroupTypeAdmin(VersionAdmin): list_display = ("title", ) @admin.register(LearningPlan) class LearningPlanAdmin(VersionAdmin): list_display = ('uni_displayableTitle', 'uni_number', 'uni_title', 'uni_stage', 'uni_loadTimeType')
unlicense
Python
ec5cb4e878dae00bb6b23965c6c466ee29727583
Update HashFilter
Parsely/python-bloomfilter
pybloom/hashfilter.py
pybloom/hashfilter.py
import time class HashFilter(object): ''' Plain Temporal Hash Filter for testing purposes ''' def __init__(self, expiration): self.expiration = expiration self.unique_items = {} def add(self, key, timestamp = None): timestamp = int(timestamp) if key in self.unique_items: if timestamp < self.unique_items[key]: self.unique_items[key] = timestamp + self.expiration return True else: self.unique_items[key] = timestamp + self.expiration return False else: self.unique_items[key] = timestamp + self.expiration return False def contains(self, key, timestamp): timestamp = int(timestamp) if key in self.unique_items: if timestamp < self.unique_items[key]: return True else: del self.unique_items[key] return False
import time class HashFilter(object): ''' Plain Temporal Hash Filter for testing purposes ''' def __init__(self, expiration): self.expiration = expiration self.unique_items = {} def add(self, key, timestamp = None): if key in self.unique_items: if not timestamp: timestamp = time.time() self.unique_items[key] = int(timestamp) + self.expiration return True else: if not timestamp: timestamp = time.time() self.unique_items[key] = int(timestamp) + self.expiration return False def contains(self, key, timestamp): timestamp = int(timestamp) if key in self.unique_items: if timestamp < self.unique_items[key]: return True else: del self.unique_items[key] return False def __contains__(self, key): timestamp = time.time() if key in self.unique_items: if timestamp < self.unique_items[key]: return True else: del self.unique_items[key] return False
mit
Python
ab13290364a40c0592ed347bf7b91110afaa7115
Fix test_json
adrienpacifico/openfisca-france,benjello/openfisca-france,SophieIPP/openfisca-france,antoinearnoud/openfisca-france,benjello/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france,antoinearnoud/openfisca-france,SophieIPP/openfisca-france,adrienpacifico/openfisca-france
openfisca_france/tests/test_jsons.py
openfisca_france/tests/test_jsons.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-


# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


########### DESCRIPTION ############
## This program tests all the .json files created by a script and reports the OpenFisca errors

import json
import os
import sys

from biryani1.baseconv import check
import numpy as np

import openfisca_france
from openfisca_france.scripts.compare_openfisca_impots import compare_variable

TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()


def test():
    path = os.path.join(os.path.dirname(__file__), 'json')
    err = 1
    for fichier in os.listdir(path):
        with open(os.path.join(path, fichier)) as officiel:
            try:
                content = json.load(officiel)
            except:
                print fichier
            official_result = content['resultat_officiel']
            json_scenario = content['scenario']
            scenario = check(tax_benefit_system.Scenario.make_json_to_instance(
                tax_benefit_system = tax_benefit_system))(json_scenario)
            year = json_scenario['year']
            print scenario
            # print scenario.test_case.keys()
            totpac = scenario['test_case']['foyers_fiscaux'].values()[0].get('personnes_a_charge')
            simulation = scenario.new_simulation()
            for code, field in official_result.iteritems():
                if compare_variable(code, field, simulation, totpac, fichier, year):
                    err = 0
    assert err, "Error"


if __name__ == "__main__":
    sys.exit(test())
#! /usr/bin/env python
# -*- coding: utf-8 -*-


# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


########### DESCRIPTION ############
## This program tests all the .json files created by a script and reports the OpenFisca errors

import json
import os
import sys

from biryani1.baseconv import check
import numpy as np

import openfisca_france
from openfisca_france.scripts.compare_openfisca_impots import compare_variable

TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()


def test():
    path = os.path.join(os.path.dirname(__file__), 'json')
    err = 1
    for fichier in os.listdir(path):
        with open(os.path.join(path, fichier)) as officiel:
            try:
                content = json.load(officiel)
            except:
                print fichier
            official_result = content['resultat_officiel']
            json_scenario = content['scenario']
            scenario = check(tax_benefit_system.Scenario.make_json_to_instance(
                tax_benefit_system = tax_benefit_system))(json_scenario)
            year = json_scenario['year']
            totpac = scenario.test_case['foyers_fiscaux'].values()[0].get('personnes_a_charge')
            simulation = scenario.new_simulation()
            for code, field in official_result.iteritems():
                if compare_variable(code, field, simulation, totpac, fichier, year):
                    err = 0
    assert err, "Error"


if __name__ == "__main__":
    sys.exit(test())
agpl-3.0
Python
72ec0d82bfa59d14dbd9e8ffd89ddcfc990fc4fe
Fix #14
hadim/pygraphml
pygraphml/__init__.py
pygraphml/__init__.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function from .attribute import Attribute from .item import Item from .point import Point from .node import Node from .edge import Edge from .graph import Graph from .graphml_parser import GraphMLParser __version__ = '2.1.4'
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from __future__ import print_function from .attribute import Attribute from .item import Item from .point import Point from .node import Node from .edge import Edge from .graph import Graph from .graphml_parser import GraphMLParser __version__ = '2.1.3'
bsd-3-clause
Python
3905327d8cb02c6c7929f6b3bd12658c6bc1b6ab
bump to 1.73
lobocv/pyperform
pyperform/__init__.py
pyperform/__init__.py
from __future__ import print_function __version__ = '1.73' from pyperform.benchmark import Benchmark from .comparisonbenchmark import ComparisonBenchmark from .benchmarkedclass import BenchmarkedClass from .benchmarkedfunction import BenchmarkedFunction from .timer import timer from .exceptions import ValidationError def enable(): """ Enable all benchmarking. """ Benchmark.enable = True ComparisonBenchmark.enable = True BenchmarkedFunction.enable = True BenchmarkedClass.enable = True def disable(): """ Disable all benchmarking. """ Benchmark.enable = False ComparisonBenchmark.enable = False BenchmarkedFunction.enable = False BenchmarkedClass.enable = False
from __future__ import print_function __version__ = '1.72' from pyperform.benchmark import Benchmark from .comparisonbenchmark import ComparisonBenchmark from .benchmarkedclass import BenchmarkedClass from .benchmarkedfunction import BenchmarkedFunction from .timer import timer from .exceptions import ValidationError def enable(): """ Enable all benchmarking. """ Benchmark.enable = True ComparisonBenchmark.enable = True BenchmarkedFunction.enable = True BenchmarkedClass.enable = True def disable(): """ Disable all benchmarking. """ Benchmark.enable = False ComparisonBenchmark.enable = False BenchmarkedFunction.enable = False BenchmarkedClass.enable = False
mit
Python
813fac88b392f81825d60f3862a09718f12bf424
add ccsd
Konjkov/pyquante2,Konjkov/pyquante2,Konjkov/pyquante2
pyquante2/__init__.py
pyquante2/__init__.py
from pyquante2.basis.basisset import basisset from pyquante2.basis.cgbf import cgbf,sto from pyquante2.basis.pgbf import pgbf from pyquante2.geo.molecule import molecule from pyquante2.geo.samples import * from pyquante2.graphics.vtkplot import vtk_orbs from pyquante2.grid.grid import grid from pyquante2.ints.one import S,T,V from pyquante2.pt.mp2 import mp2 from pyquante2.cc.ccsd import ccsd from pyquante2.scf.hamiltonians import rhf,uhf try: import matplotlib from pyquante2.graphics.lineplot import lineplot_orbs,line from pyquante2.graphics.contourplot import contourplot except: pass
from pyquante2.basis.basisset import basisset from pyquante2.basis.cgbf import cgbf,sto from pyquante2.basis.pgbf import pgbf from pyquante2.geo.molecule import molecule from pyquante2.geo.samples import * from pyquante2.graphics.vtkplot import vtk_orbs from pyquante2.grid.grid import grid from pyquante2.ints.one import S,T,V from pyquante2.pt.mp2 import mp2 from pyquante2.scf.hamiltonians import rhf,uhf try: import matplotlib from pyquante2.graphics.lineplot import lineplot_orbs,line from pyquante2.graphics.contourplot import contourplot except: pass
bsd-3-clause
Python
f808b67c9a067d9addd75f09e10853c3812d6101
Refactor code
artefactual/automation-tools,artefactual/automation-tools
transfers/examples/pre-transfer/00_unbag.py
transfers/examples/pre-transfer/00_unbag.py
#!/usr/bin/env python # Script to re-package unzipped bags as standard transfers, utilizing checksums from bag manifest. # Assumes bags are structured as either bag/data/(content) or bag/data/objects/(content). # Enables use of scripts to add metadata to SIP without failing transfer at bag validation. from __future__ import print_function, unicode_literals import os import shutil import sys def main(transfer_path): transfer_path = os.path.abspath(transfer_path) # check if transfer is an unzipped bag if not os.path.isfile(os.path.join(transfer_path, 'bag-info.txt')): return 1 # move files in data up one level if 'objects' folder already exists data_path = os.path.join(transfer_path, 'data') if os.path.isdir(os.path.join(data_path, 'objects')): data_contents = os.listdir(data_path) data_contents = [os.path.join(data_path, filename) for filename in data_contents] for f in data_contents: shutil.move(f, transfer_path) # otherwise, rename data to objects else: os.rename(data_path, os.path.join(transfer_path, 'objects')) # create metadata and subdoc folders if don't already exist metadata_dir = os.path.join(transfer_path, 'metadata') subdoc_dir = os.path.join(metadata_dir, 'submissionDocumentation') if not os.path.isdir(metadata_dir): os.mkdir(metadata_dir) if not os.path.isdir(subdoc_dir): os.mkdir(subdoc_dir) # write manifest checksums to checksum file with open(os.path.join(transfer_path, 'manifest-md5.txt'), 'r') as old_file: with open (os.path.join(metadata_dir, 'checksum.md5'), 'w') as new_file: for line in old_file: if "data/objects/" in line: new_line = line.replace("data/objects/", "../objects/") else: new_line = line.replace("data/", "../objects/") new_file.write(new_line) # move bag files to submissionDocumentation for bagfile in 'bag-info.txt', 'bagit.txt', 'manifest-md5.txt', 'tagmanifest-md5.txt': shutil.copy2(os.path.join(transfer_path, bagfile), os.path.join(subdoc_dir, bagfile)) os.remove(os.path.join(transfer_path, bagfile)) return 0 if __name__ == '__main__': transfer_path = sys.argv[1] main(transfer_path)
#!/usr/bin/env python # Script to re-package unzipped bags as standard transfers, utilizing checksums from bag manifest. # Assumes bags are structured as either bag/data/(content) or bag/data/objects/(content). # Enables use of scripts to add metadata to SIP without failing transfer at bag validation. from __future__ import print_function, unicode_literals import os import shutil import sys def main(transfer_path): transfer_path = os.path.abspath(transfer_path) # check if transfer is an unzipped bag if not os.path.isfile(os.path.join(transfer_path, 'bag-info.txt')): return 1 # move files in data up one level if 'objects' folder already exists data_path = os.path.join(transfer_path, 'data') if os.path.isdir(os.path.join(data_path, 'objects')): data_contents = os.listdir(data_path) data_contents = [os.path.abspath(data_path) + '/' + filename for filename in data_contents] for f in data_contents: shutil.move(f, transfer_path) # otherwise, rename data to objects else: os.rename(data_path, os.path.join(transfer_path, 'objects')) # create metadata and subdoc folders if don't already exist metadata_dir = os.path.join(transfer_path, 'metadata') subdoc_dir = os.path.join(metadata_dir, 'submissionDocumentation') if not os.path.isdir(metadata_dir): os.mkdir(metadata_dir) if not os.path.isdir(subdoc_dir): os.mkdir(subdoc_dir) # write manifest checksums to checksum file with open(os.path.join(transfer_path, 'manifest-md5.txt'), 'r') as old_file: with open (os.path.join(metadata_dir, 'checksum.md5'), 'w') as new_file: manifest_content = old_file.readlines() for line in manifest_content: if "data/objects/" in line: new_line = line.replace("data/objects/", "../objects/") else: new_line = line.replace("data/", "../objects/") new_file.write(new_line) # move bag files to submissionDocumentation for bagfile in 'bag-info.txt', 'bagit.txt', 'manifest-md5.txt', 'tagmanifest-md5.txt': shutil.copy2(os.path.join(transfer_path, bagfile), os.path.join(subdoc_dir, bagfile)) os.remove(os.path.join(transfer_path, bagfile)) return 0 if __name__ == '__main__': transfer_path = sys.argv[1] main(transfer_path)
agpl-3.0
Python
2fbd5ceead47ea980e5dfa7b2bc29eafbbab2d72
remove unneeded import in views
arturtamborski/arturtamborskipl,arturtamborski/arturtamborskipl
blog/views.py
blog/views.py
from django.http import Http404 from django.shortcuts import render, get_object_or_404, get_list_or_404 from django.utils import timezone from . import models as blog def home(request): NUM_LAST_ARTICLES = 5 articles = blog.Article.objects.filter(date__lte=timezone.now()).order_by('-date')[:NUM_LAST_ARTICLES] return render(request, 'blog/article.html', {'isroot': True, 'articles': articles}) def article(request, slug=None): if slug is None: articles = get_list_or_404(blog.Article) else: articles = get_list_or_404(blog.Article, slug=slug) return render(request, 'blog/article.html', { 'isroot': bool(slug is None), 'articles': articles }) def category(request, slug=None): if slug is None: categories = get_list_or_404(blog.Category) else: categories = get_list_or_404(blog.Category, slug=slug) return render(request, 'blog/category.html', { 'isroot': bool(slug is None), 'categories': categories, })
from django.core.exceptions import ObjectDoesNotExist from django.http import Http404 from django.shortcuts import render, get_object_or_404, get_list_or_404 from django.utils import timezone from . import models as blog def home(request): NUM_LAST_ARTICLES = 5 articles = blog.Article.objects.filter(date__lte=timezone.now()).order_by('-date')[:NUM_LAST_ARTICLES] return render(request, 'blog/article.html', {'isroot': True, 'articles': articles}) def article(request, slug=None): if slug is None: articles = get_list_or_404(blog.Article) else: articles = get_list_or_404(blog.Article, slug=slug) return render(request, 'blog/article.html', { 'isroot': bool(slug is None), 'articles': articles }) def category(request, slug=None): if slug is None: categories = get_list_or_404(blog.Category) else: categories = get_list_or_404(blog.Category, slug=slug) return render(request, 'blog/category.html', { 'isroot': bool(slug is None), 'categories': categories, })
mit
Python
62c76a953ea5a1c753f9c7447bab5800bb25c2b1
add life expectancy context bulk download for ihme
semio/ddf_utils
ddf_utils/factory/igme.py
ddf_utils/factory/igme.py
# -*- coding: utf-8 -*- """download sources from CME info portal""" __doc__ = """T.B.D""" import os.path as osp import re import requests import pandas as pd from lxml import html from urllib.parse import urlsplit, urljoin url = 'http://www.childmortality.org/' metadata = None def load_metadata(): r = requests.get(url) h = html.fromstring(r.content) flist = [] for l in h.xpath('//a/@href'): if l.endswith('xlsx'): #print(urljoin(url, l)) flist.append(urljoin(url, l)) md = pd.DataFrame(flist, columns=['link']) md['name'] = md['link'].map(lambda x: osp.basename(x)[:-5]) global metadata metadata = md[['name', 'link']].copy() def has_newer_source(v): """accepts a int and return true if version inferred from metadata is bigger.""" if metadata is None: load_metadata() link = metadata.loc[0, 'link'] ver = re.match('.*files_v(\d+).*', link).groups()[0] if int(ver) > v: return True return False def bulk_download(out_dir, name=None): if metadata is None: load_metadata() if name: names = [name] else: names = metadata['name'].values for n in names: if n not in metadata['name'].values: raise KeyError("{} not found in page.".format(n)) link = metadata.loc[metadata['name'] == n, 'link'].values[0] res = requests.get(link) out_path = osp.join(out_dir, osp.basename(link)) with open(osp.expanduser(out_path), 'wb') as f: f.write(res.content) f.close()
# -*- coding: utf-8 -*- """download sources from CME info portal""" __doc__ = """T.B.D""" import os.path as osp import re import requests import pandas as pd from io import BytesIO from lxml import html from urllib.parse import urlsplit, urljoin url = 'http://www.childmortality.org/' metadata = None def load_metadata(): r = requests.get(url) h = html.fromstring(r.content) flist = [] for l in h.xpath('//a/@href'): if l.endswith('xlsx'): #print(urljoin(url, l)) flist.append(urljoin(url, l)) md = pd.DataFrame(flist, columns=['link']) md['name'] = md['link'].map(lambda x: osp.basename(x)[:-5]) global metadata metadata = md[['name', 'link']].copy() def has_newer_source(v): """accepts a int and return true if version inferred from metadata is bigger.""" if metadata is None: load_metadata() link = metadata.loc[0, 'link'] ver = re.match('.*files_v(\d+).*', link).groups()[0] if int(ver) > v: return True return False def bulk_download(out_dir, name=None): if metadata is None: load_metadata() if name: names = [name] else: names = metadata['name'].values for n in names: if n not in metadata['name'].values: raise KeyError("{} not found in page.".format(n)) link = metadata.loc[metadata['name'] == n, 'link'].values[0] res = requests.get(link) out_path = osp.join(out_dir, osp.basename(link)) with open(osp.expanduser(out_path), 'wb') as f: b = BytesIO(res.content) f.write(b.read()) f.close()
mit
Python
5dce1ee6c54d8686cee42651528c087e9939368b
Bump version, 0.9.4.22
why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado
dp_tornado/version.py
dp_tornado/version.py
__version_info__ = (0, 9, 4, 22) __version__ = '.'.join(map(str, __version_info__))
__version_info__ = (0, 9, 4, 21) __version__ = '.'.join(map(str, __version_info__))
mit
Python
7eca9eb4d5c7134b84c3462ac01cf1679557819f
Update example
arambadk/django-datatable,arambadk/django-datatable,shymonk/django-datatable,shymonk/django-datatable,arambadk/django-datatable,shymonk/django-datatable
example/app/tables.py
example/app/tables.py
#!/usr/bin/env python # coding: utf-8 from table.columns import Column, LinkColumn, DatetimeColumn, Link from table.utils import A from table import Table from models import Person class PersonTable(Table): id = Column(field='id', header=u'#', header_attrs={'width': '5%'}) name = Column(field='name', header=u'NAME') action = LinkColumn(header=u'ACTION', links=[Link(text=u'EDIT', viewname='app.views.edit', args=(A('id'),))]) class Meta: model = Person ext_button_template = "button.html" # disable_search = True # disable_info = True # disable_length_menu = True # disable_pagination = True
#!/usr/bin/env python # coding: utf-8 from table.columns import Column, LinkColumn, DatetimeColumn, Link from table.utils import A from table import Table from models import Person class PersonTable(Table): id = Column(field='id', header=u'#', header_attrs={'width': '5%'}) name = Column(field='name', header=u'姓名') action = LinkColumn(header=u'操作', links=[Link(text=u'编辑', viewname='app.views.edit', args=(A('id'),))]) class Meta: model = Person ext_button_link = "http://www.baidu.com" ext_button_text = "Add +"
mit
Python
cd9e9efd8587b5be9e3d9a4e7efeaf26b048b0d2
fix attribute error on handlers loading
dimagi/rapidsms,dimagi/rapidsms
lib/rapidsms/contrib/handlers/settings.py
lib/rapidsms/contrib/handlers/settings.py
#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 INSTALLED_HANDLERS = None EXCLUDED_HANDLERS = [] RAPIDSMS_HANDLERS_EXCLUDE_APPS = []
#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 INSTALLED_HANDLERS = None EXCLUDED_HANDLERS = []
bsd-3-clause
Python
5223846786b70dd9c198f98f7a620e70b40fab3d
update k84
WatsonDNA/nlp100,wtsnjp/nlp100,wtsnjp/nlp100,WatsonDNA/nlp100
chap09/k84.py
chap09/k84.py
# # usage: python k84.py {N} # import sys import plyvel import struct from math import log def create_matrix(n): co_db = plyvel.DB('./co.ldb', create_if_missing=True) word_db = plyvel.DB('./word.ldb', create_if_missing=True) context_db = plyvel.DB('./context.ldb', create_if_missing=True) matrix_db = plyvel.DB('./matrix.ldb', create_if_missing=True) for k, v in co_db: tmp = k.decode('utf-8').strip().split('\t') if len(tmp) != 2: continue x = 0 f_tc = int.from_bytes(v, 'big') if f_tc >= 10: f_t = int.from_bytes(word_db.get(tmp[0].encode('utf-8')), 'big') f_c = int.from_bytes(context_db.get(tmp[1].encode('utf-8')), 'big') x = max(log(2, n * f_tc / (f_t * f_c)), 0) if x != 0: matrix_db.put(k, struct.pack('>d', x)) co_db.close() word_db.close() context_db.close() matrix_db.close() def get_matrix(t, c): matrix_db = plyvel.DB('./matrix.ldb', create_if_missing=True) t_key = '\t'.join((t, c)).encode('utf-8') v = float(struct.unpack('>d', matrix_db.get(t_key))[0]) matrix_db.close() print('X("{}", "{}") = {}'.format(t, c, v)) if __name__ == '__main__': N = int(sys.argv[1]) create_matrix(N) get_matrix('of', 'a')
# # usage: python k84.py {N} # import sys import plyvel from math import log def wc_matrix(n, ofn): co_db = plyvel.DB('./co.ldb', create_if_missing=True) word_db = plyvel.DB('./word.ldb', create_if_missing=True) context_db = plyvel.DB('./context.ldb', create_if_missing=True) x = 0 ZERO = x.to_bytes((x.bit_length() + 7) // 8, 'big') for k, v in co_db: tmp = k.decode('utf-8').strip().split('\t') if len(tmp) != 2: continue x = 0 f_tc = int.from_bytes(v, 'big') if f_tc >= 10: f_t = int.from_bytes(word_db.get(tmp[0].encode('utf-8'), ZERO), 'big') f_c = int.from_bytes(context_db.get(tmp[1].encode('utf-8'), ZERO), 'big') x = max(log(2, n * f_tc / (f_t * f_c)), 0) if x != 0: with open(ofn, 'a') as f: f.write('{}\t{}\t{}\n'.format(tmp[0], tmp[1], x)) co_db.close() word_db.close() context_db.close() if __name__ == '__main__': N = int(sys.argv[1]) ofn = 'wc-matrix.txt' wc_matrix(N, ofn)
unlicense
Python
241897e2f4596dfee6eae87a6467254e135ac61b
Upgrade ready to fly quads to parts v2.
rcbuild-info/scrape,rcbuild-info/scrape
rcbi/rcbi/spiders/ReadyToFlyQuadsSpider.py
rcbi/rcbi/spiders/ReadyToFlyQuadsSpider.py
import scrapy from scrapy import log from scrapy.spiders import CrawlSpider, Rule from scrapy.linkextractors import LinkExtractor from rcbi.items import Part MANUFACTURERS = ["Tiger", "RTF ", "HQ Prop", "Lemon"] CORRECT = {"Tiger": "T-Motor", "RTF ": "ReadyToFlyQuads", "HQ Prop": "HQProp", "Lemon": "Lemon Rx"} STOCK_STATE_MAP = {"http://schema.org/InStock": "in_stock", "http://schema.org/OutOfStock": "out_of_stock"} class ReadyToFlyQuadsSpider(CrawlSpider): name = "readytoflyquads" allowed_domains = ["readytoflyquads.com"] start_urls = ["http://www.readytoflyquads.com/catalog/seo_sitemap/product/"] rules = ( # Extract links matching 'category.php' (but not matching 'subsection.php') # and follow links from them (since no callback means follow=True by default). Rule(LinkExtractor(allow=('seo_sitemap/product/', ))), # Extract links matching 'item.php' and parse them with the spider's method parse_item Rule(LinkExtractor(allow=('/.*', )), callback='parse_item'), ) def parse_item(self, response): headers = response.css("#product-attribute-specs-table th") data = response.css("#product-attribute-specs-table td") manufacturer = None for i, header in enumerate(headers): header = header.xpath("text()").extract()[0] if header == "Manufacturer": manufacturer = data[i].xpath("text()").extract()[0] item = Part() if manufacturer and manufacturer != "No": item["manufacturer"] = manufacturer item["site"] = self.name product_name = response.css("div.product-name") if not product_name: return item["name"] = product_name[0].xpath("//h1/text()").extract()[0].strip() for m in MANUFACTURERS: if item["name"].startswith(m): item["name"] = item["name"][len(m):].strip() if m in CORRECT: m = CORRECT[m] item["manufacturer"] = m break variant = {} variant["timestamp"] = response.headers["Date"] if "Last-Modified" in response.headers: variant["timestamp"] = response.headers["Last-Modified"] item["variants"] = [variant] variant["url"] = response.url price = response.css("[itemprop=\"price\"]::text") variant["price"] = price.extract()[0] availability = response.css("[itemprop=\"availability\"]::attr(href)").extract() availability = availability[0] variant["stock_state"] = STOCK_STATE_MAP[availability] return item
import scrapy from scrapy import log from scrapy.spiders import CrawlSpider, Rule from scrapy.linkextractors import LinkExtractor from rcbi.items import Part MANUFACTURERS = ["Tiger", "RTF ", "HQ Prop", "Lemon"] CORRECT = {"Tiger": "T-Motor", "RTF ": "ReadyToFlyQuads", "HQ Prop": "HQProp", "Lemon": "Lemon Rx"} class ReadyToFlyQuadsSpider(CrawlSpider): name = "readytoflyquads" allowed_domains = ["readytoflyquads.com"] start_urls = ["http://www.readytoflyquads.com/catalog/seo_sitemap/product/"] rules = ( # Extract links matching 'category.php' (but not matching 'subsection.php') # and follow links from them (since no callback means follow=True by default). Rule(LinkExtractor(allow=('seo_sitemap/product/', ))), # Extract links matching 'item.php' and parse them with the spider's method parse_item Rule(LinkExtractor(allow=('/.*', )), callback='parse_item'), ) def parse_item(self, response): headers = response.css("#product-attribute-specs-table th") data = response.css("#product-attribute-specs-table td") manufacturer = None for i, header in enumerate(headers): header = header.xpath("text()").extract()[0] if header == "Manufacturer": manufacturer = data[i].xpath("text()").extract()[0] item = Part() if manufacturer and manufacturer != "No": item["manufacturer"] = manufacturer item["site"] = self.name item["url"] = response.url product_name = response.css("div.product-name") if not product_name: return item["name"] = product_name[0].xpath("//h1/text()").extract()[0].strip() for m in MANUFACTURERS: if item["name"].startswith(m): item["name"] = item["name"][len(m):].strip() if m in CORRECT: m = CORRECT[m] item["manufacturer"] = m break return item
apache-2.0
Python
01003d7b64220b794d8e10e78dd26badef4dfcc5
Fix tests
klen/Flask-Foundation,klen/fquest,klen/tweetchi
base/auth/tests.py
base/auth/tests.py
from flask_testing import TestCase from ..app import create_app from ..config import test from ..ext import db class BaseCoreTest(TestCase): def create_app(self): return create_app(test) def setUp(self): db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_users(self): from base.auth.models import User response = self.client.post('/users/login/', data=dict()) self.assertRedirects(response, '/') user = User(username='test', pw_hash='test', email='[email protected]') db.session.add(user) db.session.commit() self.assertTrue(user.updated_at) response = self.client.post('/users/login/', data=dict( email='[email protected]', action_save=True, password='test')) self.assertRedirects(response, '/users/profile/') response = self.client.get('/users/logout/') self.assertRedirects(response, '/') response = self.client.post('/users/register/', data=dict( username='test2', email='[email protected]', action_save=True, password='test', password_confirm='test', )) self.assertRedirects(response, '/users/profile/') user = User.query.filter(User.username == 'test2').first() self.assertEqual(user.email, '[email protected]') def test_manager(self): from base.auth.models import Role, User from manage import manager manager.app = self.app manager.handle('manage', 'create_role', ['test']) role = Role.query.filter(Role.name == 'test').first() self.assertEqual(role.name, 'test') manager.handle('manage', 'create_user', 'test [email protected] -p 12345'.split()) user = User.query.filter(User.username == 'test').first() manager.handle('manage', 'add_role', 'test test'.split()) self.assertTrue(role in user.roles) def test_oauth(self): from flask import url_for self.assertTrue(url_for('login_twitter'))
from flask_testing import TestCase from ..app import create_app from ..config import test from ..ext import db class BaseCoreTest(TestCase): def create_app(self): return create_app(test) def setUp(self): db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_users(self): from base.auth.models import User response = self.client.post('/users/login/', data=dict()) self.assertRedirects(response, '/') user = User(username='test', pw_hash='test', email='[email protected]') db.session.add(user) db.session.commit() self.assertTrue(user.updated) response = self.client.post('/users/login/', data=dict( email='[email protected]', action_save=True, password='test')) self.assertRedirects(response, '/users/profile/') response = self.client.get('/users/logout/') self.assertRedirects(response, '/') response = self.client.post('/users/register/', data=dict( username='test2', email='[email protected]', action_save=True, password='test', password_confirm='test', )) self.assertRedirects(response, '/users/profile/') user = User.query.filter(User.username == 'test2').first() self.assertEqual(user.email, '[email protected]') def test_manager(self): from base.auth.models import Role, User from manage import manager manager.app = self.app manager.handle('manage', 'create_role', ['test']) role = Role.query.filter(Role.name == 'test').first() self.assertEqual(role.name, 'test') manager.handle('manage', 'create_user', 'test [email protected] -p 12345'.split()) user = User.query.filter(User.username == 'test').first() manager.handle('manage', 'add_role', 'test test'.split()) self.assertTrue(role in user.roles) def test_oauth(self): from flask import url_for self.assertTrue(url_for('login_twitter'))
bsd-3-clause
Python
69e760e4a571d16e75f30f1e97ea1a917445f333
Switch to recipe engine "url" module.
CoherentLabs/depot_tools,CoherentLabs/depot_tools
recipes/recipe_modules/gitiles/__init__.py
recipes/recipe_modules/gitiles/__init__.py
DEPS = [ 'recipe_engine/json', 'recipe_engine/path', 'recipe_engine/python', 'recipe_engine/raw_io', 'recipe_engine/url', ]
DEPS = [ 'recipe_engine/json', 'recipe_engine/path', 'recipe_engine/python', 'recipe_engine/raw_io', 'url', ]
bsd-3-clause
Python
bb366439065924732b9b1559a0dc776c586fa07c
fix url
willbarton/regulations-site,willbarton/regulations-site,ascott1/regulations-site,adderall/regulations-site,ascott1/regulations-site,EricSchles/regulations-site,jeremiak/regulations-site,jeremiak/regulations-site,EricSchles/regulations-site,adderall/regulations-site,adderall/regulations-site,EricSchles/regulations-site,18F/regulations-site,willbarton/regulations-site,eregs/regulations-site,jeremiak/regulations-site,jeremiak/regulations-site,tadhg-ohiggins/regulations-site,grapesmoker/regulations-site,adderall/regulations-site,ascott1/regulations-site,willbarton/regulations-site,18F/regulations-site,grapesmoker/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,18F/regulations-site,eregs/regulations-site,grapesmoker/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,EricSchles/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,grapesmoker/regulations-site,ascott1/regulations-site
regulations/tests/selenium/example_test.py
regulations/tests/selenium/example_test.py
import os import unittest import base64 import json import httplib import sys from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait class ExampleTest(unittest.TestCase): def setUp(self): self.capabilities = webdriver.DesiredCapabilities.CHROME self.capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER'] self.capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER'] self.capabilities['platform'] = 'LINUX' self.capabilities['version'] = '' self.capabilities['name'] = 'Example test' self.username = os.environ['SAUCE_USERNAME'] self.key = os.environ['SAUCE_ACCESS_KEY'] hub_url = "%s:%s" % (self.username, self.key) self.driver = webdriver.Remote(desired_capabilities=self.capabilities, command_executor = ("http://%[email protected]:80/wd/hub" % hub_url)) self.jobid = self.driver.session_id print("Sauce Labs job: https://saucelabs.com/jobs/%s" % self.jobid) self.driver.implicitly_wait(30) def test_sauce(self): self.driver.get('http://localhost:8000/1005') toc_link_1005_1 = self.driver.find_element_by_xpath('//*[@id="toc"]/ol/li[1]/a') self.assertEquals(toc_link_1005_1.get_attribute('data-section-id'), '1005-1') def tearDown(self): print("https://saucelabs.com/jobs/%s" % self.driver.session_id) self.driver.quit() if __name__ == '__main__': unittest.main()
import os import unittest import base64 import json import httplib import sys from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait class ExampleTest(unittest.TestCase): def setUp(self): self.capabilities = webdriver.DesiredCapabilities.CHROME self.capabilities['tunnel-identifier'] = os.environ['TRAVIS_JOB_NUMBER'] self.capabilities['build'] = os.environ['TRAVIS_BUILD_NUMBER'] self.capabilities['platform'] = 'LINUX' self.capabilities['version'] = '' self.capabilities['name'] = 'Example test' self.username = os.environ['SAUCE_USERNAME'] self.key = os.environ['SAUCE_ACCESS_KEY'] hub_url = "%s:%s" % (self.username, self.key) self.driver = webdriver.Remote(desired_capabilities=self.capabilities, command_executor = ("http://%[email protected]:80/wd/hub" % hub_url)) self.jobid = self.driver.session_id print("Sauce Labs job: https://saucelabs.com/jobs/%s" % self.jobid) self.driver.implicitly_wait(30) def test_sauce(self): self.driver.get('http://localhost:8000') toc_link_1005_1 = self.driver.find_element_by_xpath('//*[@id="toc"]/ol/li[1]/a') self.assertEquals(toc_link_1005_1.get_attribute('data-section-id'), '1005-1') def tearDown(self): print("https://saucelabs.com/jobs/%s" % self.driver.session_id) self.driver.quit() if __name__ == '__main__': unittest.main()
cc0-1.0
Python
bbee1e9b8563d56c0d0acbfc6ae61334f8251159
Reset default value in test so that it doesn't produce an error
burnpanck/traits,burnpanck/traits
enthought/traits/tests/undefined_test_case.py
enthought/traits/tests/undefined_test_case.py
import unittest from enthought.traits.api import HasTraits, Str, Undefined, ReadOnly, Float class Foo(HasTraits): name = Str() original_name = ReadOnly bar = Str baz = Float def _name_changed(self): if self.original_name is Undefined: self.original_name = self.name class Bar(HasTraits): name = Str(Undefined) class UndefinedTestCase(unittest.TestCase): def test_initial_value(self): b = Bar() self.failUnlessEqual( b.name, Undefined ) return def test_name_change(self): b = Bar() b.name = 'first' self.failUnlessEqual( b.name, 'first' ) return def test_read_only_write_once(self): f = Foo() self.failUnlessEqual(f.name, '') self.failUnless(f.original_name is Undefined) f.name = 'first' self.failUnlessEqual(f.name, 'first') self.failUnlessEqual(f.original_name, 'first') f.name = 'second' self.failUnlessEqual(f.name, 'second') self.failUnlessEqual(f.original_name, 'first') return def test_read_only_write_once_from_constructor(self): f = Foo(name='first') f.name = 'first' self.failUnlessEqual(f.name, 'first') self.failUnlessEqual(f.original_name, 'first') f.name = 'second' self.failUnlessEqual(f.name, 'second') self.failUnlessEqual(f.original_name, 'first') return ### EOF #######################################################################
import unittest from enthought.traits.api import HasTraits, Str, Undefined, ReadOnly, Float class Foo(HasTraits): name = Str() original_name = ReadOnly bar = Str baz = Float def _name_changed(self): if self.original_name is Undefined: self.original_name = self.name class Bar(HasTraits): name = Str(Undefined()) class UndefinedTestCase(unittest.TestCase): def test_initial_value(self): b = Bar() self.failUnlessEqual( b.name, Undefined ) return def test_name_change(self): b = Bar() b.name = 'first' self.failUnlessEqual( b.name, 'first' ) return def test_read_only_write_once(self): f = Foo() self.failUnlessEqual(f.name, '') self.failUnless(f.original_name is Undefined) f.name = 'first' self.failUnlessEqual(f.name, 'first') self.failUnlessEqual(f.original_name, 'first') f.name = 'second' self.failUnlessEqual(f.name, 'second') self.failUnlessEqual(f.original_name, 'first') return def test_read_only_write_once_from_constructor(self): f = Foo(name='first') f.name = 'first' self.failUnlessEqual(f.name, 'first') self.failUnlessEqual(f.original_name, 'first') f.name = 'second' self.failUnlessEqual(f.name, 'second') self.failUnlessEqual(f.original_name, 'first') return ### EOF #######################################################################
bsd-3-clause
Python
85f14fffc01002e5a1c0a7a3644a81a4ade61745
Bump dsub version to 0.2.1
DataBiosphere/dsub,DataBiosphere/dsub
dsub/_dsub_version.py
dsub/_dsub_version.py
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Single source of truth for dsub's version. This must remain small and dependency-free so that any dsub module may import it without creating circular dependencies. Note that this module is parsed as a text file by setup.py and changes to the format of this file could break setup.py. The version should follow formatting requirements specified in PEP-440. - https://www.python.org/dev/peps/pep-0440 A typical release sequence will be versioned as: 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ DSUB_VERSION = '0.2.1'
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Single source of truth for dsub's version. This must remain small and dependency-free so that any dsub module may import it without creating circular dependencies. Note that this module is parsed as a text file by setup.py and changes to the format of this file could break setup.py. The version should follow formatting requirements specified in PEP-440. - https://www.python.org/dev/peps/pep-0440 A typical release sequence will be versioned as: 0.1.3.dev0 -> 0.1.3 -> 0.1.4.dev0 -> ... """ DSUB_VERSION = '0.2.1.dev0'
apache-2.0
Python
edb6f738979e213cca3fd03991caebdf209b09b9
Fix permissions script
GluuFederation/community-edition-setup,GluuFederation/community-edition-setup,GluuFederation/community-edition-setup
static/extension/dynamic_scope/dynamic_permission.py
static/extension/dynamic_scope/dynamic_permission.py
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text. # Copyright (c) 2016, Gluu # # Author: Yuriy Movchan # from org.xdi.model.custom.script.type.scope import DynamicScopeType from org.xdi.service.cdi.util import CdiUtil from org.xdi.oxauth.service import UserService from org.xdi.util import StringHelper, ArrayHelper from java.util import Arrays, ArrayList import java class DynamicScope(DynamicScopeType): def __init__(self, currentTimeMillis): self.currentTimeMillis = currentTimeMillis def init(self, configurationAttributes): print "Permission dynamic scope. Initialization" print "Permission dynamic scope. Initialized successfully" return True def destroy(self, configurationAttributes): print "Permission dynamic scope. Destroy" print "Permission dynamic scope. Destroyed successfully" return True # Update Json Web token before signing/encrypring it # dynamicScopeContext is org.xdi.oxauth.service.external.context.DynamicScopeExternalContext # configurationAttributes is java.util.Map<String, SimpleCustomProperty> def update(self, dynamicScopeContext, configurationAttributes): print "Permission dynamic scope scope. Update method" authorizationGrant = dynamicScopeContext.getAuthorizationGrant() user = dynamicScopeContext.getUser() jsonWebResponse = dynamicScopeContext.getJsonWebResponse() claims = jsonWebResponse.getClaims() userService = CdiUtil.bean(UserService) roles = userService.getCustomAttribute(user, "role") if roles != None: claims.setClaim("role", roles.getValues()) return True def logout(self, configurationAttributes, requestParameters): return True def getApiVersion(self): return 1
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text. # Copyright (c) 2016, Gluu # # Author: Yuriy Movchan # from org.xdi.model.custom.script.type.scope import DynamicScopeType from org.xdi.oxauth.service import UserService from org.xdi.util import StringHelper, ArrayHelper from java.util import Arrays, ArrayList import java class DynamicScope(DynamicScopeType): def __init__(self, currentTimeMillis): self.currentTimeMillis = currentTimeMillis def init(self, configurationAttributes): print "Permission dynamic scope. Initialization" print "Permission dynamic scope. Initialized successfully" return True def destroy(self, configurationAttributes): print "Permission dynamic scope. Destroy" print "Permission dynamic scope. Destroyed successfully" return True # Update Json Web token before signing/encrypring it # dynamicScopeContext is org.xdi.oxauth.service.external.context.DynamicScopeExternalContext # configurationAttributes is java.util.Map<String, SimpleCustomProperty> def update(self, dynamicScopeContext, configurationAttributes): print "Permission dynamic scope scope. Update method" authorizationGrant = dynamicScopeContext.getAuthorizationGrant() user = dynamicScopeContext.getUser() jsonWebResponse = dynamicScopeContext.getJsonWebResponse() claims = jsonWebResponse.getClaims() userService = UserService.instance() roles = userService.getCustomAttribute(user, "role") if roles != None: claims.setClaim("role", roles.getValues()) return True def logout(self, configurationAttributes, requestParameters): return True def getApiVersion(self): return 1
mit
Python
c1923339d7d64b9e85e3a2a1522ff0442e18a798
Update common version (#6060)
Azure/azure-sdk-for-python,Azure/azure-sdk-for-python,Azure/azure-sdk-for-python,Azure/azure-sdk-for-python
sdk/core/azure-common/azure/common/_version.py
sdk/core/azure-common/azure/common/_version.py
#------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #-------------------------------------------------------------------------- VERSION = "1.1.23"
#------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #-------------------------------------------------------------------------- VERSION = "1.1.22"
mit
Python
3ff373ed0d5349087a77b2a96af41e0e5cc9c15d
add UI for boardd loopback test
commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot
selfdrive/boardd/tests/test_boardd_loopback.py
selfdrive/boardd/tests/test_boardd_loopback.py
#!/usr/bin/env python3 import os import random import time from collections import defaultdict from functools import wraps import cereal.messaging as messaging from cereal import car from common.basedir import PARAMS from common.params import Params from common.spinner import Spinner from panda import Panda from selfdrive.boardd.boardd import can_list_to_can_capnp from selfdrive.car import make_can_msg from selfdrive.test.helpers import with_processes def reset_panda(fn): @wraps(fn) def wrapper(): p = Panda() for i in [0, 1, 2, 0xFFFF]: p.can_clear(i) p.reset() p.close() fn() return wrapper os.environ['STARTED'] = '1' os.environ['BOARDD_LOOPBACK'] = '1' os.environ['PARAMS_PATH'] = PARAMS @reset_panda @with_processes(['boardd']) def test_boardd_loopback(): # wait for boardd to init spinner = Spinner() time.sleep(2) # boardd blocks on CarVin and CarParams cp = car.CarParams.new_message() cp.safetyModel = car.CarParams.SafetyModel.allOutput Params().put("CarVin", b"0"*17) Params().put("CarParams", cp.to_bytes()) sendcan = messaging.pub_sock('sendcan') can = messaging.sub_sock('can', conflate=False, timeout=100) time.sleep(1) n = 1000 for i in range(n): spinner.update(f"boardd loopback {i}/{n}") sent_msgs = defaultdict(set) for _ in range(random.randrange(10)): to_send = [] for __ in range(random.randrange(100)): bus = random.randrange(3) addr = random.randrange(1, 1<<29) dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))]) sent_msgs[bus].add((addr, dat)) to_send.append(make_can_msg(addr, dat, bus)) sendcan.send(can_list_to_can_capnp(to_send, msgtype='sendcan')) max_recv = 10 while max_recv > 0 and any(len(sent_msgs[bus]) for bus in range(3)): recvd = messaging.drain_sock(can, wait_for_one=True) for msg in recvd: for m in msg.can: if m.src >= 128: k = (m.address, m.dat) assert k in sent_msgs[m.src-128] sent_msgs[m.src-128].discard(k) max_recv -= 1 # if a set isn't empty, messages got dropped for bus in range(3): assert not len(sent_msgs[bus]), f"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages" spinner.close()
#!/usr/bin/env python3 import os import random import time from collections import defaultdict from functools import wraps import cereal.messaging as messaging from cereal import car from common.basedir import PARAMS from common.params import Params from panda import Panda from selfdrive.boardd.boardd import can_list_to_can_capnp from selfdrive.car import make_can_msg from selfdrive.test.helpers import with_processes def reset_panda(fn): @wraps(fn) def wrapper(): p = Panda() for i in [0, 1, 2, 0xFFFF]: p.can_clear(i) p.reset() p.close() fn() return wrapper os.environ['STARTED'] = '1' os.environ['BOARDD_LOOPBACK'] = '1' os.environ['PARAMS_PATH'] = PARAMS @reset_panda @with_processes(['boardd']) def test_boardd_loopback(): # wait for boardd to init time.sleep(2) # boardd blocks on CarVin and CarParams cp = car.CarParams.new_message() cp.safetyModel = car.CarParams.SafetyModel.allOutput Params().put("CarVin", b"0"*17) Params().put("CarParams", cp.to_bytes()) sendcan = messaging.pub_sock('sendcan') can = messaging.sub_sock('can', conflate=False, timeout=100) time.sleep(1) for i in range(1000): sent_msgs = defaultdict(set) for _ in range(random.randrange(10)): to_send = [] for __ in range(random.randrange(100)): bus = random.randrange(3) addr = random.randrange(1, 1<<29) dat = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))]) sent_msgs[bus].add((addr, dat)) to_send.append(make_can_msg(addr, dat, bus)) sendcan.send(can_list_to_can_capnp(to_send, msgtype='sendcan')) max_recv = 10 while max_recv > 0 and any(len(sent_msgs[bus]) for bus in range(3)): recvd = messaging.drain_sock(can, wait_for_one=True) for msg in recvd: for m in msg.can: if m.src >= 128: k = (m.address, m.dat) assert k in sent_msgs[m.src-128] sent_msgs[m.src-128].discard(k) max_recv -= 1 # if a set isn't empty, messages got dropped for bus in range(3): assert not len(sent_msgs[bus]), f"loop {i}: bus {bus} missing {len(sent_msgs[bus])} messages"
mit
Python
4019372609565b074a5c3ba946245b61c8479ada
update dev version after 2.1.0 tag [skip ci]
desihub/fiberassign,desihub/fiberassign,desihub/fiberassign,desihub/fiberassign
py/fiberassign/_version.py
py/fiberassign/_version.py
__version__ = '2.1.0.dev2650'
__version__ = '2.1.0'
bsd-3-clause
Python
9602574af41a9c09edbc84bf77bde3a285d71741
use datastore client in example
radinformatics/som-tools,radinformatics/som-tools,radinformatics/som-tools,radinformatics/som-tools
examples/google/radiology/upload_storage_radiology.py
examples/google/radiology/upload_storage_radiology.py
#!/usr/bin/env python # RADIOLOGY --------------------------------------------------- # This is an example script to upload data (images, text, metadata) to # google cloud storage and datastore. Data MUST be de-identified import os # Start google storage client for pmc-stanford from som.api.google.datastore import DataStoreClient as Client client = Client(bucket_name='radiology') # big_query not developed yet collection = client.create_collection(uid='IRB41449') # Let's load some dummy data from deid from deid.data import get_dataset from deid.dicom import get_files dicom_files = get_files(get_dataset('dicom-cookies')) # Now de-identify to get clean files from deid.dicom import get_identifiers, replace_identifiers ids=get_identifiers(dicom_files) updated_files = replace_identifiers(dicom_files=dicom_files, ids=ids) # Define some metadata for the entity metadata = { "source_id" : "cookieTumorDatabase", "id":"cookie-47", "Modality": "cookie"} # Upload the dataset client.upload_dataset(images=updated_files, collection=collection, uid=metadata['id'], entity_metadata=metadata) # Now try with adding metadata for an image images_metadata = { updated_files[0]: { "Modality":"cookie", "Type": "chocolate-chip", "Width": 350, "Height": 350 } } # And again do the call client.upload_dataset(images=updated_files, collection=collection, uid="cookie-47", images_metadata=images_metadata)
#!/usr/bin/env python # RADIOLOGY --------------------------------------------------- # This is an example script to upload data (images, text, metadata) to # google cloud storage and datastore. Data MUST be de-identified import os # Start google storage client for pmc-stanford from som.api.google import Client client = Client(use_bigquery=False, bucket_name='radiology') collection = client.create_collection(uid='IRB41449') # Let's load some dummy data from deid from deid.data import get_dataset from deid.dicom import get_files dicom_files = get_files(get_dataset('dicom-cookies')) # Now de-identify to get clean files from deid.dicom import get_identifiers, replace_identifiers ids=get_identifiers(dicom_files) updated_files = replace_identifiers(dicom_files=dicom_files, ids=ids) # Define some metadata for the entity metadata = { "source_id" : "cookieTumorDatabase", "id":"cookie-47", "Modality": "cookie"} # Upload the dataset client.upload_dataset(images=updated_files, collection=collection, uid=metadata['id'], entity_metadata=metadata) # Now try with adding metadata for an image images_metadata = { updated_files[0]: { "Modality":"cookie", "Type": "chocolate-chip", "Width": 350, "Height": 350 } } # And again do the call client.upload_dataset(images=updated_files, collection=collection, uid="cookie-47", images_metadata=images_metadata)
mit
Python
951b6b9cc14e323dc97aa6e67dee17ef110e673f
check for exclusive try/else and if/else
mitar/pychecker,mitar/pychecker
pychecker2/utest/scopes.py
pychecker2/utest/scopes.py
from pychecker2.TestSupport import WarningTester from pychecker2 import ScopeChecks class RedefinedTestCase(WarningTester): def testScopes(self): w = ScopeChecks.RedefineCheck.redefinedScope self.warning('def f(): pass\n' 'def f(): pass\n', 1, w, 'f', 2) self.warning('class C:\n' ' def g(self): pass\n' ' def g(self): pass\n', 2, w, 'g', 3) self.silent('def s(): pass\n' 'def f(): pass\n') self.silent('import sys\n' 'if sys.argv:\n' ' def f(): return 1\n' 'else:\n' ' def f(): return 0\n') self.warning('import sys\n' 'if sys.argv:\n' ' def f(): return 1\n' ' def f(): return 0\n', 3, w, 'f', 4) self.warning('try:\n' ' def f(): return 1\n' 'except Exception:\n' ' pass\n' 'else:\n' ' def f(): return 0\n', 2, w, 'f', 6)
from pychecker2.TestSupport import WarningTester from pychecker2 import ScopeChecks class RedefinedTestCase(WarningTester): def testScopes(self): self.warning('def f(): pass\n' 'def f(): pass\n', 1, ScopeChecks.RedefineCheck.redefinedScope, 'f', 2) self.warning('class C:\n' ' def g(self): pass\n' ' def g(self): pass\n', 2, ScopeChecks.RedefineCheck.redefinedScope, 'g', 3) self.silent('def s(): pass\n' 'def f(): pass\n')
bsd-3-clause
Python
a434050e0f1c9f3e162898a3687cd7de8b77980c
Update load.py
simphony/simphony-mayavi
simphony_mayavi/load.py
simphony_mayavi/load.py
from mayavi.core.api import registry from simphony_mayavi.adapt2cuds import adapt2cuds def load(filename, name=None, kind=None, rename_arrays=None): """ Load the file data into a CUDS container. """ data_set = _read(filename) return adapt2cuds( data_set, name, kind, rename_arrays) def _read(filename): """ Find a suitable reader and read in the tvtk.Dataset. """ metasource = registry.get_file_reader(filename) if metasource is None: message = 'No suitable reader found for file: {}' raise RuntimeError(message.format(filename)) if metasource.factory is None: source = metasource.get_callable()() source.initialize(filename) source.update() reader = source.reader else: message = 'Mayavi reader that requires a scene is not supported : {}' raise NotImplementedError(message.format(filename)) if len(source.outputs) != 1: message = 'Only one output is expected from the reader' raise RuntimeError(message) return reader.output
from mayavi.core.api import registry from simphony_mayavi.adapt2cuds import adapt2cuds def load(filename, name=None, kind=None, rename_arrays=None): """ Load the file data into a CUDS container. """ data_set = _read(filename) return adapt2cuds( data_set, name, kind, rename_arrays) def _read(filename): """ Find a suitable reader and read in the tvtk.Dataset. """ metasource = registry.get_file_reader(filename) if metasource is None: message = 'No suitable reader found for file: {}' raise RuntimeError(message.format(filename)) if metasource.factory is None: source = metasource.get_callable()() source.initialize(filename) source.update() reader = source.reader else: message = 'Mayavi reader that requires a scene is not supported : {}' raise NotImplementedError(message.format(filename)) if len(source.outputs) != 1: message = 'Only one output is expected from the reader' raise RuntimeError(message) return reader.output
bsd-2-clause
Python
2bf0c9e0d8bbce50f06ca08c79f97ecf5b76e21b
Fix logging
thombashi/SimpleSQLite,thombashi/SimpleSQLite
simplesqlite/_logger.py
simplesqlite/_logger.py
# encoding: utf-8 """ .. codeauthor:: Tsuyoshi Hombashi <[email protected]> """ from __future__ import absolute_import, unicode_literals import logbook import sqliteschema import tabledata logger = logbook.Logger("SimpleSQLie") logger.disable() def set_logger(is_enable): if is_enable != logger.disabled: return if is_enable: logger.enable() else: logger.disable() tabledata.set_logger(is_enable) sqliteschema.set_logger(is_enable) try: import pytablereader pytablereader.set_logger(is_enable) except ImportError: pass def set_log_level(log_level): """ Set logging level of this module. Using `logbook <http://logbook.readthedocs.io/en/stable/>`__ module for logging. :param int log_level: One of the log level of `logbook <http://logbook.readthedocs.io/en/stable/api/base.html>`__. Disabled logging if ``log_level`` is ``logbook.NOTSET``. :raises LookupError: If ``log_level`` is an invalid value. """ # validate log level logbook.get_level_name(log_level) if log_level == logger.level: return if log_level == logbook.NOTSET: set_logger(is_enable=False) else: set_logger(is_enable=True) logger.level = log_level tabledata.set_log_level(log_level) sqliteschema.set_log_level(log_level) try: import pytablereader pytablereader.set_log_level(log_level) except ImportError: pass
# encoding: utf-8 """ .. codeauthor:: Tsuyoshi Hombashi <[email protected]> """ from __future__ import absolute_import, unicode_literals import logbook import tabledata logger = logbook.Logger("SimpleSQLie") logger.disable() def set_logger(is_enable): if is_enable != logger.disabled: return if is_enable: logger.enable() else: logger.disable() tabledata.set_logger(is_enable) try: import pytablereader pytablereader.set_logger(is_enable) except ImportError: pass def set_log_level(log_level): """ Set logging level of this module. Using `logbook <http://logbook.readthedocs.io/en/stable/>`__ module for logging. :param int log_level: One of the log level of `logbook <http://logbook.readthedocs.io/en/stable/api/base.html>`__. Disabled logging if ``log_level`` is ``logbook.NOTSET``. :raises LookupError: If ``log_level`` is an invalid value. """ # validate log level logbook.get_level_name(log_level) if log_level == logger.level: return if log_level == logbook.NOTSET: set_logger(is_enable=False) else: set_logger(is_enable=True) logger.level = log_level tabledata.set_log_level(log_level) try: import pytablereader pytablereader.set_log_level(log_level) except ImportError: pass
mit
Python
930508e5ec00d9f174409097ba54e70c7c6b2b3c
Fix #421: RPN_DEFNS needs to be passed to Pelegant via env
radiasoft/sirepo,radiasoft/sirepo,radiasoft/sirepo,mrakitin/sirepo,mrakitin/sirepo,mkeilman/sirepo,radiasoft/sirepo,mkeilman/sirepo,mkeilman/sirepo,mrakitin/sirepo,mrakitin/sirepo,radiasoft/sirepo,mkeilman/sirepo
sirepo/pkcli/elegant.py
sirepo/pkcli/elegant.py
# -*- coding: utf-8 -*- """Wrapper to run elegant from the command line. :copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function from pykern import pkio from pykern import pkresource from pykern import pksubprocess from pykern.pkdebug import pkdp, pkdc from sirepo import mpi from sirepo import simulation_db from sirepo.template import template_common from sirepo.template.elegant import extract_report_data, ELEGANT_LOG_FILE import copy import os import re import subprocess _ELEGANT_STDERR_FILE = 'elegant.stderr' def run(cfg_dir): """Run elegant in ``cfg_dir`` The files in ``cfg_dir`` must be configured properly. Args: cfg_dir (str): directory to run elegant in """ with pkio.save_chdir(cfg_dir): _run_elegant(bunch_report=True) _extract_bunch_report() def run_background(cfg_dir): """Run elegant as a background task Args: cfg_dir (str): directory to run elegant in """ with pkio.save_chdir(cfg_dir): _run_elegant(with_mpi=True); simulation_db.write_result({}) def _run_elegant(bunch_report=False, with_mpi=False): exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals()) if bunch_report and re.search('\&sdds_beam\s', elegant_file): return pkio.write_text('elegant.lte', lattice_file) ele = 'elegant.ele' pkio.write_text(ele, elegant_file) # TODO(robnagler) Need to handle this specially, b/c different binary env = copy.deepcopy(os.environ) env['RPN_DEFNS'] = pkresource.filename('defns.rpn') if with_mpi and mpi.cfg.cores > 1: return mpi.run_program(['Pelegant', ele], output=ELEGANT_LOG_FILE, env=env) pksubprocess.check_call_with_signals( ['elegant', ele], output=ELEGANT_LOG_FILE, env=env, msg=pkdp, ) def _extract_bunch_report(): data = simulation_db.read_json(template_common.INPUT_BASE_NAME) if data['models']['bunchSource']['inputSource'] == 'sdds_beam': file = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile']) else: file = 'elegant.bun' info = extract_report_data(file, data['models'][data['report']], data['models']['bunch']['p_central_mev'], 0) simulation_db.write_result(info)
# -*- coding: utf-8 -*- """Wrapper to run elegant from the command line. :copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function from pykern import pkio from pykern import pkresource from pykern import pksubprocess from pykern.pkdebug import pkdp, pkdc from sirepo import mpi from sirepo import simulation_db from sirepo.template import template_common from sirepo.template.elegant import extract_report_data, ELEGANT_LOG_FILE import copy import os import re import subprocess _ELEGANT_STDERR_FILE = 'elegant.stderr' def run(cfg_dir): """Run elegant in ``cfg_dir`` The files in ``cfg_dir`` must be configured properly. Args: cfg_dir (str): directory to run elegant in """ with pkio.save_chdir(cfg_dir): _run_elegant(bunch_report=True) _extract_bunch_report() def run_background(cfg_dir): """Run elegant as a background task Args: cfg_dir (str): directory to run elegant in """ with pkio.save_chdir(cfg_dir): _run_elegant(with_mpi=True); simulation_db.write_result({}) def _run_elegant(bunch_report=False, with_mpi=False): exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals()) if bunch_report and re.search('\&sdds_beam\s', elegant_file): return pkio.write_text('elegant.lte', lattice_file) ele = 'elegant.ele' pkio.write_text(ele, elegant_file) # TODO(robnagler) Need to handle this specially, b/c different binary if with_mpi and mpi.cfg.cores > 1: return mpi.run_program(['Pelegant', ele], output=ELEGANT_LOG_FILE) env = copy.deepcopy(os.environ) env['RPN_DEFNS'] = pkresource.filename('defns.rpn') pksubprocess.check_call_with_signals( ['elegant', ele], output=ELEGANT_LOG_FILE, env=env, msg=pkdp, ) def _extract_bunch_report(): data = simulation_db.read_json(template_common.INPUT_BASE_NAME) if data['models']['bunchSource']['inputSource'] == 'sdds_beam': file = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile']) else: file = 'elegant.bun' info = extract_report_data(file, data['models'][data['report']], data['models']['bunch']['p_central_mev'], 0) simulation_db.write_result(info)
apache-2.0
Python
5c8a6072309989ac97eefc2a6f63a6082a2c5ff0
Update matching_specific_string.py
JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking
hacker_rank/contests/regular_expresso/matching_specific_string.py
hacker_rank/contests/regular_expresso/matching_specific_string.py
Regex_Pattern = r'hackerrank' # Do not delete 'r'.
mit
Python
05b15f2db049e8b722f17867f5163c0b6e3a3108
Allow POST to /git-update url.
m-allanson/pelican-themes,m-allanson/pelican-themes
pthemes.py
pthemes.py
import os
import logging

from flask import Flask, render_template, redirect, url_for, flash
from pq import PQ

from api import APIGrabber
from db import PonyDB

logging.basicConfig()

# Config
# ---------------

# App config
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS', None))

db = PonyDB(app)

pq = PQ(db.get_connection())  # Postgres work queue
if db.table_exists('queue') is False:
    pq.create()
queue = pq['themes']


# Routes
# ---------------

@app.route('/')
def show_entries():
    """
    List out all the themes.
    """
    image_themes = db.get_image_themes()
    no_image_themes = db.get_no_image_themes()
    sha = db.get_sha()

    counts = {}
    counts['image_themes'] = len(image_themes)
    counts['no_image_themes'] = len(no_image_themes)
    counts['total'] = counts['image_themes'] + counts['no_image_themes']

    for t in image_themes:
        if t['image_urls'] is not None:
            t['image_urls'] = t['image_urls'].split(',')

    return render_template('list.html',
                           image_themes=image_themes,
                           no_image_themes=no_image_themes,
                           counts=counts,
                           sha=sha)


@app.route('/git-update', methods=['GET', 'POST'])
def refresh_themes():
    """
    Adds a job to the job queue. The job is to refresh the theme list.
    As all jobs are identical, the job will only be added if there are
    no existing jobs.
    """
    if len(queue) < 1:
        queue.put('Refresh themes')
        flash('Added theme refresh job to queue.')
    else:
        flash('A theme refresh job has already been scheduled.')

    return redirect(url_for('show_entries'))


# App decorators
# ---------------

# @app.cli.command('initdb')
# def initdb_command():
#     """Creates the database tables."""
#     db.init_db()

# @app.cli.command('populatedb')
# def populatedb_command():
#     db.populate_db()


@app.cli.command('worker')
def queue_worker():
    """
    Process queue tasks and then exit
    """
    for task in queue:
        if task is None:
            break
        a = APIGrabber(app.config['GITHUB_API_KEY'])
        sha, data = a.process()
        db.populate_db(sha, data)


if __name__ == "__main__":
    app.run()
import os
import logging

from flask import Flask, render_template, redirect, url_for, flash
from pq import PQ

from api import APIGrabber
from db import PonyDB

logging.basicConfig()

# Config
# ---------------

# App config
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS', None))

db = PonyDB(app)

pq = PQ(db.get_connection())  # Postgres work queue
if db.table_exists('queue') is False:
    pq.create()
queue = pq['themes']


# Routes
# ---------------

@app.route('/')
def show_entries():
    """
    List out all the themes.
    """
    image_themes = db.get_image_themes()
    no_image_themes = db.get_no_image_themes()
    sha = db.get_sha()

    counts = {}
    counts['image_themes'] = len(image_themes)
    counts['no_image_themes'] = len(no_image_themes)
    counts['total'] = counts['image_themes'] + counts['no_image_themes']

    for t in image_themes:
        if t['image_urls'] is not None:
            t['image_urls'] = t['image_urls'].split(',')

    return render_template('list.html',
                           image_themes=image_themes,
                           no_image_themes=no_image_themes,
                           counts=counts,
                           sha=sha)


@app.route('/git-update', methods=['GET'])
def refresh_themes():
    """
    Adds a job to the job queue. The job is to refresh the theme list.
    As all jobs are identical, the job will only be added if there are
    no existing jobs.
    """
    if len(queue) < 1:
        queue.put('Refresh themes')
        flash('Added theme refresh job to queue.')
    else:
        flash('A theme refresh job has already been scheduled.')

    return redirect(url_for('show_entries'))


# App decorators
# ---------------

# @app.cli.command('initdb')
# def initdb_command():
#     """Creates the database tables."""
#     db.init_db()

# @app.cli.command('populatedb')
# def populatedb_command():
#     db.populate_db()


@app.cli.command('worker')
def queue_worker():
    """
    Process queue tasks and then exit
    """
    for task in queue:
        if task is None:
            break
        a = APIGrabber(app.config['GITHUB_API_KEY'])
        sha, data = a.process()
        db.populate_db(sha, data)


if __name__ == "__main__":
    app.run()
mit
Python
8f4f902042b848a6a212ae966aaf6435ae8d5c77
set background as a widget
angelonuffer/Sheldon-Chess,angelonuffer/Sheldon-Chess,angelonuffer/Sheldon-Chess
sheldonchess/interface/web/sheldonchess.py
sheldonchess/interface/web/sheldonchess.py
from rajesh import Application, run, expr
from rajesh.element import Img, Div

from screens import MainMenu, NormalGameLobby


class Player(object):

    def __init__(self, app):
        self.app = app
        self.name = ""


class SheldonChess(Application):

    def begin(self):
        self.player = Player(self)
        self.title = "Sheldon Chess"
        self.background = "images/sheldonchess_background.png"
        info_box = Div(id="info_box")
        self.put(info_box, ("50%", 0))
        main_menu = MainMenu(self)
        self.put(main_menu, ("50%", "50%"))

    def connectionLost(self, reason):
        for player in NormalGameLobby.players:
            if player == self.player:
                NormalGameLobby.players.remove(player)
                NormalGameLobby.update_players()


if __name__ == "__main__":
    run()
from rajesh import Application, run, expr
from rajesh.element import Img, Div

from screens import MainMenu, NormalGameLobby


class Player(object):

    def __init__(self, app):
        self.app = app
        self.name = ""


class SheldonChess(Application):

    def begin(self):
        self.player = Player(self)
        self.title = "Sheldon Chess"
        background = Img(id="background", src="images/sheldonchess_background.png", width="100%", height="100%")
        self.put(background, (0, 0))
        info_box = Div(id="info_box")
        self.put(info_box, ("50%", 0))
        main_menu = MainMenu(self)
        self.put(main_menu, ("50%", "50%"))

    def connectionLost(self, reason):
        for player in NormalGameLobby.players:
            if player == self.player:
                NormalGameLobby.players.remove(player)
                NormalGameLobby.update_players()


if __name__ == "__main__":
    run()
mit
Python
fac2c5752c23d2fd415caafd2654f696c4842806
Bump version.
bboe/pyramid_addons,bboe/pyramid_addons
pyramid_addons/__init__.py
pyramid_addons/__init__.py
__version__ = '0.21'
__version__ = '0.20'
bsd-2-clause
Python
8e4a5ef25c87879fb01aa79f88c6c6a833820f8b
bump version
dpressel/baseline,dpressel/baseline,dpressel/baseline,dpressel/baseline
python/baseline/version.py
python/baseline/version.py
__version__ = "1.1.3"
__version__ = "1.1.2"
apache-2.0
Python
86768d83067745def16761db24ce496e3125399e
test script works
oelarnes/mtgreatest,oelarnes/mtgreatest
mtgreatest/process_results/test_script.py
mtgreatest/process_results/test_script.py
#!/usr/bin/env python

import scrape_results
from mtgreatest.rdb import Cursor

cursor = Cursor()
event_info = cursor.execute('select event_id, event_link from event_table where results_loaded = 1')
event_info = [dict(zip(('event_id','event_link'), item)) for item in event_info]  #this should be a method (or default return structure) in rdb

failed = []

for row in event_info:
    soup = scrape_results.event_soup(row['event_link'])
    print 'scraping standings for event {}'.format(row['event_id'])
    try:
        scrape_results.scrape_standings(soup, row['event_id'])
    except:
        failed.append(row['event_id'])
#!/usr/bin/env python

import scrape_results
from mtgreatest.rdb import Cursor

cursor = Cursor()
event_info = cursor.execute('select event_id, event_link from event_table where results_loaded = 1')
event_info = [dict(zip(('event_id','event_link'), item)) for item in event_info]  #this should be a method (or default return structure) in rdb

soups = [scrape_results.event_soup(row['event_link']) for row in event_info]

failed = []

for i in range(len(soups)):
    try:
        scrape_results.scrape_standings(soups[i], event_info[i]['event_id'])
    except:
        failed.append(event_info[i]['event_id'])
mit
Python
e164a3ccb7625d4f36c83628aa6f7f030f38d6cf
remove normalized test
dmoliveira/networkx,harlowja/networkx,bzero/networkx,Sixshaman/networkx,michaelpacer/networkx,dhimmel/networkx,beni55/networkx,debsankha/networkx,ltiao/networkx,ghdk/networkx,jakevdp/networkx,nathania/networkx,kernc/networkx,RMKD/networkx,aureooms/networkx,ionanrozenfeld/networkx,RMKD/networkx,debsankha/networkx,OrkoHunter/networkx,yashu-seth/networkx,chrisnatali/networkx,jakevdp/networkx,bzero/networkx,aureooms/networkx,jni/networkx,jcurbelo/networkx,wasade/networkx,nathania/networkx,NvanAdrichem/networkx,farhaanbukhsh/networkx,farhaanbukhsh/networkx,blublud/networkx,kernc/networkx,harlowja/networkx,aureooms/networkx,ionanrozenfeld/networkx,RMKD/networkx,dhimmel/networkx,tmilicic/networkx,blublud/networkx,kernc/networkx,JamesClough/networkx,jtorrents/networkx,sharifulgeo/networkx,kai5263499/networkx,andnovar/networkx,ghdk/networkx,nathania/networkx,jfinkels/networkx,ghdk/networkx,bzero/networkx,jni/networkx,goulu/networkx,jakevdp/networkx,sharifulgeo/networkx,kai5263499/networkx,chrisnatali/networkx,dhimmel/networkx,harlowja/networkx,dmoliveira/networkx,jni/networkx,cmtm/networkx,debsankha/networkx,sharifulgeo/networkx,farhaanbukhsh/networkx,chrisnatali/networkx,SanketDG/networkx,dmoliveira/networkx,kai5263499/networkx,blublud/networkx,ionanrozenfeld/networkx,jtorrents/networkx
networkx/algorithms/tests/test_smetric.py
networkx/algorithms/tests/test_smetric.py
from nose.tools import assert_equal

import networkx as nx


def test_smetric():
    g = nx.Graph()
    g.add_edge(1,2)
    g.add_edge(2,3)
    g.add_edge(2,4)
    g.add_edge(1,4)
    sm = nx.s_metric(g,normalized=False)
    assert_equal(sm, 19.0)
#    smNorm = nx.s_metric(g,normalized=True)
#    assert_equal(smNorm, 0.95)
from nose.tools import assert_equal

import networkx as nx


def test_smetric():
    g = nx.Graph()
    g.add_edge(1,2)
    g.add_edge(2,3)
    g.add_edge(2,4)
    g.add_edge(1,4)
    sm = nx.s_metric(g,normalized=False)
    assert_equal(sm, 19.0)
    smNorm = nx.s_metric(g,normalized=True)
#    assert_equal(smNorm, 0.95)
bsd-3-clause
Python
3de294f3492f7a3f796c162113c8b4298347e900
expand on tests
kaczmarj/neurodocker,kaczmarj/neurodocker
neurodocker/interfaces/tests/test_ants.py
neurodocker/interfaces/tests/test_ants.py
"""Tests for neurodocker.interfaces.ANTs""" # Author: Jakub Kaczmarzyk <[email protected]> from __future__ import absolute_import, division, print_function import pytest from neurodocker.interfaces import ANTs from neurodocker.interfaces.tests import utils class TestANTs(object): """Tests for ANTs class.""" def test_build_image_ants_210_binaries_centos7(self): """Install ANTs 2.1.0 binaries on CentOS 7.""" specs = {'base': 'centos:7', 'pkg_manager': 'yum', 'check_urls': True, 'ants': {'version': '2.1.0', 'use_binaries': True}} container = utils.get_container_from_specs(specs) output = container.exec_run('Atropos') assert "error" not in output, "error running Atropos" utils.test_cleanup(container) def test_invalid_binaries(self): with pytest.raises(ValueError): ANTs(version='fakeversion', pkg_manager='apt', check_urls=False) def test_install_with_custom_url(self): url = 'http://fakesite' ants = ANTs(version='2.2.0', pkg_manager='apt', use_binaries=url, check_urls=False) assert url in ants.cmd with pytest.raises(ValueError): ants = ANTs(version='2.2.0', pkg_manager='apt', use_binaries='fakesite', check_urls=False) def test_build_from_source_github(self): # TODO: expand on tests for building ANTs from source. It probably # will not be possible to build ANTs in Travic because of the 50 min # time limit. It takes about 45 minutes to compile ANTs. ants = ANTs(version='latest', pkg_manager='apt', use_binaries=False) assert "git checkout" not in ants.cmd ants = ANTs(version='2.2.0', pkg_manager='yum', use_binaries=False) assert ants.cmd ants = ANTs(version='arbitrary', pkg_manager='apt', use_binaries=False, git_hash='12345') assert 'git checkout 12345' in ants.cmd
"""Tests for neurodocker.interfaces.ANTs""" # Author: Jakub Kaczmarzyk <[email protected]> from __future__ import absolute_import, division, print_function from neurodocker.interfaces import ANTs from neurodocker.interfaces.tests import utils class TestANTs(object): """Tests for ANTs class.""" def test_build_image_ants_210_binaries_centos7(self): """Install ANTs 2.1.0 binaries on CentOS 7.""" specs = {'base': 'centos:7', 'pkg_manager': 'yum', 'check_urls': False, 'ants': {'version': '2.1.0', 'use_binaries': True}} container = utils.get_container_from_specs(specs) output = container.exec_run('Atropos') assert "error" not in output, "error running Atropos" utils.test_cleanup(container) def test_build_from_source_github(self): # TODO: expand on tests for building ANTs from source. It probably # will not be possible to build ANTs in Travic because of the 50 min # time limit. It takes about 45 minutes to compile ANTs. ants = ANTs(version='latest', pkg_manager='apt', use_binaries=False) assert ants.cmd ants = ANTs(version='latest', pkg_manager='yum', use_binaries=False) assert ants.cmd
apache-2.0
Python
d1490510cd5f66a5da699ffea39b6a8b37c88f01
add test for composite potential
adrn/gary,adrn/gary,adrn/gala,adrn/gala,adrn/gary,adrn/gala
gary/potential/tests/test_io.py
gary/potential/tests/test_io.py
# coding: utf-8

""" test reading/writing potentials to files """

from __future__ import division, print_function

__author__ = "adrn <[email protected]>"

# Standard library
import os

# Third-party
import numpy as np

# Project
from ..io import read, write
from ..builtin import IsochronePotential
from ..custom import PW14Potential
from ...units import galactic

# TODO: config item to specify path to test data?
test_data_path = os.path.abspath(os.path.join(os.path.split(__file__)[0],
                                 "../../../test-data/"))

def test_read():
    f1 = os.path.join(test_data_path, 'potential', 'isochrone.yml')
    potential = read(f1)
    assert np.allclose(potential.parameters['m'], 1E11)
    assert np.allclose(potential.parameters['b'], 0.76)

    f2 = os.path.join(test_data_path, 'potential', 'pw14.yml')
    potential = read(f2)

    f3 = os.path.join(test_data_path, 'potential', 'pw14_2.yml')
    potential = read(f3)

def test_write():
    tmp_filename = "/tmp/potential.yml"

    # try a simple potential
    potential = IsochronePotential(m=1E11, b=0.76, units=galactic)

    with open(tmp_filename,'w') as f:
        write(potential, f)

    write(potential, tmp_filename)

    # more complex
    potential = PW14Potential()

    with open(tmp_filename,'w') as f:
        write(potential, f)

    write(potential, tmp_filename)
# coding: utf-8

""" test reading/writing potentials to files """

from __future__ import division, print_function

__author__ = "adrn <[email protected]>"

# Standard library
import os

# Third-party
import numpy as np

# Project
from ..io import read, write
from ..builtin import IsochronePotential
from ...units import galactic

# TODO: config item to specify path to test data?
test_data_path = os.path.abspath(os.path.join(os.path.split(__file__)[0],
                                 "../../../test-data/"))

def test_read():
    f1 = os.path.join(test_data_path, 'potential', 'isochrone.yml')
    potential = read(f1)
    assert np.allclose(potential.parameters['m'], 1E11)
    assert np.allclose(potential.parameters['b'], 0.76)

    f2 = os.path.join(test_data_path, 'potential', 'pw14.yml')
    potential = read(f2)

def test_write():
    tmp_filename = "/tmp/potential.yml"

    # try a simple potential
    potential = IsochronePotential(m=1E11, b=0.76, units=galactic)

    with open(tmp_filename,'w') as f:
        write(potential, f)

    write(potential, tmp_filename)
mit
Python
db69d08b61f83703fc40ef7273cba1b2e0b825a3
stop checking if an entry already exists when polling
Nurdok/nextfeed,Nurdok/nextfeed,Nurdok/nextfeed,Nurdok/nextfeed,Nurdok/nextfeed
feeds/tasks.py
feeds/tasks.py
import time

import feedparser
from celery import task

from feeds.models import Feed, Entry
from django.core.exceptions import ObjectDoesNotExist
from profiles.models import UserProfile, UserEntryDetail


def poll_feed(feed):
    parser = feedparser.parse(feed.link)

    # Add entries from feed
    entries = parser.entries
    for entry in entries:
        published = time.strftime('%Y-%m-%d %H:%M', entry.published_parsed)
        entry_obj, _ = Entry.objects.get_or_create(feed=feed,
                                                   title=entry.title,
                                                   link=entry.link,
                                                   published=published)

        subscribers = UserProfile.objects.filter(feeds=feed)
        for profile in subscribers:
            if not UserEntryDetail.objects.filter(entry=entry_obj,
                                                  profile=profile)\
                                          .exists():
                UserEntryDetail(entry=entry_obj,
                                profile=profile,
                                read=False).save()


@task
def poll_all_feeds():
    feeds = Feed.objects.all()
    for feed in feeds:
        poll_feed(feed)
import time

import feedparser
from celery import task

from feeds.models import Feed, Entry
from django.core.exceptions import ObjectDoesNotExist
from profiles.models import UserProfile, UserEntryDetail


def poll_feed(feed):
    parser = feedparser.parse(feed.link)

    # Add entries from feed
    entries = parser.entries
    for entry in entries:
        try:
            Entry.objects.get(link=entry.link)
        except ObjectDoesNotExist:
            pass
        else:
            continue

        published = time.strftime('%Y-%m-%d %H:%M', entry.published_parsed)
        entry_obj, _ = Entry.objects.get_or_create(feed=feed,
                                                   title=entry.title,
                                                   link=entry.link,
                                                   published=published)

        subscribers = UserProfile.objects.filter(feeds=feed)
        for profile in subscribers:
            if not UserEntryDetail.objects.filter(entry=entry_obj,
                                                  profile=profile)\
                                          .exists():
                UserEntryDetail(entry=entry_obj,
                                profile=profile,
                                read=False).save()


@task
def poll_all_feeds():
    feeds = Feed.objects.all()
    for feed in feeds:
        poll_feed(feed)
mit
Python
81806fe89b9c4a364d373020bd56ee1a396a5858
Add automatic escaping of the settings docstring for Windows' sake in py3k
jamesbeebop/evennia,jamesbeebop/evennia,jamesbeebop/evennia
evennia/game_template/server/conf/settings.py
evennia/game_template/server/conf/settings.py
r""" Evennia settings file. The available options are found in the default settings file found here: {settings_default} Remember: Don't copy more from the default file than you actually intend to change; this will make sure that you don't overload upstream updates unnecessarily. When changing a setting requiring a file system path (like path/to/actual/file.py), use GAME_DIR and EVENNIA_DIR to reference your game folder and the Evennia library folders respectively. Python paths (path.to.module) should be given relative to the game's root folder (typeclasses.foo) whereas paths within the Evennia library needs to be given explicitly (evennia.foo). If you want to share your game dir, including its settings, you can put secret game- or server-specific settings in secret_settings.py. """ # Use the defaults from Evennia unless explicitly overridden from evennia.settings_default import * ###################################################################### # Evennia base server config ###################################################################### # This is the name of your game. Make it catchy! SERVERNAME = {servername} # Server ports. If enabled and marked as "visible", the port # should be visible to the outside world on a production server. # Note that there are many more options available beyond these. # Telnet ports. Visible. TELNET_ENABLED = True TELNET_PORTS = [4000] # (proxy, internal). Only proxy should be visible. WEBSERVER_ENABLED = True WEBSERVER_PORTS = [(4001, 4002)] # Telnet+SSL ports, for supporting clients. Visible. SSL_ENABLED = False SSL_PORTS = [4003] # SSH client ports. Requires crypto lib. Visible. SSH_ENABLED = False SSH_PORTS = [4004] # Websocket-client port. Visible. WEBSOCKET_CLIENT_ENABLED = True WEBSOCKET_CLIENT_PORT = 4005 # Internal Server-Portal port. Not visible. AMP_PORT = 4006 ###################################################################### # Settings given in secret_settings.py override those in this file. ###################################################################### try: from server.conf.secret_settings import * except ImportError: print("secret_settings.py file not found or failed to import.")
""" Evennia settings file. The available options are found in the default settings file found here: {settings_default} Remember: Don't copy more from the default file than you actually intend to change; this will make sure that you don't overload upstream updates unnecessarily. When changing a setting requiring a file system path (like path/to/actual/file.py), use GAME_DIR and EVENNIA_DIR to reference your game folder and the Evennia library folders respectively. Python paths (path.to.module) should be given relative to the game's root folder (typeclasses.foo) whereas paths within the Evennia library needs to be given explicitly (evennia.foo). If you want to share your game dir, including its settings, you can put secret game- or server-specific settings in secret_settings.py. """ # Use the defaults from Evennia unless explicitly overridden from evennia.settings_default import * ###################################################################### # Evennia base server config ###################################################################### # This is the name of your game. Make it catchy! SERVERNAME = {servername} # Server ports. If enabled and marked as "visible", the port # should be visible to the outside world on a production server. # Note that there are many more options available beyond these. # Telnet ports. Visible. TELNET_ENABLED = True TELNET_PORTS = [4000] # (proxy, internal). Only proxy should be visible. WEBSERVER_ENABLED = True WEBSERVER_PORTS = [(4001, 4002)] # Telnet+SSL ports, for supporting clients. Visible. SSL_ENABLED = False SSL_PORTS = [4003] # SSH client ports. Requires crypto lib. Visible. SSH_ENABLED = False SSH_PORTS = [4004] # Websocket-client port. Visible. WEBSOCKET_CLIENT_ENABLED = True WEBSOCKET_CLIENT_PORT = 4005 # Internal Server-Portal port. Not visible. AMP_PORT = 4006 ###################################################################### # Settings given in secret_settings.py override those in this file. ###################################################################### try: from server.conf.secret_settings import * except ImportError: print("secret_settings.py file not found or failed to import.")
bsd-3-clause
Python
a85f6f86522dbb984818defc0d5c3cee049f1341
add simple async post support
xsank/Pyeventbus,xsank/Pyeventbus
eventbus/eventbus.py
eventbus/eventbus.py
__author__ = 'Xsank'
import inspect
from multiprocessing.pool import ThreadPool

from listener import Listener
from exception import RegisterError
from exception import UnregisterError


class EventBus(object):
    def __init__(self,pool_size=4):
        self.listeners=dict()
        self.pool=ThreadPool(pool_size)

    def register(self,listener):
        if not isinstance(listener,Listener):
            raise RegisterError
        self.listeners[listener.__class__.__name__]=listener

    def unregister(self,listener):
        try:
            self.listeners.pop(listener.__class__.__name__)
        except Exception:
            raise UnregisterError

    def post(self,event):
        for listener in self.listeners.values():
            for name,func in inspect.getmembers(listener,predicate=inspect.ismethod):
                func(event)

    def async_post(self,event):
        self.pool.map(self.post,(event,))

    def destroy(self):
        self.listeners.clear()
        self.pool.close()
__author__ = 'Xsank'
import inspect

from listener import Listener
from exception import RegisterError
from exception import UnregisterError


class EventBus(object):
    def __init__(self):
        self.listeners=dict()

    def register(self,listener):
        if not isinstance(listener,Listener):
            raise RegisterError
        self.listeners[listener.__class__.__name__]=listener

    def unregister(self,listener):
        try:
            self.listeners.pop(listener.__class__.__name__)
        except Exception:
            raise UnregisterError

    def post(self,event):
        for listener in self.listeners.values():
            for name,func in inspect.getmembers(listener,predicate=inspect.ismethod):
                func(event)

    def destroy(self):
        self.listeners.clear()
mit
Python
30e00089247b314e82bc7792ac6f9641cd632bbd
Bump to dev.
tempbottle/eventlet,tempbottle/eventlet,collinstocks/eventlet,collinstocks/eventlet,lindenlab/eventlet,lindenlab/eventlet,lindenlab/eventlet
eventlet/__init__.py
eventlet/__init__.py
version_info = (0, 9, 16, "dev") __version__ = ".".join(map(str, version_info)) try: from eventlet import greenthread from eventlet import greenpool from eventlet import queue from eventlet import timeout from eventlet import patcher from eventlet import convenience import greenlet sleep = greenthread.sleep spawn = greenthread.spawn spawn_n = greenthread.spawn_n spawn_after = greenthread.spawn_after kill = greenthread.kill Timeout = timeout.Timeout with_timeout = timeout.with_timeout GreenPool = greenpool.GreenPool GreenPile = greenpool.GreenPile Queue = queue.Queue import_patched = patcher.import_patched monkey_patch = patcher.monkey_patch connect = convenience.connect listen = convenience.listen serve = convenience.serve StopServe = convenience.StopServe wrap_ssl = convenience.wrap_ssl getcurrent = greenlet.greenlet.getcurrent # deprecated TimeoutError = timeout.Timeout exc_after = greenthread.exc_after call_after_global = greenthread.call_after_global except ImportError, e: # This is to make Debian packaging easier, it ignores import # errors of greenlet so that the packager can still at least # access the version. Also this makes easy_install a little quieter if 'greenlet' not in str(e): # any other exception should be printed import traceback traceback.print_exc()
version_info = (0, 9, 15)
__version__ = ".".join(map(str, version_info))

try:
    from eventlet import greenthread
    from eventlet import greenpool
    from eventlet import queue
    from eventlet import timeout
    from eventlet import patcher
    from eventlet import convenience
    import greenlet

    sleep = greenthread.sleep
    spawn = greenthread.spawn
    spawn_n = greenthread.spawn_n
    spawn_after = greenthread.spawn_after

    Timeout = timeout.Timeout
    with_timeout = timeout.with_timeout

    GreenPool = greenpool.GreenPool
    GreenPile = greenpool.GreenPile

    Queue = queue.Queue

    import_patched = patcher.import_patched
    monkey_patch = patcher.monkey_patch

    connect = convenience.connect
    listen = convenience.listen
    serve = convenience.serve
    StopServe = convenience.StopServe
    wrap_ssl = convenience.wrap_ssl

    getcurrent = greenlet.greenlet.getcurrent

    # deprecated
    TimeoutError = timeout.Timeout
    exc_after = greenthread.exc_after
    call_after_global = greenthread.call_after_global
except ImportError, e:
    # This is to make Debian packaging easier, it ignores import
    # errors of greenlet so that the packager can still at least
    # access the version.  Also this makes easy_install a little quieter
    if 'greenlet' not in str(e):
        # any other exception should be printed
        import traceback
        traceback.print_exc()
mit
Python
22f373c0e6ef3a2c7bfb128ad014af245f113ea9
Add default serial baudrate.
pollen/pyrobus
robus/io/serial_io.py
robus/io/serial_io.py
import serial as _serial

from serial.tools.list_ports import comports

from . import IOHandler


class Serial(IOHandler):
    @classmethod
    def is_host_compatible(cls, host):
        available_host = (p.device for p in comports())
        return host in available_host

    def __init__(self, host, baudrate=57600):
        self._serial = _serial.Serial(host, baudrate)

    def recv(self):
        return self._serial.readline()

    def write(self, data):
        self._serial.write(data + '\r'.encode())
import serial as _serial

from serial.tools.list_ports import comports

from . import IOHandler


class Serial(IOHandler):
    @classmethod
    def is_host_compatible(cls, host):
        available_host = (p.device for p in comports())
        return host in available_host

    def __init__(self, host, baudrate):
        self._serial = _serial.Serial(host, baudrate)

    def recv(self):
        return self._serial.readline()

    def write(self, data):
        self._serial.write(data + '\r'.encode())
mit
Python
e0024790be3a85e529c93397500e4f736c82a5ef
Fix the runner for Python 2.
calmjs/calmjs.parse
src/calmjs/parse/tests/test_testing.py
src/calmjs/parse/tests/test_testing.py
# -*- coding: utf-8 -*-
import unittest

from calmjs.parse.testing.util import build_equality_testcase
from calmjs.parse.testing.util import build_exception_testcase


def run(self):
    """
    A dummy run method.
    """


class BuilderEqualityTestCase(unittest.TestCase):

    def test_build_equality_testcase(self):
        DummyTestCase = build_equality_testcase('DummyTestCase', int, [
            ('str_to_int_pass', '1', 1),
            ('str_to_int_fail', '2', 1),
            ('str_to_int_exception', 'z', 1),
        ])
        DummyTestCase.runTest = run
        testcase = DummyTestCase()
        testcase.test_str_to_int_pass()

        with self.assertRaises(AssertionError):
            testcase.test_str_to_int_fail()

        with self.assertRaises(ValueError):
            testcase.test_str_to_int_exception()

    def test_build_equality_testcase_flag_dupe_labels(self):
        with self.assertRaises(ValueError):
            build_equality_testcase('DummyTestCase', int, [
                ('str_to_int_dupe', '1', 1),
                ('str_to_int_dupe', '2', 2),
            ])


class BuilderExceptionTestCase(unittest.TestCase):

    def test_build_exception_testcase(self):
        FailTestCase = build_exception_testcase(
            'FailTestCase', int, [
                ('str_to_int_fail1', 'hello'),
                ('str_to_int_fail2', 'goodbye'),
                ('str_to_int_fail3', '1'),
            ],
            ValueError,
        )
        FailTestCase.runTest = run
        testcase = FailTestCase()
        # ValueError should have been caught.
        testcase.test_str_to_int_fail1()
        testcase.test_str_to_int_fail2()
        # Naturally, the final test will not raise it.
        with self.assertRaises(AssertionError):
            testcase.test_str_to_int_fail3()
# -*- coding: utf-8 -*-
import unittest

from calmjs.parse.testing.util import build_equality_testcase
from calmjs.parse.testing.util import build_exception_testcase


class BuilderEqualityTestCase(unittest.TestCase):

    def test_build_equality_testcase(self):
        DummyTestCase = build_equality_testcase('DummyTestCase', int, [
            ('str_to_int_pass', '1', 1),
            ('str_to_int_fail', '2', 1),
            ('str_to_int_exception', 'z', 1),
        ])
        testcase = DummyTestCase()
        testcase.test_str_to_int_pass()

        with self.assertRaises(AssertionError):
            testcase.test_str_to_int_fail()

        with self.assertRaises(ValueError):
            testcase.test_str_to_int_exception()

    def test_build_equality_testcase_flag_dupe_labels(self):
        with self.assertRaises(ValueError):
            build_equality_testcase('DummyTestCase', int, [
                ('str_to_int_dupe', '1', 1),
                ('str_to_int_dupe', '2', 2),
            ])


class BuilderExceptionTestCase(unittest.TestCase):

    def test_build_exception_testcase(self):
        FailTestCase = build_exception_testcase(
            'FailTestCase', int, [
                ('str_to_int_fail1', 'hello'),
                ('str_to_int_fail2', 'goodbye'),
                ('str_to_int_fail3', '1'),
            ],
            ValueError,
        )
        testcase = FailTestCase()
        # ValueError should have been caught.
        testcase.test_str_to_int_fail1()
        testcase.test_str_to_int_fail2()
        # Naturally, the final test will not raise it.
        with self.assertRaises(AssertionError):
            testcase.test_str_to_int_fail3()
mit
Python
d926c984e895b68ad0cc0383926451c0d7249512
Fix use of deprecated find_module
saimn/astropy,lpsinger/astropy,mhvk/astropy,lpsinger/astropy,pllim/astropy,astropy/astropy,lpsinger/astropy,larrybradley/astropy,StuartLittlefair/astropy,astropy/astropy,pllim/astropy,saimn/astropy,saimn/astropy,StuartLittlefair/astropy,astropy/astropy,StuartLittlefair/astropy,astropy/astropy,mhvk/astropy,astropy/astropy,pllim/astropy,mhvk/astropy,mhvk/astropy,mhvk/astropy,saimn/astropy,StuartLittlefair/astropy,lpsinger/astropy,larrybradley/astropy,larrybradley/astropy,larrybradley/astropy,pllim/astropy,saimn/astropy,StuartLittlefair/astropy,lpsinger/astropy,larrybradley/astropy,pllim/astropy
astropy/tests/tests/test_imports.py
astropy/tests/tests/test_imports.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pkgutil


def test_imports():
    """
    This just imports all modules in astropy, making sure they don't have any
    dependencies that sneak through
    """

    def onerror(name):
        # We should raise any legitimate error that occurred, but not
        # any warnings which happen to be caught because of our pytest
        # settings (e.g., DeprecationWarning).
        try:
            raise
        except Warning:
            pass

    for imper, nm, ispkg in pkgutil.walk_packages(['astropy'], 'astropy.',
                                                  onerror=onerror):
        imper.find_spec(nm)


def test_toplevel_namespace():
    import astropy
    d = dir(astropy)
    assert 'os' not in d
    assert 'log' in d
    assert 'test' in d
    assert 'sys' not in d
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import pkgutil


def test_imports():
    """
    This just imports all modules in astropy, making sure they don't have any
    dependencies that sneak through
    """

    def onerror(name):
        # We should raise any legitimate error that occurred, but not
        # any warnings which happen to be caught because of our pytest
        # settings (e.g., DeprecationWarning).
        try:
            raise
        except Warning:
            pass

    for imper, nm, ispkg in pkgutil.walk_packages(['astropy'], 'astropy.',
                                                  onerror=onerror):
        imper.find_module(nm)


def test_toplevel_namespace():
    import astropy
    d = dir(astropy)
    assert 'os' not in d
    assert 'log' in d
    assert 'test' in d
    assert 'sys' not in d
bsd-3-clause
Python
8864d0f7ea909bebda68b28ad53a312c10af581e
Update ClientCredentials tutorial to use Python3
MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials,MyPureCloud/developercenter-tutorials
oauth-client-credentials/python/script.py
oauth-client-credentials/python/script.py
import base64, requests, sys

print("-----------------------------------------------")
print("- PureCloud Python Client Credentials Example -")
print("-----------------------------------------------")

client_id = "CLIENT_ID"
client_secret = "CLIENT_SECRET"

# Base64 encode the client ID and client secret
authorization = base64.b64encode(bytes(client_id + ":" + client_secret, "ISO-8859-1")).decode("ascii")

# Prepare for POST /oauth/token request
request_headers = {
    "Authorization": f"Basic {authorization}",
    "Content-Type": "application/x-www-form-urlencoded"
}
request_body = {
    "grant_type": "client_credentials"
}

# Get token
response = requests.post("https://login.mypurecloud.com/oauth/token", data=request_body, headers=request_headers)

# Check response
if response.status_code == 200:
    print("Got token")
else:
    print(f"Failure: { str(response.status_code) } - { response.reason }")
    sys.exit(response.status_code)

# Get JSON response body
response_json = response.json()

# Prepare for GET /api/v2/authorization/roles request
requestHeaders = {
    "Authorization": f"{ response_json['token_type'] } { response_json['access_token']}"
}

# Get roles
response = requests.get("https://api.mypurecloud.com/api/v2/authorization/roles", headers=requestHeaders)

# Check response
if response.status_code == 200:
    print("Got roles")
else:
    print(f"Failure: { str(response.status_code) } - { response.reason }")
    sys.exit(response.status_code)

# Print roles
print("\nRoles:")
for entity in response.json()["entities"]:
    print(f"  { entity['name'] }")

print("\nDone")
import base64, requests, sys

print '-----------------------------------------------'
print '- PureCloud Python Client Credentials Example -'
print '-----------------------------------------------'

clientId = '7de3af06-c0b3-4f9b-af45-72f4a1403797'
clientSecret = '1duphi_YtswNjN2GXOg_APY-KKTmnYXvfNj7N8GUhnM'

# Base64 encode the client ID and client secret
authorization = base64.b64encode(clientId + ':' + clientSecret)

# Prepare for POST /oauth/token request
requestHeaders = {
    'Authorization': 'Basic ' + authorization,
    'Content-Type': 'application/x-www-form-urlencoded'
}
requestBody = {
    'grant_type': 'client_credentials'
}

# Get token
response = requests.post('https://login.mypurecloud.com/oauth/token', data=requestBody, headers=requestHeaders)

# Check response
if response.status_code == 200:
    print 'Got token'
else:
    print 'Failure: ' + str(response.status_code) + ' - ' + response.reason
    sys.exit(response.status_code)

# Get JSON response body
responseJson = response.json()

# Prepare for GET /api/v2/authorization/roles request
requestHeaders = {
    'Authorization': responseJson['token_type'] + ' ' + responseJson['access_token']
}

# Get roles
response = requests.get('https://api.mypurecloud.com/api/v2/authorization/roles', headers=requestHeaders)

# Check response
if response.status_code == 200:
    print 'Got roles'
else:
    print 'Failure: ' + str(response.status_code) + ' - ' + response.reason
    sys.exit(response.status_code)

# Print roles
print '\nRoles:'
for entity in response.json()['entities']:
    print '  ' + entity['name']

print '\nDone'
mit
Python
c479c360d979d22182e787f74f5a74473fc41002
Save sales ranges only when the forms data is changed
suutari/shoop,shoopio/shoop,shawnadelic/shuup,suutari-ai/shoop,suutari-ai/shoop,shoopio/shoop,hrayr-artunyan/shuup,suutari/shoop,shoopio/shoop,shawnadelic/shuup,hrayr-artunyan/shuup,suutari/shoop,suutari-ai/shoop,hrayr-artunyan/shuup,shawnadelic/shuup
shoop/campaigns/admin_module/form_parts.py
shoop/campaigns/admin_module/form_parts.py
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals

from django import forms
from django.utils.translation import ugettext_lazy as _

from shoop.admin.form_part import FormPart, TemplatedFormDef
from shoop.campaigns.models import ContactGroupSalesRange
from shoop.core.models import Shop, ShopStatus
from shoop.core.models._contacts import PROTECTED_CONTACT_GROUP_IDENTIFIERS


class SalesRangesForm(forms.ModelForm):
    class Meta:
        model = ContactGroupSalesRange
        fields = ["min_value", "max_value"]
        labels = {
            "min_value": _("Minimum value"),
            "max_value": _("Maximum value")
        }
        help_texts = {
            "max_value": _("Leave empty for no maximum")
        }

    def __init__(self, **kwargs):
        super(SalesRangesForm, self).__init__(**kwargs)


class SalesRangesFormPart(FormPart):
    priority = 3
    name = "contact_group_sales_ranges"
    form = SalesRangesForm

    def __init__(self, request, object=None):
        super(SalesRangesFormPart, self).__init__(request, object)
        self.shops = Shop.objects.filter(status=ShopStatus.ENABLED)

    def _get_form_name(self, shop):
        return "%d-%s" % (shop.pk, self.name)

    def get_form_defs(self):
        if not self.object.pk or self.object.identifier in PROTECTED_CONTACT_GROUP_IDENTIFIERS:
            return

        for shop in self.shops:
            instance, _ = ContactGroupSalesRange.objects.get_or_create(group=self.object, shop=shop)
            yield TemplatedFormDef(
                name=self._get_form_name(shop),
                form_class=self.form,
                template_name="shoop/campaigns/admin/sales_ranges_form_part.jinja",
                required=False,
                kwargs={"instance": instance}
            )

    def form_valid(self, form):
        form_names = [self._get_form_name(shop) for shop in self.shops]
        forms = [form.forms[name] for name in form_names if name in form.forms]
        for form in forms:
            if form.changed_data:
                form.save()
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals

from django import forms
from django.utils.translation import ugettext_lazy as _

from shoop.admin.form_part import FormPart, TemplatedFormDef
from shoop.campaigns.models import ContactGroupSalesRange
from shoop.core.models import Shop, ShopStatus
from shoop.core.models._contacts import PROTECTED_CONTACT_GROUP_IDENTIFIERS


class SalesRangesForm(forms.ModelForm):
    class Meta:
        model = ContactGroupSalesRange
        fields = ["min_value", "max_value"]
        labels = {
            "min_value": _("Minimum value"),
            "max_value": _("Maximum value")
        }
        help_texts = {
            "max_value": _("Leave empty for no maximum")
        }

    def __init__(self, **kwargs):
        super(SalesRangesForm, self).__init__(**kwargs)


class SalesRangesFormPart(FormPart):
    priority = 3
    name = "contact_group_sales_ranges"
    form = SalesRangesForm

    def __init__(self, request, object=None):
        super(SalesRangesFormPart, self).__init__(request, object)
        self.shops = Shop.objects.filter(status=ShopStatus.ENABLED)

    def _get_form_name(self, shop):
        return "%d-%s" % (shop.pk, self.name)

    def get_form_defs(self):
        if not self.object.pk or self.object.identifier in PROTECTED_CONTACT_GROUP_IDENTIFIERS:
            return

        for shop in self.shops:
            instance, _ = ContactGroupSalesRange.objects.get_or_create(group=self.object, shop=shop)
            yield TemplatedFormDef(
                name=self._get_form_name(shop),
                form_class=self.form,
                template_name="shoop/campaigns/admin/sales_ranges_form_part.jinja",
                required=False,
                kwargs={"instance": instance}
            )

    def form_valid(self, form):
        for shop in self.shops:
            name = self._get_form_name(shop)
            if name in form.forms:
                form.forms[name].save()
agpl-3.0
Python
029e39edfb733a524d7ea4fc7f64fa93e81b9f53
Add SACREBLEU_DIR and smart_open to imports (#73)
mjpost/sacreBLEU,mjpost/sacreBLEU
sacrebleu/__init__.py
sacrebleu/__init__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

__version__ = '1.4.5'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU scores'

from .sacrebleu import smart_open, corpus_bleu, corpus_chrf, sentence_bleu, sentence_chrf, compute_bleu,\
    raw_corpus_bleu, BLEU, CHRF, DATASETS, TOKENIZERS, SACREBLEU_DIR

# more imports for backward compatibility
from .sacrebleu import ref_stats, bleu_signature, extract_ngrams, extract_char_ngrams, \
    get_corpus_statistics, display_metric, get_sentence_statistics, download_test_set
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

__version__ = '1.4.5'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU scores'

from .sacrebleu import corpus_bleu, corpus_chrf, sentence_bleu, sentence_chrf, compute_bleu,\
    raw_corpus_bleu, BLEU, CHRF, DATASETS, TOKENIZERS

# more imports for backward compatibility
from .sacrebleu import ref_stats, bleu_signature, extract_ngrams, extract_char_ngrams, \
    get_corpus_statistics, display_metric, get_sentence_statistics, download_test_set
apache-2.0
Python
9fdcc147a754e3b4f85decb858066610652bd713
update version. (#5439)
iulian787/spack,LLNL/spack,iulian787/spack,EmreAtes/spack,mfherbst/spack,tmerrick1/spack,TheTimmy/spack,skosukhin/spack,TheTimmy/spack,EmreAtes/spack,LLNL/spack,LLNL/spack,krafczyk/spack,TheTimmy/spack,matthiasdiener/spack,iulian787/spack,TheTimmy/spack,mfherbst/spack,LLNL/spack,skosukhin/spack,matthiasdiener/spack,skosukhin/spack,mfherbst/spack,lgarren/spack,krafczyk/spack,TheTimmy/spack,skosukhin/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,krafczyk/spack,EmreAtes/spack,lgarren/spack,EmreAtes/spack,iulian787/spack,lgarren/spack,matthiasdiener/spack,lgarren/spack,tmerrick1/spack,skosukhin/spack,iulian787/spack,lgarren/spack,EmreAtes/spack,matthiasdiener/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack,krafczyk/spack
var/spack/repos/builtin/packages/r-tibble/package.py
var/spack/repos/builtin/packages/r-tibble/package.py
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RTibble(RPackage):
    """Provides a 'tbl_df' class that offers better checking and printing
    capabilities than traditional data frames."""

    homepage = "https://github.com/tidyverse/tibble"
    url = "https://cran.rstudio.com/src/contrib/tibble_1.3.4.tar.gz"
    list_url = homepage

    version('1.3.4', '298e81546f999fb0968625698511b8d3')
    version('1.2', 'bdbc3d67aa16860741add6d6ec20ea13')
    version('1.1', '2fe9f806109d0b7fadafb1ffafea4cb8')

    depends_on('[email protected]:')
    depends_on('r-assertthat', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RTibble(RPackage):
    """Provides a 'tbl_df' class that offers better checking and printing
    capabilities than traditional data frames."""

    homepage = "https://github.com/hadley/tibble"
    url = "https://cran.r-project.org/src/contrib/tibble_1.2.tar.gz"

    version('1.2', 'bdbc3d67aa16860741add6d6ec20ea13')
    version('1.1', '2fe9f806109d0b7fadafb1ffafea4cb8')

    depends_on('[email protected]:')
    depends_on('r-assertthat', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
lgpl-2.1
Python