Dataset columns (each record below lists these fields in order, one per line):
commit         stringlengths   40-40
subject        stringlengths   4-1.73k
repos          stringlengths   5-127k
old_file       stringlengths   2-751
new_file       stringlengths   2-751
new_contents   stringlengths   1-8.98k
old_contents   stringlengths   0-6.59k
license        stringclasses   13 values
lang           stringclasses   23 values
66c33c880d1e5f20a23e01937f8c88f5b66bfc5c
Fix SQL error on non-existing column
ygol/odoo
addons/website_membership/models/membership.py
addons/website_membership/models/membership.py
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import models class MembershipLine(models.Model): _inherit = 'membership.membership_line' def get_published_companies(self, limit=None): if not self.ids: return [] limit_clause = '' if limit is None else ' LIMIT %d' % limit self.env.cr.execute(""" SELECT DISTINCT p.id FROM res_partner p INNER JOIN membership_membership_line m ON p.id = m.partner WHERE is_published AND is_company AND m.id IN %s """ + limit_clause, (tuple(self.ids),)) return [partner_id[0] for partner_id in self.env.cr.fetchall()]
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import models class MembershipLine(models.Model): _inherit = 'membership.membership_line' def get_published_companies(self, limit=None): if not self.ids: return [] limit_clause = '' if limit is None else ' LIMIT %d' % limit self.env.cr.execute(""" SELECT DISTINCT p.id FROM res_partner p INNER JOIN membership_membership_line m ON p.id = m.partner WHERE website_published AND is_company AND m.id IN %s """ + limit_clause, (tuple(self.ids),)) return [partner_id[0] for partner_id in self.env.cr.fetchall()]
agpl-3.0
Python
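The gist of the one-line diff between old_contents and new_contents above: the raw query named a res_partner column that no longer exists, so only the WHERE clause changed. Shown as plain strings for side-by-side comparison (the OLD_WHERE/NEW_WHERE names are just for illustration):

    OLD_WHERE = "WHERE website_published AND is_company AND m.id IN %s"  # column no longer exists
    NEW_WHERE = "WHERE is_published AND is_company AND m.id IN %s"       # current res_partner flag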
d5cb2a37ea77b15c5725d6ebf8e0ab79f3bea613
Fix interface in historian service interface
genome/flow-workflow
flow_workflow/historian/service_interface.py
flow_workflow/historian/service_interface.py
import logging from flow_workflow.historian.messages import UpdateMessage LOG = logging.getLogger(__name__) class WorkflowHistorianServiceInterface(object): def __init__(self, broker=None, exchange=None, routing_key=None): self.broker = broker self.exchange = exchange self.routing_key = routing_key def update(self, net_key, operation_id, name, workflow_plan_id, **kwargs): if workflow_plan_id < 0: # ignore update (don't even make message) LOG.debug("Received negative workflow_plan_id:%s, " "ignoring update (net_key=%s, operation_id=%s, name=%s," "workflow_plan_id=%s, kwargs=%s)", workflow_plan_id, net_key, operation_id, name, workflow_plan_id, kwargs) else: LOG.debug("Sending update (net_key=%s, operation_id=%s, name=%s," "workflow_plan_id=%s, kwargs=%s)", net_key, operation_id, name, workflow_plan_id, kwargs) message = UpdateMessage(net_key=net_key, operation_id=operation_id, name=name, workflow_plan_id=workflow_plan_id, **kwargs) self.broker.publish(self.exchange, self.routing_key, message)
import logging from flow_workflow.historian.messages import UpdateMessage LOG = logging.getLogger(__name__) class WorkflowHistorianServiceInterface(object): def __init__(self, broker=None, exchange=None, routing_key=None): self.broker = broker self.exchange = exchange self.routing_key = routing_key def update(self, net_key, operation_id, name, workflow_plan_id, **kwargs): if workflow_plan_id < 0: # ignore update (don't even make message) LOG.debug("Received negative workflow_plan_id:%s, " "ignoring update (net_key=%s, operation_id=%s, name=%s," "workflow_plan_id=%s, kwargs=%s)", workflow_plan_id, net_key, peration_id, name, workflow_plan_id, kwargs) else: LOG.debug("Sending update (net_key=%s, operation_id=%s, name=%s," "workflow_plan_id=%s, kwargs=%s)", net_key, peration_id, name, workflow_plan_id, kwargs) message = UpdateMessage(net_key=net_key, operation_id=operation_id, **kwargs) self.broker.publish(self.exchange, self.routing_key, message)
agpl-3.0
Python
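Unflattened, the update() fix touches two spots: the "Sending update" log call in old_contents referenced an undefined peration_id (a NameError at runtime), and UpdateMessage silently dropped two of the fields it was supposed to carry. The corrected construction from new_contents:

    message = UpdateMessage(net_key=net_key, operation_id=operation_id,
            name=name, workflow_plan_id=workflow_plan_id, **kwargs)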
26f5adea28f81ebbe830d4a207958320e0b40520
update version
hammerlab/gtfparse,hammerlab/gtftools
gtfparse/__init__.py
gtfparse/__init__.py
# Copyright (c) 2015. Mount Sinai School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .attribute_parsing import expand_attribute_strings from .create_missing_features import create_missing_features from .line_parsing import parse_gtf_lines from .required_columns import REQUIRED_COLUMNS from .parsing_error import ParsingError from .read_gtf import read_gtf_as_dataframe, read_gtf_as_dict __version__ = "0.2.3" __all__ = [ "expand_attribute_strings", "create_missing_features", "parse_gtf_lines", "REQUIRED_COLUMNS", "ParsingError", "read_gtf_as_dataframe", "read_gtf_as_dict", ]
# Copyright (c) 2015. Mount Sinai School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .attribute_parsing import expand_attribute_strings from .create_missing_features import create_missing_features from .line_parsing import parse_gtf_lines from .required_columns import REQUIRED_COLUMNS from .parsing_error import ParsingError from .read_gtf import read_gtf_as_dataframe, read_gtf_as_dict __version__ = "0.2.2" __all__ = [ "expand_attribute_strings", "create_missing_features", "parse_gtf_lines", "REQUIRED_COLUMNS", "ParsingError", "read_gtf_as_dataframe", "read_gtf_as_dict", ]
apache-2.0
Python
85dc28b44def27658e282d621749598ec80ea420
Fix typo
telefonicaid/fiware-cosmos-ambari
ambari-server/src/main/python/TeardownAgent.py
ambari-server/src/main/python/TeardownAgent.py
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import sys import logging import os import subprocess def exec_os_command(os_command): os_stat = subprocess.Popen(os_command, stdout=subprocess.PIPE) return { "exitstatus": os_stat.returncode, "log": os_stat.communicate(0) } def is_suse(): """Return true if the current OS is Suse Linux, false otherwise""" if os.path.isfile("/etc/issue"): if "suse" in open("/etc/issue").read().lower(): return True return False def teardown_agent_suse(): """ Run zypper remove""" zypper_command = ["zypper", "remove", "-y", "ambari-agent"] return exec_os_command(zypper_command)['exitstatus'] def teardown_agent(): """ Run yum remove""" rpm_command = ["yum", "-y", "remove", "ambari-agent"] return exec_os_command(rpm_command)['exitstatus'] def parse_args(argv): onlyargs = argv[1:] pass_phrase = onlyargs[0] hostname = onlyargs[1] project_version = None if len(onlyargs) > 2: project_version = onlyargs[2] if project_version is None or project_version == "null": project_version = "" if project_version != "": project_version = "-" + project_version return (pass_phrase, hostname, project_version) def main(argv=None): script_dir = os.path.realpath(os.path.dirname(argv[0])) (pass_phrase, hostname, project_version) = parse_args(argv) exec_os_command(["ambari-agent", "stop"]) exec_os_command(["ambari-agent", "unregister"]) if is_suse(): exit_code = teardown_agent_suse() else: exit_code = teardown_agent() sys.exit(exit_code) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) main(sys.argv)
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import sys import logging import os import subprocess def exec_os_command(os_command): os_stat = subprocess.Popen(os_command, stdout=subprocess.PIPE) return { "exitstatus": os.stat.returncode, "log": os_stat.communicate(0) } def is_suse(): """Return true if the current OS is Suse Linux, false otherwise""" if os.path.isfile("/etc/issue"): if "suse" in open("/etc/issue").read().lower(): return True return False def teardown_agent_suse(): """ Run zypper remove""" zypper_command = ["zypper", "remove", "-y", "ambari-agent"] return exec_os_command(zypper_command)['exitstatus'] def teardown_agent(): """ Run yum remove""" rpm_command = ["yum", "-y", "remove", "ambari-agent"] return exec_os_command(rpm_command)['exitstatus'] def parse_args(argv): onlyargs = argv[1:] pass_phrase = onlyargs[0] hostname = onlyargs[1] project_version = None if len(onlyargs) > 2: project_version = onlyargs[2] if project_version is None or project_version == "null": project_version = "" if project_version != "": project_version = "-" + project_version return (pass_phrase, hostname, project_version) def main(argv=None): script_dir = os.path.realpath(os.path.dirname(argv[0])) (pass_phrase, hostname, project_version) = parse_args(argv) exec_os_command(["ambari-agent", "stop"]) exec_os_command(["ambari-agent", "unregister"]) if is_suse(): exit_code = teardown_agent_suse() else: exit_code = teardown_agent() sys.exit(exit_code) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) main(sys.argv)
apache-2.0
Python
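In old_contents the typo os.stat.returncode swapped the Popen handle for the os.stat function, an AttributeError on every call. One caveat even with the fix: a Python dict literal evaluates its values left to right, so returncode is read before communicate() has reaped the process and comes back None. A sketch (not the committed code) that orders the two correctly:

    import subprocess

    def exec_os_command(os_command):
        proc = subprocess.Popen(os_command, stdout=subprocess.PIPE)
        log = proc.communicate()  # wait for the process first, so returncode is populated
        return {"exitstatus": proc.returncode, "log": log}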
31f55658d7495bf1fed8a5a466ffe54502a0348c
Make countersong check for language-dependent spells
GrognardsFromHell/TemplePlus
tpdatasrc/tpgamefiles/scr/tpModifiers/countersong.py
tpdatasrc/tpgamefiles/scr/tpModifiers/countersong.py
from templeplus.pymod import PythonModifier from toee import * import tpdp def Remove(char, args, evt_obj): if evt_obj.is_modifier('Countersong'): args.condition_remove() return 0 # built-in hook only checks for Sonic descriptor def Lang(char, args, evt_obj): lang = 1 << (D20STD_F_SPELL_DESCRIPTOR_LANGUAGE_DEPENDENT-1) sonic = 1 << (D20STD_F_SPELL_DESCRIPTOR_SONIC-1) if (evt_obj.flags & lang) and not (evt_obj.flags & sonic): perform = args.get_arg(1) save_bonus = evt_obj.bonus_list.get_sum() delta = perform - save_bonus - evt_obj.roll_result if delta > 0: evt_obj.bonus_list.add(delta, 0, 192) return 0 countersong = PythonModifier() countersong.ExtendExisting('Countersong') countersong.AddHook(ET_OnConditionAddPre, EK_NONE, Remove, ()) countersong.AddHook(ET_OnCountersongSaveThrow, EK_NONE, Lang, ())
from templeplus.pymod import PythonModifier from toee import * import tpdp def Remove(char, args, evt_obj): if evt_obj.is_modifier('Countersong'): args.condition_remove() return 0 countersong = PythonModifier() countersong.ExtendExisting('Countersong') countersong.AddHook(ET_OnConditionAddPre, EK_NONE, Remove, ())
mit
Python
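Per the comment in new_contents, the built-in hook only checks the Sonic descriptor, so the added Lang hook tests the spell's flag bits itself. The gating test, extracted from the blob (the constants come from the toee star-import in the record):

    lang = 1 << (D20STD_F_SPELL_DESCRIPTOR_LANGUAGE_DEPENDENT - 1)
    sonic = 1 << (D20STD_F_SPELL_DESCRIPTOR_SONIC - 1)
    if (evt_obj.flags & lang) and not (evt_obj.flags & sonic):
        ...  # apply the countersong bonus only to language-dependent, non-sonic spells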
e535def2bc9b7de203e1fd37fc592cdeed1be526
fix selection bug
facebook/PathPicker,Shenil/PathPicker,slackorama/PathPicker,alecjacobson/PathPicker,pallavagarwal07/PathPicker
src/choose.py
src/choose.py
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # # @nolint from __future__ import print_function import curses import pickle import sys import os import output import screenControl import logger import format PICKLE_FILE = '~/.fbPager.pickle' SELECTION_PICKLE = '~/.fbPager.selection.pickle' LOAD_SELECTION_WARNING = ''' WARNING! Loading the standard input and previous selection failed. This is probably due to a backwards compatibility issue with upgrading PathPicker or an internal error. Please pipe a new set of input to PathPicker to start fresh (after which this error will go away) ''' def doProgram(stdscr): output.clearFile() logger.clearFile() lineObjs = getLineObjs() screen = screenControl.Controller(stdscr, lineObjs) screen.control() def getLineObjs(): filePath = os.path.expanduser(PICKLE_FILE) try: lineObjs = pickle.load(open(filePath, 'rb')) except: output.appendError(LOAD_SELECTION_WARNING) sys.exit(1) logger.addEvent('total_num_files', len(lineObjs.items())) selectionPath = os.path.expanduser(SELECTION_PICKLE) if os.path.isfile(selectionPath): setSelectionsFromPickle(selectionPath, lineObjs) matches = [lineObj for i, lineObj in lineObjs.items() if not lineObj.isSimple()] if not len(matches): output.writeToFile('echo "No lines matched!!"') sys.exit(0) return lineObjs def setSelectionsFromPickle(selectionPath, lineObjs): try: selectedIndices = pickle.load(open(selectionPath, 'rb')) except: output.appendError(LOAD_SELECTION_WARNING) sys.exit(1) for index in selectedIndices: if index >= len(lineObjs.items()): error = 'Found index %d more than total matches' % index output.appendError(error) continue toSelect = lineObjs[index] if isinstance(toSelect, format.LineMatch): lineObjs[index].setSelect(True) else: error = 'Line %d was selected but is not LineMatch' % index output.appendError(error) if __name__ == '__main__': if not os.path.exists(os.path.expanduser(PICKLE_FILE)): print('Nothing to do!') output.writeToFile('echo ":D"') sys.exit(0) output.clearFile() curses.wrapper(doProgram)
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # # @nolint from __future__ import print_function import curses import pickle import sys import os import output import screenControl import logger PICKLE_FILE = '~/.fbPager.pickle' SELECTION_PICKLE = '~/.fbPager.selection.pickle' LOAD_SELECTION_WARNING = ''' WARNING! Loading the standard input and previous selection failed. This is probably due to a backwards compatibility issue with upgrading PathPicker or an internal error. Please pipe a new set of input to PathPicker to start fresh (after which this error will go away) ''' def doProgram(stdscr): output.clearFile() logger.clearFile() lineObjs = getLineObjs() screen = screenControl.Controller(stdscr, lineObjs) screen.control() def getLineObjs(): filePath = os.path.expanduser(PICKLE_FILE) try: lineObjs = pickle.load(open(filePath, 'rb')) except: output.appendError(LOAD_SELECTION_WARNING) sys.exit(1) logger.addEvent('total_num_files', len(lineObjs.items())) selectionPath = os.path.expanduser(SELECTION_PICKLE) if os.path.isfile(selectionPath): setSelectionsFromPickle(lineObjs) matches = [lineObj for i, lineObj in lineObjs.items() if not lineObj.isSimple()] if not len(matches): output.writeToFile('echo "No lines matched!!"') sys.exit(0) return lineObjs def setSelectionsFromPickle(lineObjs): try: selectedIndices = pickle.load(open(selectionPath, 'rb')) except: output.appendError(LOAD_SELECTION_WARNING) sys.exit(1) for index in selectedIndices: if index >= len(lineObjs.items()): error = 'Found index %d more than total matches' % index output.appendError(error) continue toSelect = lineObjs[index] if isinstance(toSelect, format.LineMatch): lineObjs[index].setSelect(True) else: error = 'Line %d was selected but is not LineMatch' % index output.appendError(error) if __name__ == '__main__': if not os.path.exists(os.path.expanduser(PICKLE_FILE)): print('Nothing to do!') output.writeToFile('echo ":D"') sys.exit(0) output.clearFile() curses.wrapper(doProgram)
mit
Python
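Unflattened, old_contents has two defects the fix addresses: setSelectionsFromPickle read selectionPath as a free variable that only existed in its caller (a NameError at runtime), and format.LineMatch was referenced without importing format. The repaired skeleton:

    import pickle
    import format  # provides LineMatch

    def setSelectionsFromPickle(selectionPath, lineObjs):  # path now passed in explicitly
        selectedIndices = pickle.load(open(selectionPath, 'rb'))
        ...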
0a44fc07efb902912e22e72979f69fbab200cd32
Update version 0.6.8 -> 0.6.9
dwavesystems/dimod
dimod/package_info.py
dimod/package_info.py
__version__ = '0.6.9' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'A shared API for binary quadratic model samplers.'
__version__ = '0.6.8' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'A shared API for binary quadratic model samplers.'
apache-2.0
Python
6d86e8565a9ea1aac07b8a1470e2f3b724b981c2
fix for use on python 2.1
sk-/python2.7-type-annotator
Lib/bsddb/test/test_misc.py
Lib/bsddb/test/test_misc.py
"""Miscellaneous bsddb module test cases """ import os import sys import unittest try: # For Python 2.3 from bsddb import db, dbshelve except ImportError: # For earlier Pythons w/distutils pybsddb from bsddb3 import db, dbshelve #---------------------------------------------------------------------- class MiscTestCase(unittest.TestCase): def setUp(self): self.filename = self.__class__.__name__ + '.db' homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except OSError: pass def tearDown(self): try: os.remove(self.filename) except OSError: pass import glob files = glob.glob(os.path.join(self.homeDir, '*')) for file in files: os.remove(file) def test01_badpointer(self): dbs = dbshelve.open(self.filename) dbs.close() self.assertRaises(db.DBError, dbs.get, "foo") def test02_db_home(self): env = db.DBEnv() # check for crash fixed when db_home is used before open() assert env.db_home is None env.open(self.homeDir, db.DB_CREATE) assert self.homeDir == env.db_home #---------------------------------------------------------------------- def test_suite(): return unittest.makeSuite(MiscTestCase) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
"""Miscellaneous bsddb module test cases """ import os import sys import unittest try: # For Python 2.3 from bsddb import db, dbshelve except ImportError: # For earlier Pythons w/distutils pybsddb from bsddb3 import db, dbshelve from test.test_support import verbose #---------------------------------------------------------------------- class MiscTestCase(unittest.TestCase): def setUp(self): self.filename = self.__class__.__name__ + '.db' homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except OSError: pass def tearDown(self): try: os.remove(self.filename) except OSError: pass import glob files = glob.glob(os.path.join(self.homeDir, '*')) for file in files: os.remove(file) def test01_badpointer(self): dbs = dbshelve.open(self.filename) dbs.close() self.assertRaises(db.DBError, dbs.get, "foo") def test02_db_home(self): env = db.DBEnv() # check for crash fixed when db_home is used before open() assert env.db_home is None env.open(self.homeDir, db.DB_CREATE) assert self.homeDir == env.db_home #---------------------------------------------------------------------- def test_suite(): return unittest.makeSuite(MiscTestCase) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
mit
Python
d7b8186f0f4115307753d0aef038ec61155c83bc
Fix typo in python
ikinz/Benchmark-exjobb
Test/Test-IO/python/TestIO.py
Test/Test-IO/python/TestIO.py
#!/usr/bin/python import timeit, sys, io def wrapper(func, *args, **kwargs): def wrapped(): return func(*args, **kwargs) return wrapped def start(file, outfile): input = open(file, 'r') output = open(outfile, 'w') line = input.readline() while line: line = line.replace('Tellus', 'Terra') line = line.replace('tellus', 'terra') output.write(line) line = input.readline() def main(argv): file = 'dump.txt' output = 'res.txt' for i in range(len(argv)): if argv[i] == '-f': i = i + 1 file = argv[i] elif argv[i] == '-o': i = i + 1 output = argv[i] #ns = time.time() wrapped = wrapper(start, file, output) print (timeit.timeit(wrapped, number=1)*1000) #totaltime = (time.time() - ns) / 1000000 #print (totaltime) sys.exit(0) if __name__ == '__main__':main(sys.argv[1:])
#!/usr/bin/python import timeit, sys, io def wrapper(func, *args, **kwargs): def wrapped(): return func(*args, **kwargs) return wrapped def start(file, outfile): #input = open(file, 'r') #output = open(outfile, 'w') line = input.readline() while line: line = line.replace('Tellus', 'Terra') line = line.replace('tellus', 'terra') output.write(line) line = input.readline() def main(argv): file = 'dump.txt' output = 'res.txt' for i in range(len(argv)): if argv[i] == '-f': i = i + 1 file = argv[i] elif argv[i] == '-o': i = i + 1 output = argv[i] #ns = time.time() wrapped = wrapper(start, file, output) print (timeit.timeit(wrapped, number=1)*1000) #totaltime = (time.time() - ns) / 1000000 #print (totaltime) sys.exit(0) if __name__ == '__main__':main(sys.argv[1:])
mit
Python
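In old_contents the two open() calls were commented out, so input fell through to the Python builtin and output was simply undefined. The fix restores the handles:

    input = open(file, 'r')      # was commented out, leaving the builtin input()
    output = open(outfile, 'w')  # was undefined before the fix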
b19429159f3c813297ba2e237abba276045f9ff1
add 0.10.17, mariadb-connector-c dependency (#11044)
iulian787/spack,LLNL/spack
var/spack/repos/builtin/packages/r-rmysql/package.py
var/spack/repos/builtin/packages/r-rmysql/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RRmysql(RPackage): """Implements 'DBI' Interface to 'MySQL' and 'MariaDB' Databases.""" homepage = "https://github.com/rstats-db/rmysql" url = "https://cran.r-project.org/src/contrib/RMySQL_0.10.9.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/RMySQL" version('0.10.17', sha256='754df4fce159078c1682ef34fc96aa5ae30981dc91f4f2bada8d1018537255f5') version('0.10.9', '3628200a1864ac3005cfd55cc7cde17a') depends_on('[email protected]:', type=('build', 'run')) depends_on('mariadb@:5.5.56')
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RRmysql(RPackage): """Implements 'DBI' Interface to 'MySQL' and 'MariaDB' Databases.""" homepage = "https://github.com/rstats-db/rmysql" url = "https://cran.r-project.org/src/contrib/RMySQL_0.10.9.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/RMySQL" version('0.10.9', '3628200a1864ac3005cfd55cc7cde17a') depends_on('r-dbi', type=('build', 'run')) depends_on('mariadb')
lgpl-2.1
Python
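Alongside the new checksummed release, the diff pins both dependencies where old_contents left them open:

    version('0.10.17', sha256='754df4fce159078c1682ef34fc96aa5ae30981dc91f4f2bada8d1018537255f5')
    depends_on('[email protected]:', type=('build', 'run'))  # was: depends_on('r-dbi', ...)
    depends_on('mariadb@:5.5.56')                      # was: depends_on('mariadb')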
bcff742c27904f995d9f5e8a184f0348b58139a5
fix closing bracket
uhuramedia/cookiecutter-django
{{cookiecutter.repo_name}}/fabfile.py
{{cookiecutter.repo_name}}/fabfile.py
# -*- coding: utf-8 -*- import os import datetime from contextlib import contextmanager from fabric.api import env, run, local, prefix, sudo def live(): """Connects to the server.""" env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')] env.user = 'freshmilk' env.cwd = '/var/www/{{cookiecutter.domain_name}}' env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd) def beta(): """Connects to beta/testing server""" env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')] env.user = 'freshmilk' env.cwd = '/var/www/beta.{{cookiecutter.domain_name}}' env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd) def gitpull(tag=None): """Pulls upstream branch on the server.""" if tag is not None: run('git pull') run('git checkout %s' % tag) else: run('git pull') @contextmanager def source_env(): """Actives embedded virtual env""" with prefix('source env/bin/activate'): yield def collectstatic(): """Collect static files on server.""" with source_env(): run('python manage.py collectstatic') def migrate(): """Sync project database on server.""" with source_env(): run('python manage.py migrate') def touch(): """Touch the wsgi file.""" run('touch {{cookiecutter.repo_name}}/wsgi.py') def update(tag=None): """ Runs gitpull, develop, collectstatic, migrate and touch. """ gitpull() collectstatic() migrate() touch() def dump(): with source_env(): run('python manage.py sqldump') def sync_media(): local('rsync -avzh -e ssh %s/media/* media/' % env.connect_to) def sync_dump(): local('rsync -avPhzL -e ssh %s/var/dump.sql.gz var' % env.connect_to) def mirror(): """Runs dump, sync_media, sync_dump and sqlimport.""" dump() sync_dump() local('python manage.py sqlimport') sync_media()
# -*- coding: utf-8 -*- import os import datetime from contextlib import contextmanager from fabric.api import env, run, local, prefix, sudo def live(): """Connects to the server.""" env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')] env.user = 'freshmilk' env.cwd = '/var/www/{{cookiecutter.domain_name}}' env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd) def beta(): """Connects to beta/testing server""" env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')] env.user = 'freshmilk' env.cwd = '/var/www/beta.{{cookiecutter.domain_name}}' env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd) def gitpull(tag=None): """Pulls upstream branch on the server.""" if tag is not None: run('git pull') run('git checkout %s' % tag) else: run('git pull') @contextmanager def source_env(): """Actives embedded virtual env""" with prefix('source env/bin/activate'): yield def collectstatic(): """Collect static files on server.""" with source_env(): run('python manage.py collectstatic') def migrate(): """Sync project database on server.""" with source_env(): run('python manage.py migrate') def touch(): """Touch the wsgi file.""" run('touch {{cookiecutter.repo_name}}/wsgi.py') def update(tag=None): """ Runs gitpull, develop, collectstatic, migrate and touch. """ gitpull() collectstatic() migrate() touch() def dump(): with source_env(): run('python manage.py sqldump' def sync_media(): local('rsync -avzh -e ssh %s/media/* media/' % env.connect_to) def sync_dump(): local('rsync -avPhzL -e ssh %s/var/dump.sql.gz var' % env.connect_to) def mirror(): """Runs dump, sync_media, sync_dump and sqlimport.""" dump() sync_dump() local('python manage.py sqlimport') sync_media()
bsd-3-clause
Python
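The entire fix is a single character: the dump() task in old_contents left run()'s parenthesis unclosed, which is a SyntaxError for the whole fabfile, not just that task.

    def dump():
        with source_env():
            run('python manage.py sqldump')  # closing parenthesis was missing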
88a028663b7688af362a2ebd5c168aaccc5695c0
Comment updates
prat0318/swagger-py,analogue/bravado-core,analogue/bravado,prat0318/bravado-core,vi4m/bravado,stratosgear/bravado,admetricks/bravado-core
bravado/mapping/request.py
bravado/mapping/request.py
from bravado.mapping.operation import log from bravado.mapping.param import unmarshal_param class RequestLike(object): """ Common interface for server side request objects. Subclasses are responsible for providing attrs for __required_attrs__. """ __required_attrs__ = [ 'path', # dict of URL path parameters 'params', # dict of parameters from the query string and request body. 'headers', # dict of request headers ] def __getattr__(self, name): """ When an attempt to access a required attribute that doesn't exist is made, let the caller know that the type is non-compliant in its attempt to be `RequestList`. This is in place of the usual throwing of an AttributeError. Reminder: __getattr___ is only called when it has already been determined that this object does not have the given attr. :raises: NotImplementedError when the subclass has not provided access to a required attribute. """ if name in self.__required_attrs__: raise NotImplementedError( 'This RequestLike type {0} forgot to implement an attr ' 'for `{1}`'.format(type(self), name)) raise AttributeError( "'{0}' object has no attribute '{1}'".format(type(self), name)) def json(self, **kwargs): """ :return: request content in a json-like form :rtype: int, float, double, string, unicode, list, dict """ raise NotImplementedError("Implement json() in {0}".format(type(self))) def unmarshal_request(request, op): """Unmarshal Swagger request parameters from the passed in request like object. :type request: :class: `bravado.mapping.request.RequestLike`. :type op: :class:`bravado.mapping.operation.Operation` :returns: dict where (key, value) = (param_name, param_value) """ request_data = {} for param_name, param in op.params.iteritems(): param_value = unmarshal_param(param, request) request_data[param_name] = param_value log.debug("Swagger request_data: {0}".format(request_data)) return request_data
from bravado.mapping.operation import log from bravado.mapping.param import unmarshal_param class RequestLike(object): """ Define a common interface for bravado to interface with server side request objects. Subclasses are responsible for providing attrs for __required_attrs__. """ __required_attrs__ = [ 'path', # dict of URL path parameters 'params', # dict of parameters from the query string and request body. 'headers', # dict of request headers ] def __getattr__(self, name): """ When an attempt to access a required attribute that doesn't exist is made, let the caller know that the type is non-compliant in its attempt to be `RequestList`. This is in place of the usual throwing of an AttributeError. Reminder: __getattr___ is only called when it has already been determined that this object does not have the given attr. :raises: NotImplementedError when the subclass has not provided access to a required attribute. """ if name in self.__required_attrs__: raise NotImplementedError( 'This RequestLike type {0} forgot to implement an attr ' 'for `{1}`'.format(type(self), name)) raise AttributeError( "'{0}' object has no attribute '{1}'".format(type(self), name)) def json(self, **kwargs): """ :return: request content in a json-like form :rtype: int, float, double, string, unicode, list, dict """ raise NotImplementedError("Implement json() in {0}".format(type(self))) def unmarshal_request(request, op): """Unmarshal Swagger request parameters from the passed in request like object. :type request: :class: `bravado.mapping.request.RequestLike`. :type op: :class:`bravado.mapping.operation.Operation` :returns: dict where (key, value) = (param_name, param_value) """ request_data = {} for param_name, param in op.params.iteritems(): param_value = unmarshal_param(param, request) request_data[param_name] = param_value log.debug("Swagger request_data: {0}".format(request_data)) return request_data
bsd-3-clause
Python
10a2b3def6936d94e21ac68a15b3ae1428e75e41
Make the disassembler script work on Linux.
mewbak/idc
util/dasm.py
util/dasm.py
#!/usr/bin/env python import sys import optparse import subprocess import re def dasm(infile, outfp, verbose = True): command_line = [ 'objdump', '--disassemble', '--disassemble-zeroes', '--disassembler-options=att,suffix', #'--prefix-addresses', '--no-show-raw-insn', '--wide', infile ] p = subprocess.Popen(command_line, stdout=subprocess.PIPE, shell=False) #print p.communicate()[0]; return infp = p.stdout it = iter(infp) for line in it: # TODO: handle other sections too if line == "Disassembly of section .text:\n": break insns = [] addrs = {} for line in it: if not line: break line = line[:-1] if not line: continue if line.startswith("Disassembly of section "): break line = re.sub(r"([0-9A-Fa-f]+) <([._@A-Za-z][_@A-Za-z]*)>", r"\2", line) line = re.sub(r"([0-9A-Fa-f]+) <([^>]*)>", r"0x\1", line) addr, insn = [part.strip() for part in line.split(":", 1)] if insn == "(bad)": continue try: intaddr = int(addr, 16) except ValueError: pass else: addr = "loc" + addr addrs[intaddr] = addr insns.append((addr, insn)) def repl(mo): addr = mo.group() try: return addrs[int(addr,16)] except KeyError: return addr for addr, insn in insns: insn = re.sub(r'\b0[xX]([0-9a-fA-F]+)\b', repl, insn) outfp.write("%s: %s\n" % (addr, insn)) def main(): parser = optparse.OptionParser( usage = "\n\t%prog [options] executable ...", version = "%prog 1.0") parser.add_option( '-o', '--output', type = "string", dest = "output", help = "specify output assembly file") parser.add_option( '-v', '--verbose', action = "count", dest = "verbose", default = 1, help = "show extra information") parser.add_option( '-q', '--quiet', action = "store_const", dest = "verbose", const = 0, help = "no extra information") (options, args) = parser.parse_args(sys.argv[1:]) for arg in args: if options.output is None: # root, ext = os.path.splitext(arg) # fpout = file(root + '.s', 'wt') #elif options.output is '-': fpout = sys.stdout else: fpout = file(options.output, 'wt') dasm(arg, fpout, options.verbose) if __name__ == '__main__': main()
#!/usr/bin/env python import sys import optparse import subprocess import re def dasm(infile, outfp, verbose = True): command_line = [ 'objdump', '--disassemble', '--disassemble-zeroes', '--disassembler-options=att,suffix', '--prefix-addresses', '--no-show-raw-insn', '--wide', infile ] p = subprocess.Popen(command_line, stdout=subprocess.PIPE, shell=False) infp = p.stdout for line in infp: if line == "Disassembly of section .text:\n": break insns = [] addrs = set() for line in infp: line = line[:-1] if not line: break addr, insn = line.split(" ", 1) if insn.strip() == "(bad)": continue insns.append((addr, insn)) addrs.add(addr) def repl(mo): addr = mo.group() if addr in addrs: return "loc" + addr[2:] else: return addr for addr, insn in insns: insn = re.sub(r'\b0x[0-9a-fA-F]+\b', repl, insn) addr = "loc" + addr[2:] outfp.write("%s: %s\n" % (addr, insn)) def main(): parser = optparse.OptionParser( usage = "\n\t%prog [options] executable ...", version = "%prog 1.0") parser.add_option( '-o', '--output', type = "string", dest = "output", help = "specify output assembly file") parser.add_option( '-v', '--verbose', action = "count", dest = "verbose", default = 1, help = "show extra information") parser.add_option( '-q', '--quiet', action = "store_const", dest = "verbose", const = 0, help = "no extra information") (options, args) = parser.parse_args(sys.argv[1:]) for arg in args: if options.output is None: # root, ext = os.path.splitext(arg) # fpout = file(root + '.s', 'wt') #elif options.output is '-': fpout = sys.stdout else: fpout = file(options.output, 'wt') dasm(arg, fpout, options.verbose) if __name__ == '__main__': main()
lgpl-2.1
Python
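The Linux-facing rework in new_contents drops objdump's --prefix-addresses flag, splits each line on the "addr: insn" colon form, rewrites "addr <symbol>" references, and keys the label map by integer address so hex operands can be relabeled. That last step, pulled out as a standalone helper for clarity (the relabel name is just for this sketch):

    import re

    def relabel(insn, addrs):
        # addrs maps int address -> generated label, e.g. {0x80483f4: 'loc80483f4'}
        def repl(mo):
            try:
                return addrs[int(mo.group(), 16)]
            except KeyError:
                return mo.group()
        return re.sub(r'\b0[xX][0-9a-fA-F]+\b', repl, insn)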
887b03d7587525509d3652ef42b930025194d2ad
Update 2sum.py
UmassJin/Leetcode
Array/2sum.py
Array/2sum.py
Given an array of integers, find two numbers such that they add up to a specific target number. The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based. You may assume that each input would have exactly one solution. Input: numbers={2, 7, 11, 15}, target=9 Output: index1=1, index2=2 class Solution: # @return a tuple, (index1, index2) # 48s # O(n) def twoSum(self, nums, target): if not nums or len(nums) < 2: return None idict = {} for i, value in enumerate(nums): if target - value in idict: return [idict[target-value], i+1] idict[value] = i+1 # 79ms def twoSum(self, num, target): dic = {} for i in xrange(len(num)): if num[i] in dic: result1 = dic[num[i]] +1 result2 = i +1 else: dic[target-num[i]] = i return (result1,result2) # 68ms def twoSum(self, num, target): tmpnum = num[:] tmpnum.sort() length = len(num) i = 0; j = length-1 while i < j: tmpval = tmpnum[i]+tmpnum[j] if tmpval == target: res1 = num.index(tmpnum[i]) num.reverse() res2 = len(num)-1-num.index(tmpnum[j]) if res1<res2: return (res1+1,res2+1) else: return(res2+1,res1+1) if tmpval > target: j -= 1 if tmpval < target: i += 1
Given an array of integers, find two numbers such that they add up to a specific target number. The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based. You may assume that each input would have exactly one solution. Input: numbers={2, 7, 11, 15}, target=9 Output: index1=1, index2=2 class Solution: # @return a tuple, (index1, index2) def twoSum(self, nums, target): if not nums or len(nums) < 2: return None idict = {} for i, value in enumerate(nums): if target - value in idict: return [idict[target-value], i+1] idict[value] = i+1 # 79ms def twoSum(self, num, target): dic = {} for i in xrange(len(num)): if num[i] in dic: result1 = dic[num[i]] +1 result2 = i +1 else: dic[target-num[i]] = i return (result1,result2) # 68ms def twoSum(self, num, target): tmpnum = num[:] tmpnum.sort() length = len(num) i = 0; j = length-1 while i < j: tmpval = tmpnum[i]+tmpnum[j] if tmpval == target: res1 = num.index(tmpnum[i]) num.reverse() res2 = len(num)-1-num.index(tmpnum[j]) if res1<res2: return (res1+1,res2+1) else: return(res2+1,res1+1) if tmpval > target: j -= 1 if tmpval < target: i += 1
mit
Python
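A worked example of the dictionary-based O(n) approach above, on the sample input from the problem statement. Note that with three same-named twoSum defs in one class body, only the last binding survives, so the method is shown standalone here:

    def two_sum(nums, target):
        idict = {}
        for i, value in enumerate(nums):
            if target - value in idict:
                return [idict[target - value], i + 1]  # one-based indices
            idict[value] = i + 1

    print(two_sum([2, 7, 11, 15], 9))  # [1, 2]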
dbdfbc18ebadc0a1d50a6513bb982d2e3881036f
Add MAX_TURNS and some more output to train-ml-bot
JWageM/planet-wars,intelligent-systems-course/planet-wars
train-ml-bot.py
train-ml-bot.py
""" Train a machine learning model for the classifier bot. We create a player, and watch it play games against itself. Every observed state is converted to a feature vector and labeled with the eventual outcome (-1.0: player 2 won, 1.0: player 1 won) This is part of the second worksheet. """ from api import State, util # This package contains various machine learning algorithms import sys import sklearn import sklearn.linear_model from sklearn.externals import joblib from bots.rand import rand # from bots.alphabeta import alphabeta from bots.ml import ml from bots.ml.ml import features import matplotlib.pyplot as plt # How many games to play GAMES = 1000 # Number of planets in the field NUM_PLANETS = 6 # Maximum number of turns to play NUM_TURNS = 100 # The player we'll observe player = rand.Bot() # player = alphabeta.Bot() data = [] target = [] for g in range(GAMES): state, id = State.generate(NUM_PLANETS) state_vectors = [] i = 0 while not state.finished() and i <= NUM_TURNS: state_vectors.append(features(state)) move = player.get_move(state) state = state.next(move) i += 1 winner = state.winner() for state_vector in state_vectors: data.append(state_vector) target.append('won' if winner == 1 else 'lost') sys.stdout.write(".") sys.stdout.flush() if g % (GAMES/10) == 0: print("") print('game {} finished ({}%)'.format(g, (g/float(GAMES)*100))) # Train a logistic regression model learner = sklearn.linear_model.LogisticRegression() model = learner.fit(data, target) # Check for class imbalance count = {} for str in target: if str not in count: count[str] = 0 count[str] += 1 print('instances per class: {}'.format(count)) # Store the model in the ml directory joblib.dump(model, './bots/ml/model.pkl') print('Done')
""" Train a machine learning model for the classifier bot. We create a player, and watch it play games against itself. Every observed state is converted to a feature vector and labeled with the eventual outcome (-1.0: player 2 won, 1.0: player 1 won) This is part of the second worksheet. """ from api import State, util # This package contains various machine learning algorithms import sklearn import sklearn.linear_model from sklearn.externals import joblib from bots.rand import rand # from bots.alphabeta import alphabeta from bots.ml import ml from bots.ml.ml import features import matplotlib.pyplot as plt # How many games to play GAMES = 1000 # Number of planets in the field NUM_PLANETS = 6 # The player we'll observe player = rand.Bot() # player = alphabeta.Bot() data = [] target = [] for g in range(GAMES): state, id = State.generate(NUM_PLANETS) state_vectors = [] while not state.finished(): state_vectors.append(features(state)) move = player.get_move(state) state = state.next(move) winner = state.winner() for state_vector in state_vectors: data.append(state_vector) target.append('won' if winner == 1 else 'lost') if g % (GAMES/10) == 0: print('game {} finished ({}%)'.format(g, (g/float(GAMES)*100) )) # Train a logistic regression model learner = sklearn.linear_model.LogisticRegression() model = learner.fit(data, target) # Check for class imbalance count = {} for str in target: if str not in count: count[str] = 0 count[str] += 1 print('instances per class: {}'.format(count)) # Store the model in the ml directory joblib.dump(model, './bots/ml/model.pkl') print('Done')
mit
Python
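The substantive change is the turn cap: a self-play game that never reaches a finished state can no longer stall data collection (the added sys.stdout dots just report progress). The new inner loop, extracted from the blob:

    i = 0
    while not state.finished() and i <= NUM_TURNS:  # NUM_TURNS = 100 in the record
        state_vectors.append(features(state))
        move = player.get_move(state)
        state = state.next(move)
        i += 1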
6454bca66b73efa6e124fce80634fc98bd0b9c25
add new dependencies for python 3.7.6
NaturalSolutions/NsPortal
Back/setup.py
Back/setup.py
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.txt')) as f: README = f.read() with open(os.path.join(here, 'CHANGES.txt')) as f: CHANGES = f.read() requires = [ 'marshmallow==3.3.0', 'pyodbc==4.0.27', 'pyramid==1.10.4', 'sqlalchemy==1.3.12', 'transaction==3.0.0', 'waitress==1.4.2', 'webargs==6.0.0b3' ] setup( name='ns_portal', version='0.4', description='ns_portal', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pyramid", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web wsgi bfg pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, test_suite='ns_portal', install_requires=requires, entry_points="""\ [paste.app_factory] main = ns_portal:main [console_scripts] initialize_ns_portal_db = ns_portal.scripts.initializedb:main """ )
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.txt')) as f: README = f.read() with open(os.path.join(here, 'CHANGES.txt')) as f: CHANGES = f.read() requires = [ 'pyodbc==4.0.28', 'pyramid==1.10.4', 'sqlalchemy==1.3.12', 'transaction==3.0.0', 'waitress==1.4.2', 'webargs==6.0.0b2' ] setup( name='ns_portal', version='0.3', description='ns_portal', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pyramid", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web wsgi bfg pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, test_suite='ns_portal', install_requires=requires, entry_points="""\ [paste.app_factory] main = ns_portal:main [console_scripts] initialize_ns_portal_db = ns_portal.scripts.initializedb:main """ )
mit
Python
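Distilled, the dependency changes behind the "python 3.7.6" subject line, shown as a plain list for comparison (the requires_diff name is just for this sketch):

    requires_diff = [
        'marshmallow==3.3.0',  # added
        'pyodbc==4.0.27',      # was 4.0.28
        'webargs==6.0.0b3',    # was 6.0.0b2
    ]  # the package version itself was bumped from 0.3 to 0.4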
d9c9f9c363f5520f37800930efd9eaa1e43daed7
bump version
yerejm/ttt
ttt/__init__.py
ttt/__init__.py
# -*- coding: utf-8 -*- __version__ = '0.3.2'
# -*- coding: utf-8 -*- __version__ = '0.3.1'
isc
Python
aa3a6dd01d7681f92d1be42fb2831126ced7a76e
Update __init__.py
adamcharnock/django-su,PetrDlouhy/django-su
django_su/__init__.py
django_su/__init__.py
import os # The fake password we will use to authenticate su'ed users SECRET_PASSWORD = os.urandom(64) VERSION = (0, 4, 8) __version__ = '.'.join([str(n) for n in VERSION])
import os # The fake password we will use to authenticate su'ed users SECRET_PASSWORD = os.urandom(64) __version__ = '0.4.8'
mit
Python
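The change swaps a literal __version__ string for a version tuple plus a derived string, a common pattern that keeps the two representations from drifting:

    VERSION = (0, 4, 8)
    __version__ = '.'.join([str(n) for n in VERSION])  # -> '0.4.8', same value as before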
f623312b5df6e8f201f641f87193075e8d3f70ea
Add version attribute
arameshkumar/nuxeo-drive,IsaacYangSLA/nuxeo-drive,loopingz/nuxeo-drive,ssdi-drive/nuxeo-drive,DirkHoffmann/nuxeo-drive,rsoumyassdi/nuxeo-drive,arameshkumar/base-nuxeo-drive
nuxeo-drive-client/nxdrive/__init__.py
nuxeo-drive-client/nxdrive/__init__.py
_version_ = '1.0.0-dev'
lgpl-2.1
Python
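One reading note on this record: the attribute is spelled _version_ with single underscores, while the conventional module metadata name is dunder-spelled. The usual form, for comparison (not what the commit contains):

    __version__ = '1.0.0-dev'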
c9277fa65afcf513c2e3000193d7837900ff8ee1
Improve logging runtime state poll fail message
opennode/nodeconductor-openstack
src/nodeconductor_openstack/tasks/base.py
src/nodeconductor_openstack/tasks/base.py
from celery import shared_task from nodeconductor.core.tasks import Task from .. import models # TODO: move this signal to itacloud assembly application @shared_task def register_instance_in_zabbix(instance_uuid): from nodeconductor.template.zabbix import register_instance instance = models.Instance.objects.get(uuid=instance_uuid) register_instance(instance) class RuntimeStateException(Exception): pass class PollRuntimeStateTask(Task): max_retries = 300 default_retry_delay = 5 def get_backend(self, instance): return instance.get_backend() def execute(self, instance, backend_pull_method, success_state, erred_state): backend = self.get_backend(instance) getattr(backend, backend_pull_method)(instance) instance.refresh_from_db() if instance.runtime_state not in (success_state, erred_state): self.retry() elif instance.runtime_state == erred_state: raise RuntimeStateException( '%s %s (PK: %s) runtime state become erred: %s' % ( instance.__class__.__name__, instance, instance.pk, erred_state)) return instance class PollBackendCheckTask(Task): max_retries = 60 default_retry_delay = 5 def get_backend(self, instance): return instance.get_backend() def execute(self, instance, backend_check_method): # backend_check_method should return True if object does not exist at backend backend = self.get_backend(instance) if not getattr(backend, backend_check_method)(instance): self.retry() return instance
from celery import shared_task from nodeconductor.core.tasks import Task from .. import models # TODO: move this signal to itacloud assembly application @shared_task def register_instance_in_zabbix(instance_uuid): from nodeconductor.template.zabbix import register_instance instance = models.Instance.objects.get(uuid=instance_uuid) register_instance(instance) class RuntimeStateException(Exception): pass class PollRuntimeStateTask(Task): max_retries = 300 default_retry_delay = 5 def get_backend(self, instance): return instance.get_backend() def execute(self, instance, backend_pull_method, success_state, erred_state): backend = self.get_backend(instance) getattr(backend, backend_pull_method)(instance) instance.refresh_from_db() if instance.runtime_state not in (success_state, erred_state): self.retry() elif instance.runtime_state == erred_state: raise RuntimeStateException( 'Instance %s (PK: %s) runtime state become erred: %s' % (instance, instance.pk, erred_state)) return instance class PollBackendCheckTask(Task): max_retries = 60 default_retry_delay = 5 def get_backend(self, instance): return instance.get_backend() def execute(self, instance, backend_check_method): # backend_check_method should return True if object does not exist at backend backend = self.get_backend(instance) if not getattr(backend, backend_check_method)(instance): self.retry() return instance
mit
Python
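The improved message prefixes the model class name, so resources of different types no longer all log as "Instance". The new raise site:

    raise RuntimeStateException(
        '%s %s (PK: %s) runtime state become erred: %s' % (
            instance.__class__.__name__, instance, instance.pk, erred_state))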
e3035fb91a96a3ff5627b6847203e3dc11fbc78f
Add libunwind-1.2.1 (#8145)
tmerrick1/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,LLNL/spack
var/spack/repos/builtin/packages/libunwind/package.py
var/spack/repos/builtin/packages/libunwind/package.py
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libunwind(AutotoolsPackage): """A portable and efficient C programming interface (API) to determine the call-chain of a program.""" homepage = "http://www.nongnu.org/libunwind/" url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz" version('1.2.1', '06ba9e60d92fd6f55cd9dadb084df19e') version('1.1', 'fb4ea2f6fbbe45bf032cd36e586883ce')
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libunwind(AutotoolsPackage): """A portable and efficient C programming interface (API) to determine the call-chain of a program.""" homepage = "http://www.nongnu.org/libunwind/" url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz" version('1.1', 'fb4ea2f6fbbe45bf032cd36e586883ce')
lgpl-2.1
Python
77db7bb96686e3308a4061d24c257466d9987405
add delete_project dashboard api.
soasme/rio
rio/blueprints/dashboard.py
rio/blueprints/dashboard.py
# -*- coding: utf-8 -*- from slugify import slugify from flask import Blueprint from flask import jsonify from flask_wtf import Form from wtforms import StringField from wtforms.validators import DataRequired from wtforms.validators import ValidationError from wtforms.validators import Length from rio.utils.user import get_current_user_id from rio.utils.user import login_required from rio.utils.slugify import slugify from rio.models import add_instance from rio.models import delete_instance from rio.models import get_data_or_404 bp = Blueprint('dashboard', __name__) class NewProjectForm(Form): name = StringField('Name', validators=[DataRequired(), Length(max=64)]) class ConfirmDeleteProjectForm(Form): name = StringField('Name', validators=[DataRequired(), Length(max=64)]) @bp.errorhandler(404) def handle_not_found(exception): return jsonify(message='not found'), 404 @bp.route('/projects/new', methods=['POST']) @login_required def new_project(): """New Project.""" form = NewProjectForm() if not form.validate_on_submit(): return jsonify(errors=form.errors), 400 data = form.data data['slug'] = slugify(data['name']) data['owner_id'] = get_current_user_id() id = add_instance('project', **data) if not id: return jsonify(errors={'name': ['duplicated slug.']}), 400 project = get_data_or_404('project', id) return jsonify(**project) @bp.route('/projects/<int:project_id>', methods=['DELETE']) @login_required def delete_project(project_id): """Delete Project.""" project = get_data_or_404('project', project_id) if project['owner_id'] != get_current_user_id(): return jsonify(message='forbidden'), 403 delete_instance('project', project_id) return jsonify({}) @bp.route('/projects/<int:project_id>/transfer', methods=['POST']) def transfer_project(project_id): pass
# -*- coding: utf-8 -*- from slugify import slugify from flask import Blueprint from flask import jsonify from flask_wtf import Form from wtforms import StringField from wtforms.validators import DataRequired from wtforms.validators import ValidationError from wtforms.validators import Length from rio.utils.user import get_current_user_id from rio.utils.user import login_required from rio.utils.slugify import slugify from rio.models import add_instance from rio.models import get_data_or_404 bp = Blueprint('dashboard', __name__) class NewProjectForm(Form): name = StringField('Name', validators=[DataRequired(), Length(max=64)]) class ConfirmDeleteProjectForm(Form): name = StringField('Name', validators=[DataRequired(), Length(max=64)]) @bp.errorhandler(404) def handle_not_found(exception): return jsonify(message='not found'), 404 @bp.route('/projects/new', methods=['POST']) @login_required def new_project(): """New Project.""" form = NewProjectForm() if not form.validate_on_submit(): return jsonify(errors=form.errors), 400 data = form.data data['slug'] = slugify(data['name']) data['owner_id'] = get_current_user_id() id = add_instance('project', **data) if not id: return jsonify(errors={'name': ['duplicated slug.']}), 400 project = get_data_or_404('project', id) return jsonify(**project) @bp.route('/projects/<int:project_id>', methods=['DELETE']) @login_required def delete_project(project_id): project = get_data_or_404('project', project_id) if project['owner_id'] != get_current_user_id(): return jsonify(message='forbidden'), 403 # TODO: implement delete_project task = delete_project.delay(project_id) return jsonify() @bp.route('/projects/<int:project_id>/transfer', methods=['POST']) def transfer_project(project_id): pass
mit
Python
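In old_contents the handler's TODO branch called .delay() on the view function itself rather than on any celery task, and delete_instance was never imported. The fix deletes synchronously; the working handler body:

    from rio.models import delete_instance

    delete_instance('project', project_id)
    return jsonify({})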
91a77b860387ebed146b9e4e604d007bfabf0b9e
Fix potential bug in parameter passing
thaim/ansible
lib/ansible/plugins/action/normal.py
lib/ansible/plugins/action/normal.py
# (c) 2012, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): results = self._execute_module(tmp=tmp, task_vars=task_vars) # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. for field in ('ansible_notify',): if field in results: results.pop(field) return results
# (c) 2012, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): results = self._execute_module(tmp, task_vars=task_vars) # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. for field in ('ansible_notify',): if field in results: results.pop(field) return results
mit
Python
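The fix above matters because `_execute_module(tmp, task_vars=task_vars)` binds `tmp` to the callee's first positional parameter, which in ActionBase is not `tmp`. A minimal sketch of the failure mode (the signature is a hypothetical stand-in modelled on the method; only the argument binding is the point):

def _execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
    # stand-in for ActionBase._execute_module; returns what got bound where
    return {'module_name': module_name, 'tmp': tmp}

print(_execute_module('/tmp/work', task_vars={}))
# {'module_name': '/tmp/work', 'tmp': None}: the positional arg lands in the wrong slot
print(_execute_module(tmp='/tmp/work', task_vars={}))
# {'module_name': None, 'tmp': '/tmp/work'}: the keyword form binds as intended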
e0db9a970c6ea778419cc1f20ca66adedffb7aae
Set HOME, allow errors to pass through to stdout/stderr
hotosm/osm-export-tool2,hotosm/osm-export-tool2,hotosm/osm-export-tool2,hotosm/osm-export-tool2
utils/mwm.py
utils/mwm.py
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging import os import shutil import subprocess import tempfile from string import Template from .artifact import Artifact LOG = logging.getLogger(__name__) class MWM(object): name = 'mwm' description = 'maps.me MWM' cmd = Template('generate_mwm.sh $input') def __init__(self, input): """ Initialize the MWM generation utility. Args: pbf: the source PBF """ self.input = input self.output = os.path.splitext(input)[0] + '.mwm' def run(self): if self.is_complete: LOG.debug("Skipping MWM, file exists") return convert_cmd = self.cmd.safe_substitute({ 'input': self.input, }) LOG.debug('Running: %s' % convert_cmd) tmpdir = tempfile.mkdtemp() env = os.environ.copy() env.update(HOME=tmpdir, MWM_WRITABLE_DIR=tmpdir, TARGET=os.path.dirname(self.output)) try: subprocess.check_call( convert_cmd, env=env, shell=True, executable='/bin/bash') LOG.debug('generate_mwm.sh complete') finally: shutil.rmtree(tmpdir) @property def results(self): return [Artifact([self.output], self.name)] @property def is_complete(self): return os.path.isfile(self.output)
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging import os import shutil import subprocess import tempfile from string import Template from .artifact import Artifact LOG = logging.getLogger(__name__) class MWM(object): name = 'mwm' description = 'maps.me MWM' cmd = Template('generate_mwm.sh $input') def __init__(self, input): """ Initialize the MWM generation utility. Args: pbf: the source PBF """ self.input = input self.output = os.path.splitext(input)[0] + '.mwm' def run(self): if self.is_complete: LOG.debug("Skipping MWM, file exists") return convert_cmd = self.cmd.safe_substitute({ 'input': self.input, }) LOG.debug('Running: %s' % convert_cmd) tmpdir = tempfile.mkdtemp() env = os.environ.copy() env.update(MWM_WRITABLE_DIR=tmpdir, TARGET=os.path.dirname(self.output)) try: subprocess.check_call( convert_cmd, env=env, shell=True, executable='/bin/bash', stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOG.debug('generate_mwm.sh complete') finally: shutil.rmtree(tmpdir) @property def results(self): return [Artifact([self.output], self.name)] @property def is_complete(self): return os.path.isfile(self.output)
bsd-3-clause
Python
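Two changes are at work in the record above: HOME now points into the scratch directory, so anything the tool drops under $HOME stays contained, and the stdout/stderr=subprocess.PIPE arguments are gone. Without explicit stdout/stderr, check_call lets the child inherit the parent's streams, so errors from generate_mwm.sh surface in the logs; piping into check_call is also a deadlock risk once the pipe buffer fills, because nothing ever reads it. A self-contained sketch of the pattern (assumes a POSIX system with bash):

import os
import subprocess
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    env = os.environ.copy()
    env.update(HOME=tmpdir)  # confine anything written under $HOME to the scratch dir
    # no stdout/stderr arguments: the child's output flows straight to ours
    subprocess.check_call('echo "HOME is $HOME"', env=env, shell=True,
                          executable='/bin/bash')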
12efb71143a18e191e05a1b5f0e6d7c59854e0ba
fix brampton scraper class name
opencivicdata/scrapers-ca,opencivicdata/scrapers-ca
ca_on_brampton/__init__.py
ca_on_brampton/__init__.py
# coding: utf-8 from utils import CanadianJurisdiction class Brampton(CanadianJurisdiction): jurisdiction_id = u'ocd-jurisdiction/country:ca/csd:3521010/council' geographic_code = 3521010 division_name = u'Brampton' name = u'Brampton City Council' url = 'http://www.brampton.ca'
# coding: utf-8 from utils import CanadianJurisdiction class London(CanadianJurisdiction): jurisdiction_id = u'ocd-jurisdiction/country:ca/csd:3521010/council' geographic_code = 3521010 division_name = u'Brampton' name = u'Brampton City Council' url = 'http://www.brampton.ca'
mit
Python
a4fbc3372a446861f086d847186726b80443f212
add utils for printing results; add ndiff table
azide0x37/CausalInference,laurencium/CausalInference
causalinference/results.py
causalinference/results.py
import numpy as np from scipy.stats import norm class Results(object): def __init__(self, causal): self.causal = causal self.table_width = 80 def _varnames(self, varnums): return ['X'+str(varnum+1) for varnum in varnums] def _make_row(self, entries): col_width = self.table_width // len(entries) first_col_width = col_width + self.table_width % len(entries) return ('%'+str(first_col_width)+'s' + ('%'+str(col_width)+'.3f')*(len(entries)-1)) % entries def ndiff(self): varnames = self._varnames(xrange(self.causal.K)) X_t_mean = self.causal.X_t.mean(0) X_t_sd = np.sqrt(self.causal.X_t.var(0)) X_c_mean = self.causal.X_c.mean(0) X_c_sd = np.sqrt(self.causal.X_c.var(0)) for i in xrange(self.causal.K): print self._make_row((varnames[i], X_t_mean[i], X_t_sd[i], X_c_mean[i], X_c_sd[i], self.causal.ndiff[i])) def propensity(self): if not hasattr(self.causal, 'pscore'): self.causal.propensity() print 'Coefficients:', self.causal.pscore['coeff'] print 'Log-likelihood:', self.causal.pscore['loglike'] def summary(self): header = ('%8s'+'%12s'*4+'%24s') % ('', 'est', 'std err', 'z', 'P>|z|', '[95% Conf. Int.]') print header print '-' * len(header) tuples = (('ATE', self.causal.ate, self.causal.ate_se), ('ATT', self.causal.att, self.causal.att_se), ('ATC', self.causal.atc, self.causal.atc_se)) for (name, coef, se) in tuples: t = coef / se p = 1 - norm.cdf(np.abs(t)) lw = coef - 1.96*se up = coef + 1.96*se print self._make_row((name, coef, se, t, p, lw, up))
import numpy as np from scipy.stats import norm class Results(object): def __init__(self, causal): self.causal = causal def ndiff(self): print self.causal.ndiff def propensity(self): if not hasattr(self.causal, 'pscore'): self.causal.propensity() print 'Coefficients:', self.causal.pscore['coeff'] print 'Log-likelihood:', self.causal.pscore['loglike'] def summary(self): header = ('%8s'+'%12s'*4+'%24s') % ('', 'coef', 'std err', 'z', 'P>|z|', '[95% Conf. Int.]') print header print '-' * len(header) tuples = (('ATE', self.causal.ate, self.causal.ate_se), ('ATT', self.causal.att, self.causal.att_se), ('ATC', self.causal.atc, self.causal.atc_se)) for (name, coef, se) in tuples: t = coef / se p = 1 - norm.cdf(np.abs(t)) lw = coef - 1.96*se up = coef + 1.96*se print ('%8s'+'%12.3f'*6) % (name, coef, se, t, p, lw, up)
bsd-3-clause
Python
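The _make_row helper in the new_contents divides table_width evenly among the entries and gives the first column the remainder, so every printed row comes out exactly table_width characters wide. Worked through for a six-entry ndiff row (values are illustrative):

table_width = 80
entries = ('X1', 0.123, 0.456, 0.789, 0.321, 0.654)
col_width = table_width // len(entries)                   # 80 // 6 = 13
first_col_width = col_width + table_width % len(entries)  # 13 + 2 = 15
fmt = '%' + str(first_col_width) + 's' + ('%' + str(col_width) + '.3f') * (len(entries) - 1)
row = fmt % entries
print(len(row))  # 80: one 15-wide name column plus five 13-wide numeric columns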
d0ce2b074ffd603c507069d8a5ab1189fad0ca56
Update a version number from trunk r9016
VcamX/pywikibot-core,npdoty/pywikibot,npdoty/pywikibot,emijrp/pywikibot-core,darthbhyrava/pywikibot-local,magul/pywikibot-core,magul/pywikibot-core,h4ck3rm1k3/pywikibot-core,trishnaguha/pywikibot-core,xZise/pywikibot-core,valhallasw/pywikibot-core,Darkdadaah/pywikibot-core,icyflame/batman,PersianWikipedia/pywikibot-core,hasteur/g13bot_tools_new,wikimedia/pywikibot-core,happy5214/pywikibot-core,Darkdadaah/pywikibot-core,smalyshev/pywikibot-core,happy5214/pywikibot-core,hasteur/g13bot_tools_new,wikimedia/pywikibot-core,TridevGuha/pywikibot-core,jayvdb/pywikibot-core,h4ck3rm1k3/pywikibot-core,hasteur/g13bot_tools_new,jayvdb/pywikibot-core
pywikibot/families/wikia_family.py
pywikibot/families/wikia_family.py
# -*- coding: utf-8 -*- __version__ = '$Id$' import family # The Wikia Search family # user-config.py: usernames['wikia']['wikia'] = 'User name' class Family(family.Family): def __init__(self): family.Family.__init__(self) self.name = u'wikia' self.langs = { u'wikia': None, } def hostname(self, code): return u'www.wikia.com' def version(self, code): return "1.16.2" def scriptpath(self, code): return '' def apipath(self, code): return '/api.php'
# -*- coding: utf-8 -*- __version__ = '$Id$' import family # The Wikia Search family # user-config.py: usernames['wikia']['wikia'] = 'User name' class Family(family.Family): def __init__(self): family.Family.__init__(self) self.name = u'wikia' self.langs = { u'wikia': None, } def hostname(self, code): return u'www.wikia.com' def version(self, code): return "1.15.1" def scriptpath(self, code): return '' def apipath(self, code): return '/api.php'
mit
Python
69642fbfa143d475b3dcc548bffbda8a6dd6c680
Enable template caching in production
XeryusTC/rotd,XeryusTC/rotd,XeryusTC/rotd
rotd/settings/production.py
rotd/settings/production.py
# -*- coding: utf-8 -*- from .base import * from .util import get_env_setting DEBUG = False DOMAIN = get_env_setting('ROTD_DOMAIN') ALLOWED_HOSTS = [ DOMAIN, ] DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql_psycopg2", "NAME": get_env_setting('ROTD_DB_NAME'), "USER": get_env_setting('ROTD_DB_USER'), "PASSWORD": get_env_setting('ROTD_DB_PASSWORD'), "HOST": "localhost", "PORT": "", }, } SECRET_KEY = get_env_setting('ROTD_SECRET_KEY') EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = get_env_setting('ROTD_EMAIL_HOST') EMAIL_HOST_PASSWORD = get_env_setting('ROTD_EMAIL_HOST_PASSWORD') EMAIL_HOST_USER = get_env_setting('ROTD_EMAIL_HOST_USER') EMAIL_PORT = get_env_setting('ROTD_EMAIL_PORT') EMAIL_USE_TLS = True TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.abspath(os.path.join(BASE_DIR, 'templates'))], 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'loaders': [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ], }, }, ]
# -*- coding: utf-8 -*- from .base import * from .util import get_env_setting DEBUG = False DOMAIN = get_env_setting('ROTD_DOMAIN') ALLOWED_HOSTS = [ DOMAIN, ] DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql_psycopg2", "NAME": get_env_setting('ROTD_DB_NAME'), "USER": get_env_setting('ROTD_DB_USER'), "PASSWORD": get_env_setting('ROTD_DB_PASSWORD'), "HOST": "localhost", "PORT": "", }, } SECRET_KEY = get_env_setting('ROTD_SECRET_KEY') EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = get_env_setting('ROTD_EMAIL_HOST') EMAIL_HOST_PASSWORD = get_env_setting('ROTD_EMAIL_HOST_PASSWORD') EMAIL_HOST_USER = get_env_setting('ROTD_EMAIL_HOST_USER') EMAIL_PORT = get_env_setting('ROTD_EMAIL_PORT') EMAIL_USE_TLS = True
agpl-3.0
Python
faaf1d64fc8c5b15c346f70288235426f0647757
use /usr/bin/env python to run the script
tracysmith/RGAPepPipe,tracysmith/RGAPepPipe,tracysmith/RGAPepPipe
FulltoSNP.py
FulltoSNP.py
#!/usr/bin/env python import sys import re import itertools import math from Bio import SeqIO #SNP alignment from full Alignment nexus file #Check for correct commandline arguments if len(sys.argv) != 4: print("Usage: FulltoSNP.py <nexus file> <output file> <threshold>") sys.exit(0) #Get filenames InFileName = sys.argv[1] OutFileName = sys.argv[2] threshold = sys.argv[3] PosOutFileName = sys.argv[2]+'positions' record_dict = SeqIO.to_dict(SeqIO.parse(InFileName,"nexus")) #seperate speciesnames from sequences seqs = [] titles = [] for key in record_dict: titles.append(key) x = record_dict[key] seqs.append(x.seq) #transpose string lists thresh = math.ceil(float(threshold) * len(seqs)) print(thresh) seqsTran = zip(*seqs) snps = [] #for every tuple check if value is the same, if so remove tuple pos = 1 positions=[] for s in seqsTran[:]: if len(set(s))!=1 and s.count('-')<= thresh: snps.append(s) positions.append(pos) pos=pos+1 print(len(positions)) seqsTran = [] results = zip(*snps) for i in range(len(results)): results[i] = ''.join(results[i]) SeqDict={} print(len(results[0])) for i in range(len(results)): SeqDict[titles[i]]=results[i] OutFile = open(OutFileName,'w') #write file header OutFile.write("#NEXUS" + "\n" + "Begin DATA;" + "\n\t" + "Dimensions ntax=" + str(len(SeqDict)) + " nchar=" + str(len(results[0])) + ";" + "\n\t" + "Format datatype=DNA gap=-;" + "\n\t" + "Matrix" + "\n") #write all of the SNPs into the new file for key in SeqDict: newSeq = "".join(SeqDict[key]) OutFile.write(key + "\n" + newSeq + "\n") OutFile.write(";" + "\n" + "END;") OutFile.close() OutFile2 = open(PosOutFileName,'w') for i in positions: OutFile2.write(str(i)+'\n') OutFile2.close()
#!/usr/bin/env python2.6 import sys import re import itertools import math from Bio import SeqIO #SNP alignment from full Alignment nexus file #Check for correct commandline arguments if len(sys.argv) != 4: print("Usage: FulltoSNP.py <nexus file> <output file> <threshold>") sys.exit(0) #Get filenames InFileName = sys.argv[1] OutFileName = sys.argv[2] threshold = sys.argv[3] PosOutFileName = sys.argv[2]+'positions' record_dict = SeqIO.to_dict(SeqIO.parse(InFileName,"nexus")) #seperate speciesnames from sequences seqs = [] titles = [] for key in record_dict: titles.append(key) x = record_dict[key] seqs.append(x.seq) #transpose string lists thresh = math.ceil(float(threshold) * len(seqs)) print(thresh) seqsTran = zip(*seqs) snps = [] #for every tuple check if value is the same, if so remove tuple pos = 1 positions=[] for s in seqsTran[:]: if len(set(s))!=1 and s.count('-')<= thresh: snps.append(s) positions.append(pos) pos=pos+1 print(len(positions)) seqsTran = [] results = zip(*snps) for i in range(len(results)): results[i] = ''.join(results[i]) SeqDict={} print(len(results[0])) for i in range(len(results)): SeqDict[titles[i]]=results[i] OutFile = open(OutFileName,'w') #write file header OutFile.write("#NEXUS" + "\n" + "Begin DATA;" + "\n\t" + "Dimensions ntax=" + str(len(SeqDict)) + " nchar=" + str(len(results[0])) + ";" + "\n\t" + "Format datatype=DNA gap=-;" + "\n\t" + "Matrix" + "\n") #write all of the SNPs into the new file for key in SeqDict: newSeq = "".join(SeqDict[key]) OutFile.write(key + "\n" + newSeq + "\n") OutFile.write(";" + "\n" + "END;") OutFile.close() OutFile2 = open(PosOutFileName,'w') for i in positions: OutFile2.write(str(i)+'\n') OutFile2.close()
mit
Python
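Pinning the minor version in the shebang breaks on any host that ships only a bare `python` (or a different 2.x); `#!/usr/bin/env python` defers to whatever interpreter PATH resolves first. A two-line check of what env would pick on the current machine:

import shutil
print(shutil.which('python'))  # the binary `#!/usr/bin/env python` resolves; None if absent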
d01bb6e89c6fcfe8a17d90f3ace175ad26f921b5
Support CSV files beginning with a byte order mark
git-keeper/git-keeper,git-keeper/git-keeper
git-keeper-core/gkeepcore/local_csv_files.py
git-keeper-core/gkeepcore/local_csv_files.py
# Copyright 2016 Nathan Sommer and Ben Coleman # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Provides concrete classes for reading and writing local CSV files as well as a function for getting rows from a local CSV file. """ import csv from gkeepcore.csv_files import CSVReader, CSVWriter, CSVError def csv_rows(file_path: str) -> list: """ Retrieve rows from a local CSV file. :param file_path: path to the file :return: rows as a list of lists """ try: with open(file_path) as f: rows = list(csv.reader(f)) except csv.Error: raise CSVError('Error reading from {0}'.format(file_path)) return rows class LocalCSVReader(CSVReader): """Allows reading from a local CSV file.""" def __init__(self, file_path): """ :param file_path: path to the CSV file to read """ try: with open(file_path, encoding='utf-8-sig') as f: self._rows = list(csv.reader(f)) except (csv.Error, OSError): raise CSVError('Error reading from {0}'.format(file_path)) def get_rows(self) -> list: """ Retrieve the rows from the CSV file :return: list of lists representing all rows from the file """ return self._rows class LocalCSVWriter(CSVWriter): """Allows writing to a local CSV file.""" def __init__(self, file_path): """ :param file_path: path to the CSV file to write """ self._file_path = file_path def write_rows(self, rows): """ Write rows to the file :param rows: list of lists (or tuples) to write """ try: with open(self._file_path, 'w') as f: writer = csv.writer(f) for row in rows: writer.writerow(row) except OSError as e: raise CSVError('Error writing to {0}' .format(self._file_path))
# Copyright 2016 Nathan Sommer and Ben Coleman # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Provides concrete classes for reading and writing local CSV files as well as a function for getting rows from a local CSV file. """ import csv from gkeepcore.csv_files import CSVReader, CSVWriter, CSVError def csv_rows(file_path: str) -> list: """ Retrieve rows from a local CSV file. :param file_path: path to the file :return: rows as a list of lists """ try: with open(file_path) as f: rows = list(csv.reader(f)) except csv.Error: raise CSVError('Error reading from {0}'.format(file_path)) return rows class LocalCSVReader(CSVReader): """Allows reading from a local CSV file.""" def __init__(self, file_path): """ :param file_path: path to the CSV file to read """ try: with open(file_path) as f: self._rows = list(csv.reader(f)) except (csv.Error, OSError): raise CSVError('Error reading from {0}'.format(file_path)) def get_rows(self) -> list: """ Retrieve the rows from the CSV file :return: list of lists representing all rows from the file """ return self._rows class LocalCSVWriter(CSVWriter): """Allows writing to a local CSV file.""" def __init__(self, file_path): """ :param file_path: path to the CSV file to write """ self._file_path = file_path def write_rows(self, rows): """ Write rows to the file :param rows: list of lists (or tuples) to write """ try: with open(self._file_path, 'w') as f: writer = csv.writer(f) for row in rows: writer.writerow(row) except OSError as e: raise CSVError('Error writing to {0}' .format(self._file_path))
agpl-3.0
Python
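CSV exports from Windows tools often begin with a UTF-8 byte order mark; decoded as plain utf-8 the BOM stays glued to the first header cell, while the utf-8-sig codec strips it when present and is a no-op otherwise, which is why the reader above switched codecs. A self-contained demonstration:

import csv
import io

raw = b'\xef\xbb\xbfname,email\r\nalice,alice@example.com\r\n'  # BOM, then CSV data
plain = list(csv.reader(io.StringIO(raw.decode('utf-8'))))
sig = list(csv.reader(io.StringIO(raw.decode('utf-8-sig'))))
print(repr(plain[0][0]))  # '\ufeffname': the BOM leaks into the first field
print(repr(sig[0][0]))    # 'name'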
1657e46cd5c2a81df4cbb73b292b0bf9072d5c51
Fix test: make sure that Isolation Forest actually make a categorical split
h2oai/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,h2oai/h2o-3
h2o-py/tests/testdir_tree/pyunit_tree_irf.py
h2o-py/tests/testdir_tree/pyunit_tree_irf.py
import h2o from h2o.tree import H2OTree from h2o.estimators import H2OIsolationForestEstimator from tests import pyunit_utils def check_tree(tree, tree_number, tree_class = None): assert tree is not None assert len(tree) > 0 assert tree._tree_number == tree_number assert tree._tree_class == tree_class assert tree.root_node is not None assert tree.left_children is not None assert tree.right_children is not None assert tree.thresholds is not None assert tree.nas is not None assert tree.descriptions is not None assert tree.node_ids is not None assert tree.model_id is not None assert tree.levels is not None assert tree.root_node.na_direction is not None assert tree.root_node.id is not None def irf_tree_Test(): cat_frame = h2o.create_frame(cols=10, categorical_fraction=1, seed=42) # check all columns are categorical assert set(cat_frame.types.values()) == set(['enum']) iso_model = H2OIsolationForestEstimator(seed=42) iso_model.train(training_frame=cat_frame) tree = H2OTree(iso_model, 5) check_tree(tree, 5, None) print(tree) if __name__ == "__main__": pyunit_utils.standalone_test(irf_tree_Test) else: irf_tree_Test()
import h2o from h2o.tree import H2OTree from h2o.estimators import H2OIsolationForestEstimator from tests import pyunit_utils def check_tree(tree, tree_number, tree_class = None): assert tree is not None assert len(tree) > 0 assert tree._tree_number == tree_number assert tree._tree_class == tree_class assert tree.root_node is not None assert tree.left_children is not None assert tree.right_children is not None assert tree.thresholds is not None assert tree.nas is not None assert tree.descriptions is not None assert tree.node_ids is not None assert tree.model_id is not None assert tree.levels is not None assert tree.root_node.na_direction is not None assert tree.root_node.id is not None def irf_tree_Test(): prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv")) prostate["RACE"] = prostate["RACE"].asfactor() iso_model = H2OIsolationForestEstimator() iso_model.train(training_frame = prostate, x = list(set(prostate.col_names) - set(["ID", "CAPSULE"]))) tree = H2OTree(iso_model, 5) check_tree(tree, 5, None) print(tree) if __name__ == "__main__": pyunit_utils.standalone_test(irf_tree_Test) else: irf_tree_Test()
apache-2.0
Python
7e5477682dfc0d907fe55a489c75179a6e4c832b
fix Swale import script
DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
polling_stations/apps/data_collection/management/commands/import_swale.py
polling_stations/apps/data_collection/management/commands/import_swale.py
from data_collection.management.commands import BaseCsvStationsShpDistrictsImporter class Command(BaseCsvStationsShpDistrictsImporter): srid = 27700 council_id = 'E07000113' districts_name = 'shp/Swale Polling Districts' stations_name = 'Swale 21 Feb 2017 Polling scheme station numbers.csv' elections = ['local.kent.2017-05-04'] def district_record_to_dict(self, record): code = str(record[0]).strip() return { 'internal_council_id': code, 'name': str(record[1]).strip(), } def station_record_to_dict(self, record): codes = record.pd.split(" and ") stations = [] for code in codes: stations.append({ 'internal_council_id': code, 'postcode': '', 'address': record.premises, 'polling_district_id': code, 'location': None, }) return stations
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter class Command(BaseShpStationsShpDistrictsImporter): srid = 27700 council_id = 'E07000113' districts_name = 'shp/Swale Polling Districts' stations_name = 'shp/Swale Polling Stations.shp' #elections = ['local.kent.2017-05-04'] elections = [] def district_record_to_dict(self, record): code = str(record[0]).strip() return { 'internal_council_id': code, 'name': str(record[1]).strip(), 'polling_station_id': code, } def station_record_to_dict(self, record): return { 'internal_council_id': str(record[0]).strip(), 'postcode': '', 'address': str(record[4]).strip(), }
bsd-3-clause
Python
fc7f51877b6b991ad5a25afb755dd7a35e91dfea
Use get_or_create to avoid duplicate objects
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py
cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations import uuid from cla_common.constants import RESEARCH_CONTACT_VIA def create_default_contact_for_research_methods(apps, schema_editor): ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod") for value, name in RESEARCH_CONTACT_VIA: ContactResearchMethods.objects.get_or_create(method=value, defaults={"reference": uuid.uuid4()}) def rollback_default_contact_for_research_methods(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [("legalaid", "0021_auto_20190515_1042")] operations = [ migrations.RunPython( create_default_contact_for_research_methods, rollback_default_contact_for_research_methods ) ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations import uuid from cla_common.constants import RESEARCH_CONTACT_VIA def create_default_contact_for_research_methods(apps, schema_editor): ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod") for value, name in RESEARCH_CONTACT_VIA: ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save() def rollback_default_contact_for_research_methods(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [("legalaid", "0021_auto_20190515_1042")] operations = [ migrations.RunPython( create_default_contact_for_research_methods, rollback_default_contact_for_research_methods ) ]
mit
Python
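The old migration had two defects: objects.create() already persists the row, so the chained .save() issued a second, redundant UPDATE, and an unconditional create inserts duplicates whenever the migration body runs against rows that already exist. get_or_create makes the step idempotent, and its defaults are consulted only on insert, so an existing row keeps its original reference. A dict-backed sketch of that contract (runnable without Django):

import uuid

def get_or_create(table, lookup, defaults):
    # look up first, insert only on a miss; mirrors the ORM method's contract
    if lookup in table:
        return table[lookup], False
    table[lookup] = dict(defaults)
    return table[lookup], True

table = {}
print(get_or_create(table, 'PHONE', {'reference': uuid.uuid4()})[1])  # True: inserted
print(get_or_create(table, 'PHONE', {'reference': uuid.uuid4()})[1])  # False: reused, reference kept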
d6a8e42cb3bd963632500541b5e4e71c700c246e
Fix migration
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
nodeconductor/cost_tracking/migrations/0006_add_pricelist_backend_ids.py
nodeconductor/cost_tracking/migrations/0006_add_pricelist_backend_ids.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ('cost_tracking', '0005_expand_item_type_size'), ] operations = [ migrations.RenameField( model_name='defaultpricelistitem', old_name='service_content_type', new_name='resource_content_type', ), migrations.AddField( model_name='defaultpricelistitem', name='backend_choice_id', field=models.CharField(max_length=255, blank=True), preserve_default=True, ), migrations.AddField( model_name='defaultpricelistitem', name='backend_option_id', field=models.CharField(max_length=255, blank=True), preserve_default=True, ), migrations.AddField( model_name='defaultpricelistitem', name='backend_product_id', field=models.CharField(max_length=255, blank=True), preserve_default=True, ), migrations.AddField( model_name='pricelistitem', name='resource_content_type', field=models.ForeignKey(related_name='+', default=1, to='contenttypes.ContentType'), preserve_default=False, ), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ('cost_tracking', '0005_expand_item_type_size'), ] operations = [ migrations.RenameField( model_name='defaultpricelistitem', old_name='service_content_type', new_name='resource_content_type', ), migrations.AddField( model_name='defaultpricelistitem', name='backend_choice_id', field=models.CharField(max_length=255, blank=True), preserve_default=True, ), migrations.AddField( model_name='defaultpricelistitem', name='backend_option_id', field=models.CharField(max_length=255, blank=True), preserve_default=True, ), migrations.AddField( model_name='defaultpricelistitem', name='backend_product_id', field=models.CharField(max_length=255, blank=True), preserve_default=True, ), migrations.AddField( model_name='pricelistitem', name='resource_content_type', field=models.ForeignKey(related_name='+', default=0, to='contenttypes.ContentType'), preserve_default=False, ), ]
mit
Python
0b14f93121f3feaa4433eaf8275f5ad40c646b48
Update NumberPathShuffled.py
hectorpefo/hectorpefo.github.io,hectorpefo/hectorpefo.github.io,hectorpefo/hectorpefo.github.io,hectorpefo/hectorpefo.github.io
_includes/NumberPathShuffled.py
_includes/NumberPathShuffled.py
from random import shuffle N = 100 shufflePeriod = 10000000 print(N) connected = [[]] for i in range(N): connected.append([]) for m in range(1,N+1): # for n in range(1,N+1): for n in range(N,0,-1): if ((not m == n) and (m%n == 0 or n%m == 0)): connected[m].append(n) def explore(path): global longestLength, longestPath, connected, shuffleCounter, shufflePeriod shuffleCounter += 1 if shuffleCounter == shufflePeriod: shuffleCounter = 0 for L in connected: shuffle(L) print "Shuffled still",longestLength,longestPath isExtendable = 0 n = path[-1] # shuffledconnected = list(connected[n]) # shuffle(shuffledconnected) for m in connected[n]: #for m in shuffledconnected: if not m in path: isExtendable = 1 newPath = list(path) newPath.append(m) explore(newPath) if not isExtendable: if len(path) > longestLength: longestLength = len(path) longestPath = path print longestLength,longestPath longestPath = [] longestLength = 0 #for n in range(1,N+1): # print(n) # explore([n]) shuffleCounter = 0 explore([81]) print("Longest path length is",longestLength) print(longestPath)
from random import shuffle N = 100 shufflePeriod = 10000000 print(N) connected = [[]] for i in range(N): connected.append([]) for m in range(1,N+1): # for n in range(1,N+1): for n in range(N,0,-1): if ((not m == n) and (m%n == 0 or n%m == 0)): connected[m].append(n) def explore(path): global longestLength, longestPath, connected, shuffleCounter, shufflePeriod shuffleCounter += 1 if shuffleCounter == shufflePeriod: shuffleCounter = 0 for L in connected: shuffle(L) print "Shuffled" isExtendable = 0 n = path[-1] # shuffledconnected = list(connected[n]) # shuffle(shuffledconnected) for m in connected[n]: #for m in shuffledconnected: if not m in path: isExtendable = 1 newPath = list(path) newPath.append(m) explore(newPath) if not isExtendable: if len(path) > longestLength: longestLength = len(path) longestPath = path print longestLength,longestPath longestPath = [] longestLength = 0 #for n in range(1,N+1): # print(n) # explore([n]) shuffleCounter = 0 explore([81]) print("Longest path length is",longestLength) print(longestPath)
mit
Python
936382b1744c2a9b5f3082abe9a3e0f2fbba58d0
Return None when an error while reading config occurs
dbaelz/adbons
src/config.py
src/config.py
import yaml SECTION_APP = "app" SECTION_DEVICE = "device" KEY_DEFAULT = "default" def read_value(section, key): try: with open(".adbons.yml", 'r') as ymlfile: config = yaml.safe_load(ymlfile) return config[section][key] except: pass def write_value(section, key, value): try: with open(".adbons.yml", 'r+') as ymlfile: config = yaml.safe_load(ymlfile) if section not in config: config[section] = {} config[section][key] = value except: config = {} config[section] = {} config[section][key] = value with open(".adbons.yml", 'w') as ymlfile: yaml.dump(config, ymlfile, default_flow_style=False)
import yaml SECTION_APP = "app" SECTION_DEVICE = "device" KEY_DEFAULT = "default" def read_value(section, key): with open(".adbons.yml", 'r') as ymlfile: config = yaml.safe_load(ymlfile) try: return config[section][key] except: return "" def write_value(section, key, value): try: with open(".adbons.yml", 'r+') as ymlfile: config = yaml.safe_load(ymlfile) if section not in config: config[section] = {} config[section][key] = value except: config = {} config[section] = {} config[section][key] = value with open(".adbons.yml", 'w') as ymlfile: yaml.dump(config, ymlfile, default_flow_style=False)
bsd-2-clause
Python
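Moving the open() inside the try means a missing .adbons.yml now falls through the except and the function returns None implicitly, matching the commit subject; the old version returned '' only for missing keys and raised when the file itself was absent. None lets callers distinguish "not configured" from a legitimately empty value. A compact sketch of the distinction:

def read_value(cfg, section, key):
    # dict-backed sketch of the same lookup-with-fallback pattern
    try:
        return cfg[section][key]
    except KeyError:
        return None  # an explicit return is clearer than falling off the end

cfg = {'app': {'default': 'com.example.app'}}
print(read_value(cfg, 'app', 'default'))             # 'com.example.app'
print(read_value(cfg, 'device', 'default') is None)  # True: absent, not ''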
89a1a37e91ace4af2983e63ef68ff1d22811aa32
Fix syntax error
hackeriet/nfcd,hackeriet/pyhackeriet,hackeriet/pyhackeriet,hackeriet/pyhackeriet,hackeriet/nfcd,hackeriet/nfcd
hackeriet/cardreaderd/__init__.py
hackeriet/cardreaderd/__init__.py
#!/usr/bin/env python
from hackeriet import mifare
from hackeriet.mqtt import MQTT
from hackeriet.door import users
import os, time, logging

logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')

door_name = os.getenv("DOOR_NAME", 'hackeriet')
door_topic = "hackeriet/door/%s/open" % door_name
door_timeout = int(os.getenv("DOOR_TIMEOUT", 2))

mqtt = MQTT()

def main():
    logging.debug('Starting main loop')
    while True:
        users.load()
        # Read data from card reader
        logging.debug('mifare: waiting for data...')
        data = mifare.try_read()
        if data:
            logging.debug('mifare: data read')
            user = users.auth(data[0:16])
            if user:
                ascii_user = user.encode('ascii', 'replace').decode('ascii')
                logging.info('auth: card read for user %s' % ascii_user)
                mqtt(door_topic, user)
            else:
                logging.debug('auth: card data does not belong to a user: %s' % data[0:16])
            # Avoid spewing messages every single ms while a card is in front of the reader
            time.sleep(door_timeout)
        else:
            logging.debug('mifare: no data read in last attempt')

if __name__ == "__main__":
    main()
#!/usr/bin/env python from hackeriet import mifare from hackeriet.mqtt import MQTT from hackeriet.door import users import os, logging logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') door_name = os.getenv("DOOR_NAME", 'hackeriet') door_topic = "hackeriet/door/%s/open" % door_name door_timeout = int(os.getenv("DOOR_TIMEOUT", 2)) mqtt = MQTT() def main(): logging.debug('Starting main loop') while True: users.load() # Read data from card reader logging.debug('mifare: waiting for data...') data = mifare.try_read() if data: logging.debug('mifare: data read') user = users.auth(data[0:16]) if user: ascii_user = user.encode('ascii', 'replace').decode('ascii') logging.info('auth: card read for user %s' % ascii_user) mqtt(door_topic, user) else: logging.debug('auth: card data does not belong to a user: %s' % data[0:16]) # Avoid spewing messages every single ms while a card is in front of the reader time.sleep(door_timeout) else logging.debug('mifare: no data read in last attempt') if __name__ == "__main__": main()
apache-2.0
Python
15403668edf9b81b9dbb2c3b0075416e422ce55c
bump version to dev55
euroscipy/symposion,pydata/symposion,pinax/symposion,pyohio/symposion,toulibre/symposion,mbrochh/symposion,pyconau2017/symposion,pyconau2017/symposion,mbrochh/symposion,faulteh/symposion,TheOpenBastion/symposion,toulibre/symposion,TheOpenBastion/symposion,miurahr/symposion,python-spain/symposion,python-spain/symposion,miurahr/symposion,faulteh/symposion,pinax/symposion,pyohio/symposion,pydata/symposion,euroscipy/symposion
symposion/__init__.py
symposion/__init__.py
__version__ = "1.0b1.dev55"
__version__ = "1.0b1.dev54"
bsd-3-clause
Python
ec14293f02de84a12ce602d6a0dfbb3c21203bc4
fix data types from ENV
AppEnlight/channelstream,AppEnlight/channelstream,AppEnlight/channelstream
channelstream/cli/utils.py
channelstream/cli/utils.py
import argparse import copy import logging import json import pkg_resources import jinja2 import os from channelstream.cli import CONFIGURABLE_PARAMS, SHARED_DEFAULTS from channelstream.utils import set_config_types log = logging.getLogger(__name__) log.setLevel(logging.INFO) def main(): config = copy.deepcopy(SHARED_DEFAULTS) parser = argparse.ArgumentParser(add_help=True) parser.add_argument( "operation", help="Operation", default=None, choices=["make_config"] ) parser.add_argument("-j", "--json", dest="json", help="Config JSON", default=None) parser.add_argument( "-o", "--output", dest="output", help="Output file", required=True ) args = parser.parse_args() if args.json: data_json = json.loads(args.json) for key in CONFIGURABLE_PARAMS: conf_value = data_json.get(key) if conf_value: config[key] = conf_value else: for key in CONFIGURABLE_PARAMS: conf_value = os.environ.get(f"channelstream_{key}".upper()) if conf_value is not None: config[key] = conf_value config = set_config_types(config) if args.operation == "make_config": template_path = os.path.join("templates", "ini", "channelstream.ini.jinja2") template_str = pkg_resources.resource_string("channelstream", template_path) template = jinja2.Template(template_str.decode("utf8")) template_vars = config compiled = template.render(**template_vars) with open(args.output, "w") as f: f.write(compiled) log.info("Config written")
import argparse import copy import logging import json import pkg_resources import jinja2 import os from channelstream.cli import CONFIGURABLE_PARAMS, SHARED_DEFAULTS log = logging.getLogger(__name__) log.setLevel(logging.INFO) def main(): config = copy.deepcopy(SHARED_DEFAULTS) parser = argparse.ArgumentParser(add_help=True) parser.add_argument( "operation", help="Operation", default=None, choices=["make_config"] ) parser.add_argument("-j", "--json", dest="json", help="Config JSON", default=None) parser.add_argument( "-o", "--output", dest="output", help="Output file", required=True ) args = parser.parse_args() if args.json: data_json = json.loads(args.json) for key in CONFIGURABLE_PARAMS: conf_value = data_json.get(key) if conf_value: config[key] = conf_value else: for key in CONFIGURABLE_PARAMS: conf_value = os.environ.get(f"channelstream_{key}".upper()) if conf_value is not None: config[key] = conf_value if args.operation == "make_config": template_path = os.path.join("templates", "ini", "channelstream.ini.jinja2") template_str = pkg_resources.resource_string("channelstream", template_path) template = jinja2.Template(template_str.decode("utf8")) template_vars = config compiled = template.render(**template_vars) with open(args.output, "w") as f: f.write(compiled) log.info("Config written")
bsd-3-clause
Python
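Everything read from os.environ is a string, so without coercion a config assembled from CHANNELSTREAM_* variables renders values like 'false' and '8000' into the ini template verbatim. The fix routes the merged dict through set_config_types, imported from channelstream.utils but not shown in the record; a hedged sketch of what such a helper might do, with illustrative key names rather than the project's actual schema:

def set_config_types(config, bool_keys=('debug',), int_keys=('port',)):
    # coerce string values picked up from the environment; sketch only
    out = dict(config)
    for key in bool_keys:
        if isinstance(out.get(key), str):
            out[key] = out[key].strip().lower() in ('1', 'true', 'yes', 'on')
    for key in int_keys:
        if isinstance(out.get(key), str):
            out[key] = int(out[key])
    return out

print(set_config_types({'debug': 'False', 'port': '8000'}))
# {'debug': False, 'port': 8000}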
23fd2953a41d8b087fa5252df2de0baf36244e43
remove stupid debug string
FederatedAI/FATE,FederatedAI/FATE,FederatedAI/FATE
doc/readthedoc/conf.py
doc/readthedoc/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.parser import CommonMarkParser sys.path.insert(0, os.path.abspath('_build_temp/python')) # -- Project information ----------------------------------------------------- project = 'FATE' copyright = '2020, FederatedAI' author = 'FederatedAI' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autosummary', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'autodocsumm', 'recommonmark' ] autosummary_generate = True source_parsers = { '.md': CommonMarkParser, } source_suffix = ['.rst', '.md'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_context = { 'css_files': [ '_static/theme_overrides.css', # override wide tables in RTD theme ], } add_module_names = False master_doc = 'index' # hack to replace rst file link to html link def ultimateReplace(app, docname, source): result = source[0] result = result.replace(".rst", ".html") source[0] = result def setup(app): if not os.path.exists("_build_temp"): import shutil import tempfile from pathlib import Path with tempfile.TemporaryDirectory() as d: shutil.copytree("../..", Path(d).joinpath("_build_temp")) shutil.copytree(Path(d).joinpath("_build_temp"), "_build_temp") app.add_config_value('ultimate_replacements', {}, True) app.connect('source-read', ultimateReplace)
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys from recommonmark.parser import CommonMarkParser sys.path.insert(0, os.path.abspath('_build_temp/python')) print("sage sage sage") # -- Project information ----------------------------------------------------- project = 'FATE' copyright = '2020, FederatedAI' author = 'FederatedAI' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autosummary', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'autodocsumm', 'recommonmark' ] autosummary_generate = True source_parsers = { '.md': CommonMarkParser, } source_suffix = ['.rst', '.md'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_context = { 'css_files': [ '_static/theme_overrides.css', # override wide tables in RTD theme ], } add_module_names = False master_doc = 'index' # hack to replace rst file link to html link def ultimateReplace(app, docname, source): result = source[0] result = result.replace(".rst", ".html") source[0] = result def setup(app): if not os.path.exists("_build_temp"): import shutil import tempfile from pathlib import Path with tempfile.TemporaryDirectory() as d: shutil.copytree("../..", Path(d).joinpath("_build_temp")) shutil.copytree(Path(d).joinpath("_build_temp"), "_build_temp") app.add_config_value('ultimate_replacements', {}, True) app.connect('source-read', ultimateReplace)
apache-2.0
Python
f2181d50fb17be9e1db6129300d720139ca00636
use absolute imports for compatibility with python 2.5
scrapy/parsel
scrapy/selector/__init__.py
scrapy/selector/__init__.py
""" XPath selectors Two backends are currently available: libxml2 and lxml To select the backend explicitly use the SELECTORS_BACKEND variable in your project. Otherwise, libxml2 will be tried first. If libxml2 is not available, lxml will be used. """ from scrapy.conf import settings if settings['SELECTORS_BACKEND'] == 'lxml': from scrapy.selector.lxmlsel import * elif settings['SELECTORS_BACKEND'] == 'libxml2': from scrapy.selector.libxml2sel import * elif settings['SELECTORS_BACKEND'] == 'dummy': from scrapy.selector.dummysel import * else: try: import libxml2 except ImportError: try: import lxml except ImportError: from scrapy.selector.dummysel import * else: from scrapy.selector.lxmlsel import * else: from scrapy.selector.libxml2sel import *
""" XPath selectors Two backends are currently available: libxml2 and lxml To select the backend explicitly use the SELECTORS_BACKEND variable in your project. Otherwise, libxml2 will be tried first. If libxml2 is not available, lxml will be used. """ from scrapy.conf import settings if settings['SELECTORS_BACKEND'] == 'lxml': from .lxmlsel import * elif settings['SELECTORS_BACKEND'] == 'libxml2': from .libxml2sel import * elif settings['SELECTORS_BACKEND'] == 'dummy': from .dummysel import * else: try: import libxml2 except ImportError: try: import lxml except ImportError: from .dummysel import * else: from .lxmlsel import * else: from .libxml2sel import *
bsd-3-clause
Python
d2cadcb9be08730f5ccefec5f3e0316265ebf307
Check request ID value
tisnik/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common,jpopelka/fabric8-analytics-common
integration-tests/features/src/json_utils.py
integration-tests/features/src/json_utils.py
"""Functions for handling JSON responses returned by various API endpoints.""" import string from src.attribute_checks import * def get_value_using_path(obj, path): """Get the attribute value using the XMLpath-like path specification. Return any attribute stored in the nested object and list hierarchy using the 'path' where path consists of: keys (selectors) indexes (in case of arrays) separated by slash, ie. "key1/0/key_x". Usage: get_value_using_path({"x" : {"y" : "z"}}, "x")) -> {"y" : "z"} get_value_using_path({"x" : {"y" : "z"}}, "x/y")) -> "z" get_value_using_path(["x", "y", "z"], "0")) -> "x" get_value_using_path(["x", "y", "z"], "1")) -> "y" get_value_using_path({"key1" : ["x", "y", "z"], "key2" : ["a", "b", "c", "d"]}, "key1/1")) -> "y" get_value_using_path({"key1" : ["x", "y", "z"], "key2" : ["a", "b", "c", "d"]}, "key2/1")) -> "b" """ keys = path.split("/") for key in keys: if key.isdigit(): obj = obj[int(key)] else: obj = obj[key] return obj def check_timestamp_in_json_response(context, attribute): """Check if the timestamp stored in given attribute is correct.""" timestamp = context.response.json().get(attribute) check_timestamp(timestamp) def check_request_id_value_in_json_response(context, attribute_name): """Check the request ID attribute in the JSON response. Check if ID is stored in a format like: '71769af6-0a39-4242-94be-1f84f04c8a56' """ response = context.response assert response is not None json_data = response.json() assert json_data is not None check_attribute_presence(json_data, attribute_name) id_attribute = json_data[attribute_name] assert id_attribute is not None assert check_uuid(id_attribute) def check_id_value_in_json_response(context, id_attribute_name): """Check the ID attribute in the JSON response. Check if ID is stored in a format like: '477e85660c504b698beae2b5f2a28b4e' ie. it is a string with 32 characters containing 32 hexadecimal digits """ response = context.response assert response is not None json_data = response.json() assert json_data is not None check_attribute_presence(json_data, id_attribute_name) id_attribute = json_data[id_attribute_name] assert id_attribute is not None assert isinstance(id_attribute, str) and len(id_attribute) == 32 assert all(char in string.hexdigits for char in id_attribute) def is_empty_json_response(context): """Check if the JSON response is empty (but not None).""" return context.response.json() == {}
"""Functions for handling JSON responses returned by various API endpoints.""" import string from src.attribute_checks import * def get_value_using_path(obj, path): """Get the attribute value using the XMLpath-like path specification. Return any attribute stored in the nested object and list hierarchy using the 'path' where path consists of: keys (selectors) indexes (in case of arrays) separated by slash, ie. "key1/0/key_x". Usage: get_value_using_path({"x" : {"y" : "z"}}, "x")) -> {"y" : "z"} get_value_using_path({"x" : {"y" : "z"}}, "x/y")) -> "z" get_value_using_path(["x", "y", "z"], "0")) -> "x" get_value_using_path(["x", "y", "z"], "1")) -> "y" get_value_using_path({"key1" : ["x", "y", "z"], "key2" : ["a", "b", "c", "d"]}, "key1/1")) -> "y" get_value_using_path({"key1" : ["x", "y", "z"], "key2" : ["a", "b", "c", "d"]}, "key2/1")) -> "b" """ keys = path.split("/") for key in keys: if key.isdigit(): obj = obj[int(key)] else: obj = obj[key] return obj def check_timestamp_in_json_response(context, attribute): """Check if the timestamp stored in given attribute is correct.""" timestamp = context.response.json().get(attribute) check_timestamp(timestamp) def check_id_value_in_json_response(context, id_attribute_name): """Check the ID attribute in the JSON response. Check if ID is stored in a format like: '477e85660c504b698beae2b5f2a28b4e' ie. it is a string with 32 characters containing 32 hexadecimal digits """ response = context.response assert response is not None json_data = response.json() assert json_data is not None check_attribute_presence(json_data, id_attribute_name) id_attribute = json_data[id_attribute_name] assert id_attribute is not None assert isinstance(id_attribute, str) and len(id_attribute) == 32 assert all(char in string.hexdigits for char in id_attribute) def is_empty_json_response(context): """Check if the JSON response is empty (but not None).""" return context.response.json() == {}
apache-2.0
Python
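The new check leans on a check_uuid helper pulled in by the star import from src.attribute_checks, which the record does not show. One plausible shape for such a validator, standard library only (a sketch, not the project's actual code):

import uuid

def check_uuid(value):
    # canonical form: lowercase hex in the hyphenated 8-4-4-4-12 layout
    try:
        return str(uuid.UUID(value)) == value.lower()
    except (ValueError, AttributeError, TypeError):
        return False

print(check_uuid('71769af6-0a39-4242-94be-1f84f04c8a56'))  # True
print(check_uuid('71769af60a39424294be1f84f04c8a56'))      # False: parses, but not hyphenated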
de4e5a34aaa322b2ce83161dd4bce7897953ab73
add Unix socket support to API collector
aaronkaplan/intelmq,aaronkaplan/intelmq,certtools/intelmq,aaronkaplan/intelmq,certtools/intelmq,certtools/intelmq
intelmq/bots/collectors/api/collector_api.py
intelmq/bots/collectors/api/collector_api.py
# SPDX-FileCopyrightText: 2018 tavi.poldma # # SPDX-License-Identifier: AGPL-3.0-or-later # -*- coding: utf-8 -*- """ API Collector bot """ from threading import Thread from typing import Optional import os import socket from intelmq.lib.bot import CollectorBot from intelmq.lib.exceptions import MissingDependencyError try: import tornado.web from tornado.ioloop import IOLoop from tornado.netutil import bind_unix_socket from tornado.httpserver import HTTPServer except ImportError: IOLoop = None else: class Application(tornado.web.Application): def __init__(self, request_handler, *args, **kwargs): self.request_handler = request_handler super().__init__(*args, **kwargs) class MainHandler(tornado.web.RequestHandler): def post(self): data = self.request.body self.application.request_handler(data) class APICollectorBot(CollectorBot): """Collect data by exposing a HTTP API interface""" name: str = "API" port: int = 5000 __collector_empty_process: bool = True provider: str = "APICollector" __is_multithreadable: bool = False use_socket = False socket_path = '/tmp/imq_api_default_socket' _server: Optional[HTTPServer] = None _unix_socket: Optional[socket.socket] = None def init(self): if IOLoop is None: raise MissingDependencyError("tornado") app = Application(self.request_handler, [ ("/intelmq/push", MainHandler), ]) if self.use_socket: self.server = HTTPServer(app) self._unix_socket = bind_unix_socket(self.socket_path) self.server.add_socket(self._unix_socket) else: self.server = app.listen(self.port) self.eventLoopThread = Thread(target=IOLoop.current().start) self.eventLoopThread.daemon = True self.eventLoopThread.start() def request_handler(self, data): report = self.new_report() report.add("raw", data) self.send_message(report) def process(self): pass def shutdown(self): if self.server: # Closes the server and the socket, prevents address already in use self.server.stop() if IOLoop.current(): IOLoop.current().stop() BOT = APICollectorBot
# SPDX-FileCopyrightText: 2018 tavi.poldma # # SPDX-License-Identifier: AGPL-3.0-or-later # -*- coding: utf-8 -*- """ API Collector bot """ from threading import Thread from intelmq.lib.bot import CollectorBot from intelmq.lib.exceptions import MissingDependencyError try: import tornado.web from tornado.ioloop import IOLoop except ImportError: IOLoop = None else: class Application(tornado.web.Application): def __init__(self, request_handler, *args, **kwargs): self.request_handler = request_handler super().__init__(*args, **kwargs) class MainHandler(tornado.web.RequestHandler): def post(self): data = self.request.body self.application.request_handler(data) class APICollectorBot(CollectorBot): """Collect data by exposing a HTTP API interface""" name: str = "API" port: int = 5000 __collector_empty_process: bool = True provider: str = "APICollector" __is_multithreadable: bool = False def init(self): if IOLoop is None: raise MissingDependencyError("tornado") app = Application(self.request_handler, [ ("/intelmq/push", MainHandler), ]) self.server = app.listen(self.port) self.eventLoopThread = Thread(target=IOLoop.current().start) self.eventLoopThread.daemon = True self.eventLoopThread.start() def request_handler(self, data): report = self.new_report() report.add("raw", data) self.send_message(report) def process(self): pass def shutdown(self): if self.server: # Closes the server and the socket, prevents address already in use self.server.stop() if IOLoop.current(): IOLoop.current().stop() BOT = APICollectorBot
agpl-3.0
Python
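With use_socket enabled, Tornado binds the listener to a filesystem path instead of a TCP port, so pushes have to speak HTTP over that socket. A raw-socket client sketch, assuming the collector is running with the default socket_path:

import socket

body = b'{"event": "test"}'
request = (b'POST /intelmq/push HTTP/1.1\r\n'
           b'Host: localhost\r\n'
           b'Content-Type: application/json\r\n'
           b'Content-Length: ' + str(len(body)).encode() + b'\r\n'
           b'Connection: close\r\n\r\n' + body)

client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect('/tmp/imq_api_default_socket')
client.sendall(request)
print(client.recv(4096).decode())
client.close()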
7dc01fa4593e81448db2749d460737cbfa57b63d
Return normalized version
rolandgeider/wger,rolandgeider/wger,petervanderdoes/wger,rolandgeider/wger,petervanderdoes/wger,wger-project/wger,petervanderdoes/wger,wger-project/wger,wger-project/wger,petervanderdoes/wger,wger-project/wger,rolandgeider/wger
wger/__init__.py
wger/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ :copyright: 2011, 2012 by OpenSlides team, see AUTHORS. :license: GNU GPL, see LICENSE for more details. """ VERSION = (1, 9, 0, 'beta', 1) RELEASE = False def get_version(version=None, release=None): """Derives a PEP386-compliant version number from VERSION.""" if version is None: version = VERSION if release is None: release = RELEASE assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') # Now build the two parts of the version number: # main = X.Y[.Z] # sub = .devN - for pre-alpha releases # | {a|b|c}N - for alpha, beta and rc releases main_parts = 2 if version[2] == 0 else 3 main = '.'.join(str(x) for x in version[:main_parts]) if version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} sub = mapping[version[3]] + str(version[4]) else: sub = '' if not release: sub += '.dev0' return main + sub
#!/usr/bin/env python # -*- coding: utf-8 -*- """ :copyright: 2011, 2012 by OpenSlides team, see AUTHORS. :license: GNU GPL, see LICENSE for more details. """ VERSION = (1, 9, 0, 'beta', 1) RELEASE = False def get_version(version=None, release=None): """Derives a PEP386-compliant version number from VERSION.""" if version is None: version = VERSION if release is None: release = RELEASE assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') # Now build the two parts of the version number: # main = X.Y[.Z] # sub = .devN - for pre-alpha releases # | {a|b|c}N - for alpha, beta and rc releases main_parts = 2 if version[2] == 0 else 3 main = '.'.join(str(x) for x in version[:main_parts]) if version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} sub = mapping[version[3]] + str(version[4]) else: sub = '' if not release: sub += '-dev' return main + sub
agpl-3.0
Python
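PEP 440 spells development releases as '.devN'; the old '-dev' suffix only parses through lenient normalization, so strict tooling prefers the '.dev0' the function now appends. With the third-party packaging library (pip install packaging), both spellings normalize to the same version, and dev releases order before the release proper:

from packaging.version import Version

print(Version('1.9b1-dev'))                      # 1.9b1.dev0 after normalization
print(Version('1.9b1.dev0') < Version('1.9b1'))  # True: the dev build precedes the release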
a7a14619f7662ccb510b6a0031a58647cf0b34e7
Remove duplicated path for build script
mwilliamson/whack
whack/builder.py
whack/builder.py
import os import subprocess from . import downloads from .tempdir import create_temporary_dir from .common import WHACK_ROOT from .files import mkdir_p, write_file from .errors import FileNotFoundError def build(package_request, package_dir): with create_temporary_dir() as build_dir: _build_in_dir(package_request, build_dir, package_dir) def _build_in_dir(package_request, build_dir, package_dir): params = package_request.params() package_request.write_source_to(build_dir) build_script = "whack/build" build_script_path = os.path.join(build_dir, build_script) if not os.path.exists(build_script_path): message = "{0} script not found in package source {1}".format( build_script, package_request.source_uri ) raise FileNotFoundError(message) build_env = _params_to_build_env(params) _fetch_downloads(build_dir, build_env) mkdir_p(package_dir) build_command = [ "whack-run", os.path.abspath(package_dir), # package_dir is mounted at WHACK_ROOT build_script_path, # build_script is executed WHACK_ROOT # WHACK_ROOT is passed as the first argument to build_script ] subprocess.check_call(build_command, cwd=build_dir, env=build_env) write_file( os.path.join(package_dir, ".whack-package-name"), package_request.name() ) def _fetch_downloads(build_dir, build_env): downloads_file_path = os.path.join(build_dir, "whack/downloads") downloads.fetch_downloads(downloads_file_path, build_env, build_dir) def _params_to_build_env(params): build_env = os.environ.copy() for name, value in (params or {}).iteritems(): build_env[name.upper()] = str(value) return build_env
import os
import subprocess

from . import downloads
from .tempdir import create_temporary_dir
from .common import WHACK_ROOT
from .files import mkdir_p, write_file
from .errors import FileNotFoundError


def build(package_request, package_dir):
    with create_temporary_dir() as build_dir:
        _build_in_dir(package_request, build_dir, package_dir)


def _build_in_dir(package_request, build_dir, package_dir):
    params = package_request.params()

    package_request.write_source_to(build_dir)

    build_script = os.path.join(build_dir, "whack/build")
    if not os.path.exists(build_script):
        message = "whack/build script not found in package source {0}".format(
            package_request.source_uri
        )
        raise FileNotFoundError(message)

    build_env = _params_to_build_env(params)
    _fetch_downloads(build_dir, build_env)

    mkdir_p(package_dir)
    build_command = [
        "whack-run",
        os.path.abspath(package_dir),  # package_dir is mounted at WHACK_ROOT
        build_script,  # build_script is executed
        WHACK_ROOT  # WHACK_ROOT is passed as the first argument to build_script
    ]
    subprocess.check_call(build_command, cwd=build_dir, env=build_env)
    write_file(
        os.path.join(package_dir, ".whack-package-name"),
        package_request.name()
    )


def _fetch_downloads(build_dir, build_env):
    downloads_file_path = os.path.join(build_dir, "whack/downloads")
    downloads.fetch_downloads(downloads_file_path, build_env, build_dir)


def _params_to_build_env(params):
    build_env = os.environ.copy()
    for name, value in (params or {}).iteritems():
        build_env[name.upper()] = str(value)
    return build_env
bsd-2-clause
Python
4c017462c41ad080c1f6a98f8be7ef843f379253
Fix test name
p/wolis-phpbb,p/wolis-phpbb
tests/search_backend_sphinx.py
tests/search_backend_sphinx.py
from wolis.test_case import WolisTestCase
from wolis import utils


class SearchBackendSphinxTest(WolisTestCase):
    @utils.restrict_database('mysql*', 'postgres')
    @utils.restrict_phpbb_version('>=3.1.0')
    def test_set_search_backend(self):
        self.login('morpheus', 'morpheus')
        self.acp_login('morpheus', 'morpheus')

        self.change_acp_knob(
            link_text='Search settings',
            check_page_text='Here you can define what search backend will be used',
            name='config[search_type]',
            value='phpbb_search_fulltext_sphinx',
            confirm=True,
        )


if __name__ == '__main__':
    import unittest
    unittest.main()
from wolis.test_case import WolisTestCase
from wolis import utils


class SearchBackendMysqlTest(WolisTestCase):
    @utils.restrict_database('mysql*', 'postgres')
    @utils.restrict_phpbb_version('>=3.1.0')
    def test_set_search_backend(self):
        self.login('morpheus', 'morpheus')
        self.acp_login('morpheus', 'morpheus')

        self.change_acp_knob(
            link_text='Search settings',
            check_page_text='Here you can define what search backend will be used',
            name='config[search_type]',
            value='phpbb_search_fulltext_sphinx',
            confirm=True,
        )


if __name__ == '__main__':
    import unittest
    unittest.main()
bsd-2-clause
Python
b3ef8f04fa7abd688d7c8669b4f1dfeda2a55c81
test fixed
sohaibfarooqi/flask-stargate
tests/test_resource_manager.py
tests/test_resource_manager.py
# from . import ManagerTestBase
# from flask import json
# import datetime
# from app import db
# from stargate.resource_info import resource_info
# from stargate.const import ResourceInfoConst
# from app.models import TestPrimaryKey
# from app import init_app, db
# from functools import partial

# class TestResourceManager(ManagerTestBase):

#     @classmethod
#     def setUpClass(self):
#         super(TestResourceManager, self).setUpClass()

#     def test_collection_name(self):
#         response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
#         self.assertEqual(response._status_code, 200)
#         response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
#         self.assertEqual(response._status_code, 200)

#     def test_url_prefix(self):
#         response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
#         self.assertEqual(response._status_code, 200)

#     def test_resource_fields(self):
#         response = self.client.get('/api/location', headers={"Content-Type": "application/json"})
#         data = json.loads(response.get_data())
#         data = data['data']
#         for key in data:
#             keys = list(key['attributes'].keys())
#             self.assertCountEqual(keys, ['latitude','longitude'])

#     def test_resource_exclude(self):
#         response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
#         data = json.loads(response.get_data())
#         data = data['data']
#         for key in data:
#             keys = list(key['attributes'].keys())
#             self.assertNotIn(['latitude','longitude'], keys)

#     def test_view_decorators(self):
#         response = self.client.get('/api/testprimarykey', headers={"Content-Type": "application/json", "X_AUTH_KEY":"1234567"})
#         self.assertEqual(response._status_code, 200)
#         func = partial(self.client.get, '/api/testprimarykey', headers={"Content-Type": "application/json"})
#         self.assertRaises(ValueError, func)

#     def test_resource_http_methods(self):
#         response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
#         self.assertEqual(response._status_code, 200)
#         response = self.client.post('/api/mycustomcollection', headers={"Content-Type": "application/json"})
#         self.assertEqual(response._status_code, 405)

#     def test_custom_primary_key_field(self):
#         primary_key = resource_info(ResourceInfoConst.PRIMARY_KEY, TestPrimaryKey)
#         self.assertEqual(primary_key, 'ser_id')
from . import ManagerTestBase
from flask import json
import datetime
from app import db
from stargate.resource_info import resource_info
from stargate.const import ResourceInfoConst
from app.models import TestPrimaryKey
from app import init_app, db
from functools import partial


class TestResourceManager(ManagerTestBase):

    @classmethod
    def setUpClass(self):
        super(TestResourceManager, self).setUpClass()

    def test_collection_name(self):
        response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
        self.assertEqual(response._status_code, 200)
        response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
        self.assertEqual(response._status_code, 200)

    def test_url_prefix(self):
        response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
        self.assertEqual(response._status_code, 200)

    def test_resource_fields(self):
        response = self.client.get('/api/location', headers={"Content-Type": "application/json"})
        data = json.loads(response.get_data())
        data = data['data']
        for key in data:
            keys = list(key['attributes'].keys())
            self.assertCountEqual(keys, ['latitude','longitude'])

    def test_resource_exclude(self):
        response = self.client.get('/v1/city', headers={"Content-Type": "application/json"})
        data = json.loads(response.get_data())
        data = data['data']
        for key in data:
            keys = list(key['attributes'].keys())
            self.assertNotIn(['latitude','longitude'], keys)

    def test_view_decorators(self):
        response = self.client.get('/api/testprimarykey', headers={"Content-Type": "application/json", "X_AUTH_KEY":"1234567"})
        self.assertEqual(response._status_code, 200)
        func = partial(self.client.get, '/api/testprimarykey', headers={"Content-Type": "application/json"})
        self.assertRaises(ValueError, func)

    def test_resource_http_methods(self):
        response = self.client.get('/api/mycustomcollection', headers={"Content-Type": "application/json"})
        self.assertEqual(response._status_code, 200)
        response = self.client.post('/api/mycustomcollection', headers={"Content-Type": "application/json"})
        self.assertEqual(response._status_code, 405)

    def test_custom_primary_key_field(self):
        primary_key = resource_info(ResourceInfoConst.PRIMARY_KEY, TestPrimaryKey)
        self.assertEqual(primary_key, 'ser_id')
agpl-3.0
Python
e721511a24f98e57e8bfeb45a953d7d42cf78f33
increase the max length of a link that is to be shortenend to 500 characters
einvalentin/django-teenyweeny
teeny_weeny/models.py
teeny_weeny/models.py
from django.db import models
from django.utils import timezone


class ShortLink(models.Model):
    short = models.CharField(max_length=128, unique=True)
    link = models.URLField(max_length=500)
    hit = models.BigIntegerField(default=0)
    date = models.DateTimeField(default=timezone.now)

    def __unicode__(self):
        return u'%s' % (self.short)
from django.db import models
from django.utils import timezone


class ShortLink(models.Model):
    short = models.CharField(max_length=128, unique=True)
    link = models.URLField()
    hit = models.BigIntegerField(default=0)
    date = models.DateTimeField(default=timezone.now)

    def __unicode__(self):
        return u'%s' % (self.short)
apache-2.0
Python
adb7f16a1b441e550e1f6f75a3ed7dfe3e25ec1c
Sort list of devices.
eunchong/build,eunchong/build,eunchong/build,eunchong/build
scripts/slave/recipe_modules/adb/resources/list_devices.py
scripts/slave/recipe_modules/adb/resources/list_devices.py
# Copyright (c) 2014 ThE Chromium Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generates json output of the adb devices that are online.

Argument 1: the repr() of the adb command to run.
Argument 2: the temporary json file to write the output to.
"""

import subprocess
import sys
import json
import re
import logging

logging.basicConfig(level=0)

cmd = eval(sys.argv[1])
outFileName = sys.argv[2]

output = subprocess.check_output(cmd)

devices = []
for line in output.splitlines():
  logging.info(line)
  m = re.match('^([0-9A-Za-z]+)\s+device$', line)
  if m:
    devices.append(m.group(1))

with open(outFileName, 'w') as outFile:
  json.dump(sorted(devices), outFile)
# Copyright (c) 2014 ThE Chromium Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generates json output of the adb devices that are online.

Argument 1: the repr() of the adb command to run.
Argument 2: the temporary json file to write the output to.
"""

import subprocess
import sys
import json
import re
import logging

logging.basicConfig(level=0)

cmd = eval(sys.argv[1])
outFileName = sys.argv[2]

output = subprocess.check_output(cmd)

devices = []
for line in output.splitlines():
  logging.info(line)
  m = re.match('^([0-9A-Za-z]+)\s+device$', line)
  if m:
    devices.append(m.group(1))

with open(outFileName, 'w') as outFile:
  json.dump(devices, outFile)
bsd-3-clause
Python
027c9d24ecf00a8435ad012fdab9e64b4201ed42
fix migration conflict, re #7128
archesproject/arches,archesproject/arches,archesproject/arches,archesproject/arches
arches/app/models/migrations/7128_resource_instance_filter.py
arches/app/models/migrations/7128_resource_instance_filter.py
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('models', '7442_delete_manifest_images_table'),
    ]

    operations = [
        migrations.RunSQL("""
            UPDATE d_data_types
            SET defaultconfig = defaultconfig || '{"searchString": "", "searchDsl": ""}'::jsonb
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE nodes
            SET config = config || '{"searchString": "", "searchDsl": ""}'::jsonb
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE public.widgets
            SET defaultconfig = defaultconfig || '{"defaultResourceInstance": []}'::jsonb
            WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
        ""","""
            UPDATE nodes
            SET config = config - 'searchString' - 'searchDsl'
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE d_data_types
            SET defaultconfig = defaultconfig - 'searchString' - 'searchDsl'
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE public.widgets
            SET defaultconfig = defaultconfig - 'defaultResourceInstance'
            WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
        """)
    ]
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('models', '7262_report_template_data_fetch_bool'),
    ]

    operations = [
        migrations.RunSQL("""
            UPDATE d_data_types
            SET defaultconfig = defaultconfig || '{"searchString": "", "searchDsl": ""}'::jsonb
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE nodes
            SET config = config || '{"searchString": "", "searchDsl": ""}'::jsonb
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE public.widgets
            SET defaultconfig = defaultconfig || '{"defaultResourceInstance": []}'::jsonb
            WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
        ""","""
            UPDATE nodes
            SET config = config - 'searchString' - 'searchDsl'
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE d_data_types
            SET defaultconfig = defaultconfig - 'searchString' - 'searchDsl'
            WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';

            UPDATE public.widgets
            SET defaultconfig = defaultconfig - 'defaultResourceInstance'
            WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
        """)
    ]
agpl-3.0
Python
e987a010f2242735ad60008774d25c00b7f89f76
Tweak CI report
fran-jo/OpenIPSL,OpenIPSL/OpenIPSL,MaximeBaudette/OpenIPSL,SmarTS-Lab/OpenIPSL,tinrabuzin/OpenIPSL,SmarTS-Lab/OpenIPSL
CI/CITests.py
CI/CITests.py
import os
from OMPython import OMCSessionZMQ

class CITests():
    '''
    Python class used to run CI tests
    '''

    def __init__(self, rootPath):
        '''
        Constructor starts omc and loads MSL
        '''
        self.rootPath = rootPath
        self.omc = OMCSessionZMQ()
        os.chdir(self.rootPath)
        self.omc.sendExpression("loadModel(Modelica)")

    def loadLib(self, libName, libPath):
        # Attempt to load the library
        if self.omc.sendExpression('loadFile("%s")' % (self.rootPath + libPath)):
            print "Load success: %s" % libName
        else:
            errmsg = libName + " was not loaded! Check the library path:\n" + libPath
            raise Exception(errmsg)

    def runSyntaxCheck(self, libName, libPath):
        # Load library
        self.loadLib(libName,libPath)
        '''
        Checks all of the models in the library and returns number of faild checks
        '''
        # Get the list of all classes in OpenIPSL
        test_list = self.omc.sendExpression('getClassNames(%s,recursive=true)' % libName)

        nFailed = 0
        nPassed = 0

        # Run the check for all classes that are model and print result msgs
        for test in test_list:
            if self.omc.sendExpression("isModel(%s)" % (test)):  # Check if a class is a model
                passMsg = self.omc.sendExpression("checkModel(%s)" % (test))
                if "completed successfully." in passMsg:
                    nPassed += 1
                else:
                    failMsg = self.omc.sendExpression("getErrorString()")
                    print failMsg
                    nFailed += 1

        # Print a check summary
        if nFailed == 0:
            str1 = "== %s ----------------------" % libName
            print "%s OK! == Models checked: %s" % (str1[:22], nPassed)
        else:
            print "==== Check Summary for %s ====" % libName
            print "Number of models that passed the check is: %s" % nPassed
            print "Number of models that failed the check is: %s" % nFailed

        # Return test result
        return (nFailed == 0)
import os
from OMPython import OMCSessionZMQ

class CITests():
    '''
    Python class used to run CI tests
    '''

    def __init__(self, rootPath):
        '''
        Constructor starts omc and loads MSL
        '''
        self.rootPath = rootPath
        self.omc = OMCSessionZMQ()
        os.chdir(self.rootPath)
        self.omc.sendExpression("loadModel(Modelica)")

    def loadLib(self, libPath):
        # Attempt to load the library
        if self.omc.sendExpression('loadFile("%s")' % (self.rootPath + libPath)):
            print "%s is successfully loaded." % libPath
        else:
            errmsg = libPath + " was not loaded! Check the library path."
            raise Exception(errmsg)

    def runSyntaxCheck(self, libName, libPath):
        # Load library
        self.loadLib(libPath)
        '''
        Checks all of the models in the library and returns number of faild checks
        '''
        # Get the list of all classes in OpenIPSL
        test_list = self.omc.sendExpression('getClassNames(%s,recursive=true)' % libName)

        nFailed = 0
        nPassed = 0

        # Run the check for all classes that are model and print result msgs
        for test in test_list:
            if self.omc.sendExpression("isModel(%s)" % (test)):  # Check if a class is a model
                passMsg = self.omc.sendExpression("checkModel(%s)" % (test))
                if "completed successfully." in passMsg:
                    # print passMsg
                    nPassed += 1
                else:
                    failMsg = self.omc.sendExpression("getErrorString()")
                    print failMsg
                    nFailed += 1

        # Print a check summary
        if nFailed == 0:
            str1 = "== %s --------------------" % libName
            print "%s OK! (%s models checked)" % (str1[:20], nPassed)
        else:
            print "==== Check Summary for %s ====" % libName
            print "Number of models that passed the check is: %s" % nPassed
            print "Number of models that failed the check is: %s" % nFailed

        # Return test result
        return (nFailed == 0)
bsd-3-clause
Python
33c51e6a0612aece239bf01236f110ef9fb40c86
Add some uncovered code
steve98654/pycon-tutorial-steve98564,kojoidrissa/pycon-tutorial-cbtd,steve98654/pycon-tutorial-steve98564,bazfire/satarvo_project,lif3isg0od/pycon-tutorial-learn,hrjn/2016-pycon_project,jdcorless/pycon-tutorial-jdcorless,janecofino/pycon-tutorial-mjc,jwarner308/pycon-tutorial-JW,lifan0127/2016-pycon-tutorial-project,steve98654/pycon-tutorial-steve98564,sk8asd123/oncyp,erinhaswell/pycon-tutorial-project-haswell,jm66/jm-wordcount,terryjbates/pycon-tutorial-tbates,johnmulder/pycon-tutorial-student,0x41/pycon_tut_phil
wordcount_lib.py
wordcount_lib.py
def consume(filename):
    chars = 0
    words = 0
    lines = 0

    with open(filename, 'rt') as fp:
        for line in fp:
            lines += 1
            words += len(line.strip().split())
            chars += len(line)

    return chars, words, lines


def daaaangerous(param=0):
    print("I'm the most dangerous function West of the Missippi, no test "\
          "will cover me!")
    return 3 / param
def consume(filename):
    chars = 0
    words = 0
    lines = 0

    with open(filename, 'rt') as fp:
        for line in fp:
            lines += 1
            words += len(line.strip().split())
            chars += len(line)

    return chars, words, lines
bsd-3-clause
Python
05cb079fd4e6b7a9bfd32c1470c9c638af5b7bc9
Add comments clarifying implementation choices
python/importlib_metadata
importlib_metadata/_py39compat.py
importlib_metadata/_py39compat.py
""" Compatibility layer with Python 3.8/3.9 """ from typing import TYPE_CHECKING, Any, Optional, Tuple if TYPE_CHECKING: # -> prevent circular imports on runtime. from . import Distribution, EntryPoint else: Distribution = EntryPoint = Any def normalized_name(dist: Distribution) -> Optional[str]: """ Honor name normalization for distributions that don't provide ``_normalized_name``. """ try: return dist._normalized_name except AttributeError: from . import Prepared # -> delay to prevent circular imports. return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name']) def ep_matches(ep: EntryPoint, **params) -> Tuple[EntryPoint, bool]: """ Workaround for ``EntryPoint`` objects without the ``matches`` method. For the sake of convenience, a tuple is returned containing not only the boolean value corresponding to the predicate evalutation, but also a compatible ``EntryPoint`` object that can be safely used at a later stage. For example, the following sequences of expressions should be compatible: # Sequence 1: using the compatibility layer candidates = (_py39compat.ep_matches(ep, **params) for ep in entry_points) [ep for ep, predicate in candidates if predicate] # Sequence 2: using Python 3.9+ [ep for ep in entry_points if ep.matches(**params)] """ try: return ep, ep.matches(**params) except AttributeError: from . import EntryPoint # -> delay to prevent circular imports. # Reconstruct the EntryPoint object to make sure it is compatible. _ep = EntryPoint(ep.name, ep.value, ep.group) return _ep, _ep.matches(**params)
""" Compatibility layer with Python 3.8/3.9 """ from typing import TYPE_CHECKING, Any, Optional, Tuple if TYPE_CHECKING: from . import Distribution, EntryPoint else: Distribution = EntryPoint = Any def normalized_name(dist: Distribution) -> Optional[str]: """ Honor name normalization for distributions that don't provide ``_normalized_name``. """ try: return dist._normalized_name except AttributeError: from . import Prepared return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name']) def ep_matches(ep: EntryPoint, **params) -> Tuple[EntryPoint, bool]: """ Workaround for ``EntryPoint`` objects without the ``matches`` method. For the sake of convenience, a tuple is returned containing not only the boolean value corresponding to the predicate evalutation, but also a compatible ``EntryPoint`` object that can be safely used at a later stage. For example, the following sequences of expressions should be compatible: # Sequence 1: using the compatibility layer candidates = (_py39compat.ep_matches(ep, **params) for ep in entry_points) [ep for ep, predicate in candidates if predicate] # Sequence 2: using Python 3.9+ [ep for ep in entry_points if ep.matches(**params)] """ try: return ep, ep.matches(**params) except AttributeError: from . import EntryPoint # Reconstruct the EntryPoint object to make sure it is compatible. _ep = EntryPoint(ep.name, ep.value, ep.group) return _ep, _ep.matches(**params)
apache-2.0
Python
4bd53d96be49c01c04a30d2c064774bac23fc20a
Rewrite entry update in DatabaseStorage without explicit update call
catcombo/django-speedinfo,catcombo/django-speedinfo,catcombo/django-speedinfo
speedinfo/storage/database/storage.py
speedinfo/storage/database/storage.py
# coding: utf-8
from django.db import IntegrityError
from django.db.models import ExpressionWrapper, F, FloatField, IntegerField
from django.forms import model_to_dict

from speedinfo.models import ViewProfiler
from speedinfo.storage.base import AbstractStorage
from speedinfo.storage.database.models import Storage


class DatabaseStorage(AbstractStorage):
    def add(self, view_name, method, is_anon_call, is_cache_hit, sql_time, sql_count, view_execution_time):
        try:
            vp, created = Storage.objects.get_or_create(view_name=view_name, method=method)
        except IntegrityError:
            # IntegrityError raised in the case of concurrent access
            # to get_or_create method from another application worker/thread
            vp = Storage.objects.get(view_name=view_name, method=method)

        vp.anon_calls = F("anon_calls") + (is_anon_call and 1 or 0)
        vp.cache_hits = F("cache_hits") + (is_cache_hit and 1 or 0)
        vp.sql_total_time = F("sql_total_time") + sql_time
        vp.sql_total_count = F("sql_total_count") + sql_count
        vp.total_calls = F("total_calls") + 1
        vp.total_time = F("total_time") + view_execution_time
        vp.save()

    def fetch_all(self, ordering=None):
        qs = Storage.objects.annotate(
            anon_calls_ratio=ExpressionWrapper(100.0 * F("anon_calls") / F("total_calls"), output_field=FloatField()),
            cache_hits_ratio=ExpressionWrapper(100.0 * F("cache_hits") / F("total_calls"), output_field=FloatField()),
            sql_count_per_call=ExpressionWrapper(F("sql_total_count") / F("total_calls"), output_field=IntegerField()),
            sql_time_ratio=ExpressionWrapper(100.0 * F("sql_total_time") / F("total_time"), output_field=FloatField()),
            time_per_call=ExpressionWrapper(F("total_time") / F("total_calls"), output_field=FloatField()),
        )

        if ordering:
            qs = qs.order_by(*ordering)

        return [ViewProfiler(**model_to_dict(item)) for item in qs]

    def reset(self):
        Storage.objects.all().delete()
# coding: utf-8
from django.db import IntegrityError
from django.db.models import ExpressionWrapper, F, FloatField, IntegerField
from django.forms import model_to_dict

from speedinfo.models import ViewProfiler
from speedinfo.storage.base import AbstractStorage
from speedinfo.storage.database.models import Storage


class DatabaseStorage(AbstractStorage):
    def add(self, view_name, method, is_anon_call, is_cache_hit, sql_time, sql_count, view_execution_time):
        try:
            vp, created = Storage.objects.get_or_create(view_name=view_name, method=method)
        except IntegrityError:
            # IntegrityError raised in the case of concurrent access
            # to get_or_create method from another application worker/thread
            vp = Storage.objects.get(view_name=view_name, method=method)

        Storage.objects.filter(pk=vp.pk).update(
            anon_calls=F("anon_calls") + (is_anon_call and 1 or 0),
            cache_hits=F("cache_hits") + (is_cache_hit and 1 or 0),
            sql_total_time=F("sql_total_time") + sql_time,
            sql_total_count=F("sql_total_count") + sql_count,
            total_calls=F("total_calls") + 1,
            total_time=F("total_time") + view_execution_time,
        )

    def fetch_all(self, ordering=None):
        qs = Storage.objects.annotate(
            anon_calls_ratio=ExpressionWrapper(100.0 * F("anon_calls") / F("total_calls"), output_field=FloatField()),
            cache_hits_ratio=ExpressionWrapper(100.0 * F("cache_hits") / F("total_calls"), output_field=FloatField()),
            sql_count_per_call=ExpressionWrapper(F("sql_total_count") / F("total_calls"), output_field=IntegerField()),
            sql_time_ratio=ExpressionWrapper(100.0 * F("sql_total_time") / F("total_time"), output_field=FloatField()),
            time_per_call=ExpressionWrapper(F("total_time") / F("total_calls"), output_field=FloatField()),
        )

        if ordering:
            qs = qs.order_by(*ordering)

        return [ViewProfiler(**model_to_dict(item)) for item in qs]

    def reset(self):
        Storage.objects.all().delete()
mit
Python
953d83119005075b9bc59d040389c209208263d5
Integrate LLVM at llvm/llvm-project@7354a73945f1
tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tfrt_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "7354a73945f1c123d66b01f51374ecbdba18fab3" LLVM_SHA256 = "73a86e6f9d263a812bfdda5120b8f08467bd8ee39564b75da752854328a72803" tfrt_http_archive( name = name, build_file = "//third_party/llvm:BUILD", sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], )
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tfrt_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "7f2b016b820487f2fb69b93e784fff5d8297dea0" LLVM_SHA256 = "348e586173038ab248e76be34d4a3e5667d56429350150a4a8130fba5a318e05" tfrt_http_archive( name = name, build_file = "//third_party/llvm:BUILD", sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], )
apache-2.0
Python
509a542fd5e3171979fb74aec9226c057d289623
Integrate LLVM at llvm/llvm-project@04a5ca862bb9
tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tfrt_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "04a5ca862bb989acdd2729d0991b4e5a104bf244" LLVM_SHA256 = "10a0c150c477a36eff25d49f0f50379fddf626a7d87a2b1846fb101173c742c9" tfrt_http_archive( name = name, build_file = "//third_party/llvm:BUILD", sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], )
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tfrt_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "b3a0bed5fb8766dcf27583ab1f73edc6e7232657" LLVM_SHA256 = "0ee751d5754af930e05cea8b54b061e819e4254e06f64d211e07f2faf3395adf" tfrt_http_archive( name = name, build_file = "//third_party/llvm:BUILD", sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], )
apache-2.0
Python
94fbcf6224624810a30a17cc9bc8d4c1f3458954
Integrate LLVM at llvm/llvm-project@5c7b43aa8298
tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tfrt_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "5c7b43aa8298a389b906d72c792941a0ce57782e" LLVM_SHA256 = "e34534a864e2bedaff6811effb757d2eed3a50c9c1e540515ed1568addf1815d" tfrt_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], link_files = { "//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD", "//third_party/mlir:BUILD": "mlir/BUILD", "//third_party/mlir:test.BUILD": "mlir/test/BUILD", }, )
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tfrt_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "9ba661f91276dd8cc728f9b2e82905b78c0119b4" LLVM_SHA256 = "f89c033b0e8e6d4e6ff5ce3883aadc82a502b063a830cd685672cec4bea3dfb1" tfrt_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], link_files = { "//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD", "//third_party/mlir:BUILD": "mlir/BUILD", "//third_party/mlir:test.BUILD": "mlir/test/BUILD", }, )
apache-2.0
Python
fda8088ec3330ec5bc6ea7769c79d2fb9f227728
Fix bug with valid hostnames with dashes. I added underscores even though they aren't valid just for good measure
lincolnloop/salmon,lincolnloop/salmon
salmon/apps/monitor/urls.py
salmon/apps/monitor/urls.py
from django.conf.urls import patterns, url

from . import views

urlpatterns = patterns('',
    url(r'^$', views.dashboard, name="dashboard"),
    url(r'^(?P<name>[-\w\._]*)$', views.history, name="history"),
)
from django.conf.urls import patterns, url

from . import views

urlpatterns = patterns('',
    url(r'^$', views.dashboard, name="dashboard"),
    url(r'^(?P<name>[\w\.]*)$', views.history, name="history"),
)
bsd-3-clause
Python
0efe8e9cfbd3a5d3319553aabf4f0dd17fa53d33
fix license test
wwitzel3/awx,snahelou/awx,wwitzel3/awx,wwitzel3/awx,wwitzel3/awx,snahelou/awx,snahelou/awx,snahelou/awx
awx/main/tests/functional/api/test_settings.py
awx/main/tests/functional/api/test_settings.py
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Python
import pytest
import os

# Django
from django.core.urlresolvers import reverse

# AWX
from awx.conf.models import Setting

'''
Ensures that tests don't pick up dev container license file
'''
@pytest.fixture
def mock_no_license_file(mocker):
    os.environ['AWX_LICENSE_FILE'] = '/does_not_exist'
    return None


@pytest.mark.django_db
def test_license_cannot_be_removed_via_system_settings(mock_no_license_file, get, put, patch, delete, admin, enterprise_license):
    url = reverse('api:setting_singleton_detail', args=('system',))
    response = get(url, user=admin, expect=200)
    assert not response.data['LICENSE']

    Setting.objects.create(key='LICENSE', value=enterprise_license)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']

    put(url, user=admin, data=response.data, expect=200)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']

    patch(url, user=admin, data={}, expect=200)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']

    delete(url, user=admin, expect=204)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.

# Python
import pytest

# Django
from django.core.urlresolvers import reverse

# AWX
from awx.conf.models import Setting


@pytest.mark.django_db
def test_license_cannot_be_removed_via_system_settings(get, put, patch, delete, admin, enterprise_license):
    url = reverse('api:setting_singleton_detail', args=('system',))
    response = get(url, user=admin, expect=200)
    assert not response.data['LICENSE']

    Setting.objects.create(key='LICENSE', value=enterprise_license)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']

    put(url, user=admin, data=response.data, expect=200)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']

    patch(url, user=admin, data={}, expect=200)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']

    delete(url, user=admin, expect=204)
    response = get(url, user=admin, expect=200)
    assert response.data['LICENSE']
apache-2.0
Python
c91e7dcc969485644d8e26c459c894925b3f0720
add in fasta format
ctSkennerton/BioSQL-Extensions
scripts/dump_biodatabase.py
scripts/dump_biodatabase.py
#!/usr/bin/env python
import sys
from getpass import getpass
from BioSQL import BioSeqDatabase

from common import standard_options, generate_placeholders, chunks, extract_feature_sql


def get_seqfeature_for_db(server, biodb):
    ''' find all seqfeatures that have the given value for the qualifier
        returns a list of seqfeature_id
    '''
    sql = "SELECT qv.seqfeature_id FROM seqfeature_qualifier_value qv join seqfeature s using(seqfeature_id) join bioentry b using(bioentry_id) join biodatabase bd using(biodatabase_id) WHERE bd.name = %s"
    return server.adaptor.execute_and_fetchall(sql, (biodb,))


def main(args):
    server = BioSeqDatabase.open_database(driver=args.driver, db=args.database, user=args.user, host=args.host, passwd=args.password)
    if args.output_format == 'fasta':
        from Bio import SeqIO
        db = server[args.database_name]
        for rec in db.values():
            SeqIO.write(rec, sys.stdout, args.output_format)
    else:
        seqfeature_ids = get_seqfeature_for_db(server, args.database_name)

        if args.output_format == 'feat-prot':
            extract_feature_sql(server, seqfeature_ids, type=['CDS'], translate=True )
        elif args.output_format == 'feat-nucl':
            extract_feature_sql(server, seqfeature_ids )


if __name__ == "__main__":
    parser = standard_options()
    parser.add_argument('-D', '--database-name', help='namespace of the database that you want to add into', dest='database_name', required=True)
    parser.add_argument('-o', '--output_format', help='output format of the selected sequences', choices=['feat-prot', 'feat-nucl', 'fasta'], default='feat-prot')
    args = parser.parse_args()
    if args.password is None:
        args.password = getpass("Please enter the password for user " + \
                args.user + " on database " + args.database)
    main(args)
#!/usr/bin/env python
from getpass import getpass
from BioSQL import BioSeqDatabase

from common import standard_options, generate_placeholders, chunks, extract_feature_sql


def get_seqfeature_for_db(server, biodb):
    ''' find all seqfeatures that have the given value for the qualifier
        returns a list of seqfeature_id
    '''
    sql = "SELECT qv.seqfeature_id FROM seqfeature_qualifier_value qv join seqfeature s using(seqfeature_id) join bioentry b using(bioentry_id) join biodatabase bd using(biodatabase_id) WHERE bd.name = %s"
    return server.adaptor.execute_and_fetchall(sql, (biodb,))


def main(args):
    server = BioSeqDatabase.open_database(driver=args.driver, db=args.database, user=args.user, host=args.host, passwd=args.password)
    seqfeature_ids = get_seqfeature_for_db(server, args.database_name)

    if args.output_format == 'feat-prot':
        extract_feature_sql(server, seqfeature_ids, type=['CDS'], translate=True )
    elif args.output_format == 'feat-nucl':
        extract_feature_sql(server, seqfeature_ids )


if __name__ == "__main__":
    parser = standard_options()
    parser.add_argument('-D', '--database-name', help='namespace of the database that you want to add into', dest='database_name', required=True)
    parser.add_argument('-o', '--output_format', help='output format of the selected sequences', choices=['feat-prot', 'feat-nucl'], default='feat-prot')
    args = parser.parse_args()
    if args.password is None:
        args.password = getpass("Please enter the password for user " + \
                args.user + " on database " + args.database)
    main(args)
mit
Python
95d8f915e4aee6fbab4ca741197a3563eb3a5ff2
bump version to 0.4
intip/aldryn-bootstrap3,intip/aldryn-bootstrap3
aldryn_bootstrap3/__init__.py
aldryn_bootstrap3/__init__.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

__version__ = '0.4'
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

__version__ = '0.3'
bsd-3-clause
Python
de0fd677d94b7fb8b044fa597b687dba0f3e1c0e
Test coercions for generic type constructors
jcrist/blaze,AbhiAgarwal/blaze,alexmojaki/blaze,jcrist/blaze,markflorisson/blaze-core,FrancescAlted/blaze,aterrel/blaze,mrocklin/blaze,aterrel/blaze,xlhtc007/blaze,AbhiAgarwal/blaze,scls19fr/blaze,ContinuumIO/blaze,FrancescAlted/blaze,jdmcbr/blaze,jdmcbr/blaze,nkhuyu/blaze,FrancescAlted/blaze,LiaoPan/blaze,dwillmer/blaze,dwillmer/blaze,aterrel/blaze,mwiebe/blaze,markflorisson/blaze-core,ContinuumIO/blaze,maxalbert/blaze,ChinaQuants/blaze,ChinaQuants/blaze,LiaoPan/blaze,mwiebe/blaze,mwiebe/blaze,nkhuyu/blaze,cowlicks/blaze,scls19fr/blaze,cpcloud/blaze,AbhiAgarwal/blaze,alexmojaki/blaze,maxalbert/blaze,mwiebe/blaze,caseyclements/blaze,AbhiAgarwal/blaze,caseyclements/blaze,markflorisson/blaze-core,cowlicks/blaze,FrancescAlted/blaze,cpcloud/blaze,markflorisson/blaze-core,mrocklin/blaze,xlhtc007/blaze
blaze/datashape/tests/test_type_constructor.py
blaze/datashape/tests/test_type_constructor.py
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import

import unittest

from blaze import error
from blaze.datashape import unify_simple, promote, coerce, dshapes, coretypes as T

#------------------------------------------------------------------------
# Test data
#------------------------------------------------------------------------

Complex = T.TypeConstructor('Complex', 1, [{'coercible': True}])
t1 = Complex(T.int64)
t2 = Complex(T.int64)
t3 = Complex(T.int32)

RigidComplex = T.TypeConstructor('Complex', 1, [{'coercible': False}])
rt1 = RigidComplex(T.int64)
rt2 = RigidComplex(T.int64)
rt3 = RigidComplex(T.int32)

#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------

class TestTypeConstructors(unittest.TestCase):
    def test_equality(self):
        self.assertEqual(t1, t2)
        self.assertNotEqual(t1, t3)

    def test_unification_concrete(self):
        self.assertEqual(unify_simple(t1, t2), t1)

    def test_unification_typevar(self):
        tvar = Complex(T.TypeVar('A'))
        self.assertEqual(unify_simple(t1, tvar), t1)

    def test_promotion(self):
        self.assertEqual(promote(t1, t2), t1)
        self.assertEqual(promote(t1, t3), t1)
        self.assertEqual(promote(t3, t2), t1)
        self.assertEqual(promote(rt1, rt2), rt1)

    def test_coercion(self):
        self.assertEqual(coerce(t1, t2), 0)
        self.assertGreater(coerce(t3, t2), 0)
        self.assertEqual(coerce(rt1, rt2), 0)


class TestErrors(unittest.TestCase):
    def test_promotion_error(self):
        self.assertRaises(error.UnificationError, promote, rt1, rt3)


if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import

import unittest

from blaze import error
from blaze.datashape import unify_simple, promote, coerce, dshapes, coretypes as T

#------------------------------------------------------------------------
# Test data
#------------------------------------------------------------------------

Complex = T.TypeConstructor('Complex', 1, [{'coercible': True}])
t1 = Complex(T.int64)
t2 = Complex(T.int64)
t3 = Complex(T.int32)

RigidComplex = T.TypeConstructor('Complex', 1, [{'coercible': False}])
rt1 = RigidComplex(T.int64)
rt2 = RigidComplex(T.int64)
rt3 = RigidComplex(T.int32)

#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------

class TestTypeConstructors(unittest.TestCase):
    def test_equality(self):
        self.assertEqual(t1, t2)
        self.assertNotEqual(t1, t3)

    def test_unification_concrete(self):
        self.assertEqual(unify_simple(t1, t2), t1)

    def test_unification_typevar(self):
        tvar = Complex(T.TypeVar('A'))
        self.assertEqual(unify_simple(t1, tvar), t1)

    def test_promotion(self):
        self.assertEqual(promote(t1, t2), t1)
        self.assertEqual(promote(t1, t3), t1)
        self.assertEqual(promote(t3, t2), t1)
        self.assertEqual(promote(rt1, rt2), rt1)


class TestErrors(unittest.TestCase):
    def test_promotion_error(self):
        self.assertRaises(error.UnificationError, promote, rt1, rt3)


if __name__ == '__main__':
    # TestTypeConstructors('test_unification').debug()
    unittest.main()
bsd-3-clause
Python
7da15f2e16c95a4be179a1cc1efd108dbaaa3be9
Update forward_ZMQ_Angle.py
VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot
ProBot_BeagleBone/forward_ZMQ_Angle.py
ProBot_BeagleBone/forward_ZMQ_Angle.py
#!/usr/bin/python

import zmq

def main():
    try:
        context = zmq.Context(1)
        # Socket facing clients
        frontend = context.socket(zmq.SUB)
        frontend.bind("tcp://*:5583")
        frontend.setsockopt(zmq.SUBSCRIBE, "")

        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind("tcp://*:5584")

        zmq.device(zmq.FORWARDER, frontend, backend)
    except Exception, e:
        print e
        print "bringing down zmq device"
    finally:
        pass
        frontend.close()
        backend.close()
        context.term()

if __name__ == "__main__":
    main()
import zmq

def main():
    try:
        context = zmq.Context(1)
        # Socket facing clients
        frontend = context.socket(zmq.SUB)
        frontend.bind("tcp://*:5583")
        frontend.setsockopt(zmq.SUBSCRIBE, "")

        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind("tcp://*:5584")

        zmq.device(zmq.FORWARDER, frontend, backend)
    except Exception, e:
        print e
        print "bringing down zmq device"
    finally:
        pass
        frontend.close()
        backend.close()
        context.term()

if __name__ == "__main__":
    main()
agpl-3.0
Python
a7ac41830ac0472442069deead739ddd4c137be3
add future import for print
tomlanyon/dnspython
examples/receive_notify.py
examples/receive_notify.py
#!/usr/bin/env python3

# This is just a toy, real code would check that the received message
# really was a NOTIFY, and otherwise handle errors.

from __future__ import print_function

import socket

import dns.flags
import dns.message
import dns.rdataclass
import dns.rdatatype

address = '127.0.0.1'
port = 53535

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((address, port))

while True:
    (wire, address) = s.recvfrom(512)
    notify = dns.message.from_wire(wire)
    soa = notify.find_rrset(notify.answer, notify.question[0].name,
                            dns.rdataclass.IN, dns.rdatatype.SOA)

    # Do something with the SOA RR here
    print('The serial number for', soa.name, 'is', soa[0].serial)

    response = dns.message.make_response(notify)
    response.flags |= dns.flags.AA
    wire = response.to_wire(response)

    s.sendto(wire, address)
#!/usr/bin/env python3

# This is just a toy, real code would check that the received message
# really was a NOTIFY, and otherwise handle errors.

import socket

import dns.flags
import dns.message
import dns.rdataclass
import dns.rdatatype

address = '127.0.0.1'
port = 53535

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((address, port))

while True:
    (wire, address) = s.recvfrom(512)
    notify = dns.message.from_wire(wire)
    soa = notify.find_rrset(notify.answer, notify.question[0].name,
                            dns.rdataclass.IN, dns.rdatatype.SOA)

    # Do something with the SOA RR here
    print('The serial number for', soa.name, 'is', soa[0].serial)

    response = dns.message.make_response(notify)
    response.flags |= dns.flags.AA
    wire = response.to_wire(response)

    s.sendto(wire, address)
isc
Python
9bbe8057a627ba81282a76de94e57ca0b0e02b89
change default port
gonicus/gosa,gonicus/gosa,gonicus/gosa,gonicus/gosa
backend/src/gosa/backend/plugins/foreman/gosa_integration.py
backend/src/gosa/backend/plugins/foreman/gosa_integration.py
#!/usr/bin/env python3
"""
Foreman / GOsa3 integration to send hook events data to GOsa3
"""
import hmac
import sys
import requests
import json

#. /etc/sysconfig/foreman-gosa

# Gosa settings
GOSA_SERVER = "http://localhost"
GOSA_PORT = 8050
HTTP_X_HUB_SENDER = "foreman-hook"
SECRET = "e540f417-4c36-4e5d-b78a-4d36f51727ec"
HOOK_TEMP_DIR = "/usr/share/foreman/tmp"

# HOOK_EVENT = update, create, before_destroy etc.
# HOOK_OBJECT = to_s representation of the object, e.g. host's fqdn
HOOK_EVENT, HOOK_OBJECT = (sys.argv[1], sys.argv[2])

payload = json.loads(sys.stdin.read())

# add event + object to payload
payload = json.dumps({
    "event": HOOK_EVENT,
    "object": HOOK_OBJECT,
    "data": payload
}).encode('utf-8')

signature_hash = hmac.new(bytes(SECRET, 'ascii'), msg=payload, digestmod="sha512")
signature = 'sha1=' + signature_hash.hexdigest()

headers = {
    'Content-Type': 'application/vnd.foreman.hookevent+json',
    'HTTP_X_HUB_SENDER': HTTP_X_HUB_SENDER,
    'HTTP_X_HUB_SIGNATURE': signature
}

requests.post("%s:%s/hooks" % (GOSA_SERVER, GOSA_PORT), data=payload, headers=headers)
#!/usr/bin/env python3
"""
Foreman / GOsa3 integration to send hook events data to GOsa3
"""
import hmac
import sys
import requests
import json

#. /etc/sysconfig/foreman-gosa

# Gosa settings
GOSA_SERVER = "http://localhost"
GOSA_PORT = 8000
HTTP_X_HUB_SENDER = "foreman-hook"
SECRET = "e540f417-4c36-4e5d-b78a-4d36f51727ec"
HOOK_TEMP_DIR = "/usr/share/foreman/tmp"

# HOOK_EVENT = update, create, before_destroy etc.
# HOOK_OBJECT = to_s representation of the object, e.g. host's fqdn
HOOK_EVENT, HOOK_OBJECT = (sys.argv[1], sys.argv[2])

payload = json.loads(sys.stdin.read())

# add event + object to payload
payload = json.dumps({
    "event": HOOK_EVENT,
    "object": HOOK_OBJECT,
    "data": payload
}).encode('utf-8')

signature_hash = hmac.new(bytes(SECRET, 'ascii'), msg=payload, digestmod="sha512")
signature = 'sha1=' + signature_hash.hexdigest()

headers = {
    'Content-Type': 'application/vnd.foreman.hookevent+json',
    'HTTP_X_HUB_SENDER': HTTP_X_HUB_SENDER,
    'HTTP_X_HUB_SIGNATURE': signature
}

requests.post("%s:%s/hooks" % (GOSA_SERVER, GOSA_PORT), data=payload, headers=headers)
lgpl-2.1
Python
94b216fb8c15db7228e54e35058c7143b02d103f
prepare 1.1.1 bugfix release, from now on tests for new features..
wlanslovenija/cmsplugin-blog,wlanslovenija/cmsplugin-blog,divio/cmsplugin-blog,wlanslovenija/cmsplugin-blog,divio/cmsplugin-blog,wlanslovenija/cmsplugin-blog,divio/cmsplugin-blog
cmsplugin_blog/__init__.py
cmsplugin_blog/__init__.py
# -*- coding: utf-8 -*-
VERSION = (1, 1, 1, 'post', 0)


def get_version():  # pragma: no cover
    version = '%s.%s' % (VERSION[0], VERSION[1])
    if VERSION[2]:
        version = '%s.%s' % (version, VERSION[2])
    if VERSION[3:] == ('alpha', 0):
        version = '%s pre-alpha' % version
    else:
        if VERSION[3] != 'final':
            version = '%s %s %s' % (version, VERSION[3], VERSION[4])
    return version
# -*- coding: utf-8 -*-
VERSION = (1, 1, 0, 'post', 0)


def get_version():  # pragma: no cover
    version = '%s.%s' % (VERSION[0], VERSION[1])
    if VERSION[2]:
        version = '%s.%s' % (version, VERSION[2])
    if VERSION[3:] == ('alpha', 0):
        version = '%s pre-alpha' % version
    else:
        if VERSION[3] != 'final':
            version = '%s %s %s' % (version, VERSION[3], VERSION[4])
    return version
bsd-3-clause
Python
77d8f11277e3b006c9f9137a35291892a73156f2
format python code
mralext20/alex-bot
alexBot/cogs/games_reposting.py
alexBot/cogs/games_reposting.py
import logging
from typing import Dict

import discord
from discord import PartialEmoji
from discord.ext import commands
from discord.message import Message
from discord.webhook import AsyncWebhookAdapter, WebhookMessage
from emoji_data import EmojiSequence

from ..tools import Cog

log = logging.getLogger(__name__)


class GamesReposting(Cog):
    def __init__(self, bot: "Bot"):
        super().__init__(bot)
        self.linked: Dict[int, WebhookMessage] = {}
        self.webhook = discord.Webhook.from_url(
            self.bot.config.nerdiowo_announcements_webhook, adapter=AsyncWebhookAdapter(session=self.bot.session)
        )

    @Cog.listener()
    async def on_message(self, message: discord.Message):
        if message.channel.category_id == 896853287108759615:
            additional_content = [await x.to_file() for x in message.attachments]
            msg = await self.webhook.send(
                content=message.content,
                wait=True,
                username=message.author.name,
                avatar_url=message.author.avatar_url,
                files=additional_content,
                embeds=message.embeds,
            )
            self.linked[message.id] = msg

    @Cog.listener()
    async def on_message_edit(self, before: Message, after: Message):
        if before.id in self.linked:
            if before.content != after.content:
                await self.linked[before.id].edit(content=after.content)


def setup(bot):
    bot.add_cog(GamesReposting(bot))
import logging
from typing import Dict

import discord
from discord import PartialEmoji
from discord.ext import commands
from discord.message import Message
from discord.webhook import AsyncWebhookAdapter, WebhookMessage
from emoji_data import EmojiSequence

from ..tools import Cog

log = logging.getLogger(__name__)


class GamesReposting(Cog):
    def __init__(self, bot: "Bot"):
        super().__init__(bot)
        self.linked: Dict[int, WebhookMessage] = {}
        self.webhook = discord.Webhook.from_url(
            self.bot.config.nerdiowo_announcements_webhook, adapter=AsyncWebhookAdapter(session=self.bot.session)
        )

    @Cog.listener()
    async def on_message(self, message: discord.Message):
        if message.channel.category_id == 896853287108759615:
            additional_content = [await x.to_file() for x in message.attachments]
            msg = await self.webhook.send(
                content=message.content,
                wait=True,
                username=message.author.name,
                avatar_url=message.author.avatar_url,
                files=additional_content,
                embeds=message.embeds,
            )
            self.linked[message.id] = msg

    @Cog.listener()
    async def on_message_edit(self, before: Message, after: Message):
        if before.id in self.linked:
            if before.content != after.content:
                await self.linked[before.id].edit(content=after.content)


def setup(bot):
    bot.add_cog(GamesReposting(bot))
mit
Python
cdb7c87fd133b6e99916919b525e9d277a3913dd
Fix a typo.
techhat/libcloud,t-tran/libcloud,mtekel/libcloud,aviweit/libcloud,aleGpereira/libcloud,pquentin/libcloud,niteoweb/libcloud,pantheon-systems/libcloud,mgogoulos/libcloud,supertom/libcloud,Cloud-Elasticity-Services/as-libcloud,cloudControl/libcloud,ClusterHQ/libcloud,NexusIS/libcloud,thesquelched/libcloud,SecurityCompass/libcloud,iPlantCollaborativeOpenSource/libcloud,apache/libcloud,lochiiconnectivity/libcloud,Verizon/libcloud,samuelchong/libcloud,sahildua2305/libcloud,wuyuewen/libcloud,iPlantCollaborativeOpenSource/libcloud,wrigri/libcloud,techhat/libcloud,wuyuewen/libcloud,cryptickp/libcloud,pquentin/libcloud,mbrukman/libcloud,MrBasset/libcloud,NexusIS/libcloud,ByteInternet/libcloud,supertom/libcloud,sgammon/libcloud,ByteInternet/libcloud,wrigri/libcloud,cryptickp/libcloud,sgammon/libcloud,marcinzaremba/libcloud,munkiat/libcloud,ClusterHQ/libcloud,niteoweb/libcloud,atsaki/libcloud,kater169/libcloud,aleGpereira/libcloud,sergiorua/libcloud,mgogoulos/libcloud,erjohnso/libcloud,sfriesel/libcloud,JamesGuthrie/libcloud,wido/libcloud,marcinzaremba/libcloud,MrBasset/libcloud,jerryblakley/libcloud,carletes/libcloud,JamesGuthrie/libcloud,atsaki/libcloud,samuelchong/libcloud,briancurtin/libcloud,apache/libcloud,schaubl/libcloud,SecurityCompass/libcloud,Cloud-Elasticity-Services/as-libcloud,andrewsomething/libcloud,MrBasset/libcloud,samuelchong/libcloud,Scalr/libcloud,Kami/libcloud,lochiiconnectivity/libcloud,munkiat/libcloud,watermelo/libcloud,mbrukman/libcloud,pantheon-systems/libcloud,kater169/libcloud,atsaki/libcloud,Scalr/libcloud,pquentin/libcloud,wrigri/libcloud,dcorbacho/libcloud,jerryblakley/libcloud,mistio/libcloud,supertom/libcloud,mistio/libcloud,jerryblakley/libcloud,pantheon-systems/libcloud,carletes/libcloud,niteoweb/libcloud,carletes/libcloud,dcorbacho/libcloud,erjohnso/libcloud,aviweit/libcloud,schaubl/libcloud,marcinzaremba/libcloud,Kami/libcloud,mbrukman/libcloud,munkiat/libcloud,ZuluPro/libcloud,Cloud-Elasticity-Services/as-libcloud,schaubl/libcloud,SecurityCompass/libcloud,Itxaka/libcloud,sahildua2305/libcloud,mathspace/libcloud,mathspace/libcloud,iPlantCollaborativeOpenSource/libcloud,Scalr/libcloud,DimensionDataCBUSydney/libcloud,vongazman/libcloud,apache/libcloud,lochiiconnectivity/libcloud,mistio/libcloud,cryptickp/libcloud,vongazman/libcloud,aleGpereira/libcloud,watermelo/libcloud,mtekel/libcloud,curoverse/libcloud,jimbobhickville/libcloud,erjohnso/libcloud,techhat/libcloud,Verizon/libcloud,Kami/libcloud,kater169/libcloud,Itxaka/libcloud,thesquelched/libcloud,smaffulli/libcloud,sfriesel/libcloud,illfelder/libcloud,wido/libcloud,watermelo/libcloud,thesquelched/libcloud,NexusIS/libcloud,ZuluPro/libcloud,mgogoulos/libcloud,andrewsomething/libcloud,wido/libcloud,sergiorua/libcloud,sfriesel/libcloud,DimensionDataCBUSydney/libcloud,StackPointCloud/libcloud,JamesGuthrie/libcloud,StackPointCloud/libcloud,illfelder/libcloud,smaffulli/libcloud,briancurtin/libcloud,curoverse/libcloud,StackPointCloud/libcloud,t-tran/libcloud,DimensionDataCBUSydney/libcloud,mathspace/libcloud,sergiorua/libcloud,t-tran/libcloud,smaffulli/libcloud,Itxaka/libcloud,ZuluPro/libcloud,curoverse/libcloud,cloudControl/libcloud,sahildua2305/libcloud,wuyuewen/libcloud,Verizon/libcloud,andrewsomething/libcloud,ByteInternet/libcloud,jimbobhickville/libcloud,illfelder/libcloud,dcorbacho/libcloud,jimbobhickville/libcloud,cloudControl/libcloud,mtekel/libcloud,aviweit/libcloud,vongazman/libcloud,briancurtin/libcloud
libcloud/test/compute/test_ikoula.py
libcloud/test/compute/test_ikoula.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from libcloud.compute.drivers.ikoula import IkoulaNodeDriver

from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase
from libcloud.test import unittest


class IkoulaNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase):
    driver_klass = IkoulaNodeDriver


if __name__ == '__main__':
    sys.exit(unittest.main())
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from libcloud.compute.drivers.ikoula import IkoulaNodeDriver

from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase
from libcloud.test import unittest


class ExoscaleNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase):
    driver_klass = IkoulaNodeDriver


if __name__ == '__main__':
    sys.exit(unittest.main())
apache-2.0
Python
8c70752c87eb0519150e7cf17b146c97847b1460
add new preview-graylog ip to reversedns.py
alphagov/govuk-puppet,alphagov/govuk-puppet,alphagov/govuk-puppet,alphagov/govuk-puppet,alphagov/govuk-puppet,alphagov/govuk-puppet
modules/nagios/files/reversedns.py
modules/nagios/files/reversedns.py
#!/usr/bin/env python
import socket
import sys

if sys.argv[1] == "ip-10-236-86-54.eu-west-1.compute.internal":
    print "frontend.production.alphagov.co.uk"
    exit(0)

if sys.argv[1] == "ip-10-250-157-37.eu-west-1.compute.internal":
    print "static.production.alphagov.co.uk"
    exit(0)

if sys.argv[1] == "ip-10-53-54-49.eu-west-1.compute.internal":
    print "frontend.cluster"
    exit(0)

if sys.argv[1] == "ip-10-54-182-112.eu-west-1.compute.internal":
    print "signonotron.production.alphagov.co.uk"
    exit(0)

# hack for the change to whitehalls host not being done correctly
if sys.argv[1] == "ip-10-229-67-207.eu-west-1.compute.internal":
    # print "ip-10-224-50-207.eu-west-1.compute.internal"
    print "whitehall.production.alphagov.co.uk"
    exit(0)

if sys.argv[1] == "ip-10-236-86-54.eu-west-1.compute.internal":
    print "frontend.production.alphagov.co.uk"

# hacks to pickup correct graphs, due to local hosts and ganglia name mismatch
if sys.argv[1] in ['ip-10-54-182-112.eu-west-1.compute.internal',
                   'ip-10-236-86-54.eu-west-1.compute.internal',
                   'ip-10-250-157-37.eu-west-1.compute.internal',
                   'ip-10-53-54-49.eu-west-1.compute.internal',
                   'ip-10-32-31-104.eu-west-1.compute.internal'
                   ]:
    print sys.argv[1]
    exit(0)

try:
    print socket.gethostbyaddr(sys.argv[1])[0]
except:
    print sys.argv[1]
#!/usr/bin/env python import socket import sys if sys.argv[1] == "ip-10-236-86-54.eu-west-1.compute.internal": print "frontend.production.alphagov.co.uk" exit(0) if sys.argv[1] == "ip-10-250-157-37.eu-west-1.compute.internal": print "static.production.alphagov.co.uk" exit(0) if sys.argv[1] == "ip-10-53-54-49.eu-west-1.compute.internal": print "frontend.cluster" exit(0) if sys.argv[1] == "ip-10-54-182-112.eu-west-1.compute.internal": print "signonotron.production.alphagov.co.uk" exit(0) # hack for the change to whitehalls host not being done correctly if sys.argv[1] == "ip-10-229-67-207.eu-west-1.compute.internal": # print "ip-10-224-50-207.eu-west-1.compute.internal" print "whitehall.production.alphagov.co.uk" exit(0) if sys.argv[1] == "ip-10-236-86-54.eu-west-1.compute.internal": print "frontend.production.alphagov.co.uk" # hacks to pickup correct graphs, due to local hosts and ganglia name mismatch if sys.argv[1] in ['ip-10-54-182-112.eu-west-1.compute.internal', 'ip-10-236-86-54.eu-west-1.compute.internal', 'ip-10-250-157-37.eu-west-1.compute.internal', 'ip-10-53-54-49.eu-west-1.compute.internal']: print sys.argv[1] exit(0) try: print socket.gethostbyaddr(sys.argv[1])[0] except: print sys.argv[1]
mit
Python
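A table-driven sketch of the same lookups as the reversedns.py record above (hostnames copied from the record; precedence follows the exit(0) branches, which always win over the pass-through list). This is an illustration of the pattern the script approximates, not the deployed script:

#!/usr/bin/env python
# Sketch: the per-host overrides as a mapping, reverse DNS as the fallback.
import socket
import sys

OVERRIDES = {
    'ip-10-236-86-54.eu-west-1.compute.internal': 'frontend.production.alphagov.co.uk',
    'ip-10-250-157-37.eu-west-1.compute.internal': 'static.production.alphagov.co.uk',
    'ip-10-53-54-49.eu-west-1.compute.internal': 'frontend.cluster',
    'ip-10-54-182-112.eu-west-1.compute.internal': 'signonotron.production.alphagov.co.uk',
    'ip-10-229-67-207.eu-west-1.compute.internal': 'whitehall.production.alphagov.co.uk',
}
# hosts (like the new preview-graylog box) whose local name is the ganglia name
PASSTHROUGH = {'ip-10-32-31-104.eu-west-1.compute.internal'}

def resolve(name):
    if name in OVERRIDES:
        return OVERRIDES[name]
    if name in PASSTHROUGH:
        return name
    try:
        return socket.gethostbyaddr(name)[0]
    except socket.error:
        return name

if __name__ == '__main__':
    print(resolve(sys.argv[1]))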
b5b4c1f5b72494e00064b36f2ee1c53d1b5c2aca
Revert 83430 - NaCl: Re-enable tests, since they pass on the trybots BUG=none TEST=nacl_integration Review URL: http://codereview.chromium.org/6904067 [email protected] Review URL: http://codereview.chromium.org/6902132
gavinp/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,ropik/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,adobe/chromium,yitian134/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,ropik/chromium,gavinp/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,gavinp/chromium,gavinp/chromium,adobe/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,ropik/chromium,adobe/chromium
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
#!/usr/bin/python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import subprocess import sys def Main(): # TODO(ncbray): figure out why this is failing on windows and enable. if (sys.platform in ['win32', 'cygwin'] and 'xp-nacl-chrome' not in os.environ.get('PWD', '')): return # TODO(ncbray): figure out why this is failing on mac and re-enable. if (sys.platform == 'darwin' and 'mac-nacl-chrome' not in os.environ.get('PWD', '')): return # TODO(ncbray): figure out why this is failing on some linux trybots. if (sys.platform in ['linux', 'linux2'] and 'hardy64-nacl-chrome' not in os.environ.get('PWD', '')): return script_dir = os.path.dirname(os.path.abspath(__file__)) test_dir = os.path.dirname(script_dir) chrome_dir = os.path.dirname(test_dir) src_dir = os.path.dirname(chrome_dir) nacl_integration_script = os.path.join( src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py') cmd = [sys.executable, nacl_integration_script] + sys.argv[1:] print cmd subprocess.check_call(cmd) if __name__ == '__main__': Main()
#!/usr/bin/python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import subprocess import sys def Main(): script_dir = os.path.dirname(os.path.abspath(__file__)) test_dir = os.path.dirname(script_dir) chrome_dir = os.path.dirname(test_dir) src_dir = os.path.dirname(chrome_dir) nacl_integration_script = os.path.join( src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py') cmd = [sys.executable, nacl_integration_script] + sys.argv[1:] print cmd subprocess.check_call(cmd) if __name__ == '__main__': Main()
bsd-3-clause
Python
4ab211d6dd50c043cacd24db93a6bc64cfdb9ed5
update tools/validate_runtests_log.py for pytest
e-q/scipy,ilayn/scipy,rgommers/scipy,endolith/scipy,anntzer/scipy,jamestwebber/scipy,nmayorov/scipy,andyfaff/scipy,arokem/scipy,grlee77/scipy,matthew-brett/scipy,aarchiba/scipy,vigna/scipy,apbard/scipy,zerothi/scipy,perimosocordiae/scipy,pizzathief/scipy,perimosocordiae/scipy,nmayorov/scipy,scipy/scipy,Stefan-Endres/scipy,rgommers/scipy,pizzathief/scipy,aarchiba/scipy,jor-/scipy,e-q/scipy,Eric89GXL/scipy,arokem/scipy,rgommers/scipy,andyfaff/scipy,jamestwebber/scipy,vigna/scipy,endolith/scipy,nmayorov/scipy,person142/scipy,perimosocordiae/scipy,matthew-brett/scipy,Stefan-Endres/scipy,lhilt/scipy,gertingold/scipy,Stefan-Endres/scipy,tylerjereddy/scipy,zerothi/scipy,WarrenWeckesser/scipy,mdhaber/scipy,nmayorov/scipy,gfyoung/scipy,WarrenWeckesser/scipy,jor-/scipy,jor-/scipy,ilayn/scipy,anntzer/scipy,apbard/scipy,zerothi/scipy,tylerjereddy/scipy,andyfaff/scipy,arokem/scipy,anntzer/scipy,jamestwebber/scipy,jamestwebber/scipy,jor-/scipy,lhilt/scipy,jamestwebber/scipy,jor-/scipy,Stefan-Endres/scipy,mdhaber/scipy,nmayorov/scipy,perimosocordiae/scipy,pizzathief/scipy,zerothi/scipy,andyfaff/scipy,gertingold/scipy,gertingold/scipy,aeklant/scipy,Eric89GXL/scipy,WarrenWeckesser/scipy,vigna/scipy,grlee77/scipy,endolith/scipy,gfyoung/scipy,person142/scipy,zerothi/scipy,matthew-brett/scipy,zerothi/scipy,Eric89GXL/scipy,perimosocordiae/scipy,scipy/scipy,pizzathief/scipy,grlee77/scipy,endolith/scipy,gfyoung/scipy,scipy/scipy,scipy/scipy,lhilt/scipy,aeklant/scipy,WarrenWeckesser/scipy,Stefan-Endres/scipy,matthew-brett/scipy,e-q/scipy,endolith/scipy,vigna/scipy,endolith/scipy,gfyoung/scipy,vigna/scipy,Eric89GXL/scipy,gfyoung/scipy,tylerjereddy/scipy,matthew-brett/scipy,perimosocordiae/scipy,rgommers/scipy,ilayn/scipy,aarchiba/scipy,aeklant/scipy,WarrenWeckesser/scipy,aarchiba/scipy,WarrenWeckesser/scipy,person142/scipy,anntzer/scipy,rgommers/scipy,pizzathief/scipy,scipy/scipy,tylerjereddy/scipy,e-q/scipy,ilayn/scipy,scipy/scipy,gertingold/scipy,aarchiba/scipy,grlee77/scipy,lhilt/scipy,Eric89GXL/scipy,arokem/scipy,mdhaber/scipy,mdhaber/scipy,andyfaff/scipy,apbard/scipy,arokem/scipy,apbard/scipy,lhilt/scipy,e-q/scipy,Stefan-Endres/scipy,mdhaber/scipy,person142/scipy,person142/scipy,grlee77/scipy,andyfaff/scipy,mdhaber/scipy,ilayn/scipy,tylerjereddy/scipy,ilayn/scipy,gertingold/scipy,apbard/scipy,Eric89GXL/scipy,anntzer/scipy,aeklant/scipy,aeklant/scipy,anntzer/scipy
tools/validate_runtests_log.py
tools/validate_runtests_log.py
#!/usr/bin/env python """ Take the test runner log output from the stdin, looking for the magic line nose runner prints when the test run was successful. In an ideal world, this should be done directly in runtests.py using the nose API, some failure modes are fooling nose to terminate the python process with zero exit code, see, eg, https://github.com/scipy/scipy/issues/4736 In short, lapack's xerbla can terminate the process with a fortran level STOP command, which (i) aborts the py process so that runtests.py does not finish, and (ii) the exit code is implementation-defined. Also check that the number of tests run is larger than some baseline number (taken from the state of the master branch at some random point in time.) This probably could/should be made less brittle. """ from __future__ import print_function import sys import re if __name__ == "__main__": # full or fast test suite? try: testmode = sys.argv[1] if testmode not in ('fast', 'full'): raise IndexError except IndexError: raise ValueError("Usage: validate.py {full|fast} < logfile.") # fetch the expected number of tests # these numbers are for 6abad09 # XXX: this should probably track the commit hash or commit date expected_size = {'full': 19055, 'fast': 17738} # read in the log, parse for the pytest printout r1 = re.compile("(?P<num_failed>\d+) failed, (?P<num_passed>\d+) passed,.* in (?P<time>\d+\S+)") r2 = re.compile("(?P<num_passed>\d+) passed,.* in (?P<time>\d+\S+)") found_it = False while True: line = sys.stdin.readline() if not line: break m = r1.search(line) if not m: m = r2.search(line) if m: found_it = True break if found_it: passed = int(m.group('num_passed')) try: failed = int(m.group('num_failed')) except IndexError: failed = 0 if failed: print("*** Looks like some tests failed.") sys.exit(-1) # now check that the number of tests run is reasonable expected = expected_size[testmode] actual = passed + failed if actual < expected: print("*** Too few tests: expected %s, run %s" % (expected, actual)) sys.exit(1) else: sys.exit(0) else: print('*** Test runner validation errored: did the run really finish?') sys.exit(-1)
#!/usr/bin/env python """ Take the test runner log output from the stdin, looking for the magic line nose runner prints when the test run was successful. In an ideal world, this should be done directly in runtests.py using the nose API, some failure modes are fooling nose to terminate the python process with zero exit code, see, eg, https://github.com/scipy/scipy/issues/4736 In short, lapack's xerbla can terminate the process with a fortran level STOP command, which (i) aborts the py process so that runtests.py does not finish, and (ii) the exit code is implementation-defined. Also check that the number of tests run is larger than some baseline number (taken from the state of the master branch at some random point in time.) This probably could/should be made less brittle. """ from __future__ import print_function import sys import re if __name__ == "__main__": # full or fast test suite? try: testmode = sys.argv[1] if testmode not in ('fast', 'full'): raise IndexError except IndexError: raise ValueError("Usage: validate.py {full|fast} < logfile.") # fetch the expected number of tests # these numbers are for 6abad09 # XXX: this should probably track the commit hash or commit date expected_size = {'full': 19055, 'fast': 17738} # read in the log, parse for the nose printout: # Ran NNN tests in MMMs # <blank line> # OK (SKIP=X, KNOWNFAIL=Y) or FAILED (errors=X, failures=Y) r = re.compile("Ran (?P<num_tests>\d+) tests in (?P<time>\d+\S+)") status, found_it = False, False while True: line = sys.stdin.readline() if not line: break m = r.search(line) if m: found_it = True sys.stdin.readline() # skip the next one line = sys.stdin.readline() if "OK" in line: status = True break if found_it: # did it errored or failed? if not status: print("*** Looks like some tests failed.") sys.exit(-1) # now check that the number of tests run is reasonable expected = expected_size[testmode] actual = int(m.group('num_tests')) if actual < expected: print("*** Too few tests: expected %s, run %s" % (expected, actual)) sys.exit(1) else: sys.exit(0) else: print('*** Test runner validation errored: did the run really finish?') sys.exit(-1)
bsd-3-clause
Python
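A quick standalone check (not part of the script) that the two patterns in the new version cover pytest's summary line both with and without failures; the sample lines are made up but follow pytest's "N failed, M passed, ... in T" shape:

import re

r1 = re.compile(r"(?P<num_failed>\d+) failed, (?P<num_passed>\d+) passed,"
                r".* in (?P<time>\d+\S+)")
r2 = re.compile(r"(?P<num_passed>\d+) passed,.* in (?P<time>\d+\S+)")

for line in ("3 failed, 17735 passed, 12 skipped in 1024.5 seconds",
             "17738 passed, 12 skipped in 987.3 seconds"):
    # try the with-failures pattern first, exactly as the script does
    m = r1.search(line) or r2.search(line)
    print(m.group('num_passed'), m.group('time'))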
992b9c46dd432ad409025a3cbaeb1c06f880526c
Resolve readline/ncurses dependency when building Lua
krafczyk/spack,EmreAtes/spack,iulian787/spack,LLNL/spack,mfherbst/spack,EmreAtes/spack,iulian787/spack,LLNL/spack,TheTimmy/spack,tmerrick1/spack,lgarren/spack,matthiasdiener/spack,skosukhin/spack,matthiasdiener/spack,LLNL/spack,krafczyk/spack,EmreAtes/spack,tmerrick1/spack,tmerrick1/spack,lgarren/spack,mfherbst/spack,TheTimmy/spack,EmreAtes/spack,TheTimmy/spack,krafczyk/spack,matthiasdiener/spack,mfherbst/spack,EmreAtes/spack,skosukhin/spack,tmerrick1/spack,iulian787/spack,skosukhin/spack,LLNL/spack,iulian787/spack,skosukhin/spack,tmerrick1/spack,TheTimmy/spack,skosukhin/spack,LLNL/spack,lgarren/spack,mfherbst/spack,matthiasdiener/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,mfherbst/spack,TheTimmy/spack,krafczyk/spack,lgarren/spack,lgarren/spack
var/spack/packages/lua/package.py
var/spack/packages/lua/package.py
from spack import * import os class Lua(Package): """ The Lua programming language interpreter and library """ homepage = "http://www.lua.org" url = "http://www.lua.org/ftp/lua-5.1.5.tar.gz" version('5.3.1', '797adacada8d85761c079390ff1d9961') version('5.3.0', 'a1b0a7e92d0c85bbff7a8d27bf29f8af') version('5.2.4', '913fdb32207046b273fdb17aad70be13') version('5.2.3', 'dc7f94ec6ff15c985d2d6ad0f1b35654') version('5.2.2', 'efbb645e897eae37cad4344ce8b0a614') version('5.2.1', 'ae08f641b45d737d12d30291a5e5f6e3') version('5.2.0', 'f1ea831f397214bae8a265995ab1a93e') version('5.1.5', '2e115fe26e435e33b0d5c022e4490567') version('5.1.4', 'd0870f2de55d59c1c8419f36e8fac150') version('5.1.3', 'a70a8dfaa150e047866dc01a46272599') depends_on('ncurses') depends_on('readline') def install(self, spec, prefix): make('INSTALL_TOP=%s' % prefix, 'MYLDFLAGS=-L%s -lncurses' % spec['ncurses'].prefix.lib, 'linux') make('INSTALL_TOP=%s' % prefix, 'MYLDFLAGS=-L%s -lncurses' % spec['ncurses'].prefix.lib, 'install')
from spack import * import os class Lua(Package): """ The Lua programming language interpreter and library """ homepage = "http://www.lua.org" url = "http://www.lua.org/ftp/lua-5.1.5.tar.gz" version('5.3.1', '797adacada8d85761c079390ff1d9961') version('5.3.0', 'a1b0a7e92d0c85bbff7a8d27bf29f8af') version('5.2.4', '913fdb32207046b273fdb17aad70be13') version('5.2.3', 'dc7f94ec6ff15c985d2d6ad0f1b35654') version('5.2.2', 'efbb645e897eae37cad4344ce8b0a614') version('5.2.1', 'ae08f641b45d737d12d30291a5e5f6e3') version('5.2.0', 'f1ea831f397214bae8a265995ab1a93e') version('5.1.5', '2e115fe26e435e33b0d5c022e4490567') version('5.1.4', 'd0870f2de55d59c1c8419f36e8fac150') version('5.1.3', 'a70a8dfaa150e047866dc01a46272599') depends_on('ncurses') def install(self, spec, prefix): make('INSTALL_TOP=%s' % prefix, 'MYLDFLAGS="-L%s/lib -Wl,-rpath,%s"' % (spec['ncurses'].prefix,spec['ncurses'].prefix), 'linux') make('INSTALL_TOP=%s' % prefix, 'MYLDFLAGS="-L%s/lib -Wl,-rpath,%s"' % (spec['ncurses'].prefix,spec['ncurses'].prefix), 'install')
lgpl-2.1
Python
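On the quoting change in the Lua record above: assuming Spack's make wrapper execs its arguments without an intermediate shell (the usual subprocess behaviour; this is an assumption about the Spack internals of that era), the embedded double quotes in the old MYLDFLAGS="..." reach make as literal characters inside the variable's value. A standalone demonstration of the mechanism:

# Without a shell, quote characters inside an argv element are data, not syntax:
# make would see a MYLDFLAGS value that literally starts with '"'.
import subprocess
import sys

out = subprocess.run(
    [sys.executable, '-c', 'import sys; print(sys.argv[1])',
     'MYLDFLAGS="-L/opt/ncurses/lib"'],
    capture_output=True, text=True)
print(out.stdout, end='')   # MYLDFLAGS="-L/opt/ncurses/lib"

The new recipe sidesteps the issue by dropping the quotes entirely and linking ncurses explicitly with -lncurses.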
4f2fa4e43b314c9d05e0b9b9e73641463c16a9cb
Set up the proposal tasks on app startup
cityofsomerville/citydash,codeforboston/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/citydash,codeforboston/cornerwise,cityofsomerville/cornerwise,cityofsomerville/cornerwise
server/proposal/__init__.py
server/proposal/__init__.py
from django.apps import AppConfig class ProposalConfig(AppConfig): name = "proposal" def ready(self): # Register tasks with Celery: from . import tasks tasks.set_up_hooks()
from django.apps import AppConfig class ProposalConfig(AppConfig): name = "proposal" def ready(self): # Register tasks with Celery: from . import tasks
mit
Python
69a16e61f0b0d5eb6d1f0819ff379c0d86b67dc3
fix in lcb
numairmansur/RoBO,automl/RoBO,numairmansur/RoBO,automl/RoBO
robo/acquisition/lcb.py
robo/acquisition/lcb.py
import logging import numpy as np from robo.acquisition.base_acquisition import BaseAcquisitionFunction logger = logging.getLogger(__name__) class LCB(BaseAcquisitionFunction): def __init__(self, model, X_lower, X_upper, par=0.0, **kwargs): r""" The lower confidence bound acquisition functions that computes for a test point the acquisition value by: .. math:: LCB(X) := \mu(X) - \kappa\sigma(X) Parameters ---------- model: Model object A model that implements at least - predict(X) - getCurrentBestX(). If you want to calculate derivatives than it should also support - predictive_gradients(X) X_lower: np.ndarray (D) Lower bounds of the input space X_upper: np.ndarray (D) Upper bounds of the input space par: float Controls the balance between exploration and exploitation of the acquisition function. Default is 0.01 """ self.par = par super(LCB, self).__init__(model, X_lower, X_upper) def compute(self, X, derivative=False, **kwargs): """ Computes the LCB acquisition value and its derivatives. Parameters ---------- X: np.ndarray(1, D), The input point where the acquisition function should be evaluate. The dimensionality of X is (N, D), with N as the number of points to evaluate at and D is the number of dimensions of one X. derivative: Boolean If is set to true also the derivative of the acquisition function at X is returned. Returns ------- np.ndarray(1,1) LCB value of X np.ndarray(1,D) Derivative of LCB at X (only if derivative=True) """ mean, var = self.model.predict(X) # Minimize in f so we maximize the negative lower bound acq = - (mean - self.par * np.sqrt(var)) if derivative: dm, dv = self.model.predictive_gradients(X) grad = -(dm - self.par * dv / (2 * np.sqrt(var))) return acq, grad else: return acq def update(self, model): self.model = model
import logging import numpy as np from robo.acquisition.base_acquisition import BaseAcquisitionFunction logger = logging.getLogger(__name__) class LCB(BaseAcquisitionFunction): def __init__(self, model, X_lower, X_upper, par=0.0, **kwargs): r""" The lower confidence bound acquisition functions that computes for a test point the acquisition value by: .. math:: LCB(X) := \mu(X) - \kappa\sigma(X) Parameters ---------- model: Model object A model that implements at least - predict(X) - getCurrentBestX(). If you want to calculate derivatives than it should also support - predictive_gradients(X) X_lower: np.ndarray (D) Lower bounds of the input space X_upper: np.ndarray (D) Upper bounds of the input space par: float Controls the balance between exploration and exploitation of the acquisition function. Default is 0.01 """ self.par = par super(LCB, self).__init__(model, X_lower, X_upper) def compute(self, X, derivative=False, **kwargs): """ Computes the LCB acquisition value and its derivatives. Parameters ---------- X: np.ndarray(1, D), The input point where the acquisition function should be evaluate. The dimensionality of X is (N, D), with N as the number of points to evaluate at and D is the number of dimensions of one X. derivative: Boolean If is set to true also the derivative of the acquisition function at X is returned. Returns ------- np.ndarray(1,1) LCB value of X np.ndarray(1,D) Derivative of LCB at X (only if derivative=True) """ mean, var = self.model.predict(X) # Minimize in f so we maximize the negative lower bound acq = - mean + self.par * np.sqrt(var) if derivative: dm, dv = self.model.predictive_gradients(X) grad = -dm + self.par * dv / (2 * np.sqrt(var)) return acq, grad else: return acq def update(self, model): self.model = model
bsd-3-clause
Python
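A short check of the gradient in the LCB record above, in the docstring's own notation (kappa is `par` in the code, v(X) the predictive variance): the quantity maximised is the negated lower bound

.. math:: acq(X) := -\mathrm{LCB}(X) = -\mu(X) + \kappa\sqrt{v(X)}

and since :math:`\frac{d}{dX}\sqrt{v} = \frac{1}{2\sqrt{v}}\frac{dv}{dX}`,

.. math:: \nabla acq(X) = -\Big(\nabla\mu(X) - \kappa\,\frac{\nabla v(X)}{2\sqrt{v(X)}}\Big)

which is exactly `-(dm - self.par * dv / (2 * np.sqrt(var)))`. Note the old and new expressions are algebraically identical; the rewrite only groups the bound before negating it, making the minimise-f-by-maximising-the-negated-LCB intent explicit.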
3468b32964560f4092593e03ba552d7e6b56943d
Support renaming of _ to % routines
OSEHRA/VistA,OSEHRA-Sandbox/VistA,timmvt/VistA_tt,OSEHRA/VistA,OSEHRA/VistA,timmvt/VistA_tt,apexdatasolutions/VistA,josephsnyder/VistA,josephsnyder/VistA,josephsnyder/VistA,OSEHRA-Sandbox/VistA,timmvt/VistA_tt,OSEHRA/VistA,apexdatasolutions/VistA,timmvt/VistA_tt,timmvt/VistA_tt,mdgeek/VistA-FHIR-CWF,OSEHRA-Sandbox/VistA,apexdatasolutions/VistA,OSEHRA/VistA,shabiel/VistA,shabiel/VistA,OSEHRA-Sandbox/VistA,mdgeek/VistA-FHIR-CWF,mdgeek/VistA-FHIR-CWF,apexdatasolutions/VistA,OSEHRA/VistA,shabiel/VistA,OSEHRA-Sandbox/VistA,shabiel/VistA,shabiel/VistA,mdgeek/VistA-FHIR-CWF,apexdatasolutions/VistA,mdgeek/VistA-FHIR-CWF,josephsnyder/VistA,mdgeek/VistA-FHIR-CWF,josephsnyder/VistA,josephsnyder/VistA,shabiel/VistA,OSEHRA-Sandbox/VistA
Scripts/PackRO.py
Scripts/PackRO.py
#!/usr/bin/env python # Pack .m files into M[UMPS] routine transfer format (^%RO) # # python PackRO.py *.m > routines.ro # # or # # ls *.m | python PackRO.py > routines.ro # #--------------------------------------------------------------------------- # Copyright 2011 The Open Source Electronic Health Record Agent # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #--------------------------------------------------------------------------- import sys import os def pack(files, output): output.write('Routines\n\n') for f in files: if not f.endswith('.m'): sys.stderr.write('Skipping non-.m file: %s\n' % f) continue n = os.path.basename(f)[:-2] n = n.replace("_","%") m = open(f,"r") output.write('%s\n'%n) for line in m: output.write(line) output.write('\n') output.write('\n') output.write('\n') def main(): files = sys.argv[1:] if not files: files = [a.rstrip() for a in sys.stdin] pack(files, sys.stdout) if __name__ == '__main__': main()
#!/usr/bin/env python # Pack .m files into M[UMPS] routine transfer format (^%RO) # # python PackRO.py *.m > routines.ro # # or # # ls *.m | python PackRO.py > routines.ro # #--------------------------------------------------------------------------- # Copyright 2011 The Open Source Electronic Health Record Agent # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #--------------------------------------------------------------------------- import sys import os def pack(files, output): output.write('Routines\n\n') for f in files: if not f.endswith('.m'): sys.stderr.write('Skipping non-.m file: %s\n' % f) continue n = os.path.basename(f)[:-2] m = open(f,"r") output.write('%s\n'%n) for line in m: output.write(line) output.write('\n') output.write('\n') output.write('\n') def main(): files = sys.argv[1:] if not files: files = [a.rstrip() for a in sys.stdin] pack(files, sys.stdout) if __name__ == '__main__': main()
apache-2.0
Python
d6c1774b75839192b0235e5737cdba0d17759fde
Update mqtt_easydriver_stepper.py
pumanzor/iot-redlibre,pumanzor/iot-redlibre
linkit/easydriver/mqtt_easydriver_stepper.py
linkit/easydriver/mqtt_easydriver_stepper.py
import paho.mqtt.client as mqtt import json, time import mraa pin19 = mraa.Pwm(19) pin0 = mraa.Gpio(0) pin0.dir(mraa.DIR_OUT) # ----- CHANGE THESE FOR YOUR SETUP ----- MQTT_HOST = "190.97.168.236" MQTT_PORT = 1883 #--------------------------------------- def on_connect(client, userdata, rc): print("\nConnected with result code " + str(rc) + "\n") client.subscribe("/pryxo/yxusers/motor/control/") print("Subscribed to homecontrol") def on_message_iotrl(client, userdata, msg): print("\n\t* Linkit UPDATED ("+msg.topic+"): " + str(msg.payload)) if msg.payload == "m1": pin0.write(0) pin1 = mraa.Gpio(1) pin1.dir(mraa.DIR_OUT) pin1.write(0) pin19.period_us(300) pin19.enable(True) pin19.write(0.1) time.sleep(2) client.publish("/pryxo/yxusers/iot/status/", "derecha", 2) if msg.payload == "m0": pin0.write(0) pin1 = mraa.Gpio(1) pin1.dir(mraa.DIR_OUT) pin1.write(1) pin19.period_us(300) pin19.enable(True) pin19.write(0.1) time.sleep(2) client.publish("/pryxo/yxusers/iot/status/", "izquierda", 2) if msg.payload == "m2": pin0.write(1) client.publish("/pryxo/yxusers/iot/status/", "STOP", 2) def command_error(): print("Error: Unknown command") client = mqtt.Client(client_id="linkit7688-stepper-motor") client.on_connect = on_connect client.message_callback_add("/pryxo/yxusers/motor/control/", on_message_iotrl) client.connect(MQTT_HOST, MQTT_PORT, 60) client.loop_start() # Main program loop while True: time.sleep(10)
import paho.mqtt.client as mqtt import json, time import mraa pin19 = mraa.Pwm(19) pin0 = mraa.Gpio(0) pin0.dir(mraa.DIR_OUT) # ----- CHANGE THESE FOR YOUR SETUP ----- MQTT_HOST = "190.97.168.236" MQTT_PORT = 1883 def on_connect(client, userdata, rc): print("\nConnected with result code " + str(rc) + "\n") #Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. #client.subscribe("/pyxo/xyusers/{USERNAME}/{APIKEY}/iot/control/".format(**vars()), 2) # Connect to everything in /mcu topic client.subscribe("/pryxo/yxusers/motor/control/") print("Subscribed to homecontrol") def on_message_iotrl(client, userdata, msg): print("\n\t* Linkit UPDATED ("+msg.topic+"): " + str(msg.payload)) if msg.payload == "m1": pin0.write(0) pin1 = mraa.Gpio(1) pin1.dir(mraa.DIR_OUT) pin1.write(0) pin19.period_us(300) pin19.enable(True) pin19.write(0.1) time.sleep(2) client.publish("/pryxo/yxusers/iot/status/", "derecha", 2) if msg.payload == "m0": pin1 = mraa.Gpio(1) pin1.dir(mraa.DIR_OUT) pin1.write(1) pin19.period_us(300) pin19.enable(True) pin19.write(0.1) time.sleep(2) client.publish("/pryxo/yxusers/iot/status/", "izquierda", 2) if msg.payload == "m2": pin0.write(1) client.publish("/pryxo/yxusers/iot/status/", "STOP", 2) def command_error(): print("Error: Unknown command") client = mqtt.Client(client_id="linkit7688-stepper-motor") # Callback declarations (functions run based on certain messages) client.on_connect = on_connect client.message_callback_add("/pryxo/yxusers/motor/control/", on_message_iotrl) # This is where the MQTT service connects and starts listening for messages client.connect(MQTT_HOST, MQTT_PORT, 60) client.loop_start() # Background thread to call loop() automatically # Main program loop while True: time.sleep(10)
mit
Python
6b7bd1c412b21a748b39a07a792f8b2c8461f9e2
Fix issue #17
aibor/marmoset
marmoset/installimage/installimage_config.py
marmoset/installimage/installimage_config.py
import os class InstallimageConfig: CFG_DIR = '/srv/tftp/installimage/' def __init__(self, mac): self.variables = {} self.mac = mac if self.exists(): self.__read_config_file() def add_or_set(self, key, value): self.variables[key.upper()] = value def create(self): self.__write_config_file() def exists(self): return os.path.isfile(self.file_path()) def file_name(self): '''Return the file name in the Installimage file name style.''' return self.mac.replace(":", "_") def file_path(self, name=None): '''Return the path to the config file of th instance.''' if name is None: name = self.file_name() cfgdir = InstallimageConfig.CFG_DIR.rstrip('/') return os.path.join(cfgdir, name) def __read_config_file(self, path=None): if path is None: path = self.file_path() lines = [] with open(path, 'r') as f: lines = f.readlines() f.close() for line in lines: key = line.split(" ")[0] value = line.split(" ", 1)[1] self.variables[key] = value def __write_config_file(self, path=None): if path is None: path = self.file_path() variable_lines = [] for key in self.variables: variable_lines.append("%s %s" % (key, self.variables[key])) content = "\n".join(variable_lines) os.makedirs(InstallimageConfig.CFG_DIR, exist_ok=True) with open(path, 'w') as f: f.write(content) f.close()
import os class InstallimageConfig: CFG_DIR = '/srv/tftp/installimage/' def __init__(self, mac): self.variables = {} self.mac = mac if self.exists(): self.__read_config_file() def add_or_set(self, key, value): self.variables[key] = value def create(self): self.__write_config_file() def exists(self): return os.path.isfile(self.file_path()) def file_name(self): '''Return the file name in the Installimage file name style.''' return self.mac.replace(":", "_") def file_path(self, name=None): '''Return the path to the config file of th instance.''' if name is None: name = self.file_name() cfgdir = InstallimageConfig.CFG_DIR.rstrip('/') return os.path.join(cfgdir, name) def __read_config_file(self, path=None): if path is None: path = self.file_path() lines = [] with open(path, 'r') as f: lines = f.readlines() f.close() for line in lines: key = line.split(" ")[0] value = line.split(" ", 1)[1] self.variables[key] = value def __write_config_file(self, path=None): if path is None: path = self.file_path() variable_lines = [] for key in self.variables: variable_lines.append("%s %s" % (key.upper(), self.variables[key])) content = "\n".join(variable_lines) os.makedirs(InstallimageConfig.CFG_DIR, exist_ok=True) with open(path, 'w') as f: f.write(content) f.close()
agpl-3.0
Python
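The diff above moves the .upper() from write time to insert time. Issue #17 itself isn't reproduced in this record, but the likely failure mode is visible in the code: when keys are only upper-cased on write, two case-variants of one setting coexist in `variables` and both get serialised. A toy reproduction (key names are made up):

variables = {}

def add_or_set_old(key, value):
    variables[key] = value           # old: stored as given, upper-cased on write

add_or_set_old('hostname', 'node-a')
add_or_set_old('HOSTNAME', 'node-b')
print(['%s %s' % (k.upper(), v) for k, v in variables.items()])
# ['HOSTNAME node-a', 'HOSTNAME node-b']  -> duplicate lines in the config file

variables = {}

def add_or_set_new(key, value):
    variables[key.upper()] = value   # new: normalise up front, second call wins

add_or_set_new('hostname', 'node-a')
add_or_set_new('HOSTNAME', 'node-b')
print(['%s %s' % (k, v) for k, v in variables.items()])
# ['HOSTNAME node-b']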
74cfb1bd8e60e1d348115677e92c5e64858ec785
Add clearer instructions on no component support. (#2685)
QingChenmsft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,samedder/azure-cli,yugangw-msft/azure-cli,samedder/azure-cli,yugangw-msft/azure-cli,samedder/azure-cli,samedder/azure-cli,QingChenmsft/azure-cli,QingChenmsft/azure-cli,QingChenmsft/azure-cli
packaged_releases/patches/patch_component_custom.py
packaged_releases/patches/patch_component_custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core._util import CLIError MSG_TMPL = """ az component and subcommands are not available with the current Azure CLI installation. If installed with apt-get, please use 'apt-get update' to update this installation. If installed with Docker, please use 'docker pull' to update this installation. If installed with Windows MSI, download the new MSI to update this installation. {} """ def _raise_error(msg): raise CLIError(MSG_TMPL.format(msg)) def list_components(): """ List the installed components """ _raise_error("Use 'az --version' to list component versions.") def list_available_components(): """ List publicly available components that can be installed """ _raise_error("No additional components available.") def remove(component_name): """ Remove a component """ _raise_error("Components cannot be removed.") def update(private=False, pre=False, link=None, additional_components=None, allow_third_party=False): """ Update the CLI and all installed components """ _raise_error("Components cannot be updated.")
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core._util import CLIError MSG_TMPL = """ az component and subcommands are not available with the current Azure CLI installation. If installed with apt-get, please use apt-get to update this installation. {} """ def _raise_error(msg): raise CLIError(MSG_TMPL.format(msg)) def list_components(): """ List the installed components """ _raise_error("Use 'az --version' to list component versions.") def list_available_components(): """ List publicly available components that can be installed """ _raise_error("No additional components available.") def remove(component_name): """ Remove a component """ _raise_error("Components cannot be removed.") def update(private=False, pre=False, link=None, additional_components=None, allow_third_party=False): """ Update the CLI and all installed components """ _raise_error("Components cannot be updated.")
mit
Python
c5a31be1bd452224c2b35c4f3e3132b2df1431e7
reorder imports
liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin
meinberlin/apps/documents/exports.py
meinberlin/apps/documents/exports.py
from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from adhocracy4.comments.models import Comment from adhocracy4.exports import mixins as export_mixins from adhocracy4.exports import views as export_views from meinberlin.apps.exports import mixins as mb_export_mixins from meinberlin.apps.exports import register_export @register_export(_('Documents with comments')) class DocumentExportView( export_mixins.ExportModelFieldsMixin, mb_export_mixins.UserGeneratedContentExportMixin, export_mixins.ItemExportWithLinkMixin, export_mixins.ItemExportWithRatesMixin, mb_export_mixins.ItemExportWithRepliesToMixin, export_views.BaseItemExportView ): model = Comment fields = ['id', 'comment', 'created'] def get_queryset(self): comments = ( Comment.objects.filter(paragraph__chapter__module=self.module) | Comment.objects.filter(chapter__module=self.module) | Comment.objects.filter( parent_comment__paragraph__chapter__module=self.module) | Comment.objects.filter(parent_comment__chapter__module=self.module) ) return comments def get_base_filename(self): return '%s_%s' % (self.project.slug, timezone.now().strftime('%Y%m%dT%H%M%S'))
from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from adhocracy4.comments.models import Comment from adhocracy4.exports import mixins as export_mixins from adhocracy4.exports import views as export_views from adhocracy4.projects.mixins import ProjectMixin from meinberlin.apps.exports import mixins as mb_export_mixins from meinberlin.apps.exports import register_export @register_export(_('Documents with comments')) class DocumentExportView( export_views.BaseItemExportView, export_mixins.ExportModelFieldsMixin, mb_export_mixins.UserGeneratedContentExportMixin, export_mixins.ItemExportWithLinkMixin, export_mixins.ItemExportWithRatesMixin, mb_export_mixins.ItemExportWithRepliesToMixin, ProjectMixin): model = Comment fields = ['id', 'comment', 'created'] def get_queryset(self): comments = ( Comment.objects.filter(paragraph__chapter__module=self.module) | Comment.objects.filter(chapter__module=self.module) | Comment.objects.filter( parent_comment__paragraph__chapter__module=self.module) | Comment.objects.filter(parent_comment__chapter__module=self.module) ) return comments def get_base_filename(self): return '%s_%s' % (self.project.slug, timezone.now().strftime('%Y%m%dT%H%M%S'))
agpl-3.0
Python
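Despite the subject line, the substantive change in the record above is the base-class order: the export mixins now precede BaseItemExportView instead of following it. With cooperative mixins, listing a class before its own subclass breaks C3 linearisation outright, and mixins placed after the concrete class never get a chance to override it. A self-contained illustration with toy classes (not the adhocracy4 ones):

class Base:
    def label(self):
        return 'base'

class Mixin(Base):
    def label(self):
        return 'mixin+' + super().label()

class Good(Mixin, Base):        # mixins first, concrete base last
    pass

print(Good().label())           # mixin+base

try:
    Bad = type('Bad', (Base, Mixin), {})   # base listed before its own subclass
except TypeError as exc:
    print(exc)                  # MRO conflict: no consistent linearisation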
1c6c31653889c8acb60a54dc1dc9ea0f8795f122
bump to next dev version: 0.6.7-dev
ocefpaf/ulmo,nathanhilbert/ulmo,cameronbracken/ulmo,nathanhilbert/ulmo,cameronbracken/ulmo,ocefpaf/ulmo
ulmo/version.py
ulmo/version.py
# set version number __version__ = '0.6.7-dev'
# set version number __version__ = '0.6.6'
bsd-3-clause
Python
8546e14e152c79f137e0db15e3cd7de71cd0e8b4
bump to next dev version: 0.7.3-dev
ulmo-dev/ulmo-common
ulmo/version.py
ulmo/version.py
# set version number __version__ = '0.7.3-dev'
# set version number __version__ = '0.7.2'
bsd-3-clause
Python
a8b43950610adb41a3de4c342c51d5b22fd5454b
Fix indents
maferelo/saleor,UITools/saleor,mociepka/saleor,mociepka/saleor,UITools/saleor,mociepka/saleor,maferelo/saleor,UITools/saleor,UITools/saleor,UITools/saleor,maferelo/saleor
saleor/product/forms.py
saleor/product/forms.py
import json from django import forms from django.utils.encoding import smart_text from django.utils.translation import pgettext_lazy from django_prices.templatetags.prices_i18n import gross from ..cart.forms import AddToCartForm class VariantChoiceField(forms.ModelChoiceField): discounts = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def label_from_instance(self, obj): variant_label = smart_text(obj) label = pgettext_lazy( 'Variant choice field label', '%(variant_label)s - %(price)s') % { 'variant_label': variant_label, 'price': gross(obj.get_price(discounts=self.discounts))} return label def update_field_data(self, product, cart): """ Function initializing fields custom data """ self.queryset = product.variants self.discounts = cart.discounts self.empty_label = None images_map = {variant.pk: [vi.image.image.url for vi in variant.variant_images.all()] for variant in product.variants.all()} self.widget.attrs['data-images'] = json.dumps(images_map) # Don't display select input if there are less than two variants if self.queryset.count() < 2: self.widget = forms.HiddenInput( {'value': product.variants.all()[0].pk}) class ProductForm(AddToCartForm): variant = VariantChoiceField(queryset=None) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) variant_field = self.fields['variant'] variant_field.update_field_data(self.product, self.cart) def get_variant(self, cleaned_data): return cleaned_data.get('variant')
import json from django import forms from django.utils.encoding import smart_text from django.utils.translation import pgettext_lazy from django_prices.templatetags.prices_i18n import gross from ..cart.forms import AddToCartForm class VariantChoiceField(forms.ModelChoiceField): discounts = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def label_from_instance(self, obj): variant_label = smart_text(obj) label = pgettext_lazy( 'Variant choice field label', '%(variant_label)s - %(price)s') % { 'variant_label': variant_label, 'price': gross(obj.get_price(discounts=self.discounts))} return label def update_field_data(self, product, cart): """ Function initializing fields custom data """ self.queryset = product.variants self.discounts = cart.discounts self.empty_label = None images_map = {variant.pk: [vi.image.image.url for vi in variant.variant_images.all()] for variant in product.variants.all()} self.widget.attrs['data-images'] = json.dumps(images_map) # Don't display select input if there are less than two variants if self.queryset.count() < 2: self.widget = forms.HiddenInput( {'value': product.variants.all()[0].pk}) class ProductForm(AddToCartForm): variant = VariantChoiceField(queryset=None) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) variant_field = self.fields['variant'] variant_field.update_field_data(self.product, self.cart) def get_variant(self, cleaned_data): return cleaned_data.get('variant')
bsd-3-clause
Python
833f8ce0673701eb64fb20ee067ccd8c58e473c6
Correct wrong inheritance on sponsorship_typo3 child_depart wizard.
MickSandoz/compassion-switzerland,ecino/compassion-switzerland,CompassionCH/compassion-switzerland,ndtran/compassion-switzerland,ndtran/compassion-switzerland,CompassionCH/compassion-switzerland,MickSandoz/compassion-switzerland,CompassionCH/compassion-switzerland,eicher31/compassion-switzerland,Secheron/compassion-switzerland,ecino/compassion-switzerland,eicher31/compassion-switzerland,Secheron/compassion-switzerland,ecino/compassion-switzerland,eicher31/compassion-switzerland
child_sync_typo3/wizard/child_depart_wizard.py
child_sync_typo3/wizard/child_depart_wizard.py
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Emanuel Cino <[email protected]> # # The licence is in the file __openerp__.py # ############################################################################## from openerp.osv import orm from ..model.sync_typo3 import Sync_typo3 class child_depart_wizard(orm.TransientModel): _inherit = 'child.depart.wizard' def child_depart(self, cr, uid, ids, context=None): wizard = self.browse(cr, uid, ids[0], context) child = wizard.child_id res = True if child.state == 'I': res = child.child_remove_from_typo3() res = super(child_depart_wizard, self).child_depart( cr, uid, ids, context) and res return res or Sync_typo3.typo3_index_error(cr, uid, self, context)
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2014 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: Emanuel Cino <[email protected]> # # The licence is in the file __openerp__.py # ############################################################################## from openerp.osv import orm from ..model.sync_typo3 import Sync_typo3 class end_sponsorship_wizard(orm.TransientModel): _inherit = 'end.sponsorship.wizard' def child_depart(self, cr, uid, ids, context=None): wizard = self.browse(cr, uid, ids[0], context) child = wizard.child_id res = True if child.state == 'I': res = child.child_remove_from_typo3() res = super(end_sponsorship_wizard, self).child_depart( cr, uid, ids, context) and res return res or Sync_typo3.typo3_index_error(cr, uid, self, context)
agpl-3.0
Python
97c5cb0312d7b093752376a373cc3773fcf44f34
Add SunOS to the basic service module
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
salt/modules/service.py
salt/modules/service.py
''' The default service module, if not otherwise specified salt will fall back to this basic module ''' import os grainmap = { 'Arch': '/etc/rc.d', 'Debian': '/etc/init.d', 'Fedora': '/etc/init.d', 'RedHat': '/etc/init.d', 'Ubuntu': '/etc/init.d', 'Gentoo': '/etc/init.d', 'CentOS': '/etc/init.d', 'SunOS': '/etc/init.d', } def __virtual__(): ''' Only work on systems which default to systemd ''' # Disable on these platforms, specific service modules exist: disable = [ 'RedHat', 'CentOS', 'Scientific', 'Fedora', 'Gentoo', 'Ubuntu', 'FreeBSD', 'Windows', ] if __grains__['os'] in disable: return False return 'service' def start(name): ''' Start the specified service CLI Example:: salt '*' service.start <service name> ''' cmd = os.path.join(grainmap[__grains__['os']], name + ' start') return not __salt__['cmd.retcode'](cmd) def stop(name): ''' Stop the specified service CLI Example:: salt '*' service.stop <service name> ''' cmd = os.path.join(grainmap[__grains__['os']], name + ' stop') return not __salt__['cmd.retcode'](cmd) def restart(name): ''' Restart the named service CLI Example:: salt '*' service.restart <service name> ''' cmd = os.path.join(grainmap[__grains__['os']], name + ' restart') return not __salt__['cmd.retcode'](cmd) def status(name, sig=None): ''' Return the status for a service, returns the PID or an empty string if the service is running or not, pass a signature to use to find the service via ps CLI Example:: salt '*' service.status <service name> [service signature] ''' sig = name if not sig else sig cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format( __grains__, sig) return __salt__['cmd.run'](cmd).strip()
''' The default service module, if not otherwise specified salt will fall back to this basic module ''' import os grainmap = { 'Arch': '/etc/rc.d', 'Debian': '/etc/init.d', 'Fedora': '/etc/init.d', 'RedHat': '/etc/init.d', 'Ubuntu': '/etc/init.d', 'Gentoo': '/etc/init.d', 'CentOS': '/etc/init.d', } def __virtual__(): ''' Only work on systems which default to systemd ''' # Disable on these platforms, specific service modules exist: disable = [ 'RedHat', 'CentOS', 'Scientific', 'Fedora', 'Gentoo', 'Ubuntu', 'FreeBSD', 'Windows', ] if __grains__['os'] in disable: return False return 'service' def start(name): ''' Start the specified service CLI Example:: salt '*' service.start <service name> ''' cmd = os.path.join(grainmap[__grains__['os']], name + ' start') return not __salt__['cmd.retcode'](cmd) def stop(name): ''' Stop the specified service CLI Example:: salt '*' service.stop <service name> ''' cmd = os.path.join(grainmap[__grains__['os']], name + ' stop') return not __salt__['cmd.retcode'](cmd) def restart(name): ''' Restart the named service CLI Example:: salt '*' service.restart <service name> ''' cmd = os.path.join(grainmap[__grains__['os']], name + ' restart') return not __salt__['cmd.retcode'](cmd) def status(name, sig=None): ''' Return the status for a service, returns the PID or an empty string if the service is running or not, pass a signature to use to find the service via ps CLI Example:: salt '*' service.status <service name> [service signature] ''' sig = name if not sig else sig cmd = "{0[ps]} | grep {1} | grep -v grep | awk '{{print $2}}'".format( __grains__, sig) return __salt__['cmd.run'](cmd).strip()
apache-2.0
Python
e40a9b3676101d7d7bd65cff8487f48a285f3139
Fix typo
googleapis/google-auth-library-python,googleapis/google-auth-library-python
scripts/obtain_user_auth.py
scripts/obtain_user_auth.py
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This program obtains a set of user credentials. These credentials are needed to run the system test for OAuth2 credentials. It's expected that a developer will run this program manually once to obtain a refresh token. It's highly recommended to use a Google account created specifically for testing. """ import json import os from oauth2client import client from oauth2client import tools HERE = os.path.dirname(__file__) CLIENT_SECRETS_PATH = os.path.abspath(os.path.join( HERE, '..', 'system_tests', 'data', 'client_secret.json')) AUTHORIZED_USER_PATH = os.path.abspath(os.path.join( HERE, '..', 'system_tests', 'data', 'authorized_user.json')) SCOPES = ['email', 'profile'] class NullStorage(client.Storage): """Null storage implementation to prevent oauth2client from failing on storage.put.""" def locked_put(self, credentials): pass def main(): flow = client.flow_from_clientsecrets(CLIENT_SECRETS_PATH, SCOPES) print('Starting credentials flow...') credentials = tools.run_flow(flow, NullStorage()) # Save the credentials in the same format as the Cloud SDK's authorized # user file. data = { 'type': 'authorized_user', 'client_id': flow.client_id, 'client_secret': flow.client_secret, 'refresh_token': credentials.refresh_token } with open(AUTHORIZED_USER_PATH, 'w') as fh: json.dump(data, fh, indent=4) print('Created {}.'.format(AUTHORIZED_USER_PATH)) if __name__ == '__main__': main()
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This program obtains a set of user credentials. These credentials are needed to run the system test for OAuth2 credentials. It's expected that a developer will run this program manually once to obtain a refresh token. It's highly recommended to use a Google account created specifically created for testing. """ import json import os from oauth2client import client from oauth2client import tools HERE = os.path.dirname(__file__) CLIENT_SECRETS_PATH = os.path.abspath(os.path.join( HERE, '..', 'system_tests', 'data', 'client_secret.json')) AUTHORIZED_USER_PATH = os.path.abspath(os.path.join( HERE, '..', 'system_tests', 'data', 'authorized_user.json')) SCOPES = ['email', 'profile'] class NullStorage(client.Storage): """Null storage implementation to prevent oauth2client from failing on storage.put.""" def locked_put(self, credentials): pass def main(): flow = client.flow_from_clientsecrets(CLIENT_SECRETS_PATH, SCOPES) print('Starting credentials flow...') credentials = tools.run_flow(flow, NullStorage()) # Save the credentials in the same format as the Cloud SDK's authorized # user file. data = { 'type': 'authorized_user', 'client_id': flow.client_id, 'client_secret': flow.client_secret, 'refresh_token': credentials.refresh_token } with open(AUTHORIZED_USER_PATH, 'w') as fh: json.dump(data, fh, indent=4) print('Created {}.'.format(AUTHORIZED_USER_PATH)) if __name__ == '__main__': main()
apache-2.0
Python
f1e1513cf739b8f25b9364226cc8ce987a47fa56
Fix check for helpers with staff perms
916253/Kurisu
utils/checks.py
utils/checks.py
import discord from discord import app_commands from discord.ext import commands from utils.configuration import StaffRank from typing import Union, TYPE_CHECKING if TYPE_CHECKING: from kurisu import Kurisu class InsufficientStaffRank(commands.CheckFailure): message: str def is_staff(role: str): async def predicate(ctx: commands.Context): if check_staff(ctx.bot, role, ctx.author.id) or (ctx.guild and ctx.author == ctx.guild.owner): return True raise InsufficientStaffRank(f"You must be at least {role} to use this command.") return commands.check(predicate) def is_staff_app(role: str): async def predicate(interaction: discord.Interaction) -> bool: if (interaction.guild and interaction.user == interaction.guild.owner) or check_staff(interaction.client, role, interaction.user.id): # type: ignore return True raise InsufficientStaffRank(f"You must be at least {role} to use this command.") return app_commands.check(predicate) def check_staff(bot: 'Kurisu', role: str, user_id: int) -> bool: position = bot.configuration.staff.get(user_id) if not position and bot.configuration.helpers.get(user_id): position = StaffRank.Helper if position is None: return False return position <= StaffRank[role] async def check_bot_or_staff(ctx: Union[commands.Context, discord.Interaction], target: Union[discord.Member, discord.User], action: str): bot = ctx.bot if isinstance(ctx, commands.Context) else ctx.client if target.bot: who = "a bot" elif check_staff(bot, "Helper", target.id): who = "another staffer" else: return False if isinstance(ctx, commands.Context): await ctx.send(f"You can't {action} {who} with this command!") else: await ctx.response.send_message(f"You can't {action} {who} with this command!", ephemeral=True) return True def check_if_user_can_sr(): async def predicate(ctx): author = ctx.author if not check_staff(ctx.bot, 'Helper', author.id) and (ctx.bot.roles['Verified'] not in author.roles) and ( ctx.bot.roles['Trusted'] not in author.roles) and (ctx.bot.roles['Retired Staff'] not in author.roles): return False return True return commands.check(predicate) def check_if_user_can_ready(): async def predicate(ctx): channel = ctx.channel if channel != ctx.bot.channels['newcomers']: return False return True return commands.check(predicate)
import discord from discord import app_commands from discord.ext import commands from utils.configuration import StaffRank from typing import Union, TYPE_CHECKING if TYPE_CHECKING: from kurisu import Kurisu class InsufficientStaffRank(commands.CheckFailure): message: str def is_staff(role: str): async def predicate(ctx: commands.Context): if check_staff(ctx.bot, role, ctx.author.id) or (ctx.guild and ctx.author == ctx.guild.owner): return True raise InsufficientStaffRank(f"You must be at least {role} to use this command.") return commands.check(predicate) def is_staff_app(role: str): async def predicate(interaction: discord.Interaction) -> bool: if (interaction.guild and interaction.user == interaction.guild.owner) or check_staff(interaction.client, role, interaction.user.id): # type: ignore return True raise InsufficientStaffRank(f"You must be at least {role} to use this command.") return app_commands.check(predicate) def check_staff(bot: 'Kurisu', role: str, user_id: int) -> bool: if bot.configuration.helpers.get(user_id): position = StaffRank.Helper else: position = bot.configuration.staff.get(user_id) if position is None: return False return position <= StaffRank[role] async def check_bot_or_staff(ctx: Union[commands.Context, discord.Interaction], target: Union[discord.Member, discord.User], action: str): bot = ctx.bot if isinstance(ctx, commands.Context) else ctx.client if target.bot: who = "a bot" elif check_staff(bot, "Helper", target.id): who = "another staffer" else: return False if isinstance(ctx, commands.Context): await ctx.send(f"You can't {action} {who} with this command!") else: await ctx.response.send_message(f"You can't {action} {who} with this command!", ephemeral=True) return True def check_if_user_can_sr(): async def predicate(ctx): author = ctx.author if not check_staff(ctx.bot, 'Helper', author.id) and (ctx.bot.roles['Verified'] not in author.roles) and ( ctx.bot.roles['Trusted'] not in author.roles) and (ctx.bot.roles['Retired Staff'] not in author.roles): return False return True return commands.check(predicate) def check_if_user_can_ready(): async def predicate(ctx): channel = ctx.channel if channel != ctx.bot.channels['newcomers']: return False return True return commands.check(predicate)
apache-2.0
Python
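The fix above changes the lookup order: previously a helper entry unconditionally set the rank to Helper, demoting users who also hold a real staff rank; now the staff rank is read first and helper is only the fallback. A sketch with hypothetical rank values (the real check, `position <= StaffRank[role]`, means numerically smaller ranks are more privileged):

from enum import IntEnum

class StaffRank(IntEnum):      # hypothetical values, order matches the <= check
    Admin = 1
    Helper = 2

staff = {1234: StaffRank.Admin}       # the user holds a real staff rank...
helpers = {1234: 'some-console'}      # ...and is also registered as a helper

# old order: helper entry consulted first, masking the Admin rank
position = StaffRank.Helper if helpers.get(1234) else staff.get(1234)
print(position <= StaffRank.Admin)    # False -> an Admin fails an Admin check

# new order: staff rank first, helper only as a fallback
position = staff.get(1234)
if not position and helpers.get(1234):
    position = StaffRank.Helper
print(position <= StaffRank.Admin)    # True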
6cba22ad2c26185f6b3454116c3e31ea14160db8
Make collect-sprite-metadata.py work from any directory
johnsoft/dustcourse,johnsoft/dustcourse,johnsoft/dustcourse,johnsoft/dustcourse,johnsoft/dustcourse,johnsoft/dustcourse
scripts/collect-sprite-metadata.py
scripts/collect-sprite-metadata.py
from collections import OrderedDict import glob import json import os def main(): c = collect() c.sort(key = lambda x: x[0]) c = OrderedDict(c) print(json.dumps(c, separators=(',',':'))) def collect(): root = os.path.dirname(os.path.abspath(__file__)) + '/../build/website/assets/sprites' hitboxes = [] for (dirpath, dirnames, filenames) in os.walk(root): for fn in glob.glob(dirpath + '/*.json'): metadata = json.load(open(fn)) name = os.path.relpath(fn, root).replace('\\', '/')[:-5] hitboxes.append((name, metadata['hitbox'])) return hitboxes if __name__ == '__main__': main()
from collections import OrderedDict import glob import json import os def main(): c = collect() c.sort(key = lambda x: x[0]) c = OrderedDict(c) print(json.dumps(c, separators=(',',':'))) def collect(): root = '../build/website/static/sprites' hitboxes = [] for (dirpath, dirnames, filenames) in os.walk(root): for fn in glob.glob(dirpath + '/*.json'): metadata = json.load(open(fn)) name = os.path.relpath(fn, root).replace('\\', '/')[:-5] hitboxes.append((name, metadata['hitbox'])) return hitboxes if __name__ == '__main__': main()
mit
Python
2192219d92713c6eb76593d0c6c29413d040db6a
Revert "Added script for cron job to load surveys to database."
paepcke/json_to_relation,paepcke/json_to_relation,paepcke/json_to_relation,paepcke/json_to_relation
scripts/cronRefreshEdxQualtrics.py
scripts/cronRefreshEdxQualtrics.py
from surveyextractor import QualtricsExtractor import getopt, sys # Script for scheduling regular EdxQualtrics updates # Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r" qe = QualtricsExtractor() opts, args = getopt.getopt(sys.argv[1:], 'amsr', ['--reset', '--loadmeta', '--loadsurveys', '--loadresponses']) for opt, arg in opts: if opt in ('-a', '--reset'): qe.resetMetadata() qe.resetSurveys() qe.resetResponses() elif opt in ('-m', '--loadmeta'): qe.loadSurveyMetadata() elif opt in ('-s', '--loadsurvey'): qe.resetSurveys() qe.loadSurveyData() elif opt in ('-r', '--loadresponses'): qe.loadResponseData()
from surveyextractor import QualtricsExtractor import getopt import sys ### Script for scheduling regular EdxQualtrics updates ### Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r" # Append directory for dependencies to PYTHONPATH sys.path.append("/home/dataman/Code/qualtrics_etl/src/qualtrics_etl/") qe = QualtricsExtractor() opts, args = getopt.getopt(sys.argv[1:], 'amsr', ['--reset', '--loadmeta', '--loadsurveys', '--loadresponses']) for opt, arg in opts: if opt in ('-a', '--reset'): qe.resetMetadata() qe.resetSurveys() qe.resetResponses() elif opt in ('-m', '--loadmeta'): qe.loadSurveyMetadata() elif opt in ('-s', '--loadsurvey'): qe.resetSurveys() qe.loadSurveyData() elif opt in ('-r', '--loadresponses'): qe.loadResponseData()
bsd-3-clause
Python
98311b8b80d28ac6e6d92dbae3bcf987d5027e7a
Fix for housekeeping script error
damianmoore/photo-manager,damianmoore/photo-manager,damianmoore/photo-manager,damianmoore/photo-manager
photonix/photos/management/commands/housekeeping.py
photonix/photos/management/commands/housekeeping.py
import os from pathlib import Path from shutil import rmtree from time import sleep from django.conf import settings from django.core.management.base import BaseCommand from photonix.photos.models import Photo, Task from photonix.photos.utils.thumbnails import THUMBNAILER_VERSION class Command(BaseCommand): help = 'Makes sure that if there have been upgrades to thumbnailing or image analysis code then jobs get rescheduled.' def housekeeping(self): # Remove old cache directories try: for directory in os.listdir(settings.THUMBNAIL_ROOT): if directory not in ['photofile']: path = Path(settings.THUMBNAIL_ROOT) / directory print(f'Removing old cache directory {path}') rmtree(path) except FileNotFoundError: # In case thumbnail dir hasn't been created yet pass # Regenerate any outdated thumbnails photos = Photo.objects.filter(thumbnailed_version__lt=THUMBNAILER_VERSION) if photos.count(): print(f'Rescheduling {photos.count()} photos to have their thumbnails regenerated') for photo in photos: Task( type='generate_thumbnails', subject_id=photo.id, library=photo.library).save() def handle(self, *args, **options): self.housekeeping()
import os from pathlib import Path from shutil import rmtree from time import sleep from django.conf import settings from django.core.management.base import BaseCommand from photonix.photos.models import Photo, Task from photonix.photos.utils.thumbnails import THUMBNAILER_VERSION class Command(BaseCommand): help = 'Makes sure that if there have been upgrades to thumbnailing or image analysis code then jobs get rescheduled.' def housekeeping(self): # Remove old cache directories for directory in os.listdir(settings.THUMBNAIL_ROOT): if directory not in ['photofile']: path = Path(settings.THUMBNAIL_ROOT) / directory print(f'Removing old cache directory {path}') rmtree(path) # Regenerate any outdated thumbnails photos = Photo.objects.filter(thumbnailed_version__lt=THUMBNAILER_VERSION) if photos.count(): print(f'Rescheduling {photos.count()} photos to have their thumbnails regenerated') for photo in photos: Task( type='generate_thumbnails', subject_id=photo.id, library=photo.library).save() def handle(self, *args, **options): self.housekeeping()
agpl-3.0
Python
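The fix above wraps the cache sweep so a thumbnail root that has not been created yet no longer aborts housekeeping. The guard pattern in isolation, with a hypothetical cache_root standing in for settings.THUMBNAIL_ROOT:

import os
from pathlib import Path

cache_root = '/tmp/example-cache'  # assumption; the record uses settings.THUMBNAIL_ROOT
keep = {'photofile'}

try:
    for name in os.listdir(cache_root):
        if name not in keep:
            # The record calls shutil.rmtree here; printing keeps the
            # sketch non-destructive.
            print('would remove', Path(cache_root) / name)
except FileNotFoundError:
    pass  # root not created yet: nothing to clean, and no crash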
3fadef637ad17458f629a4baeba7fd38205a1510
Bump Katib Python SDK to 0.12.0rc0 version (#1640)
kubeflow/katib,kubeflow/katib,kubeflow/katib,kubeflow/katib,kubeflow/katib,kubeflow/katib
sdk/python/v1beta1/setup.py
sdk/python/v1beta1/setup.py
# Copyright 2021 The Kubeflow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools with open('requirements.txt') as f: REQUIRES = f.readlines() setuptools.setup( name='kubeflow-katib', version='0.12.0rc0', author="Kubeflow Authors", author_email='[email protected]', license="Apache License Version 2.0", url="https://github.com/kubeflow/katib/tree/master/sdk/python/v1beta1", description="Katib Python SDK for APIVersion v1beta1", long_description="Katib Python SDK for APIVersion v1beta1", packages=setuptools.find_packages( include=("kubeflow*")), package_data={}, include_package_data=False, zip_safe=False, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], install_requires=REQUIRES )
# Copyright 2021 The Kubeflow Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools with open('requirements.txt') as f: REQUIRES = f.readlines() setuptools.setup( name='kubeflow-katib', version='0.10.1', author="Kubeflow Authors", author_email='[email protected]', license="Apache License Version 2.0", url="https://github.com/kubeflow/katib/tree/master/sdk/python/v1beta1", description="Katib Python SDK for APIVersion v1beta1", long_description="Katib Python SDK for APIVersion v1beta1", packages=setuptools.find_packages( include=("kubeflow*")), package_data={}, include_package_data=False, zip_safe=False, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], install_requires=REQUIRES )
apache-2.0
Python
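Version strings like 0.12.0rc0 are PEP 440 pre-releases, so default dependency resolution skips them unless the requirement pins them exactly or pre-releases are explicitly allowed. A quick check of how the two versions in the record order, using the packaging library:

from packaging.version import Version  # pip install packaging

v_old, v_new = Version('0.10.1'), Version('0.12.0rc0')
assert v_new > v_old                 # the bump is an upgrade...
assert v_new < Version('0.12.0')     # ...but still precedes the final release
assert v_new.is_prerelease           # so plain resolvers skip it by default
print(v_old, '<', v_new, '< 0.12.0')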
8517803a2cb3f3dd46911ec63acdeae283f23efd
Increase fund graph detail
felamaslen/budget,felamaslen/budget,felamaslen/budget,felamaslen/budget,felamaslen/budget,felamaslen/budget
srv/config.py
srv/config.py
""" Global configuration variables """ import os.path PIE_TOLERANCE = 0.075 PIE_DETAIL = 30 GRAPH_FUND_HISTORY_DETAIL = 200 OVERVIEW_NUM_LAST = 25 OVERVIEW_NUM_FUTURE = 10 START_YEAR = 2014 START_MONTH = 9 LIST_CATEGORIES = ('funds', 'in', 'bills', 'food', 'general', 'holiday', 'social') # common columns are added programmatically LIST_DATA_FORM_SCHEMA = { 'funds': { 'units': ('float', True), }, 'in': { }, 'bills': { }, 'food': { 'category': ('string', True), 'shop': ('string', True) }, 'general': { 'category': ('string', True), 'shop': ('string', True) }, 'holiday': { 'holiday': ('string', True), 'shop': ('string', True) }, 'social': { 'society': ('string', True), 'shop': ('string', True) } } IP_BAN_TIME = 60 IP_BAN_TRIES = 10 BASE_DIR = os.path.dirname(os.path.realpath(__file__)) + "/.." SERIAL_FILE = BASE_DIR + "/resources/serial" FUND_SALT = 'a963anx2' # error messages E_NO_PARAMS = "Not enough parameters given" E_BAD_PARAMS = "Invalid parameters given" E_NO_FORM = "Not enough form data given" E_BAD_FORM = "Invalid form data given" E_NO_ITEM = "Must supply an item (at least)"
""" Global configuration variables """ import os.path PIE_TOLERANCE = 0.075 PIE_DETAIL = 30 GRAPH_FUND_HISTORY_DETAIL = 100 OVERVIEW_NUM_LAST = 25 OVERVIEW_NUM_FUTURE = 10 START_YEAR = 2014 START_MONTH = 9 LIST_CATEGORIES = ('funds', 'in', 'bills', 'food', 'general', 'holiday', 'social') # common columns are added programmatically LIST_DATA_FORM_SCHEMA = { 'funds': { 'units': ('float', True), }, 'in': { }, 'bills': { }, 'food': { 'category': ('string', True), 'shop': ('string', True) }, 'general': { 'category': ('string', True), 'shop': ('string', True) }, 'holiday': { 'holiday': ('string', True), 'shop': ('string', True) }, 'social': { 'society': ('string', True), 'shop': ('string', True) } } IP_BAN_TIME = 60 IP_BAN_TRIES = 10 BASE_DIR = os.path.dirname(os.path.realpath(__file__)) + "/.." SERIAL_FILE = BASE_DIR + "/resources/serial" FUND_SALT = 'a963anx2' # error messages E_NO_PARAMS = "Not enough parameters given" E_BAD_PARAMS = "Invalid parameters given" E_NO_FORM = "Not enough form data given" E_BAD_FORM = "Invalid form data given" E_NO_ITEM = "Must supply an item (at least)"
mit
Python
899e3c9f81a43dcb94e290ce0a86f128bd94effd
Apply filter channel published on menu list (channel context processors)
YACOWS/opps,jeanmask/opps,YACOWS/opps,opps/opps,williamroot/opps,jeanmask/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,opps/opps,williamroot/opps,williamroot/opps,opps/opps,YACOWS/opps,opps/opps
opps/channel/context_processors.py
opps/channel/context_processors.py
# -*- coding: utf-8 -*- from django.utils import timezone from .models import Channel def channel_context(request): """ Channel context processors """ opps_menu = Channel.objects.filter(date_available__lte=timezone.now(), published=True) return {'opps_menu': opps_menu}
# -*- coding: utf-8 -*- from .models import Channel def channel_context(request): return {'opps_menu': Channel.objects.all()}
mit
Python
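The new context processor narrows the menu from all channels to those flagged published whose availability date has passed. The same two-condition filter as a reusable helper; date_available and published mirror the record's fields, while the helper name is an assumption:

from django.utils import timezone

def only_published(queryset):
    # Rows must be flagged published AND already available; a future
    # date_available keeps a channel out of the menu until it arrives.
    return queryset.filter(date_available__lte=timezone.now(),
                           published=True)

# e.g. only_published(Channel.objects.all()) inside the context processor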
54451c4030bfeece4ab2157afe1ee3f8f65c4dcb
Fix sentry_useremail "duplicate key" error (#16)
kmlebedev/getsentry-ldap-auth,Banno/getsentry-ldap-auth
sentry_ldap_auth/backend.py
sentry_ldap_auth/backend.py
from __future__ import absolute_import from django_auth_ldap.backend import LDAPBackend from django.conf import settings from sentry.models import ( Organization, OrganizationMember, UserOption, ) class SentryLdapBackend(LDAPBackend): def get_or_create_user(self, username, ldap_user): model = super(SentryLdapBackend, self).get_or_create_user(username, ldap_user) if len(model) < 1: return model user = model[0] user.is_managed = True try: from sentry.models import (UserEmail) except ImportError: pass else: userEmail = UserEmail.objects.get(user=user) if not userEmail: userEmail = UserEmail.objects.create(user=user) userEmail.email=ldap_user.attrs.get('mail', ' ')[0] or '' userEmail.save() # Check to see if we need to add the user to an organization if not settings.AUTH_LDAP_DEFAULT_SENTRY_ORGANIZATION: return model # If the user is already a member of an organization, leave them be orgs = OrganizationMember.objects.filter(user=user) if orgs != None and len(orgs) > 0: return model # Find the default organization organizations = Organization.objects.filter(name=settings.AUTH_LDAP_DEFAULT_SENTRY_ORGANIZATION) if not organizations or len(organizations) < 1: return model member_role = getattr(settings, 'AUTH_LDAP_SENTRY_ORGANIZATION_ROLE_TYPE', 'member') has_global_access = getattr(settings, 'AUTH_LDAP_SENTRY_ORGANIZATION_GLOBAL_ACCESS', False) # Add the user to the organization with global access OrganizationMember.objects.create( organization=organizations[0], user=user, role=member_role, has_global_access=has_global_access, flags=getattr(OrganizationMember.flags, 'sso:linked'), ) if not getattr(settings, 'AUTH_LDAP_SENTRY_SUBSCRIBE_BY_DEFAULT', True): UserOption.objects.set_value( user=user, project=None, key='subscribe_by_default', value='0', ) return model
from __future__ import absolute_import from django_auth_ldap.backend import LDAPBackend from django.conf import settings from sentry.models import ( Organization, OrganizationMember, UserOption, ) class SentryLdapBackend(LDAPBackend): def get_or_create_user(self, username, ldap_user): model = super(SentryLdapBackend, self).get_or_create_user(username, ldap_user) if len(model) < 1: return model user = model[0] user.is_managed = True try: from sentry.models import (UserEmail) except ImportError: pass else: UserEmail.objects.update( user=user, email=ldap_user.attrs.get('mail', ' ')[0] or '', ) # Check to see if we need to add the user to an organization if not settings.AUTH_LDAP_DEFAULT_SENTRY_ORGANIZATION: return model # If the user is already a member of an organization, leave them be orgs = OrganizationMember.objects.filter(user=user) if orgs != None and len(orgs) > 0: return model # Find the default organization organizations = Organization.objects.filter(name=settings.AUTH_LDAP_DEFAULT_SENTRY_ORGANIZATION) if not organizations or len(organizations) < 1: return model member_role = getattr(settings, 'AUTH_LDAP_SENTRY_ORGANIZATION_ROLE_TYPE', 'member') has_global_access = getattr(settings, 'AUTH_LDAP_SENTRY_ORGANIZATION_GLOBAL_ACCESS', False) # Add the user to the organization with global access OrganizationMember.objects.create( organization=organizations[0], user=user, role=member_role, has_global_access=has_global_access, flags=getattr(OrganizationMember.flags, 'sso:linked'), ) if not getattr(settings, 'AUTH_LDAP_SENTRY_SUBSCRIBE_BY_DEFAULT', True): UserOption.objects.set_value( user=user, project=None, key='subscribe_by_default', value='0', ) return model
apache-2.0
Python
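The "duplicate key" in the old code comes from calling update() on an unfiltered manager: Manager.update() rewrites every matching row, so every UserEmail row was pointed at one user/email pair, colliding with the table's unique constraint, which is consistent with the error named in the subject. The new code fetches or creates the single row instead; the usual Django shorthand for that step, which also avoids the DoesNotExist a bare get() raises when no row exists yet, is get_or_create. A sketch, passing the model in to stay framework-agnostic:

def set_user_email(UserEmail, user, address):
    # Fetch-or-create scoped to one user; other rows are never touched.
    user_email, _created = UserEmail.objects.get_or_create(user=user)
    user_email.email = address
    user_email.save()
    return user_email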
c55bf8d153c47500615b8ded3c95957be8ee70a3
Refactor JSONResponse views to include ListView
okfse/froide,stefanw/froide,LilithWittmann/froide,CodeforHawaii/froide,catcosmo/froide,ryankanno/froide,stefanw/froide,okfse/froide,catcosmo/froide,ryankanno/froide,fin/froide,LilithWittmann/froide,okfse/froide,ryankanno/froide,ryankanno/froide,LilithWittmann/froide,fin/froide,CodeforHawaii/froide,LilithWittmann/froide,stefanw/froide,catcosmo/froide,okfse/froide,catcosmo/froide,LilithWittmann/froide,CodeforHawaii/froide,okfse/froide,catcosmo/froide,ryankanno/froide,fin/froide,fin/froide,CodeforHawaii/froide,CodeforHawaii/froide,stefanw/froide,stefanw/froide
froide/helper/json_view.py
froide/helper/json_view.py
from django import http from django.views.generic import DetailView, ListView class JSONResponseMixin(object): def render_to_json_response(self, context): "Returns a JSON response containing 'context' as payload" return self.get_json_response(self.convert_context_to_json(context)) def get_json_response(self, content, **httpresponse_kwargs): "Construct an `HttpResponse` object." return http.HttpResponse(content, content_type='application/json', **httpresponse_kwargs) class JSONResponseListView(ListView, JSONResponseMixin): def get_context_data(self, **kwargs): self.format = "html" if "format" in self.kwargs: self.format = self.kwargs['format'] context = super(JSONResponseListView, self).get_context_data(**kwargs) return context def convert_context_to_json(self, context): "Convert the context dictionary into a JSON object" return "[%s]" % ",".join([o.as_json() for o in context['object_list']]) class JSONResponseDetailView(DetailView, JSONResponseMixin): def convert_context_to_json(self, context): "Convert the context dictionary into a JSON object" return context['object'].as_json() def get_context_data(self, **kwargs): self.format = "html" if "format" in self.kwargs: self.format = self.kwargs['format'] context = super(JSONResponseDetailView, self).get_context_data(**kwargs) return context def render_to_response(self, context): if self.format == "json": return self.render_to_json_response(context) else: return super(DetailView, self).render_to_response(context)
from django import http from django.views.generic import DetailView class JSONResponseDetailView(DetailView): def render_to_json_response(self, context): "Returns a JSON response containing 'context' as payload" return self.get_json_response(self.convert_context_to_json(context)) def get_json_response(self, content, **httpresponse_kwargs): "Construct an `HttpResponse` object." return http.HttpResponse(content, content_type='application/json', **httpresponse_kwargs) def convert_context_to_json(self, context): "Convert the context dictionary into a JSON object" return context['object'].as_json() def render_to_response(self, context): if self.format == "json": return self.render_to_json_response(context) else: return super(DetailView, self).render_to_response(context)
mit
Python
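With the JSON rendering pulled into a mixin, the same behaviour can be attached to other generic views. One detail worth noting: the list view in the record prepares self.format but does not override render_to_response the way the detail view does, so a subclass that wants JSON output still has to add that switch. A usage sketch, assuming the record's module is importable as froide.helper.json_view; the view and model names are hypothetical, and the model must provide the as_json() method the converters rely on:

from froide.helper.json_view import JSONResponseListView

class TeamListView(JSONResponseListView):
    # model = Team  # hypothetical model whose instances implement as_json()

    def render_to_response(self, context):
        # Same format switch the detail view applies; the list view in
        # the record leaves rendering to ListView, so it lives here.
        if self.format == "json":
            return self.render_to_json_response(context)
        return super(TeamListView, self).render_to_response(context)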
e03103c74a066184178980f1073505724e094394
Fix url order
stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten
stadt/urls.py
stadt/urls.py
from django.conf import settings, urls from django.conf.urls import static from django.contrib import admin urlpatterns = [ urls.url(r'^stadt/admin/', admin.site.urls), urls.url(r'^stadt/api/', urls.include('core.api_urls')), urls.url(r'^stadt/', urls.include('account.urls')), urls.url(r'^stadt/', urls.include('content.urls')), urls.url(r'^stadt/', urls.include('entities.urls')), urls.url(r'^stadt/', urls.include('features.articles.urls')), urls.url(r'^stadt/', urls.include('features.associations.urls')), urls.url(r'^stadt/', urls.include('features.conversations.urls')), urls.url(r'^stadt/', urls.include('features.memberships.urls')), urls.url(r'^stadt/', urls.include('features.sharing.urls')), urls.url(r'^stadt/', urls.include('features.subscriptions.urls')), urls.url(r'^stadt/', urls.include('features.tags.urls')), urls.url(r'^', urls.include('features.stadt.urls')), urls.url(r'^', urls.include('features.events.urls')), # matches /*/, should be included late, groups before gestalten urls.url(r'^', urls.include('features.groups.urls')), urls.url(r'^', urls.include('features.gestalten.urls')), # matches /*/*/, should be included at last urls.url(r'^', urls.include('features.content.urls')), ] + static.static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from django.conf import settings, urls from django.conf.urls import static from django.contrib import admin urlpatterns = [ urls.url(r'^stadt/admin/', admin.site.urls), urls.url(r'^stadt/api/', urls.include('core.api_urls')), urls.url(r'^stadt/', urls.include('account.urls')), urls.url(r'^stadt/', urls.include('content.urls')), urls.url(r'^stadt/', urls.include('entities.urls')), urls.url(r'^stadt/', urls.include('features.articles.urls')), urls.url(r'^stadt/', urls.include('features.associations.urls')), urls.url(r'^stadt/', urls.include('features.conversations.urls')), urls.url(r'^stadt/', urls.include('features.memberships.urls')), urls.url(r'^stadt/', urls.include('features.sharing.urls')), urls.url(r'^stadt/', urls.include('features.subscriptions.urls')), urls.url(r'^stadt/', urls.include('features.tags.urls')), urls.url(r'^', urls.include('features.stadt.urls')), urls.url(r'^', urls.include('features.events.urls')), urls.url(r'^', urls.include('features.content.urls')), urls.url(r'^', urls.include('features.groups.urls')), urls.url(r'^', urls.include('features.gestalten.urls')), ] + static.static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
agpl-3.0
Python
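The reordering matters because Django tries urlpatterns top to bottom and dispatches on the first regex that matches, so the r'^' includes are catch-alls that must come after every specific prefix; the comments added in the record encode exactly that. A minimal illustration with hypothetical views, using the same django.conf urls.url helper the record relies on (removed in modern Django):

from django.conf import urls

def specific_view(request):   # placeholder view bodies for illustration
    pass

def catch_all_view(request):
    pass

urlpatterns = [
    urls.url(r'^stadt/', specific_view),  # specific prefixes first
    urls.url(r'^', catch_all_view),       # r'^' matches everything, so last
]

# If catch_all_view came first, requests to /stadt/... would never
# reach specific_view.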
a72f72c16aaf1689fc364311afe3b42a6fed7eae
add examples
Tosta-Mixta/CourierToDovecot,Tosta-Mixta/CourierToDovecot
CourierToDovecot.py
CourierToDovecot.py
#!/usr/bin/env python2 # -*- coding: utf-8 -*- # ----------------------- # Author : jgo # Execute a perl script into all mailbox newly created, # on the Dovecot server. # ----------------------- import subprocess import os import logging from logging.handlers import RotatingFileHandler ## [Config VARS] -------------------------------------------- # Don't change this value! :) init_path = os.path.dirname(os.path.realpath(__file__)) # Change this value with your target dir (example : '/var/spool/mail') dest_path = '/var/spool/mail/' # Change this value with your script path (example: '/script.sh') script_path = '/courier-dovecot-migrate.pl --to-dovecot --convert --recursive' ## ---------------------------------------------------------- ## [Logging] ------------------------------------------------ # Create logger object used to write logfile logger = logging.getLogger() # Set your Log level to debug => Write everything logger.setLevel(logging.DEBUG) # Choose how you want your log format formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s') # Create a file (valhalla.log) in "append mode", max size => 30Mb # and 1 backup. logfile = 'valhalla.log' file_handler = RotatingFileHandler(logfile, 'a', 30000000, 1) # Assign our formatter and set to debug mode. file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(formatter) logger.addHandler(file_handler) # Create a second handler to display the log on the console steam_handler = logging.StreamHandler() steam_handler.setLevel(logging.DEBUG) logger.addHandler(steam_handler) ## ---------------------------------------------------------- print '====================================================' print '[SCRIPT STATUS]' print '====================================================' # Create a list with all directory output = subprocess.check_output( 'ls -R ' + dest_path + ' | grep "[[:alnum:]]\+@[[:alnum:]]\+" | tr ":" "/" | grep "/"', shell=True ) # Transform the output to a list output = output.split() obj = len(output) # Execute the script into all dir try: for path in output: os.chdir(path) logger.info('[Job] - Working on %s' % path) subprocess.call(init_path + script_path, shell=True) except SyntaxError: logger.error('SyntaxError, your target already exists.') print 'Please check your log file SyntaxError detected' except OSError: logger.error('OSError, this script can\'t be used on files') print 'Please check your log file OSError detected' finally: os.chdir(init_path) print '' print 'Number of objects handled : %s' % obj print 'Log file : %s' % logfile print '===================================================='
#!/usr/bin/env python2 # -*- coding: utf-8 -*- # ----------------------- # Author : jgo # Execute a perl script into all mailbox newly created, # on the Dovecot server. # ----------------------- import subprocess import os import logging from logging.handlers import RotatingFileHandler ## [Config VARS] -------------------------------------------- # Don't change this value! :) init_path = os.path.dirname(os.path.realpath(__file__)) # Change this value with your target dir dest_path = '/var/spool/mail/' # Change this value with your script path script_path = '/courier-dovecot-migrate.pl --to-dovecot --convert --recursive' ## ---------------------------------------------------------- ## [Logging] ------------------------------------------------ # Create logger object used to write logfile logger = logging.getLogger() # Set your Log level to debug => Write everything logger.setLevel(logging.DEBUG) # Choose how you want your log format formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s') # Create a file (valhalla.log) in "append mode", max size => 30Mb # and 1 backup. logfile = 'valhalla.log' file_handler = RotatingFileHandler(logfile, 'a', 30000000, 1) # Assign our formatter and set to debug mode. file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(formatter) logger.addHandler(file_handler) # Create a second handler to display the log on the console steam_handler = logging.StreamHandler() steam_handler.setLevel(logging.DEBUG) logger.addHandler(steam_handler) ## ---------------------------------------------------------- print '====================================================' print '[SCRIPT STATUS]' print '====================================================' # Create a list with all directory output = subprocess.check_output( 'ls -R ' + dest_path + ' | grep "[[:alnum:]]\+@[[:alnum:]]\+" | tr ":" "/" | grep "/"', shell=True ) # Transform the output to a list output = output.split() obj = len(output) # Execute the script into all dir try: for path in output: os.chdir(path) logger.info('[Job] - Working on %s' % path) subprocess.call(init_path + script_path, shell=True) except SyntaxError: logger.error('SyntaxError, your target already exists.') print 'Please check your log file SyntaxError detected' except OSError: logger.error('OSError, this script can\'t be used on files') print 'Please check your log file OSError detected' finally: os.chdir(init_path) print '' print 'Number of objects handled : %s' % obj print 'Log file : %s' % logfile print '===================================================='
apache-2.0
Python
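The directory discovery above shells out to an ls -R | grep | tr pipeline. A pure-Python alternative (not what the repository uses) that approximates the same selection with os.walk, where the regex is modelled on the pipeline's POSIX character classes:

import os
import re

dest_path = '/var/spool/mail/'  # same target directory as the record
mailbox_re = re.compile(r'[0-9A-Za-z]+@[0-9A-Za-z]+')

# Keep directories whose name looks like user@domain, roughly what the
# ls -R output filtered through grep/tr yields.
paths = [os.path.join(root, name)
         for root, dirs, _files in os.walk(dest_path)
         for name in dirs
         if mailbox_re.search(name)]
print(len(paths), 'mailbox directories found')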
83ed8a4fd258f351da2ea358613ff57dadbf03f6
Remove blank line
pythonindia/junction,ChillarAnand/junction,ChillarAnand/junction,pythonindia/junction,ChillarAnand/junction,pythonindia/junction,pythonindia/junction,ChillarAnand/junction
junction/proposals/permissions.py
junction/proposals/permissions.py
# -*- coding: utf-8 -*- # Third Party Stuff from django.core.exceptions import PermissionDenied # Junction Stuff from junction.conferences.models import ConferenceProposalReviewer from junction.base.constants import ConferenceStatus from .models import ProposalSectionReviewer def is_proposal_voting_allowed(proposal): return proposal.conference.status != ConferenceStatus.SCHEDULE_PUBLISHED def is_proposal_author(user, proposal): return user.is_authenticated() and proposal.author == user def is_proposal_reviewer(user, conference): authenticated = user.is_authenticated() is_reviewer = ConferenceProposalReviewer.objects.filter( reviewer=user.id, conference=conference, active=True).exists() return authenticated and is_reviewer def is_proposal_section_reviewer(user, conference, proposal): return user.is_authenticated() and ProposalSectionReviewer.objects.filter( conference_reviewer__reviewer=user, conference_reviewer__conference=conference, proposal_section=proposal.proposal_section, active=True).exists() def is_proposal_author_or_proposal_reviewer(user, conference, proposal): reviewer = is_proposal_reviewer(user, conference) author = is_proposal_author(user, proposal) return reviewer or author def is_proposal_author_or_proposal_section_reviewer(user, conference, proposal): return is_proposal_author(user, proposal) or \ is_proposal_section_reviewer(user, conference, proposal) def is_proposal_author_or_permisson_denied(user, proposal): if is_proposal_author(user, proposal): return True raise PermissionDenied def is_conference_moderator(user, conference): if user.is_superuser: return True users = [mod.moderator for mod in conference.moderators.all()] return user in users
# -*- coding: utf-8 -*- # Third Party Stuff from django.core.exceptions import PermissionDenied # Junction Stuff from junction.conferences.models import ConferenceProposalReviewer from junction.base.constants import ConferenceStatus from .models import ProposalSectionReviewer def is_proposal_voting_allowed(proposal): return proposal.conference.status != ConferenceStatus.SCHEDULE_PUBLISHED def is_proposal_author(user, proposal): return user.is_authenticated() and proposal.author == user def is_proposal_reviewer(user, conference): authenticated = user.is_authenticated() is_reviewer = ConferenceProposalReviewer.objects.filter( reviewer=user.id, conference=conference, active=True).exists() return authenticated and is_reviewer def is_proposal_section_reviewer(user, conference, proposal): return user.is_authenticated() and ProposalSectionReviewer.objects.filter( conference_reviewer__reviewer=user, conference_reviewer__conference=conference, proposal_section=proposal.proposal_section, active=True).exists() def is_proposal_author_or_proposal_reviewer(user, conference, proposal): reviewer = is_proposal_reviewer(user, conference) author = is_proposal_author(user, proposal) return reviewer or author def is_proposal_author_or_proposal_section_reviewer(user, conference, proposal): return is_proposal_author(user, proposal) or \ is_proposal_section_reviewer(user, conference, proposal) def is_proposal_author_or_permisson_denied(user, proposal): if is_proposal_author(user, proposal): return True raise PermissionDenied def is_conference_moderator(user, conference): if user.is_superuser: return True users = [mod.moderator for mod in conference.moderators.all()] return user in users
mit
Python
f8ea5ef37280366b4b3991442e406952bb0575b3
Create calculate_cosine_distance.py
bt3gl/Advanced-Machine-Learning
k-NN/calculate_cosine_distance.py
k-NN/calculate_cosine_distance.py
''' Calculates the cosine distance for an input data ''' import math import numpy as np import scipy.io __author__ = """Marina von Steinkirch""" def cosineDistance(x, y): ''' This function computes the cosine distance between feature vectors x and y. This distance is frequently used for text classification. It varies between 0 and 1. The distance is 0 if x==y. ''' denom = math.sqrt(sum(x**2)*sum(y**2)) dist = 1.0-(np.dot(x, y.conj().transpose()))/denom return round(dist, 6) def print_to_file(distances): with open('cos_distances.dat', 'w') as f: for i, col in enumerate(distances): f.write('# distance for example %d to others\n' %(i+1)) for item in col: f.write(str(item) + ' ') f.write('\n') def main(): f = scipy.io.loadmat('cvdataset.mat') traindata = f['traindata'] trainlabels = f['trainlabels'] testdata = f['testdata'] evaldata = f['evaldata'] testlabels = f['testlabels'] distances = [] for i in range(len(trainlabels)): first_train_example_class1 = traindata[i] aux = [] for j in range (len(trainlabels)): first_train_example_class2 = traindata[j] d = cosineDistance(first_train_example_class1, first_train_example_class2) aux.append(d) distances.append(aux) print_to_file(distances) if __name__ == '__main__': main()
''' Calculates the cosine distance for an input data ''' import math import numpy as np import scipy.io __author__ = """Mari Wahl""" def cosineDistance(x, y): ''' This function computes the cosine distance between feature vectors x and y. This distance is frequently used for text classification. It varies between 0 and 1. The distance is 0 if x==y. ''' denom = math.sqrt(sum(x**2)*sum(y**2)) dist = 1.0-(np.dot(x, y.conj().transpose()))/denom return round(dist, 6) def print_to_file(distances): with open('cos_distances.dat', 'w') as f: for i, col in enumerate(distances): f.write('# distance for example %d to others\n' %(i+1)) for item in col: f.write(str(item) + ' ') f.write('\n') def main(): f = scipy.io.loadmat('cvdataset.mat') traindata = f['traindata'] trainlabels = f['trainlabels'] testdata = f['testdata'] evaldata = f['evaldata'] testlabels = f['testlabels'] distances = [] for i in range(len(trainlabels)): first_train_example_class1 = traindata[i] aux = [] for j in range (len(trainlabels)): first_train_example_class2 = traindata[j] d = cosineDistance(first_train_example_class1, first_train_example_class2) aux.append(d) distances.append(aux) print_to_file(distances) if __name__ == '__main__': main()
mit
Python
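The hand-rolled cosineDistance above computes 1 minus the cosine similarity; scipy ships the same quantity as scipy.spatial.distance.cosine, which makes a handy cross-check:

import numpy as np
from scipy.spatial.distance import cosine

x = np.array([1.0, 0.0, 2.0])
y = np.array([0.5, 1.0, 1.0])

# Same formula as the record's cosineDistance: 1 - x.y / sqrt(|x|^2 |y|^2)
d = 1.0 - np.dot(x, y) / np.sqrt((x ** 2).sum() * (y ** 2).sum())
assert abs(d - cosine(x, y)) < 1e-9
print(round(d, 6))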
2cde35bb6f948f861026921daf7fe24b353af273
Add bulleted and numbered list to CKEditor
vikoivun/kerrokantasi,stephawe/kerrokantasi,stephawe/kerrokantasi,City-of-Helsinki/kerrokantasi,stephawe/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,vikoivun/kerrokantasi,vikoivun/kerrokantasi,City-of-Helsinki/kerrokantasi
kerrokantasi/settings/__init__.py
kerrokantasi/settings/__init__.py
from .util import get_settings, load_local_settings, load_secret_key from . import base settings = get_settings(base) load_local_settings(settings, "local_settings") load_secret_key(settings) if not settings["DEBUG"] and settings["JWT_AUTH"]["JWT_SECRET_KEY"] == "kerrokantasi": raise ValueError("Refusing to run out of DEBUG mode with insecure JWT secret key.") settings['CKEDITOR_CONFIGS'] = { 'default': { 'stylesSet': [ { "name": 'Lead', "element": 'p', "attributes": {'class': 'lead'}, }, ], 'contentsCss': ['%sckeditor/ckeditor/contents.css' % settings['STATIC_URL'], '.lead { font-weight: bold;}'], 'extraAllowedContent': 'video [*]{*}(*);source [*]{*}(*);', 'extraPlugins': 'video', 'toolbar': [ ['Styles', 'Format'], ['Bold', 'Italic', 'Underline', 'StrikeThrough', 'Undo', 'Redo'], ['Link', 'Unlink', 'Anchor'], ['BulletedList', 'NumberedList'], ['Image', 'Video', 'Flash', 'Table', 'HorizontalRule'], ['TextColor', 'BGColor'], ['Smiley', 'SpecialChar'], ['Source'] ] }, } globals().update(settings) # Export the settings for Django to use.
from .util import get_settings, load_local_settings, load_secret_key from . import base settings = get_settings(base) load_local_settings(settings, "local_settings") load_secret_key(settings) if not settings["DEBUG"] and settings["JWT_AUTH"]["JWT_SECRET_KEY"] == "kerrokantasi": raise ValueError("Refusing to run out of DEBUG mode with insecure JWT secret key.") settings['CKEDITOR_CONFIGS'] = { 'default': { 'stylesSet': [ { "name": 'Lead', "element": 'p', "attributes": {'class': 'lead'}, }, ], 'contentsCss': ['%sckeditor/ckeditor/contents.css' % settings['STATIC_URL'], '.lead { font-weight: bold;}'], 'extraAllowedContent': 'video [*]{*}(*);source [*]{*}(*);', 'extraPlugins': 'video', 'toolbar': [ ['Styles', 'Format'], ['Bold', 'Italic', 'Underline', 'StrikeThrough', 'Undo', 'Redo'], ['Link', 'Unlink', 'Anchor'], ['Image', 'Video', 'Flash', 'Table', 'HorizontalRule'], ['TextColor', 'BGColor'], ['Smiley', 'SpecialChar'], ['Source'] ] }, } globals().update(settings) # Export the settings for Django to use.
mit
Python
d09cc197d11efa2181ce68ef4212cb9df5ee285c
add daemon argument to launcher
commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot
selfdrive/athena/manage_athenad.py
selfdrive/athena/manage_athenad.py
#!/usr/bin/env python3 import time from multiprocessing import Process from common.params import Params from selfdrive.manager.process import launcher from selfdrive.swaglog import cloudlog from selfdrive.version import get_version, is_dirty ATHENA_MGR_PID_PARAM = "AthenadPid" def main(): params = Params() dongle_id = params.get("DongleId").decode('utf-8') cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty()) try: while 1: cloudlog.info("starting athena daemon") proc = Process(name='athenad', target=launcher, args=('selfdrive.athena.athenad', 'athenad')) proc.start() proc.join() cloudlog.event("athenad exited", exitcode=proc.exitcode) time.sleep(5) except Exception: cloudlog.exception("manage_athenad.exception") finally: params.delete(ATHENA_MGR_PID_PARAM) if __name__ == '__main__': main()
#!/usr/bin/env python3 import time from multiprocessing import Process from common.params import Params from selfdrive.manager.process import launcher from selfdrive.swaglog import cloudlog from selfdrive.version import get_version, is_dirty ATHENA_MGR_PID_PARAM = "AthenadPid" def main(): params = Params() dongle_id = params.get("DongleId").decode('utf-8') cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty()) try: while 1: cloudlog.info("starting athena daemon") proc = Process(name='athenad', target=launcher, args=('selfdrive.athena.athenad',)) proc.start() proc.join() cloudlog.event("athenad exited", exitcode=proc.exitcode) time.sleep(5) except Exception: cloudlog.exception("manage_athenad.exception") finally: params.delete(ATHENA_MGR_PID_PARAM) if __name__ == '__main__': main()
mit
Python
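The new call site passes a process name alongside the module path, so launcher is now expected to accept two arguments. Its real body lives in selfdrive.manager.process and does more than this; the sketch below is only a guess at the minimal compatible shape, with the import-and-run step as the assumed core:

import importlib

def launcher(proc, name):
    # Assumed minimal shape: import the target module and hand off to
    # its main(); openpilot's real launcher also wires up logging and
    # process naming (hence the second argument).
    mod = importlib.import_module(proc)  # e.g. 'selfdrive.athena.athenad'
    mod.main()                           # assumption: the module exposes main()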