Dataset columns: commit (string, 40 chars), subject (string, 4-1.73k chars), repos (string, 5-127k chars), old_file (string, 2-751 chars), new_file (string, 2-751 chars), new_contents (string, 1-8.98k chars), old_contents (string, 0-6.59k chars), license (string, 13 classes), lang (string, 23 classes)
2fdb9d17b2c033370d663b4e72d71c1c7e105a84
fix test for python 3
zhihu/redis-shard,keakon/redis-shard
tests/test_pipeline.py
tests/test_pipeline.py
#!/usr/bin/python # -*- coding: utf-8 -*- import unittest from nose.tools import eq_ from redis_shard.shard import RedisShardAPI from redis_shard._compat import b from .config import settings class TestShard(unittest.TestCase): def setUp(self): self.client = RedisShardAPI(**settings) self.clear_db() def tearDown(self): pass def clear_db(self): self.client.delete('testset') self.client.delete('testzset') self.client.delete('testlist') def test_pipeline(self): self.client.set('test', '1') pipe = self.client.pipeline() pipe.set('test', '2') pipe.zadd('testzset', 'first', 1) pipe.zincrby('testzset', 'first') pipe.zadd('testzset', 'second', 2) pipe.execute() pipe.reset() eq_(self.client.get('test'), b'2') eq_(self.client.zscore('testzset', 'first'), 2.0) eq_(self.client.zscore('testzset', 'second'), 2.0) with self.client.pipeline() as pipe: pipe.set('test', '3') pipe.zadd('testzset', 'first', 4) pipe.zincrby('testzset', 'first') pipe.zadd('testzset', 'second', 5) pipe.execute() eq_(self.client.get('test'), b'3') eq_(self.client.zscore('testzset', 'first'), 5.0) eq_(self.client.zscore('testzset', 'second'), 5.0) def test_pipeline_script(self): pipe = self.client.pipeline() for i in range(100): pipe.eval(""" redis.call('set', KEYS[1], ARGV[1]) """, 1, 'testx%d' % i, i) pipe.execute() for i in range(100): eq_(self.client.get('testx%d' % i), b('%d' % i))
#!/usr/bin/python # -*- coding: utf-8 -*- import unittest from nose.tools import eq_ from redis_shard.shard import RedisShardAPI from redis_shard._compat import b from .config import settings class TestShard(unittest.TestCase): def setUp(self): self.client = RedisShardAPI(**settings) self.clear_db() def tearDown(self): pass def clear_db(self): self.client.delete('testset') self.client.delete('testzset') self.client.delete('testlist') def test_pipeline(self): self.client.set('test', '1') pipe = self.client.pipeline() pipe.set('test', '2') pipe.zadd('testzset', 'first', 1) pipe.zincrby('testzset', 'first') pipe.zadd('testzset', 'second', 2) pipe.execute() pipe.reset() eq_(self.client.get('test'), '2') eq_(self.client.zscore('testzset', 'first'), 2.0) eq_(self.client.zscore('testzset', 'second'), 2.0) with self.client.pipeline() as pipe: pipe.set('test', '3') pipe.zadd('testzset', 'first', 4) pipe.zincrby('testzset', 'first') pipe.zadd('testzset', 'second', 5) pipe.execute() eq_(self.client.get('test'), '3') eq_(self.client.zscore('testzset', 'first'), 5.0) eq_(self.client.zscore('testzset', 'second'), 5.0) def test_pipeline_script(self): pipe = self.client.pipeline() for i in range(100): pipe.eval(""" redis.call('set', KEYS[1], ARGV[1]) """, 1, 'testx%d' % i, i) pipe.execute() for i in range(100): eq_(self.client.get('testx%d' % i), b('%d' % i))
bsd-2-clause
Python
ab4c02c1f5f5cf3ba46b4924c48693d028dc23db
Split pipeline tests
valohai/valohai-yaml
tests/test_pipeline.py
tests/test_pipeline.py
from valohai_yaml.objs import Config, DeploymentNode def test_pipeline_valid(pipeline_config: Config): assert pipeline_config.lint().is_valid() def test_little_pipeline(pipeline_config: Config): assert any( ( edge.source_node == "batch1" and edge.source_type == "parameter" and edge.source_key == "aspect-ratio" and edge.target_node == "batch2" and edge.target_type == "parameter" and edge.target_key == "aspect-ratio" ) for edge in pipeline_config.pipelines["My little pipeline"].edges ) def test_deployment_pipeline(pipeline_config: Config): dp = pipeline_config.pipelines["My deployment pipeline"] assert any( ( edge.source_node == "train" and edge.source_type == "output" and edge.source_key == "model" and edge.target_node == "deploy-predictor" and edge.target_type == "file" and edge.target_key == "predict-digit.model" ) for edge in dp.edges ) dn_predict = dp.get_node_by(name='deploy-predictor') assert isinstance(dn_predict, DeploymentNode) assert "predictor-staging" in dn_predict.aliases assert "predict-digit" in dn_predict.endpoints dn_no_preset = dp.get_node_by(name='deploy-no-presets') assert isinstance(dn_no_preset, DeploymentNode) assert dn_no_preset.aliases == [] assert dn_no_preset.endpoints == [] def test_medium_pipeline(pipeline_config: Config): assert any( (edge.source_type == "output" and edge.source_key == "model.pb") for edge in pipeline_config.pipelines["My medium pipeline"].edges )
from valohai_yaml.objs import Config, DeploymentNode def test_pipeline(pipeline_config: Config): lr = pipeline_config.lint() assert lr.is_valid() assert any( ( edge.source_node == "batch1" and edge.source_type == "parameter" and edge.source_key == "aspect-ratio" and edge.target_node == "batch2" and edge.target_type == "parameter" and edge.target_key == "aspect-ratio" ) for edge in pipeline_config.pipelines["My little pipeline"].edges ) assert any( ( edge.source_node == "train" and edge.source_type == "output" and edge.source_key == "model" and edge.target_node == "deploy-predictor" and edge.target_type == "file" and edge.target_key == "predict-digit.model" ) for edge in pipeline_config.pipelines["My deployment pipeline"].edges ) dp = pipeline_config.pipelines["My deployment pipeline"] dn_predict = dp.get_node_by(name='deploy-predictor') assert isinstance(dn_predict, DeploymentNode) assert "predictor-staging" in dn_predict.aliases assert "predict-digit" in dn_predict.endpoints dn_no_preset = dp.get_node_by(name='deploy-no-presets') assert isinstance(dn_no_preset, DeploymentNode) assert dn_no_preset.aliases == [] assert dn_no_preset.endpoints == [] assert any( (edge.source_type == "output" and edge.source_key == "model.pb") for edge in pipeline_config.pipelines["My medium pipeline"].edges )
mit
Python
9e57e467ab508cd0e5fab2862a2c9b651eaa7838
rename tag basisofRecords to BASISOFRECORDS
Datafable/gbif-dataset-metrics,Datafable/gbif-dataset-metrics,Datafable/gbif-dataset-metrics
bin/aggregate_metrics.py
bin/aggregate_metrics.py
import sys import os import json SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src' sys.path.append(SRC_DIR) from aggregator import ReportAggregator, CartoDBWriter def check_arguments(): if len(sys.argv) != 3: print 'usage: aggregate_metrics.py <data directory> <settings.json>\n' print ' data directory: this should point to a directory' print ' containing chunks of metric data.' print ' metric data should be in json and' print ' ordered by dataset key.\n' print ' settings.json: contains the `api_key` that will' print ' be used to contact the cartodb API.' sys.exit(-1) data_dir, settings_file = sys.argv[1:] return [data_dir, settings_file] def aggregate_metrics(data_dir): agg = ReportAggregator() data = agg.aggregate(data_dir) return data def write_data(data, settings_file): settings = json.load(open(settings_file)) writer = CartoDBWriter() basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'MATERIAL_SAMPLE', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'LITERATURE', 'UNKNOWN'] for dataset in data: row = [dataset] basis_of_records = data[dataset]['BASISOFRECORDS'] for metric_name in basis_of_records_metrics: if metric_name in basis_of_records: row.append(basis_of_records[metric_name]) else: row.append(0) nr_of_records = data[dataset]['NUMBER_OF_RECORDS'] row.append(nr_of_records) writer.write_basis_of_record(row, settings['api_key']) def main(): data_dir, settings_file = check_arguments() data = aggregate_metrics(data_dir) write_data(data, settings_file) main()
import sys import os import json SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src' sys.path.append(SRC_DIR) from aggregator import ReportAggregator, CartoDBWriter def check_arguments(): if len(sys.argv) != 3: print 'usage: aggregate_metrics.py <data directory> <settings.json>\n' print ' data directory: this should point to a directory' print ' containing chunks of metric data.' print ' metric data should be in json and' print ' ordered by dataset key.\n' print ' settings.json: contains the `api_key` that will' print ' be used to contact the cartodb API.' sys.exit(-1) data_dir, settings_file = sys.argv[1:] return [data_dir, settings_file] def aggregate_metrics(data_dir): agg = ReportAggregator() data = agg.aggregate(data_dir) return data def write_data(data, settings_file): settings = json.load(open(settings_file)) writer = CartoDBWriter() basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'MATERIAL_SAMPLE', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'LITERATURE', 'UNKNOWN'] for dataset in data: row = [dataset] basis_of_records = data[dataset]['basisofRecords'] for metric_name in basis_of_records_metrics: if metric_name in basis_of_records: row.append(basis_of_records[metric_name]) else: row.append(0) nr_of_records = data[dataset]['NUMBER_OF_RECORDS'] row.append(nr_of_records) writer.write_basis_of_record(row, settings['api_key']) def main(): data_dir, settings_file = check_arguments() data = aggregate_metrics(data_dir) write_data(data, settings_file) main()
mit
Python
77c0ad615c7f0270c0425866f06edde8856892b9
Add Augur Unit Tests For parseIntelXML()
magneticstain/Inquisition,magneticstain/Inquisition,magneticstain/Inquisition,magneticstain/Inquisition
build/tests/test_augur.py
build/tests/test_augur.py
#!/usr/bin/python3 """ test_augur.py APP: Inquisition DESC: Unit test for Augur library CREATION_DATE: 2017-11-25 """ # MODULES # | Native import configparser import unittest # | Third-Party from bs4 import BeautifulSoup as BSoup # | Custom from lib.destiny.Augur import Augur # METADATA __author__ = 'Joshua Carlson-Purcell' __copyright__ = 'Copyright 2017, CarlsoNet' __license__ = 'MIT' __version__ = '1.0.0-alpha' __maintainer__ = 'Joshua Carlson-Purcell' __email__ = '[email protected]' __status__ = 'Development' class AugurTestCase(unittest.TestCase): def setUp(self): # generate config cfg = configparser.ConfigParser() cfg.read('build/tests/unit_tests_GOOD.cfg') self.augur = Augur(cfg=cfg) def test_getXMLSrcData_validURL(self): responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/') self.assertIsInstance(responseData, BSoup) def test_getXMLSrcData_invalidURL(self): responseData = self.augur.getXMLSrcData(url='https://invalid.url/') self.assertEqual(responseData, {}) def test_getXMLSrcData_blankURL(self): try: responseData = self.augur.getXMLSrcData(url='') except ValueError: self.assertTrue(True) def test_mapIOCItemNameToFieldName(self): fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='remoteIP') self.assertEqual(fieldName, 'src_ip') def test_mapIOCItemNameToFieldName_blankFieldName(self): try: fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='') except ValueError: self.assertTrue(True) def test_parseIntelXML(self): responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/') parsedData = self.augur.parseIntelXML(responseData) self.assertNotEqual(parsedData, {}) if __name__ == '__main__': unittest.main()
#!/usr/bin/python3 """ test_augur.py APP: Inquisition DESC: Unit test for Augur library CREATION_DATE: 2017-11-25 """ # MODULES # | Native import configparser import unittest # | Third-Party from bs4 import BeautifulSoup as BSoup # | Custom from lib.destiny.Augur import Augur # METADATA __author__ = 'Joshua Carlson-Purcell' __copyright__ = 'Copyright 2017, CarlsoNet' __license__ = 'MIT' __version__ = '1.0.0-alpha' __maintainer__ = 'Joshua Carlson-Purcell' __email__ = '[email protected]' __status__ = 'Development' class AugurTestCase(unittest.TestCase): def setUp(self): # generate config cfg = configparser.ConfigParser() cfg.read('build/tests/unit_tests_GOOD.cfg') self.augur = Augur(cfg=cfg) def test_getXMLSrcData_validURL(self): responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/') self.assertIsInstance(responseData, BSoup) def test_getXMLSrcData_invalidURL(self): responseData = self.augur.getXMLSrcData(url='https://invalid.url/') self.assertEqual(responseData, {}) def test_getXMLSrcData_blankURL(self): try: responseData = self.augur.getXMLSrcData(url='') except ValueError: self.assertTrue(True) def test_mapIOCItemNameToFieldName(self): fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='remoteIP') self.assertEqual(fieldName, 'src_ip') def test_mapIOCItemNameToFieldName_blankFieldName(self): try: fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='') except ValueError: self.assertTrue(True) if __name__ == '__main__': unittest.main()
mit
Python
b6572ec32295365862947845a8c916eae428700f
Clean up temporary files on 'nt'.
stpettersens/makemodule,stpettersens/makemodule
makemodule.py
makemodule.py
#!/bin/env python """ makemodule Module generation tool Copyright (c) 2015 Sam Saint-Pettersen. Released under the MIT/X11 License. """ import sys import os import xml.dom.minidom as xml class makemodule: def __init__(self, args): if len(args) == 1: self.displayUsage() else: self.writeModuleXML() def displayUsage(self): print(__doc__) print('Usage: makemodule [module..module]\n') sys.exit(1) def writeModuleXML(self): names = [] enabled = [] redirect = '' cleanup = False if os.name == 'nt': redirect = ' > a.tmp 2>&1' cleanup = True else: redirect = ' >> /dev/null 2>&1' for arg in sys.argv[1:]: names.append(arg) exitCode = int(os.system(arg + redirect)) if exitCode == 32512: enabled.append(False) else: enabled.append(True) doc = xml.Document() c = doc.createElement('configuration') doc.appendChild(c) i = 0 for name in names: m = doc.createElement('module') c.appendChild(m) n = doc.createElement('name') m.appendChild(n) n_is = doc.createTextNode(name) n.appendChild(n_is) e = doc.createElement('enabled') m.appendChild(e) e_is = doc.createTextNode(str(enabled[i])) e.appendChild(e_is) i = i + 1 print('Writing modules.xml...') f = open('modules.xml', 'w') f.write(doc.toprettyxml()) f.close() if os.name == 'nt': os.remove('a.tmp') makemodule(sys.argv)
#!/bin/env python """ makemodule Module generation tool Copyright (c) 2015 Sam Saint-Pettersen. Released under the MIT/X11 License. """ import sys import os import xml.dom.minidom as xml class makemodule: def __init__(self, args): if len(args) == 1: self.displayUsage() else: self.writeModuleXML() def displayUsage(self): print(__doc__) print('Usage: makemodule [module..module]\n') sys.exit(1) def writeModuleXML(self): names = [] enabled = [] redirect = '' cleanup = False if os.name == 'nt': redirect = ' > a.tmp 2>&1' cleanup = True else: redirect = ' >> /dev/null 2>&1' for arg in sys.argv[1:]: names.append(arg) exitCode = int(os.system(arg + redirect)) if exitCode == 32512: enabled.append(False) else: enabled.append(True) doc = xml.Document() c = doc.createElement('configuration') doc.appendChild(c) i = 0 for name in names: m = doc.createElement('module') c.appendChild(m) n = doc.createElement('name') m.appendChild(n) n_is = doc.createTextNode(name) n.appendChild(n_is) e = doc.createElement('enabled') m.appendChild(e) e_is = doc.createTextNode(str(enabled[i])) e.appendChild(e_is) i = i + 1 print('Writing modules.xml...') f = open('modules.xml', 'w') f.write(doc.toprettyxml()) f.close() makemodule(sys.argv)
mit
Python
9aae92fb0e22c97f559b6e3ee895d9959e010e05
Add missing import
cleverhans-lab/cleverhans,carlini/cleverhans,openai/cleverhans,cihangxie/cleverhans,carlini/cleverhans,fartashf/cleverhans,cleverhans-lab/cleverhans,cleverhans-lab/cleverhans
tests_tf/test_model.py
tests_tf/test_model.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest from cleverhans.model import Model, CallableModelWrapper class TestModelClass(unittest.TestCase): def test_get_layer(self): # Define empty model model = Model() x = [] # Exception is thrown when `get_layer` not implemented with self.assertRaises(Exception) as context: model.get_layer(x, layer='') self.assertTrue(context.exception) def test_get_logits(self): # Define empty model model = Model() x = [] # Exception is thrown when `get_logits` not implemented with self.assertRaises(Exception) as context: model.get_logits(x) self.assertTrue(context.exception) def test_get_probs(self): # Define empty model model = Model() x = [] # Exception is thrown when `get_probs` not implemented with self.assertRaises(Exception) as context: model.get_probs(x) self.assertTrue(context.exception) def test_get_layer_names(self): # Define empty model model = Model() # Exception is thrown when `get_layer_names` not implemented with self.assertRaises(Exception) as context: model.get_layer_names() self.assertTrue(context.exception) def test_fprop(self): # Define empty model model = Model() x = [] # Exception is thrown when `fprop` not implemented with self.assertRaises(Exception) as context: model.fprop(x) self.assertTrue(context.exception) class TestCallableModelWrapperInitArguments(unittest.TestCase): def test_output_layer(self): def model(): return True # The following two calls should not raise Exceptions wrap = CallableModelWrapper(model, 'probs') wrap = CallableModelWrapper(model, 'logits') if __name__ == '__main__': unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest from cleverhans.model import Model class TestModelClass(unittest.TestCase): def test_get_layer(self): # Define empty model model = Model() x = [] # Exception is thrown when `get_layer` not implemented with self.assertRaises(Exception) as context: model.get_layer(x, layer='') self.assertTrue(context.exception) def test_get_logits(self): # Define empty model model = Model() x = [] # Exception is thrown when `get_logits` not implemented with self.assertRaises(Exception) as context: model.get_logits(x) self.assertTrue(context.exception) def test_get_probs(self): # Define empty model model = Model() x = [] # Exception is thrown when `get_probs` not implemented with self.assertRaises(Exception) as context: model.get_probs(x) self.assertTrue(context.exception) def test_get_layer_names(self): # Define empty model model = Model() # Exception is thrown when `get_layer_names` not implemented with self.assertRaises(Exception) as context: model.get_layer_names() self.assertTrue(context.exception) def test_fprop(self): # Define empty model model = Model() x = [] # Exception is thrown when `fprop` not implemented with self.assertRaises(Exception) as context: model.fprop(x) self.assertTrue(context.exception) class TestCallableModelWrapperInitArguments(unittest.TestCase): def test_output_layer(self): def model(): return True # The following two calls should not raise Exceptions wrap = CallableModelWrapper(model, 'probs') wrap = CallableModelWrapper(model, 'logits') if __name__ == '__main__': unittest.main()
mit
Python
4646e7c682ba9a0291815a5d0de98674a9de3410
Fix RemoteCapture definition
Adamshick012/pyshark,eaufavor/pyshark-ssl,KimiNewt/pyshark
src/pyshark/capture/remote_capture.py
src/pyshark/capture/remote_capture.py
from pyshark import LiveCapture class RemoteCapture(LiveCapture): """ A capture which is performed on a remote machine which has an rpcapd service running. """ def __init__(self, remote_host, remote_interface, remote_port=2002, bpf_filter=None): """ Creates a new remote capture which will connect to a remote machine which is running rpcapd. Use the sniff() method to get packets. Note: The remote machine should have rpcapd running in null authentication mode (-n). Be warned that the traffic is unencrypted! :param remote_host: The remote host to capture on (IP or hostname). Should be running rpcapd. :param remote_interface: The remote interface on the remote machine to capture on. Note that on windows it is not the device display name but the true interface name (i.e. \\Device\\NPF_..). :param remote_port: The remote port the rpcapd service is listening on :param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading. """ interface = 'rpcap://%s:%d/%s' % (remote_host, remote_port, remote_interface) super(RemoteCapture, self).__init__(interface, bpf_filter=bpf_filter)
from pyshark import LiveCapture class RemoteCapture(LiveCapture): """ A capture which is performed on a remote machine which has an rpcapd service running. """ def __init__(self, remote_host, remote_interface, remote_port=2002, bpf_filter=None): """ Creates a new remote capture which will connect to a remote machine which is running rpcapd. Use the sniff() method to get packets. Note: The remote machine should have rpcapd running in null authentication mode (-n). Be warned that the traffic is unencrypted! :param remote_host: The remote host to capture on (IP or hostname). Should be running rpcapd. :param remote_interface: The remote interface on the remote machine to capture on. Note that on windows it is not the device display name but the true interface name (i.e. \Device\NPF_..). :param remote_port: The remote port the rpcapd service is listening on :param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading. """ interface = 'rpcap://%s:%d/%s' % (remote_host, remote_port, remote_interface) super(RemoteCapture, self).__init__(interface, bpf_filter=bpf_filter)
mit
Python
1a9f0320b3a8aecc50cfee6335c3b6e8dc81c233
Make this tool less hacky.
HIPERFIT/futhark,diku-dk/futhark,diku-dk/futhark,diku-dk/futhark,HIPERFIT/futhark,diku-dk/futhark,diku-dk/futhark,HIPERFIT/futhark
tools/commit-impact.py
tools/commit-impact.py
#!/usr/bin/env python # # See the impact of a Futhark commit compared to the previous one we # have benchmarking for. import sys import subprocess from urllib.request import urlopen from urllib.error import HTTPError import json import tempfile import os def url_for(backend, system, commit): return 'https://futhark-lang.org/benchmark-results/futhark-{}-{}-{}.json'.format(backend, system, commit) def results_for_commit(backend, system, commit): try: url = url_for(backend, system, commit) print('Fetching {}...'.format(url)) return json.loads(urlopen(url).read()) except HTTPError: return None def first_commit_with_results(backend, system, commits): for commit in commits: res = results_for_commit(backend, system, commit) if res: return commit, res def find_commits(start): return subprocess.check_output(['git', 'rev-list', start]).decode('utf-8').splitlines() if __name__ == '__main__': backend, system, commit = sys.argv[1:4] now = results_for_commit(backend, system, commit) if not now: print('No results found') sys.exit(1) if len(sys.argv) == 5: commits = find_commits(sys.argv[4]) else: commits = find_commits(commit)[1:] then_commit, then = first_commit_with_results(backend, system, commits[1:]) print('Comparing {}'.format(commit)) print(' with {}'.format(then_commit)) with tempfile.NamedTemporaryFile(prefix=commit, mode='w') as now_file: with tempfile.NamedTemporaryFile(prefix=then_commit, mode='w') as then_file: json.dump(now, now_file) json.dump(then, then_file) now_file.flush() then_file.flush() os.system('tools/cmp-bench-json.py {} {}'.format(then_file.name, now_file.name))
#!/usr/bin/env python # # See the impact of a Futhark commit compared to the previous one we # have benchmarking for. import sys import subprocess from urllib.request import urlopen from urllib.error import HTTPError import json def url_for(backend, system, commit): return 'https://futhark-lang.org/benchmark-results/futhark-{}-{}-{}.json'.format(backend, system, commit) def results_for_commit(backend, system, commit): try: url = url_for(backend, system, commit) print('Fetching {}...'.format(url)) return json.loads(urlopen(url).read()) except HTTPError: return None def first_commit_with_results(backend, system, commits): for commit in commits: res = results_for_commit(backend, system, commit) if res: return commit, res if __name__ == '__main__': backend, system, commit = sys.argv[1:] commits = subprocess.check_output(['git', 'rev-list', commit]).decode('utf-8').splitlines() now = results_for_commit(backend, system, commit) if not now: print('No results found') sys.exit(1) then_commit, then = first_commit_with_results(backend, system, commits[1:]) print('Comparing {}'.format(commit)) print(' with {}'.format(then_commit)) # Hacky hacky... m = __import__('cmp-bench-json') m.compare(then, now)
isc
Python
0fa1e147fc7d2522a4352c0bbc60e4da67380257
add a missing statement
amandersillinois/landlab,cmshobe/landlab,landlab/landlab,amandersillinois/landlab,cmshobe/landlab,cmshobe/landlab,landlab/landlab,landlab/landlab
landlab/utils/tests/test_stream_length.py
landlab/utils/tests/test_stream_length.py
from landlab import RasterModelGrid, FieldError from landlab.components import FlowAccumulator, FastscapeEroder, FlowDirectorSteepest import numpy as np from landlab.utils.stream_length import calculate_stream_length from nose.tools import assert_equal, assert_true, assert_false, assert_raises def test_no_flow_recievers(): """Test that correct error is raised when no flow recievers are on the grid.""" # instantiate a model grid, do not run flow accumulation on it mg = RasterModelGrid(30, 70) # test that the stream length utility will fail because of a ValueError assert_raises(FieldError, calculate_stream_length, mg) def test_no_upstream_array(): """Test that correct error is raised when no flow__upstream_node_order.""" # instantiate a model grid, do not run flow accumulation on it mg = RasterModelGrid(30, 70) fd = FlowDirectorSteepest(mg) fd.run_one_step() # test that the stream length utility will fail because of a ValueError assert_raises(FieldError, calculate_stream_length, mg)
from landlab import RasterModelGrid, FieldError from landlab.components import FlowAccumulator, FastscapeEroder, FlowDirectorSteepest import numpy as np from landlab.utils.stream_length import calculate_stream_length from nose.tools import assert_equal, assert_true, assert_false, assert_raises def test_no_flow_recievers(): """Test that correct error is raised when no flow recievers are on the grid.""" # instantiate a model grid, do not run flow accumulation on it mg = RasterModelGrid(30, 70) # test that the stream length utility will fail because of a ValueError assert_raises(FieldError, calculate_stream_length, mg) def test_no_upstream_array(): """Test that correct error is raised when no flow__upstream_node_order.""" # instantiate a model grid, do not run flow accumulation on it mg = RasterModelGrid(30, 70) z = mg.add_zeros('topographic__elevation', at='node') fd = FlowDirectorSteepest(mg) fd.run_one_step() # test that the stream length utility will fail because of a ValueError assert_raises(FieldError, calculate_stream_length, mg)
mit
Python
85ee5f5e6d7a5937b67c9d11ae127709749f7490
Bump to version 0.4.1
rfleschenberg/djangocms-cascade,Julien-Blanc/djangocms-cascade,Julien-Blanc/djangocms-cascade,aldryn/djangocms-cascade,aldryn/djangocms-cascade,datafyit/djangocms-cascade,zhangguiyu/djangocms-cascade,jrief/djangocms-cascade,datafyit/djangocms-cascade,jrief/djangocms-cascade,schacki/djangocms-cascade,aldryn/djangocms-cascade,schacki/djangocms-cascade,jtiki/djangocms-cascade,haricot/djangocms-bs4forcascade,jtiki/djangocms-cascade,Julien-Blanc/djangocms-cascade,rfleschenberg/djangocms-cascade,coxm/djangocms-cascade,jrief/djangocms-cascade,jtiki/djangocms-cascade,rfleschenberg/djangocms-cascade,haricot/djangocms-bs4forcascade,zhangguiyu/djangocms-cascade,coxm/djangocms-cascade,coxm/djangocms-cascade,schacki/djangocms-cascade,datafyit/djangocms-cascade,zhangguiyu/djangocms-cascade
cmsplugin_cascade/__init__.py
cmsplugin_cascade/__init__.py
__version__ = "0.4.1"
__version__ = "0.4.0"
mit
Python
59b8ae5f17e556c09ef8592723f9c684843c7dcc
update function and comment
berkeley-stat159/project-theta
code/utils/outlierfunction.py
code/utils/outlierfunction.py
# find outliers based on DVARS and FD def outlier(data, bound): ''' Input: data: array of values bound: threshold for outliers Output: indices of outliers ''' outlier = [] # set nonoutlier values to 0, outliers to nonzero for i in data: if i <= bound: outlier.append(0) else: outlier.append(i) # find outlier indices outlier_indices = np.nonzero(outlier) return outlier_indices
# find outliers based on DVARS and FD def outlier(data, bound): ''' Input: data: array of values bound: threshold for outliers Output: indices of outliers ''' outlier = [] # set outlier values to 0 for i in data: if i <= bound: outlier.append(0) else: outlier.append(i) # find outlier indices outlier_indices = np.nonzero(outlier) return outlier_indices
bsd-3-clause
Python
270c8ca68357f92999474fbf110fed7b01cdfdf2
Use proper way to access package resources.
bbirand/python-driver,mike-tr-adamson/python-driver,HackerEarth/cassandra-python-driver,vipjml/python-driver,coldeasy/python-driver,jregovic/python-driver,jregovic/python-driver,datastax/python-driver,thobbs/python-driver,stef1927/python-driver,markflorisson/python-driver,coldeasy/python-driver,bbirand/python-driver,jfelectron/python-driver,sontek/python-driver,mambocab/python-driver,datastax/python-driver,tempbottle/python-driver,mobify/python-driver,sontek/python-driver,yi719/python-driver,tempbottle/python-driver,HackerEarth/cassandra-python-driver,jfelectron/python-driver,kracekumar/python-driver,thelastpickle/python-driver,mambocab/python-driver,kracekumar/python-driver,beobal/python-driver,thobbs/python-driver,thelastpickle/python-driver,yi719/python-driver,kishkaru/python-driver,vipjml/python-driver,mobify/python-driver,cqlengine/cqlengine,beobal/python-driver,markflorisson/python-driver,kishkaru/python-driver,stef1927/python-driver,mike-tr-adamson/python-driver
cqlengine/__init__.py
cqlengine/__init__.py
import os import pkg_resources from cqlengine.columns import * from cqlengine.functions import * from cqlengine.models import Model from cqlengine.query import BatchQuery __cqlengine_version_path__ = pkg_resources.resource_filename('cqlengine', 'VERSION') __version__ = open(__cqlengine_version_path__, 'r').readline().strip() # compaction SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" LeveledCompactionStrategy = "LeveledCompactionStrategy" ANY = "ANY" ONE = "ONE" TWO = "TWO" THREE = "THREE" QUORUM = "QUORUM" LOCAL_QUORUM = "LOCAL_QUORUM" EACH_QUORUM = "EACH_QUORUM" ALL = "ALL"
import os from cqlengine.columns import * from cqlengine.functions import * from cqlengine.models import Model from cqlengine.query import BatchQuery __cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION') __version__ = open(__cqlengine_version_path__, 'r').readline().strip() # compaction SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy" LeveledCompactionStrategy = "LeveledCompactionStrategy" ANY = "ANY" ONE = "ONE" TWO = "TWO" THREE = "THREE" QUORUM = "QUORUM" LOCAL_QUORUM = "LOCAL_QUORUM" EACH_QUORUM = "EACH_QUORUM" ALL = "ALL"
apache-2.0
Python
5584ec8c6aa8e6567b3ddd286c1c7305fad070a3
fix init
peerchemist/cryptotik
cryptotik/__init__.py
cryptotik/__init__.py
from cryptotik.poloniex import Poloniex from cryptotik.bittrex import Bittrex from cryptotik.btce import Btce from cryptotik.therock import TheRock from cryptotik.livecoin import Livecoin from cryptotik.okcoin import OKcoin from cryptotik.hitbtc import Hitbtc
from cryptotik.poloniex import Poloniex from cryptotik.bittrex import Bittrex from cryptotik.btce import Btce from cryptotik.therock import TheRock from cryptotik.livecoin import Livecoin <<<<<<< HEAD from cryptotik.okcoin import OKcoin ======= from cryptotik.hitbtc import Hitbtc >>>>>>> 7e948ea7ab42a9ad57d9ec1259539995ff34fb34
bsd-3-clause
Python
35cc2bce4e5fb62083ec1a44bda85c2da064d119
Remove debug print statements
StoDevX/cs251-toolkit,StoDevX/cs251-toolkit,StoDevX/cs251-toolkit,StoDevX/cs251-toolkit
cs251tk/specs/load.py
cs251tk/specs/load.py
from logging import warning from glob import iglob import json import os import shutil from .cache import cache_specs from .dirs import get_specs_dir def load_all_specs(*, basedir=get_specs_dir()): os.makedirs(basedir, exist_ok=True) # the repo has a /specs folder basedir = os.path.join(basedir, 'specs') cache_specs(basedir) spec_files = iglob(os.path.join(basedir, '_cache', '*.json')) # load_spec returns a (name, spec) tuple, so we just let the dict() constructor # turn that into the {name: spec} pairs of a dictionary for us return dict([load_spec(filename, basedir) for filename in spec_files]) def load_some_specs(idents, *, basedir=get_specs_dir()): # the repo has a /specs folder basedir = os.path.join(basedir, 'specs') cache_specs(basedir) wanted_spec_files = [os.path.join(basedir, '_cache', '{}.json'.format(ident)) for ident in idents] all_spec_files = iglob(os.path.join(basedir, '_cache', '*.json')) loadable_spec_files = set(all_spec_files).intersection(wanted_spec_files) print(loadable_spec_files) # load_spec returns a (name, spec) tuple, so we just let the dict() constructor # turn that into the {name: spec} pairs of a dictionary for us return dict([load_spec(filename) for filename in loadable_spec_files]) def load_spec(filename, basedir): with open(filename, 'r', encoding='utf-8') as specfile: loaded_spec = json.load(specfile) name = os.path.splitext(os.path.basename(filename))[0] assignment = loaded_spec['assignment'] # Ask if user wants to re-cache specs to fix discrepancy if name != assignment: warning('assignment "{}" does not match the filename {}'.format(assignment, filename)) recache = input("Re-cache specs? (Y/N)") if recache == "Y" or recache == "y": shutil.rmtree(os.path.join(basedir, '_cache')) cache_specs(basedir) return assignment, loaded_spec
from logging import warning from glob import iglob import json import os import shutil import sys from .cache import cache_specs from .dirs import get_specs_dir def load_all_specs(*, basedir=get_specs_dir()): os.makedirs(basedir, exist_ok=True) # the repo has a /specs folder basedir = os.path.join(basedir, 'specs') cache_specs(basedir) spec_files = iglob(os.path.join(basedir, '_cache', '*.json')) # load_spec returns a (name, spec) tuple, so we just let the dict() constructor # turn that into the {name: spec} pairs of a dictionary for us return dict([load_spec(filename, basedir) for filename in spec_files]) def load_some_specs(idents, *, basedir=get_specs_dir()): # the repo has a /specs folder basedir = os.path.join(basedir, 'specs') cache_specs(basedir) wanted_spec_files = [os.path.join(basedir, '_cache', '{}.json'.format(ident)) for ident in idents] all_spec_files = iglob(os.path.join(basedir, '_cache', '*.json')) loadable_spec_files = set(all_spec_files).intersection(wanted_spec_files) print(loadable_spec_files) # load_spec returns a (name, spec) tuple, so we just let the dict() constructor # turn that into the {name: spec} pairs of a dictionary for us return dict([load_spec(filename) for filename in loadable_spec_files]) def load_spec(filename, basedir): with open(filename, 'r', encoding='utf-8') as specfile: loaded_spec = json.load(specfile) name = os.path.splitext(os.path.basename(filename))[0] assignment = loaded_spec['assignment'] if name != assignment: warning('assignment "{}" does not match the filename {}'.format(assignment, filename)) # warning("Re-caching specs\n") # print(file=sys.stderr) recache = input("Re-cache specs? (Y/N)") if recache == "Y" or recache == "y": shutil.rmtree(os.path.join(basedir, '_cache')) cache_specs(basedir) return assignment, loaded_spec
mit
Python
096d3c44a60c83820410a85cd6a56f20b13b9ccd
Update API Infor: rewrite the users_total_count API response using the new format
yrchen/CommonRepo,yrchen/CommonRepo,yrchen/CommonRepo,yrchen/CommonRepo
commonrepo/infor_api/views.py
commonrepo/infor_api/views.py
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt from rest_framework import permissions from rest_framework import renderers from rest_framework import status from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework.parsers import JSONParser from commonrepo.elos.models import ELO from commonrepo.users.models import User as User from .permissions import IsOwnerOrReadOnly # ELOs @api_view(['GET']) def elos_total_count(request): if request.method == 'GET': return Response({"code": 202, "status": "ok", "result": { "total_elos": ELO.objects.all().count() } }, status=status.HTTP_202_ACCEPTED) else: return Response({"code": 400, "status": "error" }, status=status.HTTP_400_BAD_REQUEST) # Users @api_view(['GET']) def users_total_count(request): if request.method == 'GET': return Response({"code": 202, "status": "ok", "result": { "total_users": User.objects.all().count() } }, status=status.HTTP_202_ACCEPTED) else: return Response({"code": 400, "status": "error" }, status=status.HTTP_400_BAD_REQUEST)
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt from rest_framework import permissions from rest_framework import renderers from rest_framework import status from rest_framework import viewsets from rest_framework.decorators import api_view, detail_route from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework.parsers import JSONParser from commonrepo.elos.models import ELO from commonrepo.users.models import User as User from .permissions import IsOwnerOrReadOnly # ELOs @api_view(['GET']) def elos_total_count(request): if request.method == 'GET': return Response({"code": 202, "status": "ok", "result": { "total_elos": ELO.objects.all().count() } }, status=status.HTTP_202_ACCEPTED) else: return Response({"code": 400, "status": "error" }, status=status.HTTP_400_BAD_REQUEST) # Users @api_view(['GET']) def users_total_count(request): if request.method == 'GET': return Response({"total_users": User.objects.all().count() }, status=status.HTTP_202_ACCEPTED) else: return Response(status=status.HTTP_400_BAD_REQUEST)
apache-2.0
Python
6a5729d566a6e75c97b67a544dd7aed9c857e6de
update attachment attributes
leVirve/NTHU_Course,henryyang42/NTHU_Course,henryyang42/NTHU_Course,leVirve/NTHU_Course,leVirve/NTHU_Course,leVirve/NTHU_Course,henryyang42/NTHU_Course,henryyang42/NTHU_Course
data_center/models.py
data_center/models.py
# -*- coding: utf-8 -*- from datetime import datetime from django.db import models from django.utils.http import urlquote attachment_url_format = 'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE/JH/output/6_6.1_6.1.12/%s.pdf' # noqa class Course(models.Model): """Course database schema""" no = models.CharField(max_length=20, blank=True) code = models.CharField(max_length=20, blank=True) eng_title = models.CharField(max_length=200, blank=True) chi_title = models.CharField(max_length=200, blank=True) note = models.TextField(blank=True) objective = models.CharField(max_length=80, blank=True) time = models.CharField(max_length=20, blank=True) time_token = models.CharField(max_length=20, blank=True) teacher = models.CharField(max_length=40, blank=True) # Only save Chinese room = models.CharField(max_length=20, blank=True) credit = models.IntegerField(blank=True, null=True) limit = models.IntegerField(blank=True, null=True) prerequisite = models.BooleanField(default=False, blank=True) ge = models.CharField(max_length=80, blank=True) hit = models.IntegerField(default=0) syllabus = models.TextField(blank=True) # pure text has_attachment = models.BooleanField(default=False) # has pdf def __str__(self): return self.no @property def attachment_url(self): return attachment_url_format % urlquote(self.no) class Department(models.Model): dept_name = models.CharField(max_length=20, blank=True) required_course = models.ManyToManyField(Course, blank=True) def __unicode__(self): return self.dept_name class Announcement(models.Model): TAG_CHOICE = ( ('Info', '公告'), ('Bug', '已知問題'), ('Fix', '問題修復'), ) content = models.TextField(blank=True) time = models.DateTimeField(default=datetime.now) tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info') def __unicode__(self): return '%s|%s' % (self.time, self.tag)
# -*- coding: utf-8 -*- from datetime import datetime from django.db import models class Course(models.Model): """Course database schema""" no = models.CharField(max_length=20, blank=True) code = models.CharField(max_length=20, blank=True) eng_title = models.CharField(max_length=200, blank=True) chi_title = models.CharField(max_length=200, blank=True) note = models.TextField(blank=True) objective = models.CharField(max_length=80, blank=True) time = models.CharField(max_length=20, blank=True) time_token = models.CharField(max_length=20, blank=True) teacher = models.CharField(max_length=40, blank=True) # Only save Chinese room = models.CharField(max_length=20, blank=True) credit = models.IntegerField(blank=True, null=True) limit = models.IntegerField(blank=True, null=True) prerequisite = models.BooleanField(default=False, blank=True) ge = models.CharField(max_length=80, blank=True) hit = models.IntegerField(default=0) syllabus = models.TextField(blank=True) # pure text def __str__(self): return self.no class Department(models.Model): dept_name = models.CharField(max_length=20, blank=True) required_course = models.ManyToManyField(Course, blank=True) def __unicode__(self): return self.dept_name class Announcement(models.Model): TAG_CHOICE = ( ('Info', '公告'), ('Bug', '已知問題'), ('Fix', '問題修復'), ) content = models.TextField(blank=True) time = models.DateTimeField(default=datetime.now) tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info') def __unicode__(self): return '%s|%s' % (self.time, self.tag)
mit
Python
00dec661c39437e2fd031328431ab59ca428aaf3
Fix deprecation warning regarding BaseException.message
ozgur/python-linkedin,alisterion/python-linkedin,marshallhumble/python-linkedin,DEKHTIARJonathan/python3-linkedin,narrowcast/python-linkedin,ViralLeadership/python-linkedin,stephanieleevillanueva/python-linkedin,bpartridge/python-linkedin,fivejjs/python-linkedin,Reachpodofficial/python-linkedin
linkedin/utils.py
linkedin/utils.py
# -*- coding: utf-8 -*- import requests from .exceptions import LinkedInError, get_exception_for_error_code try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: import simplejson as json except ImportError: try: from django.utils import simplejson as json except ImportError: import json def enum(enum_type='enum', base_classes=None, methods=None, **attrs): """ Generates a enumeration with the given attributes. """ # Enumerations can not be initalized as a new instance def __init__(instance, *args, **kwargs): raise RuntimeError('%s types can not be initialized.' % enum_type) if base_classes is None: base_classes = () if methods is None: methods = {} base_classes = base_classes + (object,) for k, v in methods.iteritems(): methods[k] = classmethod(v) attrs['enums'] = attrs.copy() methods.update(attrs) methods['__init__'] = __init__ return type(enum_type, base_classes, methods) def to_utf8(st): if isinstance(st, unicode): return st.encode('utf-8') else: return bytes(st) def raise_for_error(response): try: response.raise_for_status() except (requests.HTTPError, requests.ConnectionError) as error: try: if len(response.content) == 0: # There is nothing we can do here since LinkedIn has neither sent # us a 2xx response nor a response content. return response = response.json() if ('error' in response) or ('errorCode' in response): message = '%s: %s' % (response.get('error', str(error)), response.get('message', 'Unknown Error')) error_code = response.get('status') ex = get_exception_for_error_code(error_code) raise ex(message) else: raise LinkedInError(error.message) except (ValueError, TypeError): raise LinkedInError(error.message) HTTP_METHODS = enum('HTTPMethod', GET='GET', POST='POST', PUT='PUT', DELETE='DELETE', PATCH='PATCH')
# -*- coding: utf-8 -*- import requests from .exceptions import LinkedInError, get_exception_for_error_code try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: import simplejson as json except ImportError: try: from django.utils import simplejson as json except ImportError: import json def enum(enum_type='enum', base_classes=None, methods=None, **attrs): """ Generates a enumeration with the given attributes. """ # Enumerations can not be initalized as a new instance def __init__(instance, *args, **kwargs): raise RuntimeError('%s types can not be initialized.' % enum_type) if base_classes is None: base_classes = () if methods is None: methods = {} base_classes = base_classes + (object,) for k, v in methods.iteritems(): methods[k] = classmethod(v) attrs['enums'] = attrs.copy() methods.update(attrs) methods['__init__'] = __init__ return type(enum_type, base_classes, methods) def to_utf8(st): if isinstance(st, unicode): return st.encode('utf-8') else: return bytes(st) def raise_for_error(response): try: response.raise_for_status() except (requests.HTTPError, requests.ConnectionError) as error: try: if len(response.content) == 0: # There is nothing we can do here since LinkedIn has neither sent # us a 2xx response nor a response content. return response = response.json() if ('error' in response) or ('errorCode' in response): message = '%s: %s' % (response.get('error', error.message), response.get('message', 'Unknown Error')) error_code = response.get('status') ex = get_exception_for_error_code(error_code) raise ex(message) else: raise LinkedInError(error.message) except (ValueError, TypeError): raise LinkedInError(error.message) HTTP_METHODS = enum('HTTPMethod', GET='GET', POST='POST', PUT='PUT', DELETE='DELETE', PATCH='PATCH')
mit
Python
36e8335bc146e4eda6801b2c148410c3ea620ae5
Update scipy.py
vadimkantorov/wigwam
wigs/scipy.py
wigs/scipy.py
class scipy(PythonWig): tarball_uri = 'https://github.com/scipy/scipy/releases/download/v$RELEASE_VERSION$/scipy-$RELEASE_VERSION$.tar.gz' last_release_version = 'v0.18.1' git_uri = 'https://github.com/scipy/scipy' dependencies = ['numpy'] optional_dependencies = ['openblas'] supported_features = ['openblas'] default_features = ['+openblas'] def setup(self): self.site_cfg = [] def switch_openblas_on(self): self.require('openblas') include_dirs = map(os.path.abspath, P.prefix_include_dirs) lib_dirs = map(os.path.abspath, P.prefix_lib_dirs) self.site_cfg += [ '[openblas]', 'libraries = openblas', 'include_dirs = %s' % os.path.pathsep.join(include_dirs), 'library_dirs = %s' % os.path.pathsep.join(lib_dirs), 'runtime_library_dirs = %s' % os.path.pathsep.join(lib_dirs) ]
class scipy(PythonWig): tarball_uri = 'https://github.com/scipy/scipy/releases/download/v$RELEASE_VERSION$/scipy-$RELEASE_VERSION$.tar.gz' last_release_version = 'v0.18.1' git_uri = 'https://github.com/scipy/scipy' dependencies = ['numpy']
mit
Python
374c386a6b2dd1ad1ba75ba70009de6c7ee3c3fc
Add process_request method to Application
phantomii/restalchemy
restalchemy/api/applications.py
restalchemy/api/applications.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2014 Eugene Frolov <[email protected]> # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import dec from restalchemy.api import resources from restalchemy.api import routes DEFAULT_CONTENT_TYPE = 'application/json' class WSGIApp(object): def __init__(self, route_class): super(WSGIApp, self).__init__() self._main_route = routes.route(route_class) resources.ResourceMap.set_resource_map( routes.Route.build_resource_map(route_class)) def process_request(self, req): return self._main_route(req).do() @dec.wsgify def __call__(self, req): return self.process_request(req) Application = WSGIApp
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2014 Eugene Frolov <[email protected]> # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import dec from restalchemy.api import resources from restalchemy.api import routes DEFAULT_CONTENT_TYPE = 'application/json' class WSGIApp(object): def __init__(self, route_class): super(WSGIApp, self).__init__() self._main_route = routes.route(route_class) resources.ResourceMap.set_resource_map( routes.Route.build_resource_map(route_class)) @dec.wsgify def __call__(self, req): return self._main_route(req).do() Application = WSGIApp
apache-2.0
Python
b7e8af6ef92c0244bd5121c528e3e85441b0d835
Disable test/mac/gyptest-objc-gc.py when using Xcode 5.1
geekboxzone/lollipop_external_chromium_org_tools_gyp,pandaxcl/gyp,android-ia/platform_external_chromium_org_tools_gyp,azunite/gyp,bulldy80/gyp_unofficial,mapbox/gyp,amoikevin/gyp,bnq4ever/gypgoogle,LazyCodingCat/gyp,Danath/gyp,carlTLR/gyp,openpeer/webrtc-gyp,erikge/watch_gyp,dougbeal/gyp,Omegaphora/external_chromium_org_tools_gyp,azunite/gyp,sanyaade-teachings/gyp,Jack-Q/GYP-copy,erikge/watch_gyp,Chilledheart/gyp,Chilledheart/gyp,erikge/watch_gyp,saghul/gyn,Chilledheart/gyp,sanyaade-teachings/gyp,dougbeal/gyp,Omegaphora/external_chromium_org_tools_gyp,duanhjlt/gyp,azunite/gyp,bpsinc-native/src_tools_gyp,sport-monkey/GYP,ttyangf/gyp,AWhetter/gyp,okumura/gyp,mapbox/gyp,ttyangf/pdfium_gyp,cysp/gyp,adblockplus/gyp,cchamberlain/gyp,azunite/gyp_20150930,pandaxcl/gyp,Jack-Q/GYP-copy,yjhjstz/gyp,android-ia/platform_external_chromium_org_tools_gyp,cchamberlain/gyp,AOSPU/external_chromium_org_tools_gyp,Omegaphora/external_chromium_org_tools_gyp,carlTLR/gyp,okumura/gyp,mgamer/gyp,sdklite/gyp,msc-/gyp,azunite/gyp_20150930,openpeer/webrtc-gyp,duanhjlt/gyp,azunite/gyp_20150930,Jack-Q/GYP-copy,Danath/gyp,LazyCodingCat/gyp,springmeyer/gyp,bnq4ever/gypgoogle,sport-monkey/GYP,bulldy80/gyp_unofficial,mgamer/gyp,sdklite/gyp,chromium/gyp,tarc/gyp,AWhetter/gyp,AOSPU/external_chromium_org_tools_gyp,azunite/gyp_20150930,turbulenz/gyp,Omegaphora/external_chromium_org_tools_gyp,carlTLR/gyp,okumura/gyp,bpsinc-native/src_tools_gyp,AWhetter/gyp,yjhjstz/gyp,springmeyer/gyp,saghul/gyn,cchamberlain/gyp,enkripsi/gyp,mapbox/gyp,sanyaade-teachings/gyp,bnoordhuis/gyp,duanhjlt/gyp,ryfx/gyp,ryfx/gyp,turbulenz/gyp,carlTLR/gyp,duanhjlt/gyp,LazyCodingCat/gyp,ttyangf/pdfium_gyp,sdklite/gyp,openpeer/webrtc-gyp,msc-/gyp,ryfx/gyp,ryfx/gyp,springmeyer/gyp,dougbeal/gyp,carlTLR/gyp,erikge/watch_gyp,clar/gyp,ttyangf/pdfium_gyp,clar/gyp,ttyangf/gyp,ryfx/gyp,Jack-Q/GYP-copy
test/mac/gyptest-objc-gc.py
test/mac/gyptest-objc-gc.py
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that GC objc settings are handled correctly. """ import TestGyp import TestMac import sys if sys.platform == 'darwin': # set |match| to ignore build stderr output. test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match = lambda a, b: True) # Xcode 5.1 removed support for garbage-collection: # error: garbage collection is no longer supported if TestMac.Xcode.Version() < '0510': CHDIR = 'objc-gc' test.run_gyp('test.gyp', chdir=CHDIR) build_error_code = { 'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`) 'make': 2, 'ninja': 1, }[test.format] test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code) test.build( 'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code) test.build('test.gyp', 'gc_req_exe', chdir=CHDIR) test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n") test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR) test.run_built_executable( 'gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n") test.build('test.gyp', 'gc_exe', chdir=CHDIR) test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n") test.build('test.gyp', 'gc_off_exe', chdir=CHDIR) test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n") test.pass_test()
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that GC objc settings are handled correctly. """ import TestGyp import sys if sys.platform == 'darwin': # set |match| to ignore build stderr output. test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match = lambda a, b: True) CHDIR = 'objc-gc' test.run_gyp('test.gyp', chdir=CHDIR) build_error_code = { 'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`) 'make': 2, 'ninja': 1, }[test.format] test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code) test.build( 'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code) test.build('test.gyp', 'gc_req_exe', chdir=CHDIR) test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n") test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR) test.run_built_executable('gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n") test.build('test.gyp', 'gc_exe', chdir=CHDIR) test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n") test.build('test.gyp', 'gc_off_exe', chdir=CHDIR) test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n") test.pass_test()
bsd-3-clause
Python
da557b0b26d144095988a8809a97b83791077f20
fix number
ErickMurillo/plataforma_fadcanic,CARocha/plataforma_fadcanic,ErickMurillo/plataforma_fadcanic,CARocha/plataforma_fadcanic,CARocha/plataforma_fadcanic,ErickMurillo/plataforma_fadcanic
biblioteca/views.py
biblioteca/views.py
from django.shortcuts import render from .models import Temas, Biblioteca from django.shortcuts import get_object_or_404 from django.db.models import Q # Create your views here. def index(request,template='biblioteca/index.html',slug=None): temas = Temas.objects.all() ultimas_guias = Biblioteca.objects.filter(tipo_documento=1).order_by('-fecha')[:12] return render(request, template, locals()) def buscar_guia(request, template='biblioteca/lista_guias.html'): buscar_palabra = request.GET.get('q') resultado = Biblioteca.objects.filter(tipo_documento=1).filter(Q(titulo__icontains=buscar_palabra) | Q(descripcion__icontains=buscar_palabra)) return render(request, template, locals()) def buscar_tema(request, template='biblioteca/lista_guias.html', id=None): temas = Temas.objects.all() buscar_palabra = get_object_or_404(Temas,id=id) resultado = Biblioteca.objects.filter(tema=buscar_palabra) return render(request, template, locals()) def detalle_guia(request,template='biblioteca/detalle.html',slug=None): temas = Temas.objects.all() la_guia = get_object_or_404(Biblioteca, slug=slug) return render(request, template, locals())
from django.shortcuts import render from .models import Temas, Biblioteca from django.shortcuts import get_object_or_404 from django.db.models import Q # Create your views here. def index(request,template='biblioteca/index.html',slug=None): temas = Temas.objects.all() ultimas_guias = Biblioteca.objects.filter(tipo_documento=1).order_by('-fecha')[:6] return render(request, template, locals()) def buscar_guia(request, template='biblioteca/lista_guias.html'): buscar_palabra = request.GET.get('q') resultado = Biblioteca.objects.filter(tipo_documento=1).filter(Q(titulo__icontains=buscar_palabra) | Q(descripcion__icontains=buscar_palabra)) return render(request, template, locals()) def buscar_tema(request, template='biblioteca/lista_guias.html', id=None): temas = Temas.objects.all() buscar_palabra = get_object_or_404(Temas,id=id) resultado = Biblioteca.objects.filter(tema=buscar_palabra) return render(request, template, locals()) def detalle_guia(request,template='biblioteca/detalle.html',slug=None): temas = Temas.objects.all() la_guia = get_object_or_404(Biblioteca, slug=slug) return render(request, template, locals())
mit
Python
1412c1a15f4b8b09beb4b7eb4b3245eaeb343a14
Bump sleep time for Github API reader
ndm25/notifyable
src/api_readers/github_daemon.py
src/api_readers/github_daemon.py
from api_reader_daemon import APIReaderDaemon import datetime import time from models import GithubRepo from models import GithubRepoEvent from github import Github class GithubReaderDaemon(APIReaderDaemon): def __init__(self, **kwargs): # neh. don't need it. pass def start(self): while True: a_minute_ago = datetime.datetime.now() - datetime.timedelta(seconds = 60) repos_to_read = self.session.query(GithubRepo).all() for repo in repos_to_read: try: gh = Github() e_repo = gh.get_repo(repo.gh_username + '/' + repo.gh_repo) events = e_repo.get_events() if events[0].created_at > a_minute_ago and events[0].type == 'PushEvent': author = events[0].actor commit = events[0].payload['commits'][0]['message'] new_event = GithubRepoEvent(repo.id, author.name, author.avatar_url, commit) self.session.add(new_event) except: continue self.session.commit() time.sleep(120) def stop(self): # or whatever pass if __name__ == '__main__': GithubReaderDaemon().start()
from api_reader_daemon import APIReaderDaemon import datetime import time from models import GithubRepo from models import GithubRepoEvent from github import Github class GithubReaderDaemon(APIReaderDaemon): def __init__(self, **kwargs): # neh. don't need it. pass def start(self): while True: a_minute_ago = datetime.datetime.now() - datetime.timedelta(seconds = 60) repos_to_read = self.session.query(GithubRepo).all() for repo in repos_to_read: try: gh = Github() e_repo = gh.get_repo(repo.gh_username + '/' + repo.gh_repo) events = e_repo.get_events() if events[0].created_at > a_minute_ago and events[0].type == 'PushEvent': author = events[0].actor commit = events[0].payload['commits'][0]['message'] new_event = GithubRepoEvent(repo.id, author.name, author.avatar_url, commit) self.session.add(new_event) except: continue self.session.commit() time.sleep(60) def stop(self): # or whatever pass if __name__ == '__main__': GithubReaderDaemon().start()
mit
Python
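One detail worth flagging in the commit above: the sleep grows to 120 seconds while the event filter still looks back only 60 seconds (`a_minute_ago`), so pushes landing in the uncovered half of each cycle are silently missed. A minimal sketch of tying the lookback window to the poll interval; `POLL_SECONDS`, `fetch_events`, and `handle_event` are illustrative names, not part of the repo:

```python
import datetime
import time

POLL_SECONDS = 120  # illustrative constant; the daemon hard-codes 120


def poll_forever(fetch_events, handle_event):
    # Derive the lookback from the sleep so the whole gap between
    # polls is covered and no events fall through.
    while True:
        cutoff = datetime.datetime.now() - datetime.timedelta(seconds=POLL_SECONDS)
        for event in fetch_events():
            if event.created_at > cutoff:
                handle_event(event)
        time.sleep(POLL_SECONDS)
```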
d01430e40d923fdced0d753822a1f62fe69a916e
add analytics folder to path
datactive/bigbang,datactive/bigbang,datactive/bigbang
bigbang/__init__.py
bigbang/__init__.py
from . import analysis
mit
Python
17147f02abdb50f6df6398c8c3c750d858c1c758
fix docs
n0ano/gantt,n0ano/gantt
doc/ext/nova_autodoc.py
doc/ext/nova_autodoc.py
import gettext import os gettext.install('nova') from nova import utils def setup(app): rootdir = os.path.abspath(app.srcdir + '/..') print "**Autodocumenting from %s" % rootdir os.chdir(rootdir) rv = utils.execute('./generate_autodoc_index.sh') print rv[0]
import gettext import os gettext.install('nova') from nova import utils def setup(app): rootdir = os.path.abspath(app.srcdir + '/..') print "**Autodocumenting from %s" % rootdir rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir) print rv[0]
apache-2.0
Python
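The fix above replaces a shell-style `cd %s && ...` argument with an in-process `os.chdir()` before the call. Assuming nova's `utils.execute` of that era ran its command without a shell, `cd` (a shell built-in) and `&&` (a shell operator) could never have worked. A standalone `subprocess` sketch of the distinction; the `/tmp` path and `ls` stand-in are illustrative:

```python
import os
import subprocess

# Without a shell there is no "cd" executable and no "&&" operator,
# so a command like the original would fail to launch:
# subprocess.run(["cd", "/tmp", "&&", "./generate_autodoc_index.sh"])

# Changing directory in-process first, as the fixed setup() does, works:
os.chdir("/tmp")
subprocess.run(["ls"])  # stand-in for ./generate_autodoc_index.sh
```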
dabbf0b5796a4d16bdd588e9d8c541c1f3c8559b
Support for building multiple images at once
johscheuer/portainer,duedil-ltd/portainer,duedil-ltd/portainer,johscheuer/portainer
src/ddocker/app/build.py
src/ddocker/app/build.py
""" """ import logging import pesos.scheduler import os import threading import time from pesos.vendor.mesos import mesos_pb2 from ddocker.app import subcommand from ddocker.app.scheduler import Scheduler from Queue import Queue logger = logging.getLogger("ddocker.build") def args(parser): parser.add_argument("dockerfile", nargs="+") parser.add_argument("--tag", action="append", default=[], dest="tags", help="Multiple tags to apply to the image once built") parser.add_argument("--executor-uri", dest="executor", required=True, help="URI to the ddocker executor for mesos") # Isolation group = parser.add_argument_group("isolation") group.add_argument("--cpu-limit", default=1.0, help="CPU allocated to building the image") group.add_argument("--mem-limit", default=256, help="Memory allocated to building the image (mb)") # Arguments for the staging filesystem group = parser.add_argument_group("fs") group.add_argument("--staging-uri", default="/tmp/ddocker", help="The URI to use as a base directory for staging files.") group.add_argument("--aws-access-key-id", default=os.environ.get("AWS_ACCESS_KEY_ID"), help="Access key for using the S3 filesystem") group.add_argument("--aws-secret-access-key", default=os.environ.get("AWS_SECRET_ACCESS_KEY"), help="Secret key for using the S3 filesystem") @subcommand("build", callback=args) def main(args): logger.info("Building docker image from %s", args.dockerfile) task_queue = Queue() # Launch the mesos framework framework = mesos_pb2.FrameworkInfo() framework.user = "" # Let mesos fill this in framework.name = "ddocker" if args.framework_id: framework.id.value = args.framework_id # Kick off the scheduler driver scheduler = Scheduler( task_queue, args.executor, args.cpu_limit, args.mem_limit, args ) driver = pesos.scheduler.MesosSchedulerDriver( scheduler, framework, args.mesos_master ) # Put the task onto the queue for dockerfile in args.dockerfile: task_queue.put((dockerfile, args.tags)) thread = threading.Thread(target=driver.run) thread.setDaemon(True) thread.start() # Wait here until the tasks are done while thread.isAlive(): time.sleep(0.5)
""" """ import logging import pesos.scheduler import os import threading import time from pesos.vendor.mesos import mesos_pb2 from ddocker.app import subcommand from ddocker.app.scheduler import Scheduler from Queue import Queue logger = logging.getLogger("ddocker.build") def args(parser): parser.add_argument("dockerfile") parser.add_argument("--tag", action="append", default=[], dest="tags", help="Multiple tags to apply to the image once built") parser.add_argument("--executor-uri", dest="executor", required=True, help="URI to the ddocker executor for mesos") # Isolation group = parser.add_argument_group("isolation") group.add_argument("--cpu-limit", default=1.0, help="CPU allocated to building the image") group.add_argument("--mem-limit", default=256, help="Memory allocated to building the image (mb)") # Arguments for the staging filesystem group = parser.add_argument_group("fs") group.add_argument("--staging-uri", default="/tmp/ddocker", help="The URI to use as a base directory for staging files.") group.add_argument("--aws-access-key-id", default=os.environ.get("AWS_ACCESS_KEY_ID"), help="Access key for using the S3 filesystem") group.add_argument("--aws-secret-access-key", default=os.environ.get("AWS_SECRET_ACCESS_KEY"), help="Secret key for using the S3 filesystem") @subcommand("build", callback=args) def main(args): logger.info("Building docker image from %s", args.dockerfile) task_queue = Queue() # Launch the mesos framework framework = mesos_pb2.FrameworkInfo() framework.user = "" # Let mesos fill this in framework.name = "ddocker" if args.framework_id: framework.id.value = args.framework_id # Kick off the scheduler driver scheduler = Scheduler( task_queue, args.executor, args.cpu_limit, args.mem_limit, args ) driver = pesos.scheduler.MesosSchedulerDriver( scheduler, framework, args.mesos_master ) # Put the task onto the queue task_queue.put((args.dockerfile, args.tags)) thread = threading.Thread(target=driver.run) thread.setDaemon(True) thread.start() # Wait here until the tasks are done while thread.isAlive(): time.sleep(0.5)
mit
Python
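The heart of the change above is two-fold: the positional argument gains `nargs="+"`, and the single `task_queue.put` becomes a loop over `args.dockerfile`. A self-contained sketch of the argparse behavior; the file names are invented for the demo:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("dockerfile", nargs="+")  # one or more paths, collected as a list

args = parser.parse_args(["Dockerfile.api", "Dockerfile.worker"])
assert args.dockerfile == ["Dockerfile.api", "Dockerfile.worker"]

# The build command then enqueues one task per file:
for dockerfile in args.dockerfile:
    print("would queue", dockerfile)
```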
4fe36d96d3810b39fcd15dee87318763d0d277a9
remove time
adrn/gala,adrn/gala,adrn/gary,adrn/gala,adrn/gary,adrn/gary
streamteam/io/nbody6.py
streamteam/io/nbody6.py
# coding: utf-8

""" Class for reading data from NBODY6 simulations """

from __future__ import division, print_function

__author__ = "adrn <[email protected]>"

# Standard library
import os, sys
import logging
import re

# Third-party
import numpy as np
import astropy.units as u
from astropy.constants import G
from astropy.table import Table

# Project
from .core import NBodyReader

# Create logger
logger = logging.getLogger(__name__)

__all__ = ["NBODY6Reader"]

class NBODY6Reader(NBodyReader):

    def _read_units(self):
        """ """
        units = dict(length=u.pc,
                     speed=u.km/u.s,
                     dimensionless=u.dimensionless_unscaled)
        return units

    def read_snapshot(self, filename, units=None):
        """ Given a filename, read and return the data. By default,
            returns data in simulation units, but this can be changed with
            the `units` kwarg.

            Parameters
            ----------
            filename : str
                The name of the snapshot file to read.
            units : dict (optional)
                A unit system to transform the data to. If None, will return
                the data in simulation units.
        """

        # read the first line to get the number of particles and timestep
        fullpath = os.path.join(self.path, filename)

        # column names for SNAP file, in simulation units
        colnames = "id x y z vx vy vz".split()
        coltypes = "dimensionless length length length speed speed speed".split()
        colunits = [self.sim_units[x] for x in coltypes]

        data = np.genfromtxt(fullpath, skiprows=1, names=colnames)

        if units is not None:
            new_colunits = []
            for colname,colunit in zip(colnames,colunits):
                newdata = (data[colname]*colunit).decompose(units)
                data[colname] = newdata.value
                new_colunits.append(newdata.unit)

            colunits = new_colunits

        tbl = Table(data)
        for colname,colunit in zip(colnames,colunits):
            tbl[colname].unit = colunit

        return tbl
# coding: utf-8

""" Class for reading data from NBODY6 simulations """

from __future__ import division, print_function

__author__ = "adrn <[email protected]>"

# Standard library
import os, sys
import logging
import re

# Third-party
import numpy as np
import astropy.units as u
from astropy.constants import G
from astropy.table import Table

# Project
from .core import NBodyReader

# Create logger
logger = logging.getLogger(__name__)

__all__ = ["NBODY6Reader"]

class NBODY6Reader(NBodyReader):

    def _read_units(self):
        """ """
        units = dict(length=u.pc,
                     speed=u.km/u.s,
                     dimensionless=u.dimensionless_unscaled)
        return units

    def read_snapshot(self, filename, units=None):
        """ Given a filename, read and return the data. By default,
            returns data in simulation units, but this can be changed with
            the `units` kwarg.

            Parameters
            ----------
            filename : str
                The name of the snapshot file to read.
            units : dict (optional)
                A unit system to transform the data to. If None, will return
                the data in simulation units.
        """

        # read the first line to get the number of particles and timestep
        fullpath = os.path.join(self.path, filename)

        # column names for SNAP file, in simulation units
        colnames = "id x y z vx vy vz".split()
        coltypes = "dimensionless length length length speed speed speed".split()
        colunits = [self.sim_units[x] for x in coltypes]

        data = np.genfromtxt(fullpath, skiprows=1, names=colnames)

        if units is not None:
            new_colunits = []
            for colname,colunit in zip(colnames,colunits):
                newdata = (data[colname]*colunit).decompose(units)
                data[colname] = newdata.value
                new_colunits.append(newdata.unit)

            time = time.decompose(units)
            colunits = new_colunits

        tbl = Table(data, meta=dict(time=time.value))
        for colname,colunit in zip(colnames,colunits):
            tbl[colname].unit = colunit

        return tbl
mit
Python
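The reader above converts each column with `(data[colname] * colunit).decompose(units)`. For readers unfamiliar with astropy, a small sketch of what `decompose` with explicit bases does; the parsec-to-kilometre figure is standard physics, not taken from the record:

```python
import astropy.units as u

x = 2.0 * u.pc
# Re-express the quantity in terms of the requested base units,
# as read_snapshot does column by column.
y = x.decompose(bases=[u.km])
print(y.value, y.unit)  # ~6.17e13 km
```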
73fbfd435c849c0690121b0a3fc8545057247c8a
Fix command options issues
int32bit/mistral-actions,int32bit/mistral-actions
mistral_actions/client/shell.py
mistral_actions/client/shell.py
import sys

from mistral_actions.client import actions as actions_cli
import mistral_actions.utils as utils


def do_clear(args):
    """Unregister all actions from Mistral."""
    actions_cli.unregister_all()
    print("All actions are removed from Mistral successfully.")


@utils.arg(
    '--override',
    dest='override',
    action="store_true",
    default=False,
    help="Set true to override all actions that exist in Mistral.")
def do_register(args):
    """Register all actions to Mistral."""
    override = args.override
    try:
        sys.argv.remove("--override")
    except:
        pass
    registered_actions = actions_cli.get_all_registered()
    discovered_actions = actions_cli.discover()
    registered_action_names = [a['name'] for a in registered_actions]
    discovered_action_names = [a['name'] for a in discovered_actions]
    intersection = set(registered_action_names) & set(discovered_action_names)
    if override:
        for name in intersection:
            actions_cli.unregister(name)
    else:
        discovered_actions = filter(
            lambda a: a['name'] not in registered_action_names,
            discovered_actions)
    if len(discovered_actions):
        try:
            actions_cli.register_all(discovered_actions)
            print("Follow actions have been registered: ")
            for action in discovered_actions:
                print(action['name'])
        except Exception as ex:
            print("Fail to register actions: %s" % ex)
    else:
        print("No action need to register.")


def do_discover(args):
    """Discover all actions from this project."""
    discovered_actions = actions_cli.discover()
    fileds = ['name', 'description', 'input_str']
    print("Follow actions discovered: ")
    utils.print_list(discovered_actions, fileds, sortby_index=0)


@utils.arg('name', metavar='<name>', help='Name of action.')
def do_unregister(args):
    """Unregister an action from Mistral."""
    name = args.name
    sys.argv.remove(name)
    actions_cli.unregister(name)


def do_markdown_dump(args):
    """Dump all discovered actions to stdout as a markdown table."""
    sorted_actions = sorted(actions_cli.discover(), key=lambda a: a['name'])
    fileds = ['name', 'description', 'input_str']
    utils.dump_as_markdown_table(sorted_actions, fileds)


def do_action_list(args):
    """List all actions have been registered in Mistral."""
    actions = actions_cli.get_all_registered()
    fileds = ['name', 'description', 'input_str']
    utils.print_list(actions, fileds, sortby_index=0)
import sys

from mistral_actions.client import actions as actions_cli
import mistral_actions.utils as utils


def do_clear(args):
    """Unregister all actions from Mistral."""
    actions_cli.unregister_all()
    print("All actions are removed from Mistral successfully.")


@utils.arg(
    '--override',
    dest='override',
    action="store_true",
    default=False,
    help="Set true to override all actions that exist in Mistral.")
def do_register(args):
    """Register all actions to Mistral."""
    registered_actions = actions_cli.get_all_registered()
    discovered_actions = actions_cli.discover()
    registered_action_names = [a['name'] for a in registered_actions]
    discovered_action_names = [a['name'] for a in discovered_actions]
    intersection = set(registered_action_names) & set(discovered_action_names)
    if args.override:
        for name in intersection:
            actions_cli.unregister(name)
    else:
        discovered_actions = filter(
            lambda a: a['name'] not in registered_action_names,
            discovered_actions)
    actions_cli.register_all(discovered_actions)


def do_discover(args):
    """Discover all actions from this project."""
    discovered_actions = actions_cli.discover()
    fileds = ['name', 'description', 'input_str']
    print("Follow actions discovered: ")
    utils.print_list(discovered_actions, fileds, sortby_index=0)


@utils.arg('name', metavar='<name>', help='Name of action.')
def do_unregister(args):
    """Unregister an action from Mistral."""
    name = args.name
    sys.argv.remove(name)
    actions_cli.unregister(name)


def do_md_dump(args):
    """Dump all discovered actions to stdout."""
    sorted_actions = sorted(actions_cli.discover(), key=lambda a: a['name'])
    fileds = ['name', 'description', 'input_str']
    utils.dump_as_markdown_table(sorted_actions, fileds)


def do_action_list(args):
    """List all actions has been registered in Mistral."""
    actions = actions_cli.get_all_registered()
    fileds = ['name', 'description', 'input_str']
    utils.print_list(actions, fileds, sortby_index=0)
mit
Python
f846f58891e1389941f008e3f53c95ffd1b6558d
Update to add email functionality based on threshold checking.
pault2k14/dbtracker
dbtracker/__init__.py
dbtracker/__init__.py
import logging from dbtracker.cli import Cli import argparse def main(argv=None): parser = argparse.ArgumentParser( description="Queries MySQL and PostgreSQL for stats") parser.add_argument( "-S", "--save", action="store_true", help="generate and save database stats") parser.add_argument( "-g", "--growth", help="display a graph of the growth. Arguments in the form of run number ranges e.g. 3-4 or 4", type=str) parser.add_argument( "-H", "--history", help="List the datetime stamps of the last n saved runs", type=int) parser.add_argument( "-c", "--count", action="store_true", help="Gets database row counts but does not save") parser.add_argument( "-d", "--dates", type=str, help="compares two datetime stamps e.g. 2015-04-24 16:18:57.166095-07:00 - 2015-04-22 17:00:50.746688-07:00") parser.add_argument( "-s", "--silent", action="store_true", help="turns logging levels down to ERROR only") parser.add_argument( "--min", type=int, help="Minimum threshold of a database row change, before a notification is sent.") parser.add_argument( "--max", type=int, help="Maximum threshold of a database row change, before a notification is sent.") parser.add_argument( "-C", "--config", type=str, help="use a custom configuration file path") args = parser.parse_args(argv) if args.silent: logging.basicConfig(level=logging.ERROR) else: logging.basicConfig(level=logging.INFO) cli = Cli(args) cli.main()
import logging from dbtracker.cli import Cli import argparse def main(argv=None): parser = argparse.ArgumentParser( description="Queries MySQL and PostgreSQL for stats") parser.add_argument( "-S", "--save", action="store_true", help="generate and save database stats") parser.add_argument( "-g", "--growth", help="display a graph of the growth. Arguments in the form of run number ranges e.g. 3-4 or 4", type=str) parser.add_argument( "-H", "--history", help="List the datetime stamps of the last n saved runs", type=int) parser.add_argument( "-c", "--count", action="store_true", help="Gets database row counts but does not save") parser.add_argument( "-d", "--dates", type=str, help="compares two datetime stamps e.g. 2015-04-24 16:18:57.166095-07:00 - 2015-04-22 17:00:50.746688-07:00") parser.add_argument( "-s", "--silent", action="store_true", help="turns logging levels down to ERROR only") parser.add_argument( "-C", "--config", type=str, help="use a custom configuration file path") args = parser.parse_args(argv) if args.silent: logging.basicConfig(level=logging.ERROR) else: logging.basicConfig(level=logging.INFO) cli = Cli(args) cli.main()
mit
Python
838d8c8952f63464dfafaaeba3b16b681317c15e
add plot
nschloe/matplotlib2tikz
tests/test_annotate.py
tests/test_annotate.py
import matplotlib.pyplot as plt import numpy as np def plot(): fig = plt.figure(1, figsize=(8, 5)) ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-4, 3)) t = np.arange(0.0, 5.0, 0.2) s = np.cos(2 * np.pi * t) ax.plot(t, s, color="blue") ax.annotate( "text", xy=(4.0, 1.0), xycoords="data", xytext=(4.5, 1.5), textcoords="data", arrowprops=dict(arrowstyle="->", ec="r"), ) ax.annotate( "arrowstyle", xy=(0, 1), xycoords="data", xytext=(-50, 30), textcoords="offset points", arrowprops=dict(arrowstyle="->"), ) ax.annotate( "no arrow", xy=(0, 1), xycoords="data", xytext=(50, -30), textcoords="offset pixels", ) return fig def test(): from .helpers import assert_equality assert_equality(plot, __file__[:-3] + "_reference.tex") if __name__ == "__main__": plot() plt.show()
import matplotlib.pyplot as plt import numpy as np def plot(): fig = plt.figure(1, figsize=(8, 5)) ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-4, 3)) t = np.arange(0.0, 5.0, 0.2) s = np.cos(2 * np.pi * t) ax.plot(t, s, color="blue") ax.annotate( "text", xy=(4.0, 1.0), xycoords="data", xytext=(4.5, 1.5), textcoords="data", arrowprops=dict(arrowstyle="->", ec="r"), ) ax.annotate( "arrowstyle", xy=(0, 1), xycoords="data", xytext=(-50, 30), textcoords="offset points", arrowprops=dict(arrowstyle="->"), ) ax.annotate( "no arrow", xy=(0, 1), xycoords="data", xytext=(50, -30), textcoords="offset pixels", ) return fig def test(): from .helpers import assert_equality assert_equality(plot, __file__[:-3] + "_reference.tex")
mit
Python
8e10a62052f252c21c3898f70fc10d23c7261af0
Update urls.py
TarunISCO/Dnet,TarunISCO/Dnet,TarunISCO/Dnet
submify/submify/urls.py
submify/submify/urls.py
"""submify URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples:: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.conf.urls.static import static from django.conf import settings urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^accounts/', include('allauth.urls')), url(r'^student/', include('student.urls')) ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
"""submify URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.conf.urls.static import static from django.conf import settings urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^accounts/', include('allauth.urls')), url(r'^student/', include('student.urls')) ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
mit
Python
cfb68d7e1146241b9783d82d09f7f813e658d4aa
fix doctests
KenKundert/quantiphy,KenKundert/quantiphy
tests/test_doctests.py
tests/test_doctests.py
# encoding: utf8

from quantiphy import Quantity
import pytest
import doctest
import glob
import sys


def test_README():
    if sys.version_info < (3, 6):
        # code used in doctests assumes python3.6
        return
    Quantity.reset_prefs()
    rv = doctest.testfile('../README.rst', optionflags=doctest.ELLIPSIS)
    assert rv.failed == 0
    assert rv.attempted == 29

def test_quantiphy():
    if sys.version_info < (3, 6):
        # code used in doctests assumes python3.6
        return
    Quantity.reset_prefs()
    rv = doctest.testfile('../quantiphy.py', optionflags=doctest.ELLIPSIS)
    assert rv.failed == 0
    assert rv.attempted == 100

# this target should be updated when the number of doctests changes
def test_manual():
    if sys.version_info < (3, 6):
        # code used in doctests assumes python3.6
        return
    Quantity.reset_prefs()
    expected_test_count = {
        '../doc/index.rst': 31,
        '../doc/user.rst': 368,
        '../doc/api.rst': 0,
        '../doc/examples.rst': 36,
        '../doc/accessories.rst': 12,
        '../doc/releases.rst': 0,
    }
    found = glob.glob('../doc/*.rst')
    for f in found:
        assert f in expected_test_count, f
    for path, tests in expected_test_count.items():
        rv = doctest.testfile(path, optionflags=doctest.ELLIPSIS)
        assert rv.failed == 0, path
        assert rv.attempted == tests, path

if __name__ == '__main__':
    # As a debugging aid allow the tests to be run on their own, outside pytest.
    # This makes it easier to see and interpret the textual output.

    defined = dict(globals())
    for k, v in defined.items():
        if callable(v) and k.startswith('test_'):
            print()
            print('Calling:', k)
            print((len(k)+9)*'=')
            v()
# encoding: utf8

from quantiphy import Quantity
import pytest
import doctest
import glob
import sys


def test_README():
    if sys.version_info < (3, 6):
        # code used in doctests assumes python3.6
        return
    Quantity.reset_prefs()
    rv = doctest.testfile('../README.rst', optionflags=doctest.ELLIPSIS)
    assert rv.failed == 0
    assert rv.attempted == 29

def test_quantiphy():
    if sys.version_info < (3, 6):
        # code used in doctests assumes python3.6
        return
    Quantity.reset_prefs()
    rv = doctest.testfile('../quantiphy.py', optionflags=doctest.ELLIPSIS)
    assert rv.failed == 0
    assert rv.attempted == 100

# this target should be updated when the number of doctests changes
def test_manual():
    if sys.version_info < (3, 6):
        # code used in doctests assumes python3.6
        return
    Quantity.reset_prefs()
    expected_test_count = {
        '../doc/index.rst': 29,
        '../doc/user.rst': 368,
        '../doc/api.rst': 0,
        '../doc/examples.rst': 36,
        '../doc/accessories.rst': 12,
        '../doc/releases.rst': 0,
    }
    found = glob.glob('../doc/*.rst')
    for f in found:
        assert f in expected_test_count, f
    for path, tests in expected_test_count.items():
        rv = doctest.testfile(path, optionflags=doctest.ELLIPSIS)
        assert rv.failed == 0, path
        assert rv.attempted == tests, path

if __name__ == '__main__':
    # As a debugging aid allow the tests to be run on their own, outside pytest.
    # This makes it easier to see and interpret the textual output.

    defined = dict(globals())
    for k, v in defined.items():
        if callable(v) and k.startswith('test_'):
            print()
            print('Calling:', k)
            print((len(k)+9)*'=')
            v()
mit
Python
72f7162b2a307297798dbeb866d54de5acfdeffb
correct input dimension comment
ksu-mechatronics-research/deep-visual-odometry
models/alexnet_14/alexNet_14.py
models/alexnet_14/alexNet_14.py
# The Model of DeepVO
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K #enable tensorflow functions

#AlexNet with batch normalization in Keras
#input image is 128x128

def create_model():
    """
    This model is designed to take in multiple inputs and give multiple outputs.
    Here is what the network was designed for:

    Inputs:
    two 128x128 RGB images stacked (RGBRGB)

    Outputs:
    Rotation between images in quaternion form
    Translation between two images
    """
    main_input = Convolution2D(96, 11, 11, border_mode='same', input_shape=(128, 128, 6), name='main_input')
    x = BatchNormalization()(main_input)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(11, 11), strides=(1, 1), border_mode='same')(x)

    x = Convolution2D(384, 3, 3, border_mode='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same')(x)

    x = Flatten()(x)
    x = Dense(4096, init='normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Dense(4096, init='normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Delta rotation in quaternion form
    quaternion_rotation = Dense(4, activation='tanh', name='quaternion_rotation')(x)
    quaternion_rotation = Lambda(normalize_quaternion)(quaternion_rotation)

    # Delta Translation output
    translation = Dense(3, activation='linear', name='translation')

    model = Model(input=main_input, output=[translation, quaternion_rotation])

    return model

def normalize_quaternion(x):
    "use tensorflow normalize function on this layer to ensure valid quaternion rotation"
    x = K.l2_normalize(x, dim=1)
    return x

def run_model(model, Xtr, Ytr, Xte, Yte, save_path=None):
    """Note: y should be [[translation], [quat rotation]]"""
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])

    history = model.fit(Xtr, Ytr, batch_size=8, nb_epoch=30, verbose=1).history
    score = model.evaluate(Xte, Yte, verbose=1)

    if (save_path != None):
        model.save(save_path)

    return score, history
# The Model of DeepVO
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K #enable tensorflow functions

#AlexNet with batch normalization in Keras
#input image is 224x224

def create_model():
    """
    This model is designed to take in multiple inputs and give multiple outputs.
    Here is what the network was designed for:

    Inputs:
    two 128x128 RGB images stacked (RGBRGB)

    Outputs:
    Rotation between images in quaternion form
    Translation between two images
    """
    main_input = Convolution2D(96, 11, 11, border_mode='same', input_shape=(128, 128, 6), name='main_input')
    x = BatchNormalization()(main_input)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(11, 11), strides=(1, 1), border_mode='same')(x)

    x = Convolution2D(384, 3, 3, border_mode='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same')(x)

    x = Flatten()(x)
    x = Dense(4096, init='normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Dense(4096, init='normal')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Delta rotation in quaternion form
    quaternion_rotation = Dense(4, activation='tanh', name='quaternion_rotation')(x)
    quaternion_rotation = Lambda(normalize_quaternion)(quaternion_rotation)

    # Delta Translation output
    translation = Dense(3, activation='linear', name='translation')

    model = Model(input=main_input, output=[translation, quaternion_rotation])

    return model

def normalize_quaternion(x):
    "use tensorflow normalize function on this layer to ensure valid quaternion rotation"
    x = K.l2_normalize(x, dim=1)
    return x

def run_model(model, Xtr, Ytr, Xte, Yte, save_path=None):
    """Note: y should be a 2d list of quaternion rotations and translations."""
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])

    history = model.fit(Xtr, Ytr, batch_size=8, nb_epoch=30, verbose=1).history
    score = model.evaluate(Xte, Yte, verbose=1)

    if (save_path != None):
        model.save(save_path)

    return score, history
mit
Python
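The `Lambda(normalize_quaternion)` layer above uses `K.l2_normalize` so the four-component rotation head always emits a unit quaternion. The same idea in plain NumPy, as an illustrative sanity check rather than code from the repo:

```python
import numpy as np

def normalize_quaternion(q):
    # Rescale each 4-vector to unit length so it encodes a valid rotation.
    q = np.asarray(q, dtype=float)
    return q / np.linalg.norm(q, axis=-1, keepdims=True)

q = normalize_quaternion([1.0, 2.0, 2.0, 4.0])
assert np.isclose(np.linalg.norm(q), 1.0)
```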
198d4944e961fd998d6e896b3e75ca2e815ffaa5
Add log to file function for vimapt package
howl-anderson/vimapt,howl-anderson/vimapt
src/vimapt/library/vimapt/__init__.py
src/vimapt/library/vimapt/__init__.py
import logging logging.basicConfig(filename='/var/log/vimapt.log', level=logging.INFO) logger = logging.getLogger(__name__)
mit
Python
a84dde598297495fe6f0f8b233b3a3761b0df7d4
Update test to check newer logic
pypa/pip,pfmoore/pip,pypa/pip,pradyunsg/pip,rouge8/pip,xavfernandez/pip,pradyunsg/pip,rouge8/pip,xavfernandez/pip,xavfernandez/pip,rouge8/pip,sbidoul/pip,sbidoul/pip,techtonik/pip,techtonik/pip,techtonik/pip,pfmoore/pip
tests/functional/test_warning.py
tests/functional/test_warning.py
import textwrap def test_environ(script, tmpdir): """$PYTHONWARNINGS was added in python2.7""" demo = tmpdir.join('warnings_demo.py') demo.write(textwrap.dedent(''' from logging import basicConfig from pip._internal.utils import deprecation deprecation.install_warning_logger() basicConfig() deprecation.deprecated("deprecated!", replacement=None, gone_in=None) ''')) result = script.run('python', demo, expect_stderr=True) expected = 'WARNING:pip._internal.deprecations:DEPRECATION: deprecated!\n' assert result.stderr == expected script.environ['PYTHONWARNINGS'] = 'ignore' result = script.run('python', demo) assert result.stderr == ''
def test_environ(script, tmpdir): """$PYTHONWARNINGS was added in python2.7""" demo = tmpdir.join('warnings_demo.py') demo.write(''' from pip._internal.utils import deprecation deprecation.install_warning_logger() from logging import basicConfig basicConfig() from warnings import warn warn("deprecated!", deprecation.PipDeprecationWarning) ''') result = script.run('python', demo, expect_stderr=True) assert result.stderr == \ 'ERROR:pip._internal.deprecations:DEPRECATION: deprecated!\n' script.environ['PYTHONWARNINGS'] = 'ignore' result = script.run('python', demo) assert result.stderr == ''
mit
Python
9c92cf39a69bbc6a078a8ffd7fcd8ea8f95b2678
fix tests
devops-s17-payments/payments,devops-s17-payments/payments,devops-s17-payments/payments
tests/test_payments.py
tests/test_payments.py
# Test cases can be run with either of the following:
# python -m unittest discover
# nosetests -v --rednose --nologcapture

import unittest
from app import payments
from db import app_db, models


class TestModels(unittest.TestCase):

    def setUp(self):
        payments.app.debug = True
        payments.app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://payments:payments@localhost:5432/test'
        app_db.drop_all()    # clean up the last tests
        app_db.create_all()  # make our sqlalchemy tables

        data = {'nickname' : 'my credit', 'user_id' : 1, 'payment_type' : 'credit',
                'details' : {'user_name' : 'Jimmy Jones', 'card_number' : '1111222233334444',
                'expires' : '01/2019', 'card_type' : 'Mastercard'}}

        payment = models.Payment()
        payment.deserialize(data)
        app_db.session.add(payment)
        app_db.session.commit()
        self.app = payments.app.test_client()

    def tearDown(self):
        app_db.session.remove()
        app_db.drop_all()

    def test_db_has_one_item(self):
        p1 = app_db.session.query(models.Payment).get(1)
        self.assertNotEqual(p1, None)
        p2 = app_db.session.query(models.Payment).get(2)
        self.assertEqual(p2, None)

    def test_credit_has_no_paypal_fields(self):
        payment = app_db.session.query(models.Payment).get(1)
        self.assertEqual(payment.nickname, 'my credit')
        detail = payment.details
        self.assertEqual(detail.is_linked, None)
        self.assertEqual(detail.user_email, None)
# Test cases can be run with either of the following: # python -m unittest discover # nosetests -v --rednose --nologcapture import unittest import db from app import payments from db import db, models class TestModels(unittest.TestCase): def setUp(self): payments.app.debug = True payments.app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://payments:payments@localhost:5432/test' db.drop_all() # clean up the last tests db.create_all() # make our sqlalchemy tables data = {'nickname' : 'my credit', 'user_id' : 1, 'payment_type' : 'credit', 'details' : {'user_name' : 'Jimmy Jones', 'card_number' : '1111222233334444', 'expires' : '01/2019', 'card_type' : 'Mastercard'}} payment = models.Payment() payment.deserialize(data) db.session.add(payment) db.session.commit() self.app = payments.app.test_client() def tearDown(self): db.session.remove() db.drop_all() def test_db_has_one_item(self): p1 = db.session.query(models.Payment).get(1) self.assertNotEqual(p1, None) p2 = db.session.query(models.Payment).get(2) self.assertEqual(p2, None) def test_credit_has_no_paypal_fields(self): payment = db.session.query(models.Payment).get(1) self.assertEqual(payment.nickname, 'my credit') detail = payment.details self.assertEqual(detail.is_linked, None) self.assertEqual(detail.user_email, None)
apache-2.0
Python
ee0f31857028a68116f2912054877f37bd64683a
fix vdsClient connections
oVirt/ovirt-hosted-engine-ha,oVirt/ovirt-hosted-engine-ha,oVirt/ovirt-hosted-engine-ha,oVirt/ovirt-hosted-engine-ha
ovirt_hosted_engine_ha/broker/submonitor_util.py
ovirt_hosted_engine_ha/broker/submonitor_util.py
# # ovirt-hosted-engine-ha -- ovirt hosted engine high availability # Copyright (C) 2013 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # import logging import socket import time from otopi import util from vdsm import vdscli from . import constants def run_vds_client_cmd(address, use_ssl, command): """ Run the passed in command name from the vdsClient library and either throw an exception with the error message or return the results. """ # FIXME pass context to allow for shared or persistent vdsm connection log = logging.getLogger('SubmonitorUtil') log.debug("Connecting to vdsClient at %s with ssl=%r", address, use_ssl) vdsClient = util.loadModule( path=constants.VDS_CLIENT_DIR, name='vdsClient' ) if vdsClient._glusterEnabled: serv = vdsClient.ge.GlusterService() else: serv = vdsClient.service() serv.use_ssl = use_ssl if hasattr(vdscli, 'cannonizeAddrPort'): server, server_port = vdscli.cannonizeAddrPort( address ).split(':', 1) serv.do_connect(server, server_port) else: host_port = vdscli.cannonizeHostPort(address) serv.do_connect(host_port) log.debug("Connected") method = getattr(serv.s, command) retry = 0 while retry < constants.VDS_CLIENT_MAX_RETRY: try: response = method() break except socket.error: log.debug("Error", exc_info=True) retry += 1 time.sleep(1) if retry >= constants.VDS_CLIENT_MAX_RETRY: raise Exception("VDSM initialization timeout") if response['status']['code'] != 0: raise Exception("Error {0} from {1}: {2}", response['status']['code'], command, response['status']['message']) return response
# # ovirt-hosted-engine-ha -- ovirt hosted engine high availability # Copyright (C) 2013 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # import logging import socket import time from otopi import util from vdsm import vdscli from . import constants def run_vds_client_cmd(address, use_ssl, command): """ Run the passed in command name from the vdsClient library and either throw an exception with the error message or return the results. """ # FIXME pass context to allow for shared or persistent vdsm connection log = logging.getLogger('SubmonitorUtil') log.debug("Connecting to vdsClient at %s with ssl=%r", address, use_ssl) vdsClient = util.loadModule( path=constants.VDS_CLIENT_DIR, name='vdsClient' ) if vdsClient._glusterEnabled: serv = vdsClient.ge.GlusterService() else: serv = vdsClient.service() serv.use_ssl = use_ssl if hasattr(vdscli, 'cannonizeAddrPort'): server, server_port = vdscli.cannonizeAddrPort( address ).split(':', 1) serv.do_connect(server, server_port) else: host_port = vdscli.cannonizeHostPort(address) serv.do_connect(host_port) serv.do_connect(server, server_port) log.debug("Connected") method = getattr(serv.s, command) retry = 0 while retry < constants.VDS_CLIENT_MAX_RETRY: try: response = method() break except socket.error: log.debug("Error", exc_info=True) retry += 1 time.sleep(1) if retry >= constants.VDS_CLIENT_MAX_RETRY: raise Exception("VDSM initialization timeout") if response['status']['code'] != 0: raise Exception("Error {0} from {1}: {2}", response['status']['code'], command, response['status']['message']) return response
lgpl-2.1
Python
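Distilled from the record above: the retry-on-socket-error loop around the RPC call, as a generic sketch. `max_retry` and `delay` stand in for `constants.VDS_CLIENT_MAX_RETRY` and the hard-coded one-second sleep; nothing here beyond the pattern is from the module:

```python
import socket
import time

def call_with_retry(method, max_retry=3, delay=1.0):
    # Retry transient socket failures before declaring a timeout,
    # mirroring the loop around method() in run_vds_client_cmd.
    for _ in range(max_retry):
        try:
            return method()
        except socket.error:
            time.sleep(delay)
    raise Exception("VDSM initialization timeout")
```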
727b42a1cdec461d715b845872c321326ce18554
Load aliases on module load
HubbeKing/Hubbot_Twisted
Modules/Alias.py
Modules/Alias.py
from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import GlobalVars


class Alias(ModuleInterface):
    triggers = ["alias"]
    help = 'alias <alias> <command> <params> - aliases <alias> to the specified command and parameters\n' \
           'you can specify where parameters given to the alias should be inserted with $1, $2, $n. ' \
           'you can use $1+, $2+ for all parameters after the first, second one. ' \
           'The whole parameter string is $0. $sender and $channel can also be used.'

    def onLoad(self):
        self.bot.moduleHandler.commandAliases = self.bot.moduleHandler.loadAliases()

    def onTrigger(self, message):
        if message.User.Name not in GlobalVars.admins:
            return IRCResponse(ResponseType.Say, "Only my admins may create new aliases!", message.ReplyTo)
        if len(message.ParameterList) <= 1:
            return IRCResponse(ResponseType.Say, "Alias what?", message.ReplyTo)

        triggerFound = False
        for (name, module) in self.bot.moduleHandler.modules.items():
            if message.ParameterList[0] in module.triggers:
                return IRCResponse(ResponseType.Say, "'{}' is already a command!".format(message.ParameterList[0]), message.ReplyTo)
            if message.ParameterList[1] in module.triggers:
                triggerFound = True
        if not triggerFound:
            return IRCResponse(ResponseType.Say, "'{}' is not a valid command!".format(message.ParameterList[1]), message.ReplyTo)

        if message.ParameterList[0] in self.bot.moduleHandler.commandAliases.keys():
            return IRCResponse(ResponseType.Say, "'{}' is already an alias!".format(message.ParameterList[0]), message.ReplyTo)

        newAlias = []
        for word in message.ParameterList[1:]:
            newAlias.append(word.lower())
        self.bot.moduleHandler.commandAliases[message.ParameterList[0]] = newAlias
        self.bot.moduleHandler.newAlias(message.ParameterList[0], newAlias)

        return IRCResponse(ResponseType.Say,
                           "Created a new alias '{}' for '{}'.".format(message.ParameterList[0],
                                                                       " ".join(message.ParameterList[1:])),
                           message.ReplyTo)
from ModuleInterface import ModuleInterface
from IRCResponse import IRCResponse, ResponseType
import GlobalVars


class Alias(ModuleInterface):
    triggers = ["alias"]
    help = 'alias <alias> <command> <params> - aliases <alias> to the specified command and parameters\n' \
           'you can specify where parameters given to the alias should be inserted with $1, $2, $n. ' \
           'you can use $1+, $2+ for all parameters after the first, second one. ' \
           'The whole parameter string is $0. $sender and $channel can also be used.'

    def onTrigger(self, message):
        if message.User.Name not in GlobalVars.admins:
            return IRCResponse(ResponseType.Say, "Only my admins may create new aliases!", message.ReplyTo)
        if len(message.ParameterList) <= 1:
            return IRCResponse(ResponseType.Say, "Alias what?", message.ReplyTo)

        triggerFound = False
        for (name, module) in self.bot.moduleHandler.modules.items():
            if message.ParameterList[0] in module.triggers:
                return IRCResponse(ResponseType.Say, "'{}' is already a command!".format(message.ParameterList[0]), message.ReplyTo)
            if message.ParameterList[1] in module.triggers:
                triggerFound = True
        if not triggerFound:
            return IRCResponse(ResponseType.Say, "'{}' is not a valid command!".format(message.ParameterList[1]), message.ReplyTo)

        if message.ParameterList[0] in self.bot.moduleHandler.commandAliases.keys():
            return IRCResponse(ResponseType.Say, "'{}' is already an alias!".format(message.ParameterList[0]), message.ReplyTo)

        newAlias = []
        for word in message.ParameterList[1:]:
            newAlias.append(word.lower())
        self.bot.moduleHandler.commandAliases[message.ParameterList[0]] = newAlias
        self.bot.moduleHandler.newAlias(message.ParameterList[0], newAlias)

        return IRCResponse(ResponseType.Say,
                           "Created a new alias '{}' for '{}'.".format(message.ParameterList[0],
                                                                       " ".join(message.ParameterList[1:])),
                           message.ReplyTo)
mit
Python
a69a346e2fd35e531c72b06a2c895d928340c110
Fix `includes_today` trait for `MembershipFactory`
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
tests/factories/property.py
tests/factories/property.py
from datetime import datetime, timedelta, timezone from functools import partial from itertools import chain import factory from pycroft.model.user import Membership, PropertyGroup from pycroft.helpers import interval from .base import BaseFactory from .user import UserFactory class MembershipFactory(BaseFactory): class Meta: model = Membership exclude = ('begins_at', 'ends_at') begins_at = datetime.now(timezone.utc) ends_at = None active_during = interval.closedopen(begins_at, ends_at) user = factory.SubFactory(UserFactory) # note: group is non-nullable! group = None class Params: includes_today = factory.Trait( active_during=interval.closedopen( datetime.now(timezone.utc) - timedelta(1), datetime.now(timezone.utc) + timedelta(1), ), ) def _maybe_append_seq(n, prefix): """Append a sequence value to a prefix if non-zero""" if not n: return prefix return "{} {}".format(prefix, n) class PropertyGroupFactory(BaseFactory): class Meta: model = PropertyGroup exclude = ('granted', 'denied') granted = frozenset() denied = frozenset() name = factory.Sequence(lambda n: "Property group %s" % n) permission_level = factory.LazyAttribute(lambda _: 0) @factory.lazy_attribute def property_grants(self): return dict(chain(((k, True) for k in self.granted), ((k, False) for k in self.denied))) class AdminPropertyGroupFactory(PropertyGroupFactory): name = factory.Sequence(partial(_maybe_append_seq, prefix="Admin-Gruppe")) granted = frozenset(( 'user_show', 'user_change', 'user_mac_change', 'infrastructure_show', 'infrastructure_change', 'facilities_show', 'facilities_change', 'groups_show', 'groups_change_membership', 'groups_change', )) permission_level = 10 class FinancePropertyGroupFactory(PropertyGroupFactory): name = factory.Sequence(partial(_maybe_append_seq, prefix="Finanzer-Gruppe")) granted = frozenset(('finance_show', 'finance_change')) permission_level = 80 class MemberPropertyGroupFactory(PropertyGroupFactory): name = factory.Sequence(partial(_maybe_append_seq, prefix="Mitglied-Gruppe")) granted = frozenset(( 'ldap', 'ldap_login_enabled', 'mail', 'member', 'membership_fee', 'network_access', 'userdb', 'userwww' ))
from datetime import datetime, timedelta, timezone from functools import partial from itertools import chain import factory from pycroft.model.user import Membership, PropertyGroup from pycroft.helpers import interval from .base import BaseFactory from .user import UserFactory class MembershipFactory(BaseFactory): class Meta: model = Membership exclude = ('begins_at', 'ends_at') begins_at = datetime.now(timezone.utc) ends_at = None active_during = interval.closedopen(begins_at, ends_at) user = factory.SubFactory(UserFactory) # note: group is non-nullable! group = None class Params: includes_today = factory.Trait( begins_at=datetime.now(timezone.utc) - timedelta(1), ends_at=datetime.now(timezone.utc) + timedelta(1), ) def _maybe_append_seq(n, prefix): """Append a sequence value to a prefix if non-zero""" if not n: return prefix return "{} {}".format(prefix, n) class PropertyGroupFactory(BaseFactory): class Meta: model = PropertyGroup exclude = ('granted', 'denied') granted = frozenset() denied = frozenset() name = factory.Sequence(lambda n: "Property group %s" % n) permission_level = factory.LazyAttribute(lambda _: 0) @factory.lazy_attribute def property_grants(self): return dict(chain(((k, True) for k in self.granted), ((k, False) for k in self.denied))) class AdminPropertyGroupFactory(PropertyGroupFactory): name = factory.Sequence(partial(_maybe_append_seq, prefix="Admin-Gruppe")) granted = frozenset(( 'user_show', 'user_change', 'user_mac_change', 'infrastructure_show', 'infrastructure_change', 'facilities_show', 'facilities_change', 'groups_show', 'groups_change_membership', 'groups_change', )) permission_level = 10 class FinancePropertyGroupFactory(PropertyGroupFactory): name = factory.Sequence(partial(_maybe_append_seq, prefix="Finanzer-Gruppe")) granted = frozenset(('finance_show', 'finance_change')) permission_level = 80 class MemberPropertyGroupFactory(PropertyGroupFactory): name = factory.Sequence(partial(_maybe_append_seq, prefix="Mitglied-Gruppe")) granted = frozenset(( 'ldap', 'ldap_login_enabled', 'mail', 'member', 'membership_fee', 'network_access', 'userdb', 'userwww' ))
apache-2.0
Python
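The bug fixed above is subtle: `begins_at` and `ends_at` sit in `Meta.exclude` and feed `active_during` only once, when the class body is evaluated, so a trait overriding them never reached the model; the fix makes the trait set `active_during` directly. A self-contained demonstration of how a factory_boy `Trait` only takes effect on declarations it actually overrides — the toy classes are invented for the demo:

```python
import factory

class Membership:
    def __init__(self, active_during):
        self.active_during = active_during

class ToyMembershipFactory(factory.Factory):
    class Meta:
        model = Membership

    active_during = "some past interval"

    class Params:
        # The trait must override a real declaration to change the model.
        includes_today = factory.Trait(active_during="an interval spanning today")

assert ToyMembershipFactory().active_during == "some past interval"
assert ToyMembershipFactory(includes_today=True).active_during == "an interval spanning today"
```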
24439d318668897d8d1aff99df1606e80d45b875
add watchdog test
kontron/python-ipmi
tests/test_bmc.py
tests/test_bmc.py
#!/usr/bin/env python #-*- coding: utf-8 -*- from nose.tools import eq_, raises from pyipmi.bmc import * import pyipmi.msgs.bmc from pyipmi.msgs import encode_message from pyipmi.msgs import decode_message def test_watchdog_object(): m = pyipmi.msgs.bmc.GetWatchdogTimerRsp() decode_message(m, '\x00\x41\x42\x33\x44\x55\x66\x77\x88') w = Watchdog(m) eq_(w.timer_use, 1) eq_(w.is_running, 1) eq_(w.dont_log, 0) eq_(w.timeout_action, 2) eq_(w.pre_timeout_interrupt, 4) eq_(w.pre_timeout_interval, 0x33) eq_(w.timer_use_expiration_flags, 0x44) eq_(w.initial_countdown, 0x6655) eq_(w.present_countdown, 0x8877) def test_deviceid_object(): m = pyipmi.msgs.bmc.GetDeviceIdRsp() decode_message(m, '\x00\x12\x84\x05\x67\x51\x55\x12\x34\x56\x44\x55') d = DeviceId(m) eq_(d.device_id, 18) eq_(d.revision, 4) eq_(d.provides_sdrs, True) eq_(str(d.fw_revision), '5.67') eq_(str(d.ipmi_version), '1.5') eq_(d.manufacturer_id, 5649426) eq_(d.product_id, 21828) eq_(d.aux, None)
#!/usr/bin/env python #-*- coding: utf-8 -*- from nose.tools import eq_, raises from pyipmi.bmc import * import pyipmi.msgs.bmc from pyipmi.msgs import encode_message from pyipmi.msgs import decode_message def test_deviceid_object(): m = pyipmi.msgs.bmc.GetDeviceIdRsp() decode_message(m, '\x00\x12\x84\x05\x67\x51\x55\x12\x34\x56\x44\x55') d = DeviceId(m) eq_(d.device_id, 18) eq_(d.revision, 4) eq_(d.provides_sdrs, True) eq_(str(d.fw_revision), '5.67') eq_(str(d.ipmi_version), '1.5') eq_(d.manufacturer_id, 5649426) eq_(d.product_id, 21828) eq_(d.aux, None)
lgpl-2.1
Python
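The expected values in `test_watchdog_object` above follow from the IPMI Get Watchdog Timer response layout, assuming the usual field packing: timer use in bits 2:0 and the running flag in bit 6 of the first data byte; timeout action in bits 2:0 and pre-timeout interrupt in bits 6:4 of the second; countdowns as little-endian 16-bit words. A few assertions unpacking the test's own bytes:

```python
b1, b2 = 0x41, 0x42

assert b1 & 0x07 == 1           # timer_use
assert (b1 >> 6) & 0x01 == 1    # is_running
assert (b1 >> 7) & 0x01 == 0    # dont_log
assert b2 & 0x07 == 2           # timeout_action
assert (b2 >> 4) & 0x07 == 4    # pre_timeout_interrupt

# 16-bit countdowns are little-endian: b'\x55\x66' -> 0x6655
assert 0x55 | (0x66 << 8) == 0x6655
```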
e6519d121ab80467fafdab6a2183964d97ef60e8
Add test for set_meta command.
xouillet/sigal,xouillet/sigal,saimn/sigal,saimn/sigal,xouillet/sigal,jasuarez/sigal,jasuarez/sigal,t-animal/sigal,saimn/sigal,t-animal/sigal,t-animal/sigal,jasuarez/sigal
tests/test_cli.py
tests/test_cli.py
# -*- coding: utf-8 -*- import os from click.testing import CliRunner from sigal import init from sigal import serve from sigal import set_meta def test_init(tmpdir): config_file = str(tmpdir.join('sigal.conf.py')) runner = CliRunner() result = runner.invoke(init, [config_file]) assert result.exit_code == 0 assert result.output.startswith('Sample config file created:') assert os.path.isfile(config_file) result = runner.invoke(init, [config_file]) assert result.exit_code == 1 assert result.output == ("Found an existing config file, will abort to " "keep it safe.\n") def test_serve(tmpdir): config_file = str(tmpdir.join('sigal.conf.py')) runner = CliRunner() result = runner.invoke(init, [config_file]) assert result.exit_code == 0 result = runner.invoke(serve) assert result.exit_code == 2 result = runner.invoke(serve, ['-c', config_file]) assert result.exit_code == 1 def test_set_meta(tmpdir): testdir = tmpdir.mkdir("test") testfile = tmpdir.join("test.jpg") testfile.write("") runner = CliRunner() result = runner.invoke(set_meta, [str(testdir), "title", "testing"]) assert result.exit_code == 0 assert result.output.startswith("1 metadata key(s) written to") assert os.path.isfile(str(testdir.join("index.md"))) assert testdir.join("index.md").read() == "Title: testing\n" # Run again, should give file exists error result = runner.invoke(set_meta, [str(testdir), "title", "testing"]) assert result.exit_code == 2 result = runner.invoke(set_meta, [str(testdir.join("non-existant.jpg")), "title", "testing"]) assert result.exit_code == 1 result = runner.invoke(set_meta, [str(testfile), "title", "testing"]) assert result.exit_code == 0 assert result.output.startswith("1 metadata key(s) written to") assert os.path.isfile(str(tmpdir.join("test.md"))) assert tmpdir.join("test.md").read() == "Title: testing\n"
# -*- coding: utf-8 -*- import os from click.testing import CliRunner from sigal import init from sigal import serve def test_init(tmpdir): config_file = str(tmpdir.join('sigal.conf.py')) runner = CliRunner() result = runner.invoke(init, [config_file]) assert result.exit_code == 0 assert result.output.startswith('Sample config file created:') assert os.path.isfile(config_file) result = runner.invoke(init, [config_file]) assert result.exit_code == 1 assert result.output == ("Found an existing config file, will abort to " "keep it safe.\n") def test_serve(tmpdir): config_file = str(tmpdir.join('sigal.conf.py')) runner = CliRunner() result = runner.invoke(init, [config_file]) assert result.exit_code == 0 result = runner.invoke(serve) assert result.exit_code == 2 result = runner.invoke(serve, ['-c', config_file]) assert result.exit_code == 1
mit
Python
d2de2d44a46ff521ab8c1d8bbc57d4eeb8d5dc53
Fix an error
CMLL/taiga-back,obimod/taiga-back,gam-phon/taiga-back,frt-arch/taiga-back,crr0004/taiga-back,gam-phon/taiga-back,WALR/taiga-back,seanchen/taiga-back,coopsource/taiga-back,taigaio/taiga-back,EvgeneOskin/taiga-back,EvgeneOskin/taiga-back,taigaio/taiga-back,gauravjns/taiga-back,xdevelsistemas/taiga-back-community,gauravjns/taiga-back,coopsource/taiga-back,jeffdwyatt/taiga-back,coopsource/taiga-back,dayatz/taiga-back,Tigerwhit4/taiga-back,Zaneh-/bearded-tribble-back,joshisa/taiga-back,joshisa/taiga-back,WALR/taiga-back,gauravjns/taiga-back,CoolCloud/taiga-back,astronaut1712/taiga-back,xdevelsistemas/taiga-back-community,Tigerwhit4/taiga-back,rajiteh/taiga-back,WALR/taiga-back,EvgeneOskin/taiga-back,obimod/taiga-back,Rademade/taiga-back,jeffdwyatt/taiga-back,19kestier/taiga-back,rajiteh/taiga-back,CoolCloud/taiga-back,dycodedev/taiga-back,frt-arch/taiga-back,frt-arch/taiga-back,Rademade/taiga-back,CMLL/taiga-back,Zaneh-/bearded-tribble-back,astronaut1712/taiga-back,astronaut1712/taiga-back,Rademade/taiga-back,taigaio/taiga-back,astagi/taiga-back,CMLL/taiga-back,seanchen/taiga-back,dycodedev/taiga-back,xdevelsistemas/taiga-back-community,Tigerwhit4/taiga-back,Tigerwhit4/taiga-back,joshisa/taiga-back,crr0004/taiga-back,joshisa/taiga-back,jeffdwyatt/taiga-back,Zaneh-/bearded-tribble-back,Rademade/taiga-back,forging2012/taiga-back,jeffdwyatt/taiga-back,obimod/taiga-back,Rademade/taiga-back,bdang2012/taiga-back-casting,astronaut1712/taiga-back,bdang2012/taiga-back-casting,dayatz/taiga-back,seanchen/taiga-back,bdang2012/taiga-back-casting,astagi/taiga-back,CMLL/taiga-back,forging2012/taiga-back,rajiteh/taiga-back,CoolCloud/taiga-back,gauravjns/taiga-back,dycodedev/taiga-back,astagi/taiga-back,19kestier/taiga-back,bdang2012/taiga-back-casting,forging2012/taiga-back,astagi/taiga-back,EvgeneOskin/taiga-back,dycodedev/taiga-back,gam-phon/taiga-back,forging2012/taiga-back,obimod/taiga-back,CoolCloud/taiga-back,coopsource/taiga-back,gam-phon/taiga-back,rajiteh/taiga-back,dayatz/taiga-back,WALR/taiga-back,seanchen/taiga-back,crr0004/taiga-back,crr0004/taiga-back,19kestier/taiga-back
taiga/users/services.py
taiga/users/services.py
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
This module contains the domain logic for the users application.
"""

from django.db.models.loading import get_model
from django.db.models import Q

from easy_thumbnails.files import get_thumbnailer

from taiga.base import exceptions as exc
from taiga.base.utils.urls import get_absolute_url

from .gravatar import get_gravatar_url


def get_and_validate_user(*, username:str, password:str) -> bool:
    """
    Check if a user with the given username/email exists and the specified
    password matches the existing user password.

    If the user is valid, the user is returned; otherwise a corresponding
    exception is raised.
    """

    user_model = get_model("users", "User")
    qs = user_model.objects.filter(Q(username=username) | Q(email=username))
    if len(qs) == 0:
        raise exc.WrongArguments("Username or password does not match user.")

    user = qs[0]
    if not user.check_password(password):
        raise exc.WrongArguments("Username or password does not match user.")

    return user


def get_photo_url(photo):
    """Get a photo absolute url and the photo automatically cropped."""
    url = get_thumbnailer(photo)['avatar'].url
    return get_absolute_url(url)


def get_photo_or_gravatar_url(user):
    """Get the user's photo/gravatar url."""
    if user:
        return get_photo_url(user.photo) if user.photo else get_gravatar_url(user.email)
    return ""
# Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
This model contains a domain logic for users application.
"""

from django.db.models.loading import get_model
from django.db.models import Q

from easy_thumbnails.files import get_thumbnailer

from taiga.base import exceptions as exc
from taiga.base.utils.urls import get_absolute_url

from .gravatar import get_gravatar_url


def get_and_validate_user(*, username:str, password:str) -> bool:
    """
    Check if user with username/email exists and specified
    password matchs well with existing user password.

    if user is valid, user is returned else, corresponding
    exception is raised.
    """

    user_model = get_model("users", "User")
    qs = user_model.objects.filter(Q(username=username) | Q(email=username))
    if len(qs) == 0:
        raise exc.WrongArguments("Username or password does not matches user.")

    user = qs[0]
    if not user.check_password(password):
        raise exc.WrongArguments("Username or password does not matches user.")

    return user


def get_photo_url(photo):
    """Get a photo absolute url and the photo automatically cropped."""
    url = get_thumbnailer(photo)['avatar'].url
    return get_absolute_url(url)


def get_photo_or_gravatar_url(user):
    """Get the user's photo/gravatar url."""
    return get_photo_url(user.photo) if user.photo else get_gravatar_url(user.email)
agpl-3.0
Python
31a2439c1137068d8532c5f85cc1c8fb913d7ee8
Add reconnect to clamscan
awest1339/multiscanner,jmlong1027/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,mitre/multiscanner,mitre/multiscanner,MITRECND/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,mitre/multiscanner,jmlong1027/multiscanner
modules/Antivirus/ClamAVScan.py
modules/Antivirus/ClamAVScan.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
try:
    import pyclamd
except:
    print("pyclamd module not installed...")
    pyclamd = None

__author__ = 'Mike Long'
__license__ = "MPL 2.0"

DEFAULTCONF ={
    "ENABLED": True,
}

def check(conf=DEFAULTCONF):
    if not conf['ENABLED']:
        return False
    if not pyclamd:
        return False
    return True

def _connect_clam():
    try:
        clamScanner = pyclamd.ClamdUnixSocket()
        clamScanner.ping()
    except:
        clamScanner = pyclamd.ClamdNetworkSocket()
        try:
            clamScanner.ping()
        except:
            raise ValueError("Unable to connect to clamd")
    return clamScanner

def scan(filelist, conf=DEFAULTCONF):
    results = []
    clamScanner = _connect_clam()
    # Scan each file from filelist for virus
    for f in filelist:
        output = clamScanner.scan_file(f)
        if output is None:
            continue
        if list(output.values())[0][0] == 'ERROR':
            with open(f, 'rb') as file_handle:
                try:
                    output = clamScanner.scan_stream(file_handle.read())
                except pyclamd.BufferTooLongError:
                    continue
                except Exception as e:
                    print(e)
                    clamScanner = _connect_clam()
                    output = clamScanner.scan_stream(file_handle.read())
        if output is None:
            continue
        if list(output.values())[0][0] == 'FOUND':
            results.append((f, list(output.values())[0][1]))
        elif list(output.values())[0][0] == 'ERROR':
            print('ClamAV: ERROR:', list(output.values())[0][1])

    # Set metadata tags
    metadata = {
        'Name': "ClamAV",
        'Type': "Antivirus",
        'Version': clamScanner.version()
    }
    return (results, metadata)
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
try:
    import pyclamd
except:
    print("pyclamd module not installed...")
    pyclamd = None

__author__ = 'Mike Long'
__license__ = "MPL 2.0"

DEFAULTCONF ={
    "ENABLED": True,
}

def check(conf=DEFAULTCONF):
    if not conf['ENABLED']:
        return False
    if not pyclamd:
        return False
    return True

def scan(filelist, conf=DEFAULTCONF):
    results = []
    try:
        clamScanner = pyclamd.ClamdUnixSocket()
        clamScanner.ping()
    except:
        clamScanner = pyclamd.ClamdNetworkSocket()
        try:
            clamScanner.ping()
        except:
            raise ValueError("Unable to connect to clamd")

    # Scan each file from filelist for virus
    for f in filelist:
        output = clamScanner.scan_file(f)
        if output is None:
            continue
        if list(output.values())[0][0] == 'ERROR':
            with open(f, 'rb') as file_handle:
                try:
                    output = clamScanner.scan_stream(file_handle.read())
                except pyclamd.BufferTooLongError:
                    continue
        if output is None:
            continue
        if list(output.values())[0][0] == 'FOUND':
            results.append((f, list(output.values())[0][1]))
        elif list(output.values())[0][0] == 'ERROR':
            print('ClamAV: ERROR:', list(output.values())[0][1])

    # Set metadata tags
    metadata = {
        'Name': "ClamAV",
        'Type': "Antivirus",
        'Version': clamScanner.version()
    }
    return (results, metadata)
mpl-2.0
Python
821e191e05269b9c1cc5f58b3d4cecf5bd20e896
Correct Range sample
wmarshall484/streamsx.topology,ibmkendrick/streamsx.topology,wmarshall484/streamsx.topology,ibmkendrick/streamsx.topology,wmarshall484/streamsx.topology,IBMStreams/streamsx.topology,wmarshall484/streamsx.topology,ddebrunner/streamsx.topology,ddebrunner/streamsx.topology,wmarshall484/streamsx.topology,wmarshall484/streamsx.topology,IBMStreams/streamsx.topology,ddebrunner/streamsx.topology,IBMStreams/streamsx.topology,ibmkendrick/streamsx.topology,wmarshall484/streamsx.topology,wmarshall484/streamsx.topology,ibmkendrick/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,ibmkendrick/streamsx.topology,IBMStreams/streamsx.topology,ibmkendrick/streamsx.topology,ddebrunner/streamsx.topology,IBMStreams/streamsx.topology,ibmkendrick/streamsx.topology,ddebrunner/streamsx.topology,ddebrunner/streamsx.topology,ddebrunner/streamsx.topology
samples/python/com.ibm.streamsx.topology.pysamples/opt/python/streams/spl_sources.py
samples/python/com.ibm.streamsx.topology.pysamples/opt/python/streams/spl_sources.py
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015, 2016
from __future__ import absolute_import, division, print_function

# Simple inclusion of Python logic within an SPL application
# as a SPL "Function" operator. A "Function" operator has
# a single input port and single output port, a function
# is called for every input tuple, and results in
# no submission or a single tuple being submitted.

# Import the SPL decorators
from streamsx.spl import spl

# Any function in a Python module (.py file) within the
# toolkit's opt/python/streams directory is converted to a primitive operator
# with a single input and output port. The primitive operator
# is a C++ primitive that embeds the Python runtime.
#
# The function must be decorated with one of these
#
# @spl.pipe - Function is a pipe operator
# @spl.sink - Function is a sink operator
# @spl.ignore - Function is ignored

# Attributes of the input SPL tuple are passed
# as a Python Tuple and thus are available as positional arguments.
# (see examples below)

# Any returned value from a function must be a Tuple.
#
# If nothing is returned then no tuple is submitted
# by the operator for the input tuple.
#
# When a Tuple is returned, its values are assigned
# to the first N attributes of the output tuple,
# that is by position.
# The returned values in the Tuple must be assignable
# to the output tuple attribute types.
#
# If the output port has more than N attributes
# then any remaining attributes are set from the
# input tuple if there is a matching input attribute by
# name and type, otherwise the attribute remains at
# its default value.
#
# If the output port has fewer attributes than N
# then any additional values are ignored.

# Any function whose name starts with spl is not created
# as an operator, such functions are reserved as a mechanism
# to pass information back to the primitive operator generator.

# The description of the function becomes the description
# of the primitive operator model in its operator model.

#------------------------------------------------------------------
# Example functions
#------------------------------------------------------------------

# Defines the SPL namespace for any functions in this module
# Multiple modules can map to the same namespace
def splNamespace():
    return "com.ibm.streamsx.topology.pysamples.sources"

@spl.source()
class Range:
    def __init__(self, count):
        self.count = count

    def __iter__(self):
        # Use zip to convert the single returned value
        # into a tuple to allow it to be returned to SPL
        return zip(range(self.count))
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015, 2016
from __future__ import absolute_import, division, print_function

# Simple inclusion of Python logic within an SPL application
# as a SPL "Function" operator. A "Function" operator has
# a single input port and single output port, a function
# is called for every input tuple, and results in
# no submission or a single tuple being submitted.

# Import the SPL decorators
from streamsx.spl import spl

# Any function in a Python module (.py file) within the
# toolkit's opt/python/streams directory is converted to a primitive operator
# with a single input and output port. The primitive operator
# is a C++ primitive that embeds the Python runtime.
#
# The function must be decorated with one of these
#
# @spl.pipe - Function is a pipe operator
# @spl.sink - Function is a sink operator
# @spl.ignore - Function is ignored

# Attributes of the input SPL tuple are passed
# as a Python Tuple and thus are available as positional arguments.
# (see examples below)

# Any returned value from a function must be a Tuple.
#
# If nothing is returned then no tuple is submitted
# by the operator for the input tuple.
#
# When a Tuple is returned, its values are assigned
# to the first N attributes of the output tuple,
# that is by position.
# The returned values in the Tuple must be assignable
# to the output tuple attribute types.
#
# If the output port has more than N attributes
# then any remaining attributes are set from the
# input tuple if there is a matching input attribute by
# name and type, otherwise the attribute remains at
# its default value.
#
# If the output port has fewer attributes than N
# then any additional values are ignored.

# Any function whose name starts with spl is not created
# as an operator, such functions are reserved as a mechanism
# to pass information back to the primitive operator generator.

# The description of the function becomes the description
# of the primitive operator model in its operator model.

#------------------------------------------------------------------
# Example functions
#------------------------------------------------------------------

# Defines the SPL namespace for any functions in this module
# Multiple modules can map to the same namespace
def splNamespace():
    return "com.ibm.streamsx.topology.pysamples.sources"

@spl.source()
class Range:
    def __init__(self, count):
        self.count = count

    def __iter__(self):
        return map(tuple, iter(range(self.count)))
apache-2.0
Python
b23a887edd6b55f2386c45c9b93c04431bceba5e
remove all__vary_rounds setting (deprecated in Passlib 1.7)
GLolol/PyLink
coremods/login.py
coremods/login.py
""" login.py - Implement core login abstraction. """ from pylinkirc import conf, utils, world from pylinkirc.log import log try: from passlib.context import CryptContext except ImportError: CryptContext = None log.warning("Hashed passwords are disabled because passlib is not installed. Please install " "it (pip3 install passlib) and restart for this feature to work.") pwd_context = None if CryptContext: pwd_context = CryptContext(["sha512_crypt", "sha256_crypt"], sha256_crypt__default_rounds=180000, sha512_crypt__default_rounds=90000) def checkLogin(user, password): """Checks whether the given user and password is a valid combination.""" accounts = conf.conf['login'].get('accounts') if not accounts: # No accounts specified, return. return False # Lowercase account names to make them case insensitive. TODO: check for # duplicates. user = user.lower() accounts = {k.lower(): v for k, v in accounts.items()} try: account = accounts[user] except KeyError: # Invalid combination return False else: passhash = account.get('password') if not passhash: # No password given, return. XXX: we should allow plugins to override # this in the future. return False # Encryption in account passwords is optional (to not break backwards # compatibility). if account.get('encrypted', False): return verifyHash(password, passhash) else: return password == passhash def verifyHash(password, passhash): """Checks whether the password given matches the hash.""" if password: if not pwd_context: raise utils.NotAuthorizedError("Cannot log in to an account with a hashed password " "because passlib is not installed.") return pwd_context.verify(password, passhash) return False # No password given!
""" login.py - Implement core login abstraction. """ from pylinkirc import conf, utils, world from pylinkirc.log import log try: from passlib.context import CryptContext except ImportError: CryptContext = None log.warning("Hashed passwords are disabled because passlib is not installed. Please install " "it (pip3 install passlib) and restart for this feature to work.") pwd_context = None if CryptContext: pwd_context = CryptContext(["sha512_crypt", "sha256_crypt"], all__vary_rounds=0.1, sha256_crypt__default_rounds=180000, sha512_crypt__default_rounds=90000) def checkLogin(user, password): """Checks whether the given user and password is a valid combination.""" accounts = conf.conf['login'].get('accounts') if not accounts: # No accounts specified, return. return False # Lowercase account names to make them case insensitive. TODO: check for # duplicates. user = user.lower() accounts = {k.lower(): v for k, v in accounts.items()} try: account = accounts[user] except KeyError: # Invalid combination return False else: passhash = account.get('password') if not passhash: # No password given, return. XXX: we should allow plugins to override # this in the future. return False # Encryption in account passwords is optional (to not break backwards # compatibility). if account.get('encrypted', False): return verifyHash(password, passhash) else: return password == passhash def verifyHash(password, passhash): """Checks whether the password given matches the hash.""" if password: if not pwd_context: raise utils.NotAuthorizedError("Cannot log in to an account with a hashed password " "because passlib is not installed.") return pwd_context.verify(password, passhash) return False # No password given!
mpl-2.0
Python
b79a80d894bdc39c8fa6f76fe50e222567f00df1
Update config_default: add elastic search config
tranhuucuong91/simple-notebooks,tranhuucuong91/simple-notebooks,tranhuucuong91/simple-notebooks
config_default.py
config_default.py
# -*- coding: utf-8 -*-
"""
Created on 2015-10-23 08:06:00

@author: Tran Huu Cuong <[email protected]>
"""

import os

# Blog configuration values.
# You may consider using a one-way hash to generate the password, and then
# use the hash again in the login view to perform the comparison. This is just
# for simplicity.
ADMIN_PASSWORD = 'admin@secret'

APP_DIR = os.path.dirname(os.path.realpath(__file__))
PATH_SQLITE_DB=os.path.join(APP_DIR, 'blog.db')

# The playhouse.flask_utils.FlaskDB object accepts database URL configuration.
DATABASE = 'sqliteext:///{}'.format(PATH_SQLITE_DB)
DEBUG = False

# The secret key is used internally by Flask to encrypt session data stored
# in cookies. Make this unique for your app.
SECRET_KEY = 'shhh, secret!'

# This is used by micawber, which will attempt to generate rich media
# embedded objects with maxwidth=800.
SITE_WIDTH = 800

APP_HOST='127.0.0.1'
APP_PORT=5000

ES_HOST = {
    "host": "172.17.42.1",
    "port": 9200
}

ES_INDEX_NAME = 'notebooks'
ES_TYPE_NAME = 'notebooks'
# -*- coding: utf-8 -*-
"""
Created on 2015-10-23 08:06:00

@author: Tran Huu Cuong <[email protected]>
"""

import os

# Blog configuration values.
# You may consider using a one-way hash to generate the password, and then
# use the hash again in the login view to perform the comparison. This is just
# for simplicity.
ADMIN_PASSWORD = 'admin@secret'

APP_DIR = os.path.dirname(os.path.realpath(__file__))
PATH_SQLITE_DB=os.path.join(APP_DIR, 'blog.db')

# The playhouse.flask_utils.FlaskDB object accepts database URL configuration.
DATABASE = 'sqliteext:///{}'.format(PATH_SQLITE_DB)
DEBUG = False

# The secret key is used internally by Flask to encrypt session data stored
# in cookies. Make this unique for your app.
SECRET_KEY = 'shhh, secret!'

# This is used by micawber, which will attempt to generate rich media
# embedded objects with maxwidth=800.
SITE_WIDTH = 800

APP_HOST='127.0.0.1'
APP_PORT=5000
mit
Python
a7c084b4ff3d5529ca54209283d0e1a5984ebea2
Fix lint error
john-kurkowski/tldextract
tldextract/cli.py
tldextract/cli.py
'''tldextract CLI'''

import logging
import sys

from .tldextract import TLDExtract
from ._version import version as __version__


def main():
    '''tldextract CLI main command.'''
    import argparse

    logging.basicConfig()

    parser = argparse.ArgumentParser(
        prog='tldextract',
        description='Parse hostname from a url or fqdn')

    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('input', metavar='fqdn|url',
                        type=str, nargs='*', help='fqdn or url')

    parser.add_argument('-u', '--update', default=False, action='store_true',
                        help='force fetch the latest TLD definitions')
    parser.add_argument('-c', '--cache_dir',
                        help='use an alternate TLD definition caching folder')
    parser.add_argument('-p', '--private_domains', default=False, action='store_true',
                        help='Include private domains')

    args = parser.parse_args()
    tld_extract = TLDExtract(include_psl_private_domains=args.private_domains)

    if args.cache_dir:
        tld_extract.cache_file = args.cache_file

    if args.update:
        tld_extract.update(True)
    elif not args.input:
        parser.print_usage()
        sys.exit(1)
        return

    for i in args.input:
        print(' '.join(tld_extract(i)))  # pylint: disable=superfluous-parens
'''tldextract CLI'''

import logging
import sys

from .tldextract import TLDExtract
from ._version import version as __version__


def main():
    '''tldextract CLI main command.'''
    import argparse

    logging.basicConfig()

    parser = argparse.ArgumentParser(
        prog='tldextract',
        description='Parse hostname from a url or fqdn')

    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('input', metavar='fqdn|url',
                        type=str, nargs='*', help='fqdn or url')

    parser.add_argument('-u', '--update', default=False, action='store_true',
                        help='force fetch the latest TLD definitions')
    parser.add_argument('-c', '--cache_dir',
                        help='use an alternate TLD definition caching folder')
    parser.add_argument('-p', '--private_domains', default=False, action='store_true',
                        help='Include private domains')

    args = parser.parse_args()
    tld_extract = TLDExtract(include_psl_private_domains=args.private_domains)

    if args.cache_dir:
        tld_extract.cache_file = args.cache_file

    if args.update:
        tld_extract.update(True)
    elif not args.input:
        parser.print_usage()
        sys.exit(1)
        return

    for i in args.input:
        print(' '.join(tld_extract(i)))  # pylint: disable=superfluous-parens
bsd-3-clause
Python
3f0930f4c7758bc690f01d09f743e24068db05c1
extend benchmark to run both upload and download tests
deluge-clone/libtorrent,deluge-clone/libtorrent,deluge-clone/libtorrent,deluge-clone/libtorrent,deluge-clone/libtorrent,deluge-clone/libtorrent
tools/run_benchmark.py
tools/run_benchmark.py
import os
import time
import shutil
import subprocess
import sys

toolset = ''
if len(sys.argv) > 1:
    toolset = sys.argv[1]

ret = os.system('cd ../examples && bjam boost=source profile statistics=on -j3 %s stage_client_test' % toolset)
ret = os.system('cd ../examples && bjam boost=source release -j3 %s stage_connection_tester' % toolset)
if ret != 0:
    print 'ERROR: build failed: %d' % ret
    sys.exit(1)

if not os.path.exists('cpu_benchmark.torrent'):
    ret = os.system('../examples/connection_tester gen-torrent -s 10000 -n 15 -t cpu_benchmark.torrent')
    if ret != 0:
        print 'ERROR: connection_tester failed: %d' % ret
        sys.exit(1)

try: shutil.rmtree('t')
except: pass

def run_test(name, test_cmd, client_arg, num_peers):
    output_dir = 'logs_%s' % name

    try: os.mkdir(output_dir)
    except: pass

    port = (int(time.time()) % 50000) + 2000

    try: shutil.rmtree('session_stats')
    except: pass
    try: shutil.rmtree('session_stats_report')
    except: pass

    start = time.time();

    client_cmd = '../examples/client_test -p %d cpu_benchmark.torrent -k -z -H -X -q 120 %s -h -c %d -T %d' \
        % (port, client_arg, num_peers *2, num_peers*2)
    test_cmd = '../examples/connection_tester %s -c %d -d 127.0.0.1 -p %d -t cpu_benchmark.torrent' % (test_cmd, num_peers, port)

    client_out = open('%s/client.out' % output_dir, 'w+')
    test_out = open('%s/test.out' % output_dir, 'w+')

    print client_cmd
    c = subprocess.Popen(client_cmd.split(' '), stdout=client_out, stderr=client_out, stdin=subprocess.PIPE)

    time.sleep(2)

    print test_cmd
    t = subprocess.Popen(test_cmd.split(' '), stdout=test_out, stderr=test_out)

    t.wait()

    c.communicate('q')
    c.wait()

    end = time.time();

    print 'runtime %d seconds' % (end - start)
    print 'analyzing proile...'
    os.system('gprof ../examples/client_test >%s/gprof.out' % output_dir)
    print 'generating profile graph...'
    os.system('python gprof2dot.py <%s/gprof.out | dot -Tps -o %s/cpu_profile.ps' % (output_dir, output_dir))

    os.system('python parse_session_stats.py session_stats/*.log')
    try: shutil.move('session_stats_report', '%s/session_stats_report' % output_dir)
    except: pass
    try: shutil.move('session_stats', '%s/session_stats' % output_dir)
    except: pass

run_test('download', 'upload', '', 50)
run_test('upload', 'download', '-G', 5)
import os
import time
import shutil
import subprocess
import sys

port = (int(time.time()) % 50000) + 2000

toolset = ''
if len(sys.argv) > 1:
    toolset = sys.argv[1]

ret = os.system('cd ../examples && bjam boost=source profile statistics=on -j3 %s stage_client_test' % toolset)
ret = os.system('cd ../examples && bjam boost=source release -j3 %s stage_connection_tester' % toolset)
if ret != 0:
    print 'ERROR: build failed: %d' % ret
    sys.exit(1)

if not os.path.exists('cpu_benchmark.torrent'):
    ret = os.system('../examples/connection_tester gen-torrent -s 10000 -n 15 -t cpu_benchmark.torrent')
    if ret != 0:
        print 'ERROR: connection_tester failed: %d' % ret
        sys.exit(1)

try: shutil.rmtree('torrent_storage')
except: pass
try: shutil.rmtree('session_stats')
except: pass
try: os.mkdir('logs')
except: pass

start = time.time();

client_cmd = '../examples/client_test -p %d cpu_benchmark.torrent -k -0 -z -H -X -q 120' % port
test_cmd = '../examples/connection_tester upload -c 50 -d 127.0.0.1 -p %d -t cpu_benchmark.torrent' % port

client_out = open('logs/client.out', 'w+')
test_out = open('logs/test.out', 'w+')

print client_cmd
c = subprocess.Popen(client_cmd.split(' '), stdout=client_out, stderr=client_out, stdin=subprocess.PIPE)

time.sleep(2)

print test_cmd
t = subprocess.Popen(test_cmd.split(' '), stdout=test_out, stderr=test_out)

t.wait()

c.communicate('q')
c.wait()

end = time.time();

print 'runtime %d seconds' % (end - start)
print 'analyzing proile...'
os.system('gprof ../examples/client_test >logs/gprof.out')
print 'generating profile graph...'
os.system('python gprof2dot.py <logs/gprof.out | dot -Tpng -o logs/cpu_profile.png')

os.system('python parse_session_stats.py session_stats/*.log')
try: os.rename('session_stats_report', 'logs/session_stats_report')
except: pass
try: os.rename('session_stats', 'logs/session_stats')
except: pass
bsd-3-clause
Python
e7c462af8382a5eb7f5fee2abfc04f002e36b193
Add varint and varlong tests
SpockBotMC/SpockBot,nickelpro/SpockBot,gamingrobot/SpockBot,MrSwiss/SpockBot,Gjum/SpockBot,luken/SpockBot
tests/mcp/test_datautils.py
tests/mcp/test_datautils.py
from spock.mcp import datautils
from spock.utils import BoundBuffer


def test_unpack_varint():
    largebuff = BoundBuffer(b'\x80\x94\xeb\xdc\x03')
    smallbuff = BoundBuffer(b'\x14')
    assert datautils.unpack_varint(smallbuff) == 20
    assert datautils.unpack_varint(largebuff) == 1000000000


def test_pack_varint():
    assert datautils.pack_varint(20) == b'\x14'
    assert datautils.pack_varint(1000000000) == b'\x80\x94\xeb\xdc\x03'
    assert datautils.pack_varint(-10000000000) is None
    assert datautils.pack_varint(10000000000) is None


def test_unpack_varlong():
    largebuff = BoundBuffer(b'\x80\xc8\xaf\xa0%')
    smallbuff = BoundBuffer(b'\x14')
    assert datautils.unpack_varlong(smallbuff) == 20
    assert datautils.unpack_varlong(largebuff) == 10000000000
    pass


def test_pack_varlong():
    assert datautils.pack_varlong(20) == b'\x14'
    assert datautils.pack_varlong(10000000000) == b'\x80\xc8\xaf\xa0%'
    assert datautils.pack_varlong(10000000000000000000) is None
    assert datautils.pack_varlong(-10000000000000000000) is None
mit
Python
ca885203ab82026ca21a200c1bee5ad3c0a82cb5
Change default interval
awesto/djangocms-carousel,awesto/djangocms-carousel,awesto/djangocms-carousel
src/cmsplugin_carousel/models.py
src/cmsplugin_carousel/models.py
from adminsortable.models import SortableMixin
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.fields.image import FilerImageField


class CarouselPlugin(CMSPlugin):
    interval = models.PositiveIntegerField(_('Interval'), default=5)
    title = models.CharField(_('Title'), max_length=255, default='', blank=True)

    def __str__(self):
        return self.title or str(self.pk)

    def copy_relations(self, oldinstance):
        super(CarouselPlugin, self).copy_relations(oldinstance)
        for picture in oldinstance.pictures.all().iterator():
            picture.pk = None
            picture.plugin = self
            picture.save()


class CarouselPicture(SortableMixin):
    plugin = models.ForeignKey(CarouselPlugin, related_name='pictures')
    image = FilerImageField(verbose_name=_('Image'), related_name='+')
    alt_tag = models.CharField(_('Alt tag'), max_length=255, blank=True)
    text = models.TextField(verbose_name=_('Text over image'), blank=True)
    url = models.CharField(verbose_name=_('URL'), blank=True, null=True, max_length=500)
    page = PageField(verbose_name=_("Page"), blank=True, null=True)
    open_in_tab = models.BooleanField(verbose_name=_('Open in new window'))
    ordering = models.PositiveIntegerField(default=0, editable=False, db_index=True)

    class Meta:
        ordering = ['ordering', ]

    def link(self):
        if self.page is not None:
            return self.page
        else:
            return self.url

    def __str__(self):
        return self.alt_tag
from adminsortable.models import SortableMixin
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.fields.image import FilerImageField


class CarouselPlugin(CMSPlugin):
    interval = models.PositiveIntegerField(_('Interval'), default=1)
    title = models.CharField(_('Title'), max_length=255, default='', blank=True)

    def __str__(self):
        return self.title or str(self.pk)

    def copy_relations(self, oldinstance):
        super(CarouselPlugin, self).copy_relations(oldinstance)
        for picture in oldinstance.pictures.all().iterator():
            picture.pk = None
            picture.plugin = self
            picture.save()


class CarouselPicture(SortableMixin):
    plugin = models.ForeignKey(CarouselPlugin, related_name='pictures')
    image = FilerImageField(verbose_name=_('Image'), related_name='+')
    alt_tag = models.CharField(_('Alt tag'), max_length=255, blank=True)
    text = models.TextField(verbose_name=_('Text over image'), blank=True)
    url = models.CharField(verbose_name=_('URL'), blank=True, null=True, max_length=500)
    page = PageField(verbose_name=_("Page"), blank=True, null=True)
    open_in_tab = models.BooleanField(verbose_name=_('Open in new window'))
    ordering = models.PositiveIntegerField(default=0, editable=False, db_index=True)

    class Meta:
        ordering = ['ordering', ]

    def link(self):
        if self.page is not None:
            return self.page
        else:
            return self.url

    def __str__(self):
        return self.alt_tag
mit
Python
f96f3f6ac5ca5f9301c2c463b0a3f4f710187f21
Use utf-8
Astalaseven/pdftoics
constantes.py
constantes.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from BeautifulSoup import BeautifulSoup
import requests


def get_profs():
    r = requests.get("http://www.heb.be/esi/personnel_fr.htm")

    soup = BeautifulSoup(r.text)
    soup = soup.findAll('ul')[2]

    profs = {}

    for line in soup:
        line = str(line)
        if "profs" in line:
            abbr = line.split("(")[1].split(")")[0]
            prof = line.split(">")[2].split("<")[0]
            profs[abbr] = prof.decode('utf-8')


HOURS = [
    '08:15',
    '09:15',
    '10:30',
    '11:30',
    '12:30',
    '13:45',
    '14:45',
    '16:00',
    '17:00',
]

DAYS = {
    0: 'Lundi',
    1: 'Mardi',
    2: 'Mercredi',
    3: 'Jeudi',
    4: 'Vendredi',
}

MONTHS = {
    'janvier' : '01',
    'février' : '02',
    'mars' : '03',
    'avril' : '04',
    'mai' : '05',
    'juin' : '06',
    'juillet' : '07',
    'aout' : '08',
    'septembre': '09',
    'octobre' : '10',
    'novembre' : '11',
    'décembre' : '12',
}

PROFS = {
    'ADT': 'Alain Detaille',
    'ARO': 'Anne Rousseau',
    'ART': 'Anne Rayet',
    'BDL': 'Bénoni Delfosse',
    'BEJ': 'Jonas Beleho',
    'CIH': 'Yashar Cihan',
    'CLG': 'Christine Leignel',
    'CLR': 'Catherine Leruste',
    'CUV': 'Geneviève Cuvelier',
    'DNA': 'David Nabet',
    'DWI': 'Didier Willame',
    'EFO': 'Eric Fontaine',
    'EGR': 'Eric Georges',
    'ELV': 'Eytan Levy',
    'FPL': 'Frédéric Pluquet',
    'GVA': 'Gilles Van Assche',
    'HAL': 'Amine Hallal',
    'JCJ': 'Jean-Claude Jaumain',
    'JDM': 'Jacqueline De Mesmaeker',
    'JDS': 'Jérôme Dossogne',
    'JMA': 'Jean-Marc André',
    'LBC': 'Laurent Beeckmans',
    'MAP': 'Michel Applaincourt',
    'MBA': 'Monica Bastreghi',
    'MCD': 'Marco Codutti',
    'MHI': 'Mohamed Hadjili',
    'MWA': 'Moussa Wahid',
    'MWI': 'Michel Willemse',
    'NPX': 'Nicolas Pettiaux',
    'NVS': 'Nicolas Vansteenkiste',
    'PBT': 'Pierre Bettens',
    'PMA': 'Pantelis Matsos',
    'RPL': 'René-Philippe Legrand',
    'SRV': 'Frédéric Servais',
    'YPR': 'Yves Pierseaux',
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from BeautifulSoup import BeautifulSoup
import requests


def get_profs():
    r = requests.get("http://www.heb.be/esi/personnel_fr.htm")

    soup = BeautifulSoup(r.text)
    soup = soup.findAll('ul')[2]

    profs = {}

    for line in soup:
        line = str(line)
        if "profs" in line:
            abbr = line.split("(")[1].split(")")[0]
            prof = line.split(">")[2].split("<")[0]
            profs[abbr] = prof.decode('utf-8')


HOURS = [
    '08:15',
    '09:15',
    '10:30',
    '11:30',
    '12:30',
    '13:45',
    '14:45',
    '16:00',
    '17:00',
]

DAYS = {
    0: 'Lundi',
    1: 'Mardi',
    2: 'Mercredi',
    3: 'Jeudi',
    4: 'Vendredi',
}

MONTHS = {
    'janvier' : '01',
    'février' : '02',
    'mars' : '03',
    'avril' : '04',
    'mai' : '05',
    'juin' : '06',
    'juillet' : '07',
    'aout' : '08',
    'septembre': '09',
    'octobre' : '10',
    'novembre' : '11',
    'décembre' : '12',
}

PROFS = {
    'ADT': 'Alain Detaille',
    'ARO': 'Anne Rousseau',
    'ART': 'Anne Rayet',
    'BDL': 'Bénoni Delfosse',
    'BEJ': 'Jonas Beleho',
    'CIH': 'Yashar Cihan',
    'CLG': 'Christine Leignel',
    'CLR': 'Catherine Leruste',
    'CUV': 'Geneviève Cuvelier',
    'DNA': 'David Nabet',
    'DWI': 'Didier Willame',
    'EFO': 'Eric Fontaine',
    'EGR': 'Eric Georges',
    'ELV': 'Eytan Levy',
    'FPL': 'Frédéric Pluquet',
    'GVA': 'Gilles Van Assche',
    'HAL': 'Amine Hallal',
    'JCJ': 'Jean-Claude Jaumain',
    'JDM': 'Jacqueline De Mesmaeker',
    'JDS': 'Jérôme Dossogne',
    'JMA': 'Jean-Marc André',
    'LBC': 'Laurent Beeckmans',
    'MAP': 'Michel Applaincourt',
    'MBA': 'Monica Bastreghi',
    'MCD': 'Marco Codutti',
    'MHI': 'Mohamed Hadjili',
    'MWA': 'Moussa Wahid',
    'MWI': 'Michel Willemse',
    'NPX': 'Nicolas Pettiaux',
    'NVS': 'Nicolas Vansteenkiste',
    'PBT': 'Pierre Bettens',
    'PMA': 'Pantelis Matsos',
    'RPL': 'René-Philippe Legrand',
    'SRV': 'Fréd\éric Servais',
    'YPR': 'Yves Pierseaux',
}
mit
Python
afdc58945c710f623714e6b07c593489c0cd42be
Implement basic list command
xii/xii,xii/xii
src/xii/builtin/commands/list/list.py
src/xii/builtin/commands/list/list.py
import datetime

from xii import definition, command, error
from xii.need import NeedLibvirt, NeedSSH


class ListCommand(command.Command):
    """List all currently defined components
    """
    name = ['list', 'ls']
    help = "list all currently defined components"

    @classmethod
    def argument_parser(cls):
        parser = command.Command.argument_parser(cls.name[0])
        parser.add_argument("-d", "--definition", default=None,
                            help="Define which xii definition file should be used")
        parser.add_argument("--all", default=False, action="store_true",
                            help="Show all components defined by the xii")
        parser.add_argument("--host", default=None,
                            help="Specify host to connect to. (A libvirt url is required)")
        parser.add_argument("--only", type=str, default=None,
                            help="Show only secified components [nodes,pools,networks]")
        return parser

    def _get_uptime(self, time):
        now = datetime.datetime.now()
        delta = now - datetime.datetime.fromtimestamp(time)

        if delta.days > 1:
            return "{} days".format(delta.days)

        if delta.seconds / 3600 > 1:
            return "{} hours".format(delta.seconds / 3600)

        if delta.seconds / 60 > 1:
            return "{} minutes".format(delta.seconds / 60)

        return "{} seconds".format(delta.seconds)

    def run(self):
        rows = []
        for c in self.children():
            meta = c.fetch_metadata()
            create = "---"
            if meta is not None:
                created_at = float(meta["created"])
                create = self._get_uptime(created_at)
            rows.append((c.entity(),
                         c.get_virt_url(),
                         create,
                         c.status()
                         ))

        self.show_table(["name", "host", "uptime", "status"], rows)
from xii import definition, command, error
from xii.need import NeedLibvirt, NeedSSH


class ListCommand(command.Command):
    """List all currently defined components
    """
    name = ['list', 'ls']
    help = "list all currently defined components"

    @classmethod
    def argument_parser(cls):
        parser = command.Command.argument_parser(cls.name[0])
        parser.add_argument("-d", "--definition", default=None,
                            help="Define which xii definition file should be used")
        parser.add_argument("--all", default=False, action="store_true",
                            help="Show all components defined by the xii")
        parser.add_argument("--host", default=None,
                            help="Specify host to connect to. (A libvirt url is required)")
        parser.add_argument("--only", type=str, default=None,
                            help="Show only secified components [nodes,pools,networks]")
        return parser

    def run(self):
        pass
apache-2.0
Python
095ec4c38015f1b1b53cb88ae59fbf6a7596b492
update VAF
MaxInGaussian/ZS-VAFNN
mnist/training.py
mnist/training.py
# Copyright 2017 Max W. Y. Lam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import sys
sys.path.append("../")
import os
import time
import tensorflow as tf
import tensorflow.contrib.layers as layers
from six.moves import range, zip
import numpy as np
import zhusuan as zs
import six
import gzip
from six.moves import cPickle as pickle

from expt import run_experiment


DATA_PATH = 'mnist.pkl.gz'

def load_data(n_folds):
    def to_one_hot(x, depth):
        ret = np.zeros((x.shape[0], depth))
        ret[np.arange(x.shape[0]), x] = 1
        return ret
    f = gzip.open(path, 'rb')
    if six.PY2:
        train_set, valid_set, test_set = pickle.load(f)
    else:
        train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    f.close()
    X_train, y_train = train_set[0], train_set[1]
    X_valid, y_valid = valid_set[0], valid_set[1]
    X_test, y_test = test_set[0], test_set[1]
    X_train = np.vstack([X_train, X_valid]).astype('float32')
    y_train = np.vstack([y_train, y_valid])
    return [X_train, to_one_hot(y_train, 10), X_test, to_one_hot(y_test, 10)]

if __name__ == '__main__':

    if('cpu' in sys.argv):
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    model_names = ['VAFNN', 'BNN']

    train_test_set = load_data(5)
    D, P = train_test_set[0][0].shape[1], train_test_set[0][1].shape[1]

    # Fair Model Comparison - Same Architecture & Optimization Rule
    training_settings = {
        'task': 'classification',
        'plot_err': True,
        'lb_samples': 20,
        'll_samples': 100,
        'n_basis': 50,
        'n_hiddens': [100],
        'batch_size': 10,
        'learn_rate': 1e-3,
        'max_epochs': 10000,
        'early_stop': 10,
        'check_freq': 5,
    }

    eval_mses, eval_lls = run_experiment(
        model_names, 'MNIST', load_data(5), **training_settings)
# Copyright 2017 Max W. Y. Lam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import sys
sys.path.append("../")
import os
import time
import tensorflow as tf
import tensorflow.contrib.layers as layers
from six.moves import range, zip
import numpy as np
import zhusuan as zs
import six
from six.moves import cPickle as pickle

from expt import run_experiment


DATA_PATH = 'mnist.pkl.gz'

def load_data(n_folds):
    def to_one_hot(x, depth):
        ret = np.zeros((x.shape[0], depth))
        ret[np.arange(x.shape[0]), x] = 1
        return ret
    f = gzip.open(path, 'rb')
    if six.PY2:
        train_set, valid_set, test_set = pickle.load(f)
    else:
        train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    f.close()
    X_train, y_train = train_set[0], train_set[1]
    X_valid, y_valid = valid_set[0], valid_set[1]
    X_test, y_test = test_set[0], test_set[1]
    X_train = np.vstack([X_train, X_valid]).astype('float32')
    y_train = np.vstack([y_train, y_valid])
    return [X_train, to_one_hot(y_train, 10), X_test, to_one_hot(y_test, 10)]

if __name__ == '__main__':

    if('cpu' in sys.argv):
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    model_names = ['VAFNN', 'BNN']

    train_test_set = load_data(5)
    D, P = train_test_set[0][0].shape[1], train_test_set[0][1].shape[1]

    # Fair Model Comparison - Same Architecture & Optimization Rule
    training_settings = {
        'task': 'classification',
        'plot_err': True,
        'lb_samples': 20,
        'll_samples': 100,
        'n_basis': 50,
        'n_hiddens': [100],
        'batch_size': 10,
        'learn_rate': 1e-3,
        'max_epochs': 10000,
        'early_stop': 10,
        'check_freq': 5,
    }

    eval_mses, eval_lls = run_experiment(
        model_names, 'MNIST', load_data(5), **training_settings)
apache-2.0
Python
85537e3f8557a76b8b2ad89edc41848c29622c24
Update the paint tool shape with the viewer image changes
paalge/scikit-image,ajaybhat/scikit-image,michaelaye/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,vighneshbirodkar/scikit-image,rjeli/scikit-image,SamHames/scikit-image,keflavich/scikit-image,robintw/scikit-image,GaZ3ll3/scikit-image,jwiggins/scikit-image,michaelpacer/scikit-image,emon10005/scikit-image,ofgulban/scikit-image,GaZ3ll3/scikit-image,bsipocz/scikit-image,robintw/scikit-image,pratapvardhan/scikit-image,chriscrosscutler/scikit-image,bsipocz/scikit-image,ofgulban/scikit-image,ClinicalGraphics/scikit-image,ofgulban/scikit-image,michaelpacer/scikit-image,chintak/scikit-image,bennlich/scikit-image,newville/scikit-image,juliusbierk/scikit-image,WarrenWeckesser/scikits-image,paalge/scikit-image,Hiyorimi/scikit-image,warmspringwinds/scikit-image,youprofit/scikit-image,oew1v07/scikit-image,newville/scikit-image,rjeli/scikit-image,Midafi/scikit-image,vighneshbirodkar/scikit-image,chriscrosscutler/scikit-image,oew1v07/scikit-image,WarrenWeckesser/scikits-image,rjeli/scikit-image,bennlich/scikit-image,SamHames/scikit-image,ClinicalGraphics/scikit-image,emon10005/scikit-image,vighneshbirodkar/scikit-image,dpshelio/scikit-image,blink1073/scikit-image,Britefury/scikit-image,Hiyorimi/scikit-image,dpshelio/scikit-image,warmspringwinds/scikit-image,chintak/scikit-image,chintak/scikit-image,Britefury/scikit-image,chintak/scikit-image,juliusbierk/scikit-image,SamHames/scikit-image,michaelaye/scikit-image,jwiggins/scikit-image,Midafi/scikit-image,keflavich/scikit-image,youprofit/scikit-image,ajaybhat/scikit-image,blink1073/scikit-image,SamHames/scikit-image
skimage/viewer/plugins/labelplugin.py
skimage/viewer/plugins/labelplugin.py
import numpy as np

from .base import Plugin
from ..widgets import ComboBox, Slider
from ..canvastools import PaintTool


__all__ = ['LabelPainter']


rad2deg = 180 / np.pi


class LabelPainter(Plugin):
    name = 'LabelPainter'

    def __init__(self, max_radius=20, **kwargs):
        super(LabelPainter, self).__init__(**kwargs)

        # These widgets adjust plugin properties instead of an image filter.
        self._radius_widget = Slider('radius', low=1, high=max_radius,
                                     value=5, value_type='int', ptype='plugin')
        labels = [str(i) for i in range(6)]
        labels[0] = 'Erase'
        self._label_widget = ComboBox('label', labels, ptype='plugin')
        self.add_widget(self._radius_widget)
        self.add_widget(self._label_widget)

        print(self.help())

    def help(self):
        helpstr = ("Label painter",
                   "Hold left-mouse button and paint on canvas.")
        return '\n'.join(helpstr)

    def attach(self, image_viewer):
        super(LabelPainter, self).attach(image_viewer)

        image = image_viewer.original_image
        self.paint_tool = PaintTool(self.image_viewer.ax, image.shape,
                                    on_enter=self.on_enter)
        self.paint_tool.radius = self.radius
        self.paint_tool.label = self._label_widget.index = 1
        self.artists.append(self.paint_tool)

    def _on_new_image(self, image):
        """Update plugin for new images."""
        self.paint_tool.shape = image.shape

    def on_enter(self, overlay):
        pass

    @property
    def radius(self):
        return self._radius_widget.val

    @radius.setter
    def radius(self, val):
        self.paint_tool.radius = val

    @property
    def label(self):
        return self._label_widget.val

    @label.setter
    def label(self, val):
        self.paint_tool.label = val
import numpy as np

from .base import Plugin
from ..widgets import ComboBox, Slider
from ..canvastools import PaintTool


__all__ = ['LabelPainter']


rad2deg = 180 / np.pi


class LabelPainter(Plugin):
    name = 'LabelPainter'

    def __init__(self, max_radius=20, **kwargs):
        super(LabelPainter, self).__init__(**kwargs)

        # These widgets adjust plugin properties instead of an image filter.
        self._radius_widget = Slider('radius', low=1, high=max_radius,
                                     value=5, value_type='int', ptype='plugin')
        labels = [str(i) for i in range(6)]
        labels[0] = 'Erase'
        self._label_widget = ComboBox('label', labels, ptype='plugin')
        self.add_widget(self._radius_widget)
        self.add_widget(self._label_widget)

        print(self.help())

    def help(self):
        helpstr = ("Label painter",
                   "Hold left-mouse button and paint on canvas.")
        return '\n'.join(helpstr)

    def attach(self, image_viewer):
        super(LabelPainter, self).attach(image_viewer)

        image = image_viewer.original_image
        self.paint_tool = PaintTool(self.image_viewer.ax, image.shape,
                                    on_enter=self.on_enter)
        self.paint_tool.radius = self.radius
        self.paint_tool.label = self._label_widget.index = 1
        self.artists.append(self.paint_tool)

    def on_enter(self, overlay):
        pass

    @property
    def radius(self):
        return self._radius_widget.val

    @radius.setter
    def radius(self, val):
        self.paint_tool.radius = val

    @property
    def label(self):
        return self._label_widget.val

    @label.setter
    def label(self, val):
        self.paint_tool.label = val
bsd-3-clause
Python
8a6b100e671b4f22dee6b0399eb8a4bc8bf1a97e
update longdesc string
poldracklab/mriqc,oesteban/mriqc,poldracklab/mriqc,oesteban/mriqc,oesteban/mriqc,poldracklab/mriqc,oesteban/mriqc,poldracklab/mriqc
mriqc/info.py
mriqc/info.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
MRIQC
"""

__versionbase__ = '0.8.6'
__versionrev__ = 'a4'
__version__ = __versionbase__ + __versionrev__
__author__ = 'Oscar Esteban'
__email__ = '[email protected]'
__maintainer__ = 'Oscar Esteban'
__copyright__ = ('Copyright 2016, Center for Reproducible Neuroscience, '
                 'Stanford University')
__credits__ = 'Oscar Esteban'
__license__ = '3-clause BSD'
__status__ = 'Prototype'
__description__ = 'NR-IQMs (no-reference Image Quality Metrics) for MRI'
__longdesc__ = """\
MRIQC provides a series of image processing workflows to extract and compute a series of \
NR (no-reference), IQMs (image quality metrics) to be used in QAPs (quality assessment \
protocols) for MRI (magnetic resonance imaging).
This open-source neuroimaging data processing tool is being developed as a part of the \
MRI image analysis and reproducibility platform offered by the CRN. This pipeline derives \
from, and is heavily influenced by, the PCP Quality Assessment Protocol.
This tool extracts a series of IQMs from structural and functional MRI data. It is also \
scheduled to add diffusion MRI to the target imaging families.
"""

URL = 'http://mriqc.readthedocs.org/'
DOWNLOAD_URL = ('https://pypi.python.org/packages/source/m/mriqc/'
                'mriqc-{}.tar.gz'.format(__version__))
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Topic :: Scientific/Engineering :: Image Recognition',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.5',
]

REQUIRES = [
    'numpy',
    'future',
    'lockfile',
    'six',
    'matplotlib',
    'nibabel',
    'niworkflows>=0.0.3a5',
    'pandas',
    'dipy',
    'jinja2',
    'seaborn',
    'pyPdf2',
    'PyYAML',
    'nitime',
    'nilearn',
    'sklearn',
    'scikit-learn'
]

LINKS_REQUIRES = [
    'git+https://github.com/oesteban/nipype.git@master#egg=nipype',
    'git+https://github.com/oesteban/rst2pdf.git@futurize/stage2#egg=rst2pdf'
]

TESTS_REQUIRES = [
    'mock',
    'codecov',
    'nose',
    'doctest-ignore-unicode'
]

EXTRA_REQUIRES = {
    'doc': ['sphinx'],
    'tests': TESTS_REQUIRES,
    'duecredit': ['duecredit']
}

# Enable a handle to install all extra dependencies at once
EXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
MRIQC
"""

__versionbase__ = '0.8.6'
__versionrev__ = 'a4'
__version__ = __versionbase__ + __versionrev__
__author__ = 'Oscar Esteban'
__email__ = '[email protected]'
__maintainer__ = 'Oscar Esteban'
__copyright__ = ('Copyright 2016, Center for Reproducible Neuroscience, '
                 'Stanford University')
__credits__ = 'Oscar Esteban'
__license__ = '3-clause BSD'
__status__ = 'Prototype'
__description__ = 'NR-IQMs (no-reference Image Quality Metrics) for MRI'
__longdesc__ = """
MRIQC provides a series of image processing workflows to extract and compute a series of
NR (no-reference), IQMs (image quality metrics) to be used in QAPs (quality assessment
protocols) for MRI (magnetic resonance imaging).
This open-source neuroimaging data processing tool is being developed as a part of the
MRI image analysis and reproducibility platform offered by the CRN. This pipeline derives
from, and is heavily influenced by, the PCP Quality Assessment Protocol.
This tool extracts a series of IQMs from structural and functional MRI data. It is also
scheduled to add diffusion MRI to the target imaging families.
"""

URL = 'http://mriqc.readthedocs.org/'
DOWNLOAD_URL = ('https://pypi.python.org/packages/source/m/mriqc/'
                'mriqc-{}.tar.gz'.format(__version__))
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Science/Research',
    'Topic :: Scientific/Engineering :: Image Recognition',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.5',
]

REQUIRES = [
    'numpy',
    'future',
    'lockfile',
    'six',
    'matplotlib',
    'nibabel',
    'niworkflows>=0.0.3a5',
    'pandas',
    'dipy',
    'jinja2',
    'seaborn',
    'pyPdf2',
    'PyYAML',
    'nitime',
    'nilearn',
    'sklearn',
    'scikit-learn'
]

LINKS_REQUIRES = [
    'git+https://github.com/oesteban/nipype.git@master#egg=nipype',
    'git+https://github.com/oesteban/rst2pdf.git@futurize/stage2#egg=rst2pdf'
]

TESTS_REQUIRES = [
    'mock',
    'codecov',
    'nose',
    'doctest-ignore-unicode'
]

EXTRA_REQUIRES = {
    'doc': ['sphinx'],
    'tests': TESTS_REQUIRES,
    'duecredit': ['duecredit']
}

# Enable a handle to install all extra dependencies at once
EXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]
apache-2.0
Python
fb2c9469f6d026e77e0f8c20a12f4373e68f9ba2
update dependency xgboost to v1 (#543)
GoogleCloudPlatform/ai-platform-samples,GoogleCloudPlatform/ai-platform-samples
training/xgboost/structured/base/setup.py
training/xgboost/structured/base/setup.py
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from setuptools import find_packages
from setuptools import setup

# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helpfer functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
    'tensorflow==1.15.4',
    'scikit-learn==0.20.2',
    'pandas==0.24.2',
    'xgboost==1.5.0',
    'cloudml-hypertune',
]

setup(
    name='trainer',
    version='0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='AI Platform | Training | xgboost | Base'
)
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from setuptools import find_packages
from setuptools import setup

# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helpfer functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
    'tensorflow==1.15.4',
    'scikit-learn==0.20.2',
    'pandas==0.24.2',
    'xgboost==0.81',
    'cloudml-hypertune',
]

setup(
    name='trainer',
    version='0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='AI Platform | Training | xgboost | Base'
)
apache-2.0
Python
06a1b635b02e001e798fa57e70a56ad17f9df7d0
fix country cleanup migrate script 5
DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj
portality/migrate/p1p2/country_cleanup.py
portality/migrate/p1p2/country_cleanup.py
import sys
from datetime import datetime
from portality import models
from portality import xwalk

def main(argv=sys.argv):
    start = datetime.now()

    journal_iterator = models.Journal.all_in_doaj()

    counter = 0
    for j in journal_iterator:
        counter += 1
        oldcountry = j.bibjson().country
        j.bibjson().country = xwalk.get_country_code(j.bibjson().country)
        newcountry = j.bibjson().country
        print j.bibjson().title.encode('utf-8'), ',', j.bibjson().get_one_identifier(j.bibjson().P_ISSN), j.bibjson().get_one_identifier(j.bibjson().E_ISSN), ',', 'Old country:', oldcountry.encode('utf-8'), ',', 'New country:', newcountry.encode('utf-8')
        j.prep()
        j.save()

    end = datetime.now()

    print "Updated Journals", counter
    print start, end
    print 'Time taken:', end-start

if __name__ == '__main__':
    main()
import sys
from datetime import datetime
from portality import models
from portality import xwalk

def main(argv=sys.argv):
    start = datetime.now()

    journal_iterator = models.Journal.all_in_doaj()

    counter = 0
    for j in journal_iterator:
        counter += 1
        oldcountry = j.bibjson().country
        j.bibjson().country = xwalk.get_country_code(j.bibjson().country)
        newcountry = j.bibjson().country
        print j.bibjson().title.decode('utf-8'), ',', j.bibjson().get_one_identifier(j.bibjson().P_ISSN), j.bibjson().get_one_identifier(j.bibjson().E_ISSN), ',', 'Old country:', oldcountry.decode('utf-8'), ',', 'New country:', newcountry.decode('utf-8')
        j.prep()
        j.save()

    end = datetime.now()

    print "Updated Journals", counter
    print start, end
    print 'Time taken:', end-start

if __name__ == '__main__':
    main()
apache-2.0
Python
ccaca70aa28bdd3e4f2a9c6e46d76e3ff8653f88
Fix public page hashids issue
crestify/crestify,crestify/crestify,crestify/crestify
crestify/views/public.py
crestify/views/public.py
from crestify import app, hashids
from crestify.models import Bookmark
from flask import render_template


@app.route('/public/<string:bookmark_id>', methods=['GET'])
def bookmark_public(bookmark_id):
    bookmark_id = hashids.decode(str(bookmark_id))[0]
    query = Bookmark.query.get(bookmark_id)
    return render_template("public/bookmark_share.html", bookmark=query)
from crestify import app, hashids
from crestify.models import Bookmark
from flask import render_template


@app.route('/public/<string:bookmark_id>', methods=['GET'])
def bookmark_public(bookmark_id):
    bookmark_id = hashids.decode(bookmark_id)[0]
    query = Bookmark.query.get(bookmark_id)
    return render_template("public/bookmark_share.html", bookmark=query)
bsd-3-clause
Python
bfdf4bffdb30e6f9651c96afb711d2a871b9ff87
fix output to shell
ContinuumIO/pypi-conda-builds
create_recipes.py
create_recipes.py
import argparse
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipies will be created")
args = parser.parse_args()

package_names = [package.strip() for package in
                 open(args.package_list, 'r').readlines()]

log_dir = "./logs/"
recipes_dir = "./recipes/"

recipe_log_file = open(log_dir + 'recipe_log', 'w')

successes = []
failures = []

for package in package_names:
    msg = "Creating Conda recipe for %s\n" % (package)
    print(msg)

    err = subprocess.call(['conda', 'skeleton', 'pypi', package,
                           '--output-dir', recipes_dir],
                          stdout=recipe_log_file,
                          stderr=recipe_log_file)
    if err is 0:
        msg = "Succesfully created conda recipe for %s\n" % (package)
        successes.append(package)
    else:
        msg = "Failed to create conda recipe for %s\n" % (package)
        failures.append(package)
    print(msg)

recipe_log_file.close()

successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')

successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))

successful_recipes_file.close()
failed_recipes_file.close()
import argparse
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipies will be created")
args = parser.parse_args()

package_names = [package.strip() for package in
                 open(args.package_list, 'r').readlines()]

log_dir = "./logs/"
recipes_dir = "./recipes/"

recipe_log_file = open(log_dir + 'recipe_log', 'w')

successes = []
failures = []

for package in package_names:
    msg = "Creating Conda recipe for %s\n" % (package)
    recipe_log_file.write(msg)
    print(msg)

    err = subprocess.call(['conda', 'skeleton', 'pypi', package,
                           '--output-dir', recipes_dir],
                          stdout=recipe_log_file,
                          stderr=recipe_log_file)
    if err is 0:
        successes.append(package)
    else:
        failures.append(package)

recipe_log_file.close()

successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')

successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))

successful_recipes_file.close()
failed_recipes_file.close()
bsd-3-clause
Python
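Worth flagging in the record above: `err is 0` compares the return code by identity and only happens to work because CPython caches small integers. A safer, runnable sketch of the same check with equality (subprocess.run, Python 3.5+, POSIX echo used as a stand-in command):

import subprocess

result = subprocess.run(['echo', 'hello'], stdout=subprocess.PIPE)
if result.returncode == 0:  # equality, not `is`
    print('succeeded:', result.stdout.decode().strip())
else:
    print('failed with code', result.returncode)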
21000dfd4bf63ceae0e8c6ac343624fbf5c5bea2
read tags before people
shaylavi/codeandtalk.com,szabgab/codeandtalk.com,szabgab/codeandtalk.com,mhorvvitz/codeandtalk.com,shaylavi/codeandtalk.com,szabgab/codeandtalk.com,szabgab/codeandtalk.com,mhorvvitz/codeandtalk.com,mhorvvitz/codeandtalk.com,rollandf/codeandtalk.com,shaylavi/codeandtalk.com,rollandf/codeandtalk.com,rollandf/codeandtalk.com,rollandf/codeandtalk.com
cat/test_cat.py
cat/test_cat.py
from cat.code import GenerateSite
import unittest
import json
import os
import sys

def read_json(file):
    with open(file) as fh:
        return json.loads(fh.read())
        #return fh.read()

class TestDemo(unittest.TestCase):
    def test_generate(self):
        GenerateSite().generate_site()
        assert True

        # This fails on travis, we probably need better reporting to see what is the actual difference
        # as I cannot see it. Unless it is only the file_date
        files = [
            'html/v/yougottalovefrontend-2016/vitaly-friedman-cutting-edge-responsive-web-design.json',
            'html/p/zohar-babin.json',
        ]
        for result_file in files:
            expected_file = 'samples/' + os.path.basename(result_file)
            #sys.stderr.write(result_file)
            #sys.stderr.write("\n")
            #sys.stderr.write(expected_file)
            #sys.stderr.write("\n")

            # read both files
            result = read_json(result_file)
            expected = read_json(expected_file)
            if 'file_date' in expected:
                del(expected['file_date'])
                del(result['file_date'])
            if result != expected:
                print("While testing {}\n".format(result_file))
                print("Expected: {}".format(expected))
                print("Received: {}".format(result))
            assert result == expected

    def test_videos(self):
        gs = GenerateSite()
        gs.read_videos()
        report = gs.check_videos()
        sys.stderr.write(report)
        assert report == ''

    def test_people(self):
        gs = GenerateSite()
        gs.read_tags()
        gs.read_people()
        report = gs.check_people()
        sys.stderr.write(report)
        assert report == ''

# vim: expandtab
from cat.code import GenerateSite
import unittest
import json
import os
import sys

def read_json(file):
    with open(file) as fh:
        return json.loads(fh.read())
        #return fh.read()

class TestDemo(unittest.TestCase):
    def test_generate(self):
        GenerateSite().generate_site()
        assert True

        # This fails on travis, we probably need better reporting to see what is the actual difference
        # as I cannot see it. Unless it is only the file_date
        files = [
            'html/v/yougottalovefrontend-2016/vitaly-friedman-cutting-edge-responsive-web-design.json',
            'html/p/zohar-babin.json',
        ]
        for result_file in files:
            expected_file = 'samples/' + os.path.basename(result_file)
            #sys.stderr.write(result_file)
            #sys.stderr.write("\n")
            #sys.stderr.write(expected_file)
            #sys.stderr.write("\n")

            # read both files
            result = read_json(result_file)
            expected = read_json(expected_file)
            if 'file_date' in expected:
                del(expected['file_date'])
                del(result['file_date'])
            if result != expected:
                print("While testing {}\n".format(result_file))
                print("Expected: {}".format(expected))
                print("Received: {}".format(result))
            assert result == expected

    def test_videos(self):
        gs = GenerateSite()
        gs.read_videos()
        report = gs.check_videos()
        sys.stderr.write(report)
        assert report == ''

    def test_people(self):
        gs = GenerateSite()
        gs.read_people()
        report = gs.check_people()
        sys.stderr.write(report)
        assert report == ''

# vim: expandtab
apache-2.0
Python
b220af1b5219c59735bd1f35493b0a659c627738
Fix cookie handling for tornado
joelstanner/python-social-auth,S01780/python-social-auth,VishvajitP/python-social-auth,degs098/python-social-auth,JJediny/python-social-auth,bjorand/python-social-auth,python-social-auth/social-app-cherrypy,clef/python-social-auth,robbiet480/python-social-auth,falcon1kr/python-social-auth,rsteca/python-social-auth,msampathkumar/python-social-auth,clef/python-social-auth,degs098/python-social-auth,jameslittle/python-social-auth,contracode/python-social-auth,mrwags/python-social-auth,barseghyanartur/python-social-auth,mchdks/python-social-auth,alrusdi/python-social-auth,JJediny/python-social-auth,bjorand/python-social-auth,webjunkie/python-social-auth,noodle-learns-programming/python-social-auth,chandolia/python-social-auth,lawrence34/python-social-auth,barseghyanartur/python-social-auth,drxos/python-social-auth,ononeor12/python-social-auth,rsteca/python-social-auth,san-mate/python-social-auth,VishvajitP/python-social-auth,fearlessspider/python-social-auth,lamby/python-social-auth,jeyraof/python-social-auth,joelstanner/python-social-auth,JerzySpendel/python-social-auth,merutak/python-social-auth,wildtetris/python-social-auth,wildtetris/python-social-auth,joelstanner/python-social-auth,Andygmb/python-social-auth,alrusdi/python-social-auth,lawrence34/python-social-auth,DhiaEddineSaidi/python-social-auth,merutak/python-social-auth,firstjob/python-social-auth,mchdks/python-social-auth,ByteInternet/python-social-auth,cmichal/python-social-auth,lawrence34/python-social-auth,jeyraof/python-social-auth,drxos/python-social-auth,JerzySpendel/python-social-auth,alrusdi/python-social-auth,fearlessspider/python-social-auth,jneves/python-social-auth,cjltsod/python-social-auth,JerzySpendel/python-social-auth,iruga090/python-social-auth,jneves/python-social-auth,firstjob/python-social-auth,clef/python-social-auth,ariestiyansyah/python-social-auth,muhammad-ammar/python-social-auth,noodle-learns-programming/python-social-auth,henocdz/python-social-auth,lamby/python-social-auth,mathspace/python-social-auth,chandolia/python-social-auth,Andygmb/python-social-auth,daniula/python-social-auth,mrwags/python-social-auth,ByteInternet/python-social-auth,san-mate/python-social-auth,lamby/python-social-auth,jneves/python-social-auth,python-social-auth/social-docs,chandolia/python-social-auth,S01780/python-social-auth,mathspace/python-social-auth,henocdz/python-social-auth,michael-borisov/python-social-auth,michael-borisov/python-social-auth,muhammad-ammar/python-social-auth,contracode/python-social-auth,python-social-auth/social-core,mchdks/python-social-auth,contracode/python-social-auth,msampathkumar/python-social-auth,lneoe/python-social-auth,S01780/python-social-auth,tkajtoch/python-social-auth,tkajtoch/python-social-auth,JJediny/python-social-auth,nirmalvp/python-social-auth,fearlessspider/python-social-auth,lneoe/python-social-auth,nirmalvp/python-social-auth,iruga090/python-social-auth,drxos/python-social-auth,rsalmaso/python-social-auth,muhammad-ammar/python-social-auth,VishvajitP/python-social-auth,barseghyanartur/python-social-auth,JJediny/python-social-auth,wildtetris/python-social-auth,tkajtoch/python-social-auth,nirmalvp/python-social-auth,python-social-auth/social-storage-sqlalchemy,daniula/python-social-auth,tobias47n9e/social-core,henocdz/python-social-auth,DhiaEddineSaidi/python-social-auth,degs098/python-social-auth,Andygmb/python-social-auth,ononeor12/python-social-auth,rsalmaso/python-social-auth,ariestiyansyah/python-social-auth,webjunkie/python-social-auth,python-social-auth/social-core,robbiet480/python-social-auth,cmichal/python-social-auth,robbiet480/python-social-auth,falcon1kr/python-social-auth,michael-borisov/python-social-auth,rsteca/python-social-auth,bjorand/python-social-auth,jeyraof/python-social-auth,DhiaEddineSaidi/python-social-auth,python-social-auth/social-app-django,mrwags/python-social-auth,firstjob/python-social-auth
social/strategies/tornado_strategy.py
social/strategies/tornado_strategy.py
import json

from tornado.template import Loader, Template

from social.utils import build_absolute_uri
from social.strategies.base import BaseStrategy, BaseTemplateStrategy


class TornadoTemplateStrategy(BaseTemplateStrategy):
    def render_template(self, tpl, context):
        path, tpl = tpl.rsplit('/', 1)
        return Loader(path).load(tpl).generate(**context)

    def render_string(self, html, context):
        return Template(html).generate(**context)


class TornadoStrategy(BaseStrategy):
    DEFAULT_TEMPLATE_STRATEGY = TornadoTemplateStrategy

    def __init__(self, storage, request_handler, tpl=None):
        self.request_handler = request_handler
        self.request = self.request_handler.request
        super(TornadoStrategy, self).__init__(storage, tpl)

    def get_setting(self, name):
        return self.request_handler.settings[name]

    def request_data(self, merge=True):
        # Multiple valued arguments not supported yet
        return dict((key, val[0])
                    for key, val in self.request.arguments.iteritems())

    def request_host(self):
        return self.request.host

    def redirect(self, url):
        return self.request_handler.redirect(url)

    def html(self, content):
        self.request_handler.write(content)

    def session_get(self, name, default=None):
        value = self.request_handler.get_secure_cookie(name)
        if value:
            return json.loads(value.decode())
        return default

    def session_set(self, name, value):
        self.request_handler.set_secure_cookie(name, json.dumps(value).encode())

    def session_pop(self, name):
        value = self.session_get(name)
        self.request_handler.clear_cookie(name)
        return value

    def session_setdefault(self, name, value):
        pass

    def build_absolute_uri(self, path=None):
        return build_absolute_uri('{0}://{1}'.format(self.request.protocol,
                                                     self.request.host),
                                  path)

    def partial_to_session(self, next, backend, request=None, *args, **kwargs):
        return json.dumps(super(TornadoStrategy, self).partial_to_session(
            next, backend, request=request, *args, **kwargs
        ))

    def partial_from_session(self, session):
        if session:
            return super(TornadoStrategy, self).partial_to_session(
                json.loads(session)
            )
import json

from tornado.template import Loader, Template

from social.utils import build_absolute_uri
from social.strategies.base import BaseStrategy, BaseTemplateStrategy


class TornadoTemplateStrategy(BaseTemplateStrategy):
    def render_template(self, tpl, context):
        path, tpl = tpl.rsplit('/', 1)
        return Loader(path).load(tpl).generate(**context)

    def render_string(self, html, context):
        return Template(html).generate(**context)


class TornadoStrategy(BaseStrategy):
    DEFAULT_TEMPLATE_STRATEGY = TornadoTemplateStrategy

    def __init__(self, storage, request_handler, tpl=None):
        self.request_handler = request_handler
        self.request = self.request_handler.request
        super(TornadoStrategy, self).__init__(storage, tpl)

    def get_setting(self, name):
        return self.request_handler.settings[name]

    def request_data(self, merge=True):
        # Multiple valued arguments not supported yet
        return dict((key, val[0])
                    for key, val in self.request.arguments.iteritems())

    def request_host(self):
        return self.request.host

    def redirect(self, url):
        return self.request_handler.redirect(url)

    def html(self, content):
        self.request_handler.write(content)

    def session_get(self, name, default=None):
        return self.request_handler.get_secure_cookie(name) or default

    def session_set(self, name, value):
        self.request_handler.set_secure_cookie(name, str(value))

    def session_pop(self, name):
        value = self.request_handler.get_secure_cookie(name)
        self.request_handler.set_secure_cookie(name, '')
        return value

    def session_setdefault(self, name, value):
        pass

    def build_absolute_uri(self, path=None):
        return build_absolute_uri('{0}://{1}'.format(self.request.protocol,
                                                     self.request.host),
                                  path)

    def partial_to_session(self, next, backend, request=None, *args, **kwargs):
        return json.dumps(super(TornadoStrategy, self).partial_to_session(
            next, backend, request=request, *args, **kwargs
        ))

    def partial_from_session(self, session):
        if session:
            return super(TornadoStrategy, self).partial_to_session(
                json.loads(session)
            )
bsd-3-clause
Python
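The tornado change above boils down to one pattern: JSON-encode session values before handing them to secure cookies, and decode them on the way out, so non-string data survives the round trip. The serialization half in isolation, with no tornado dependency (a sketch, not code from the commit):

import json

def dump_for_cookie(value):
    # Secure cookies carry bytes, so serialize structured values first.
    return json.dumps(value).encode()

def load_from_cookie(raw, default=None):
    if raw:
        return json.loads(raw.decode())
    return default

raw = dump_for_cookie({'next': '/done', 'attempts': 2})
assert load_from_cookie(raw) == {'next': '/done', 'attempts': 2}
assert load_from_cookie(None, default={}) == {}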
c5db8af5faca762e574a5b3b6117a0253e59cd05
use new urls module
dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
couchexport/urls.py
couchexport/urls.py
from django.conf.urls import *

urlpatterns = patterns('',
    url(r'^model/$', 'couchexport.views.export_data', name='model_download_excel'),
    url(r'^async/$', 'couchexport.views.export_data_async', name='export_data_async'),
    url(r'^saved/(?P<export_id>[\w-]+)/$', 'couchexport.views.download_saved_export',
        name='couchexport_download_saved_export'),
)
from django.conf.urls.defaults import *

urlpatterns = patterns('',
    url(r'^model/$', 'couchexport.views.export_data', name='model_download_excel'),
    url(r'^async/$', 'couchexport.views.export_data_async', name='export_data_async'),
    url(r'^saved/(?P<export_id>[\w-]+)/$', 'couchexport.views.download_saved_export',
        name='couchexport_download_saved_export'),
)
bsd-3-clause
Python
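The diff above only swaps the deprecated `django.conf.urls.defaults` import for `django.conf.urls`; on Django 2.0+ the same routes would also drop `patterns()` and string view paths. A hypothetical modern rewrite, assuming the same views still exist (not part of the commit):

from django.urls import re_path

from couchexport import views

urlpatterns = [
    re_path(r'^model/$', views.export_data, name='model_download_excel'),
    re_path(r'^async/$', views.export_data_async, name='export_data_async'),
    re_path(r'^saved/(?P<export_id>[\w-]+)/$', views.download_saved_export,
            name='couchexport_download_saved_export'),
]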
b68da6c5b64009dbd2d53206be4c8d98ed1b0a45
Add print option to exercise_oaipmh.py
libris/librisxl,libris/librisxl,libris/librisxl
librisxl-tools/scripts/exercise_oaipmh.py
librisxl-tools/scripts/exercise_oaipmh.py
import requests
from lxml import etree
from StringIO import StringIO
import time

PMH = "{http://www.openarchives.org/OAI/2.0/}"


def parse_oaipmh(start_url, name, passwd, do_print=False):
    start_time = time.time()
    resumption_token = None
    record_count = 0
    while True:
        url = make_next_url(start_url, resumption_token)
        res = requests.get(url, auth=(name, passwd), stream=True,
                           timeout=3600)
        if do_print:
            data = res.raw.read()
            print data
            source = StringIO(data)
        else:
            source = res.raw
        record_root = etree.parse(source)
        record_count += len(record_root.findall("{0}ListRecords/{0}record".format(PMH)))
        resumption_token = record_root.findtext("{0}ListRecords/{0}resumptionToken".format(PMH))
        elapsed = time.time() - start_time
        print "Record count: %s. Got resumption token: %s. Elapsed time: %s. Records/second: %s" % (record_count, resumption_token, elapsed, record_count / elapsed)
        if not resumption_token:
            break


def make_next_url(base_url, resumption_token=None):
    params = "?verb=ListRecords&resumptionToken=%s" % resumption_token if resumption_token else "?verb=ListRecords&metadataPrefix=marcxml"
    return base_url + params


if __name__ == '__main__':
    from sys import argv
    args = argv[1:]
    if '-p' in args:
        args.remove('-p')
        do_print = True
    else:
        do_print = False
    if not args:
        print "Usage: %s OAI_PMH_URL [NAME, PASSWORD] [-p]" % argv[0]
        exit()
    start_url = args.pop(0)
    if args:
        name, passwd = args[:2]
    else:
        name, passwd = None, None
    parse_oaipmh(start_url, name, passwd, do_print)
import requests
from lxml import etree
import time

PMH = "{http://www.openarchives.org/OAI/2.0/}"


def parse_oaipmh(start_url, name, passwd):
    start_time = time.time()
    resumption_token = None
    record_count = 0
    while True:
        url = make_next_url(start_url, resumption_token)
        res = requests.get(url, auth=(name, passwd), stream=True,
                           timeout=3600)
        record_root = etree.parse(res.raw)
        record_count += len(record_root.findall("{0}ListRecords/{0}record".format(PMH)))
        resumption_token = record_root.findtext("{0}ListRecords/{0}resumptionToken".format(PMH))
        elapsed = time.time() - start_time
        print "Record count: %s. Got resumption token: %s. Elapsed time: %s. Records/second: %s" % (record_count, resumption_token, elapsed, record_count / elapsed)
        if not resumption_token:
            break


def make_next_url(base_url, resumption_token=None):
    params = "?verb=ListRecords&resumptionToken=%s" % resumption_token if resumption_token else "?verb=ListRecords&metadataPrefix=marcxml"
    return base_url + params


if __name__ == '__main__':
    from sys import argv
    args = argv[1:]
    start_url = (args.pop(0) if len(args) == 3
                 else "http://data.libris.kb.se/hold/oaipmh")
    name, passwd = args[:2]
    parse_oaipmh(start_url, name, passwd)
apache-2.0
Python
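The harvester above is the standard OAI-PMH paging loop: request a page, read the resumptionToken, and repeat until the token comes back empty. The skeleton of that loop with the HTTP and XML details stubbed out (a sketch, not code from the commit):

def harvest(fetch_page):
    """fetch_page(token) -> (records, next_token_or_None)."""
    token = None
    while True:
        records, token = fetch_page(token)
        for record in records:
            yield record
        if not token:
            break

# Toy pages standing in for real ListRecords responses:
pages = {None: (['r1', 'r2'], 'abc'), 'abc': (['r3'], None)}
assert list(harvest(lambda t: pages[t])) == ['r1', 'r2', 'r3']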
f4408cb2feb5a28a5117fefebe782a61ea80de96
fix res_company
marcok/odoo_modules,marcok/odoo_modules,marcok/odoo_modules
hr_employee_time_clock/models/__init__.py
hr_employee_time_clock/models/__init__.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from . import hr_timesheet_sheet
from . import hr_attendance
from . import hr_holidays_public
from . import employee_attendance_analytic
from . import resource_calendar
from . import hr_holidays
from . import account_analytic_line
from . import hr_department
from . import hr_employee
from . import hr_timesheet_sheet_day
from . import hr_timesheet_sheet_account
from . import res_company
from . import hr_contract
# from . import res_config_settings
from . import res_users
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2016 - now Bytebrand Outsourcing AG (<http://www.bytebrand.net>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from . import hr_timesheet_sheet
from . import hr_attendance
from . import hr_holidays_public
from . import employee_attendance_analytic
from . import resource_calendar
from . import hr_holidays
from . import account_analytic_line
from . import hr_department
from . import hr_employee
from . import hr_timesheet_sheet_day
from . import hr_timesheet_sheet_account
# from . import res_company
from . import hr_contract
# from . import res_config_settings
from . import res_users
agpl-3.0
Python
96158b6b5a153db6b9a5e5d40699efefc728a9b3
Make our LiveWidget handle a 'topics' property along with 'topic'
mokshaproject/moksha,pombredanne/moksha,pombredanne/moksha,mokshaproject/moksha,mokshaproject/moksha,lmacken/moksha,lmacken/moksha,pombredanne/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,ralphbean/moksha,ralphbean/moksha
moksha/api/widgets/live/live.py
moksha/api/widgets/live/live.py
# This file is part of Moksha.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luke Macken <[email protected]>

import moksha

from tw.api import Widget

from moksha.exc import MokshaException
from moksha.api.widgets.stomp import StompWidget, stomp_subscribe, stomp_unsubscribe


class LiveWidget(Widget):
    """ A live streaming widget.

    This widget handles automatically subscribing your widget to any given
    topics, and registers all of the stomp callbacks.
    """
    engine_name = 'mako'

    def update_params(self, d):
        """ Register this widgets stomp callbacks """
        super(LiveWidget, self).update_params(d)
        topics = d.get('topic', getattr(self, 'topic', d.get('topics', getattr(self, 'topics', None))))
        if not topics:
            raise MokshaException('You must specify a `topic` to subscribe to')
        topics = isinstance(topics, list) and topics or [topics]
        for callback in StompWidget.callbacks:
            if callback == 'onmessageframe':
                for topic in topics:
                    cb = getattr(self, 'onmessage').replace('${id}', self.id)
                    moksha.stomp[callback][topic].append(cb)
            elif callback == 'onconnectedframe':
                moksha.stomp['onconnectedframe'].append(stomp_subscribe(topics))
            elif callback in self.params:
                moksha.stomp[callback].append(getattr(self, callback))

    def get_topics(self):
        topics = []
        for key in ('topic', 'topics'):
            if hasattr(self, key):
                topic = getattr(self, key)
                if topic:
                    if isinstance(topic, basestring):
                        map(topics.append, topic.split())
                    else:
                        topics += topic
        return topics

    # Moksha Topic subscription handling methods
    subscribe_topics = stomp_subscribe
    unsubscribe_topics = stomp_unsubscribe
# This file is part of Moksha.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luke Macken <[email protected]>

import moksha

from tw.api import Widget

from moksha.exc import MokshaException
from moksha.api.widgets.stomp import StompWidget, stomp_subscribe, stomp_unsubscribe


class LiveWidget(Widget):
    """ A live streaming widget.

    This widget handles automatically subscribing your widget to any given
    topics, and registers all of the stomp callbacks.
    """
    engine_name = 'mako'

    def update_params(self, d):
        """ Register this widgets stomp callbacks """
        super(LiveWidget, self).update_params(d)
        topics = d.get('topic', getattr(self, 'topic', None))
        if not topics:
            raise MokshaException('You must specify a `topic` to subscribe to')
        topics = isinstance(topics, list) and topics or [topics]
        for callback in StompWidget.callbacks:
            if callback == 'onmessageframe':
                for topic in topics:
                    cb = getattr(self, 'onmessage').replace('${id}', self.id)
                    moksha.stomp[callback][topic].append(cb)
            elif callback == 'onconnectedframe':
                moksha.stomp['onconnectedframe'].append(stomp_subscribe(topics))
            elif callback in self.params:
                moksha.stomp[callback].append(getattr(self, callback))

    def get_topics(self):
        topics = []
        for key in ('topic', 'topics'):
            if hasattr(self, key):
                topic = getattr(self, key)
                if topic:
                    if isinstance(topic, basestring):
                        map(topics.append, topic.split())
                    else:
                        topics += topic
        return topics

    # Moksha Topic subscription handling methods
    subscribe_topics = stomp_subscribe
    unsubscribe_topics = stomp_unsubscribe
apache-2.0
Python
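The one-line widget change above is a four-way fallback: take `topic` from the params dict, then from the widget, then `topics` from either. The same lookup chain pulled out into a small testable function (names here are illustrative, not from the commit):

def resolve_topics(d, widget):
    # First match wins: d['topic'], widget.topic, d['topics'], widget.topics.
    for source, key in ((d, 'topic'), (widget, 'topic'),
                        (d, 'topics'), (widget, 'topics')):
        if isinstance(source, dict):
            value = source.get(key)
        else:
            value = getattr(source, key, None)
        if value:
            return value if isinstance(value, list) else [value]
    raise ValueError('You must specify a `topic` to subscribe to')

class FakeWidget:
    topic = None
    topics = ['moksha.demo']

assert resolve_topics({}, FakeWidget()) == ['moksha.demo']
assert resolve_topics({'topic': 'a'}, FakeWidget()) == ['a']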
410f02a4f657f9a8b9c839f3e08b176f443de9e8
Handle cases when searched word is only part of the people name.
nihn/linkedin-scraper,nihn/linkedin-scraper
linkedin_scraper/spiders/people_search.py
linkedin_scraper/spiders/people_search.py
from os import environ

from scrapy_splash import SplashRequest
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest


class PeopleSearchSpider(InitSpider):
    name = 'people_search'
    allowed_domains = ['linkedin.com']
    login_page = 'https://www.linkedin.com/uas/login'

    def __init__(self, *args, **kwargs):
        try:
            self.username = (kwargs.pop('username', None) or
                             environ['SPIDER_USERNAME'])
            self.password = (kwargs.pop('password', None) or
                             environ['SPIDER_PASSWORD'])
        except KeyError:
            raise Exception('Both username and password need to be specified '
                            'by -a option or SPIDER_<PARAM> environment var')

        query = kwargs.pop('query', 'Mateusz+Moneta')
        self.start_urls = [
            'https://www.linkedin.com/vsearch/f?type=people&keywords=%s' % query
        ]

        super().__init__(*args, **kwargs)

    def init_request(self):
        return Request(url=self.login_page, callback=self.login)

    def login(self, response):
        return FormRequest.from_response(
            response, callback=self.check_login_response,
            formdata={'session_key': self.username,
                      'session_password': self.password})

    def parse(self, response):
        for search_result in response.css('li.mod.result.people'):
            names = search_result.css('a.title.main-headline').xpath(
                'string(.)').extract_first()
            *first_name, last_name = names.split()
            yield {
                'first_name': ' '.join(first_name),
                'last_name': last_name,
            }

    def check_login_response(self, response):
        if b'Sign Out' in response.body:
            self.logger.debug("Successfully logged in. Let's start crawling!")
            return self.initialized()
        self.logger.error('Login failed!')

    def make_requests_from_url(self, url):
        # Do SplashRequest instead of regular one to be able to evaluate
        # JavaScript responsible for dynamic page generation.
        return SplashRequest(url)
from os import environ

from scrapy_splash import SplashRequest
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest


class PeopleSearchSpider(InitSpider):
    name = 'people_search'
    allowed_domains = ['linkedin.com']
    login_page = 'https://www.linkedin.com/uas/login'

    def __init__(self, *args, **kwargs):
        try:
            self.username = (kwargs.pop('username', None) or
                             environ['SPIDER_USERNAME'])
            self.password = (kwargs.pop('password', None) or
                             environ['SPIDER_PASSWORD'])
        except KeyError:
            raise Exception('Both username and password need to be specified '
                            'by -a option or SPIDER_<PARAM> environment var')

        query = kwargs.pop('query', 'Mateusz+Moneta')
        self.start_urls = [
            'https://www.linkedin.com/vsearch/f?type=people&keywords=%s' % query
        ]

        super().__init__(*args, **kwargs)

    def init_request(self):
        return Request(url=self.login_page, callback=self.login)

    def login(self, response):
        return FormRequest.from_response(
            response, callback=self.check_login_response,
            formdata={'session_key': self.username,
                      'session_password': self.password})

    def parse(self, response):
        for search_result in response.css('li.mod.result.people'):
            *first_name, last_name = search_result.css('b::text').extract()
            yield {
                'first_name': ' '.join(first_name),
                'last_name': last_name,
            }

    def check_login_response(self, response):
        if b'Sign Out' in response.body:
            self.logger.debug("Successfully logged in. Let's start crawling!")
            return self.initialized()
        self.logger.error('Login failed!')

    def make_requests_from_url(self, url):
        # Do SplashRequest instead of regular one to be able to evaluate
        # JavaScript responsible for dynamic page generation.
        return SplashRequest(url)
mit
Python
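The scraper fix above works because XPath's string(.) concatenates all descendant text of a node, while `b::text` returns only the bolded fragment, which breaks when the search term is just part of the name. A minimal parsel demonstration on made-up markup (a sketch, not code from the commit):

from parsel import Selector

html = '<a class="title"><b>Mateusz</b> Moneta</a>'  # made-up markup
sel = Selector(text=html)

bold_only = sel.css('b::text').get()                     # 'Mateusz'
full_name = sel.css('a.title').xpath('string(.)').get()  # 'Mateusz Moneta'
assert bold_only == 'Mateusz'
assert full_name == 'Mateusz Moneta'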
655fcce56abd0d3f0da9b52e911636d931157443
bump version
penkin/python-dockercloud,penkin/python-dockercloud,docker/python-dockercloud,docker/python-dockercloud
dockercloud/__init__.py
dockercloud/__init__.py
import base64
import logging
import os

import requests
from future.standard_library import install_aliases
install_aliases()

from dockercloud.api import auth
from dockercloud.api.service import Service
from dockercloud.api.container import Container
from dockercloud.api.repository import Repository
from dockercloud.api.node import Node
from dockercloud.api.action import Action
from dockercloud.api.nodecluster import NodeCluster
from dockercloud.api.nodetype import NodeType
from dockercloud.api.nodeprovider import Provider
from dockercloud.api.noderegion import Region
from dockercloud.api.tag import Tag
from dockercloud.api.trigger import Trigger
from dockercloud.api.stack import Stack
from dockercloud.api.exceptions import ApiError, AuthError, ObjectNotFound, NonUniqueIdentifier
from dockercloud.api.utils import Utils
from dockercloud.api.events import Events
from dockercloud.api.nodeaz import AZ

__version__ = '1.0.6'

dockercloud_auth = os.environ.get('DOCKERCLOUD_AUTH')
basic_auth = auth.load_from_file("~/.docker/config.json")

if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_PASS'):
    basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'),
                                             os.environ.get('DOCKERCLOUD_PASS')))
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_APIKEY'):
    basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'),
                                             os.environ.get('DOCKERCLOUD_APIKEY')))

rest_host = os.environ.get("DOCKERCLOUD_REST_HOST") or 'https://cloud.docker.com/'
stream_host = os.environ.get("DOCKERCLOUD_STREAM_HOST") or 'wss://ws.cloud.docker.com/'

namespace = os.environ.get('DOCKERCLOUD_NAMESPACE')
user_agent = None

logging.basicConfig()
logger = logging.getLogger("python-dockercloud")

try:
    requests.packages.urllib3.disable_warnings()
except:
    pass
import base64
import logging
import os

import requests
from future.standard_library import install_aliases
install_aliases()

from dockercloud.api import auth
from dockercloud.api.service import Service
from dockercloud.api.container import Container
from dockercloud.api.repository import Repository
from dockercloud.api.node import Node
from dockercloud.api.action import Action
from dockercloud.api.nodecluster import NodeCluster
from dockercloud.api.nodetype import NodeType
from dockercloud.api.nodeprovider import Provider
from dockercloud.api.noderegion import Region
from dockercloud.api.tag import Tag
from dockercloud.api.trigger import Trigger
from dockercloud.api.stack import Stack
from dockercloud.api.exceptions import ApiError, AuthError, ObjectNotFound, NonUniqueIdentifier
from dockercloud.api.utils import Utils
from dockercloud.api.events import Events
from dockercloud.api.nodeaz import AZ

__version__ = '1.0.5'

dockercloud_auth = os.environ.get('DOCKERCLOUD_AUTH')
basic_auth = auth.load_from_file("~/.docker/config.json")

if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_PASS'):
    basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'),
                                             os.environ.get('DOCKERCLOUD_PASS')))
if os.environ.get('DOCKERCLOUD_USER') and os.environ.get('DOCKERCLOUD_APIKEY'):
    basic_auth = base64.b64encode("%s:%s" % (os.environ.get('DOCKERCLOUD_USER'),
                                             os.environ.get('DOCKERCLOUD_APIKEY')))

rest_host = os.environ.get("DOCKERCLOUD_REST_HOST") or 'https://cloud.docker.com/'
stream_host = os.environ.get("DOCKERCLOUD_STREAM_HOST") or 'wss://ws.cloud.docker.com/'

namespace = os.environ.get('DOCKERCLOUD_NAMESPACE')
user_agent = None

logging.basicConfig()
logger = logging.getLogger("python-dockercloud")

try:
    requests.packages.urllib3.disable_warnings()
except:
    pass
apache-2.0
Python
dae16f72b9ca5d96c7f894601aa3a69facbbb00e
Fix memory limit in MongoDB while loading logs (#5)
xenx/recommendation_system,xenx/recommendation_system
scripts/load_logs_to_mongodb.py
scripts/load_logs_to_mongodb.py
import os
import sys
from datetime import datetime
from collections import defaultdict

from pymongo import MongoClient

logs_file = open(sys.argv[1])

article_urls = set()
article_views = defaultdict(list)  # article_url: list of user's id's
article_times = {}

for line in logs_file:
    try:
        timestamp, url, user = line.strip().split('\t')
    except IndexError:
        continue

    timestamp = timestamp.strip(' GET').strip('Z')
    # Delete ms from timestamp
    timestamp = ''.join(timestamp.split('.')[:-1])
    event_time = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')

    if not url or not user:
        continue

    if not url.startswith('https://tvrain.ru/'):
        continue

    article_urls.add(url)
    article_views[url].append(user)

    # Save time of only first event
    if url not in article_times:
        article_times[url] = event_time

mongodb_client = MongoClient(os.environ['MONGODB_URL'])
db = mongodb_client.tvrain
parsed_articles = db.tvrain
articles = db.articles

# Clear articles
articles.remove({})

for article in parsed_articles.find():
    if article['url'] not in article_urls:
        continue

    views = article_views[article['url']]
    compressed_views = []
    # Save only every 10th view
    for i in range(len(views)):
        if i % 10 == 0:
            compressed_views.append(views[i])

    articles.insert_one({
        '_id': article['_id'],
        'title': article['title'],
        'text': article['text'],
        'views': compressed_views,
        'time': article_times[article['url']]
    })
import os
import sys
from datetime import datetime
from collections import defaultdict

from pymongo import MongoClient

logs_file = open(sys.argv[1])

article_urls = set()
article_views = defaultdict(list)  # article_url: list of user's id's
article_times = {}

for line in logs_file:
    try:
        timestamp, url, user = line.strip().split('\t')
    except IndexError:
        continue

    timestamp = timestamp.strip(' GET').strip('Z')
    # Delete ms from timestamp
    timestamp = ''.join(timestamp.split('.')[:-1])
    event_time = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')

    if not url or not user:
        continue

    if not url.startswith('https://tvrain.ru/'):
        continue

    article_urls.add(url)
    article_views[url].append(user)

    # Save time of only first event
    if url not in article_times:
        article_times[url] = event_time

mongodb_client = MongoClient(os.environ['MONGODB_URL'])
db = mongodb_client.tvrain
parsed_articles = db.tvrain
articles = db.articles

for article in parsed_articles.find():
    if article['url'] not in article_urls:
        continue

    articles.insert_one({
        '_id': article['_id'],
        'title': article['title'],
        'text': article['text'],
        'views': article_views[article['url']],
        'time': article_times[article['url']]
    })
mit
Python
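The memory fix above samples every 10th viewer with an index loop; Python's extended slice expresses the same downsampling in one step (a sketch, not code from the commit):

views = list(range(25))

# Keep indices 0, 10, 20, ... exactly like the loop in the record.
compressed = views[::10]
assert compressed == [0, 10, 20]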
3375c9cd3311bff8ff3ab07c361e18c68226784c
remove stray print
praekelt/mc2,praekelt/mc2,praekelt/mc2,praekelt/mc2,praekelt/mc2
mc2/controllers/base/managers/rabbitmq.py
mc2/controllers/base/managers/rabbitmq.py
import base64
import hashlib
import random
import time
import uuid

from django.conf import settings
from pyrabbit.api import Client
from pyrabbit.http import HTTPError


class ControllerRabbitMQManager(object):

    def __init__(self, controller):
        """
        A helper manager to get to connect to RabbitMQ

        :param controller Controller: A Controller model instance
        """
        self.ctrl = controller
        self.client = Client(
            settings.RABBITMQ_API_HOST,
            settings.RABBITMQ_API_USERNAME,
            settings.RABBITMQ_API_PASSWORD)

    def _create_password(self):
        # Guranteed random dice rolls
        return base64.b64encode(
            hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24]

    def _create_username(self):
        return base64.b64encode(str(
            time.time() + random.random() * time.time())).strip('=').lower()

    def create_rabbitmq_vhost(self):
        """
        Attempts to create a new vhost. Returns false if vhost already exists.
        The new username/password will be saved on the controller if a new
        vhost was created

        :returns: bool
        """
        try:
            self.client.get_vhost(self.ctrl.rabbitmq_vhost_name)
            return False  # already exists
        except HTTPError:
            pass

        self.client.create_vhost(self.ctrl.rabbitmq_vhost_name)

        # create user/pass
        username = self._create_username()
        password = self._create_password()
        self.client.create_user(username, password)

        # save newly created username/pass
        self.ctrl.rabbitmq_vhost_username = username
        self.ctrl.rabbitmq_vhost_password = password
        self.ctrl.rabbitmq_vhost_host = settings.RABBITMQ_APP_HOST
        self.ctrl.save()

        self.client.set_vhost_permissions(
            self.ctrl.rabbitmq_vhost_name, username, '.*', '.*', '.*')
        return True
import base64
import hashlib
import random
import time
import uuid

from django.conf import settings
from pyrabbit.api import Client
from pyrabbit.http import HTTPError


class ControllerRabbitMQManager(object):

    def __init__(self, controller):
        """
        A helper manager to get to connect to RabbitMQ

        :param controller Controller: A Controller model instance
        """
        self.ctrl = controller
        self.client = Client(
            settings.RABBITMQ_API_HOST,
            settings.RABBITMQ_API_USERNAME,
            settings.RABBITMQ_API_PASSWORD)
        print self.client

    def _create_password(self):
        # Guranteed random dice rolls
        return base64.b64encode(
            hashlib.sha1(uuid.uuid1().hex).hexdigest())[:24]

    def _create_username(self):
        return base64.b64encode(str(
            time.time() + random.random() * time.time())).strip('=').lower()

    def create_rabbitmq_vhost(self):
        """
        Attempts to create a new vhost. Returns false if vhost already exists.
        The new username/password will be saved on the controller if a new
        vhost was created

        :returns: bool
        """
        try:
            self.client.get_vhost(self.ctrl.rabbitmq_vhost_name)
            return False  # already exists
        except HTTPError:
            pass

        self.client.create_vhost(self.ctrl.rabbitmq_vhost_name)

        # create user/pass
        username = self._create_username()
        password = self._create_password()
        self.client.create_user(username, password)

        # save newly created username/pass
        self.ctrl.rabbitmq_vhost_username = username
        self.ctrl.rabbitmq_vhost_password = password
        self.ctrl.rabbitmq_vhost_host = settings.RABBITMQ_APP_HOST
        self.ctrl.save()

        self.client.set_vhost_permissions(
            self.ctrl.rabbitmq_vhost_name, username, '.*', '.*', '.*')
        return True
bsd-2-clause
Python
2cee1d5bff32831a9c15755e7482057ac7b9a39a
Update packets.py
jvanbrug/cs143sim,jvanbrug/cs143sim,jvanbrug/cs143sim
cs143sim/packets.py
cs143sim/packets.py
"""This module contains all packet definitions. .. autosummary:: Packet DataPacket RouterPacket .. moduleauthor:: Lan Hongjian <[email protected]> .. moduleauthor:: Yamei Ou <[email protected]> .. moduleauthor:: Samuel Richerd <[email protected]> .. moduleauthor:: Jan Van Bruggen <[email protected]> .. moduleauthor:: Junlin Zhang <[email protected]> """ from cs143sim.constants import PACKET_SIZE class Packet(object): """Representation of a quantum of information Packets carry information along the network, between :class:`Hosts <.Host>` or :class:`Routers <.Router>`. :param destination: destination :class:`.Host` or :class:`.Router` :param source: source :class:`.Host` or :class:`.Router` :param str timestamp: time at which the packet was created :ivar destination: destination :class:`.Host` or :class:`.Router` :ivar source: source :class:`.Host` or :class:`.Router` :ivar str timestamp: time at which the packet was created """ def __init__(self, destination, source, timestamp): self.timestamp = timestamp self.source = source self.destination = destination self.size = PACKET_SIZE class DataPacket(Packet): """A packet used for transferring data :param destination: destination :class:`.Host` or :class:`.Router` :param source: source :class:`.Host` or :class:`.Router` :param str timestamp: time at which the packet was created """ def __init__(self, destination, source, timestamp, acknowledgement, number): # TODO: define number and acknowledgement in docstring super(DataPacket, self).__init__(timestamp=timestamp, source=source, destination=destination) self.number = number self.acknowledgement = acknowledgement class RouterPacket(Packet): """A packet used to update routing tables :param source: source :class:`.Host` or :class:`.Router` :param str timestamp: time at which the packet was created """ def __init__(self, source, timestamp, router_table, acknowledgement): # TODO: define router_table in docstring super(RouterPacket, self).__init__(timestamp=timestamp, source=source, destination=0) self.router_table = router_table self.number = 0 self.acknowledgement = acknowledgement
"""This module contains all packet definitions. .. autosummary:: Packet DataPacket RouterPacket .. moduleauthor:: Lan Hongjian <[email protected]> .. moduleauthor:: Yamei Ou <[email protected]> .. moduleauthor:: Samuel Richerd <[email protected]> .. moduleauthor:: Jan Van Bruggen <[email protected]> .. moduleauthor:: Junlin Zhang <[email protected]> """ from cs143sim.constants import PACKET_SIZE class Packet(object): """Representation of a quantum of information Packets carry information along the network, between :class:`Hosts <.Host>` or :class:`Routers <.Router>`. :param destination: destination :class:`.Host` or :class:`.Router` :param source: source :class:`.Host` or :class:`.Router` :param str timestamp: time at which the packet was created :ivar destination: destination :class:`.Host` or :class:`.Router` :ivar source: source :class:`.Host` or :class:`.Router` :ivar str timestamp: time at which the packet was created """ def __init__(self, destination, source, timestamp): self.timestamp = timestamp self.source = source self.destination = destination self.size = PACKET_SIZE class DataPacket(Packet): """A packet used for transferring data :param destination: destination :class:`.Host` or :class:`.Router` :param source: source :class:`.Host` or :class:`.Router` :param str timestamp: time at which the packet was created """ def __init__(self, destination, source, timestamp, acknowledgement, number): # TODO: define number and acknowledgement in docstring super(DataPacket, self).__init__(timestamp=timestamp, source=source, destination=destination) self.number = number self.acknowledgement = acknowledgement class RouterPacket(Packet): """A packet used to update routing tables :param source: source :class:`.Host` or :class:`.Router` :param str timestamp: time at which the packet was created """ def __init__(self, source, timestamp, router_table, acknowledgement): # TODO: define router_table in docstring super(RouterPacket, self).__init__(timestamp=timestamp, source=source, destination=0) self.router_table = router_table self.acknowledgement = acknowledgement
mit
Python
0c35c0f7fe126b87eccdf4f69933b84927956658
Fix account __type__
vuolter/pyload,vuolter/pyload,vuolter/pyload
module/plugins/accounts/XFileSharingPro.py
module/plugins/accounts/XFileSharingPro.py
# -*- coding: utf-8 -*-

import re

from module.plugins.internal.XFSPAccount import XFSPAccount


class XFileSharingPro(XFSPAccount):
    __name__ = "XFileSharingPro"
    __type__ = "account"
    __version__ = "0.02"

    __description__ = """XFileSharingPro multi-purpose account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "[email protected]")]


    def init(self):
        pattern = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
        self.HOSTER_NAME = re.match(pattern, self.pyfile.url).group(1).lower()
# -*- coding: utf-8 -*-

import re

from module.plugins.internal.XFSPAccount import XFSPAccount


class XFileSharingPro(XFSPAccount):
    __name__ = "XFileSharingPro"
    __type__ = "crypter"
    __version__ = "0.01"

    __description__ = """XFileSharingPro dummy account plugin for hook"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "[email protected]")]


    def init(self):
        pattern = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
        self.HOSTER_NAME = re.match(pattern, self.pyfile.url).group(1).lower()
agpl-3.0
Python
a396d3e7b4de10710c2f2e0beab0ef82acaf866b
Create first test
masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api
web/impact/impact/tests/test_track_api_calls.py
web/impact/impact/tests/test_track_api_calls.py
from django.test import (
    TestCase,
)
from mock import mock, patch

from impact.tests.api_test_case import APITestCase


class TestTrackAPICalls(APITestCase):
    @patch('impact.middleware.track_api_calls.TrackAPICalls.process_request.logger')
    def test_when_user_authenticated(self, logger_info_patch):
        with self.login(email=self.basic_user().email):
            response = self.client.get(/)
            logger_info_patch.info.assert_called_with()

    def test_when_no_user_authenticated(self):
        pass
from django.test import (
    RequestFactory,
    TestCase,
)
from mock import patch


class TestTrackAPICalls(TestCase):
    def test_when_user_auth(self):
        pass

    def test_when_no_user_auth(self):
        pass
mit
Python
9e577694d2f8665599d590299e58355dd7472011
Fix less
cupy/cupy,keisuke-umezawa/chainer,chainer/chainer,sinhrks/chainer,yanweifu/chainer,hvy/chainer,wkentaro/chainer,tkerola/chainer,niboshi/chainer,laysakura/chainer,hidenori-t/chainer,cemoody/chainer,ysekky/chainer,pfnet/chainer,chainer/chainer,okuta/chainer,hvy/chainer,cupy/cupy,kashif/chainer,keisuke-umezawa/chainer,niboshi/chainer,rezoo/chainer,kiyukuta/chainer,AlpacaDB/chainer,wkentaro/chainer,minhpqn/chainer,okuta/chainer,ktnyt/chainer,t-abe/chainer,niboshi/chainer,ikasumi/chainer,truongdq/chainer,wkentaro/chainer,AlpacaDB/chainer,benob/chainer,okuta/chainer,ktnyt/chainer,truongdq/chainer,ytoyama/yans_chainer_hackathon,t-abe/chainer,keisuke-umezawa/chainer,cupy/cupy,sou81821/chainer,niboshi/chainer,benob/chainer,jnishi/chainer,aonotas/chainer,1986ks/chainer,jnishi/chainer,kikusu/chainer,ronekko/chainer,hvy/chainer,muupan/chainer,chainer/chainer,tigerneil/chainer,keisuke-umezawa/chainer,delta2323/chainer,muupan/chainer,umitanuki/chainer,ktnyt/chainer,cupy/cupy,jnishi/chainer,hvy/chainer,masia02/chainer,chainer/chainer,jnishi/chainer,okuta/chainer,ktnyt/chainer,kikusu/chainer,sinhrks/chainer,anaruse/chainer,Kaisuke5/chainer,tscohen/chainer,wkentaro/chainer
cupy/logic/comparison.py
cupy/logic/comparison.py
from cupy.logic import ufunc


def allclose(a, b, rtol=1e-05, atol=1e-08):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False, allocator=None):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def array_equal(a1, a2):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def array_equiv(a1, a2):
    # TODO(beam2d): Implement it
    raise NotImplementedError


greater = ufunc.create_comparison(
    'greater', '>',
    '''Tests elementwise if ``x1 > x2``.

    .. seealso:: :data:`numpy.greater`

    ''')


greater_equal = ufunc.create_comparison(
    'greater_equal', '>=',
    '''Tests elementwise if ``x1 >= x2``.

    .. seealso:: :data:`numpy.greater_equal`

    ''')


less = ufunc.create_comparison(
    'less', '<',
    '''Tests elementwise if ``x1 < x2``.

    .. seealso:: :data:`numpy.less`

    ''')


less_equal = ufunc.create_comparison(
    'less_equal', '<=',
    '''Tests elementwise if ``x1 <= x2``.

    .. seealso:: :data:`numpy.less_equal`

    ''')


equal = ufunc.create_comparison(
    'equal', '==',
    '''Tests elementwise if ``x1 == x2``.

    .. seealso:: :data:`numpy.equal`

    ''')


not_equal = ufunc.create_comparison(
    'not_equal', '!=',
    '''Tests elementwise if ``x1 != x2``.

    .. seealso:: :data:`numpy.equal`

    ''')
from cupy.logic import ufunc


def allclose(a, b, rtol=1e-05, atol=1e-08):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False, allocator=None):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def array_equal(a1, a2):
    # TODO(beam2d): Implement it
    raise NotImplementedError


def array_equiv(a1, a2):
    # TODO(beam2d): Implement it
    raise NotImplementedError


greater = ufunc.create_comparison(
    'greater', '>',
    '''Tests elementwise if ``x1 > x2``.

    .. seealso:: :data:`numpy.greater`

    ''')


greater_equal = ufunc.create_comparison(
    'greater_equal', '>=',
    '''Tests elementwise if ``x1 >= x2``.

    .. seealso:: :data:`numpy.greater_equal`

    ''')


less = ufunc.create_comparison(
    'less', '<'
    '''Tests elementwise if ``x1 < x2``.

    .. seealso:: :data:`numpy.less`

    ''')


less_equal = ufunc.create_comparison(
    'less_equal', '<=',
    '''Tests elementwise if ``x1 <= x2``.

    .. seealso:: :data:`numpy.less_equal`

    ''')


equal = ufunc.create_comparison(
    'equal', '==',
    '''Tests elementwise if ``x1 == x2``.

    .. seealso:: :data:`numpy.equal`

    ''')


not_equal = ufunc.create_comparison(
    'not_equal', '!=',
    '''Tests elementwise if ``x1 != x2``.

    .. seealso:: :data:`numpy.equal`

    ''')
mit
Python
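The one-character `Fix less` commit above is a classic implicit-concatenation bug: without the comma, Python fuses the adjacent string literals, so the operator and the docstring arrive as a single argument and the call ends up one argument short. A tiny runnable illustration (not code from the repo):

# Adjacent string literals concatenate silently:
fused = '<' '''Tests elementwise if ``x1 < x2``.'''
assert fused.startswith('<Tests')

# With the comma restored they remain two separate arguments:
args = ('<', '''Tests elementwise if ``x1 < x2``.''')
assert args[0] == '<'
assert args[1].startswith('Tests')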
244f3262989b0331a120eb546ca22c9bea9194e4
add DownloadDelta to the admin
crate-archive/crate-site,crateio/crate.pypi,crate-archive/crate-site
crate_project/apps/packages/admin.py
crate_project/apps/packages/admin.py
from django.contrib import admin

from packages.models import Package, Release, ReleaseFile, TroveClassifier, PackageURI
from packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ChangeLog
from packages.models import DownloadDelta, ReadTheDocsPackageSlug


class PackageURIAdmin(admin.TabularInline):
    model = PackageURI
    extra = 0


class PackageAdmin(admin.ModelAdmin):
    inlines = [PackageURIAdmin]
    list_display = ["name", "created", "modified", "downloads_synced_on"]
    list_filter = ["created", "modified", "downloads_synced_on"]
    search_fields = ["name"]


class ReleaseRequireInline(admin.TabularInline):
    model = ReleaseRequire
    extra = 0


class ReleaseProvideInline(admin.TabularInline):
    model = ReleaseProvide
    extra = 0


class ReleaseObsoleteInline(admin.TabularInline):
    model = ReleaseObsolete
    extra = 0


class ReleaseFileInline(admin.TabularInline):
    model = ReleaseFile
    extra = 0


class ReleaseURIInline(admin.TabularInline):
    model = ReleaseURI
    extra = 0


class ReleaseAdmin(admin.ModelAdmin):
    inlines = [ReleaseURIInline, ReleaseFileInline, ReleaseRequireInline, ReleaseProvideInline, ReleaseObsoleteInline]
    list_display = ["__unicode__", "package", "version", "summary", "author", "author_email", "maintainer", "maintainer_email", "created", "modified"]
    list_filter = ["created", "modified", "hidden"]
    search_fields = ["package__name", "version", "summary", "author", "author_email", "maintainer", "maintainer_email"]
    raw_id_fields = ["package"]


class TroveClassifierAdmin(admin.ModelAdmin):
    list_display = ["trove"]
    search_fields = ["trove"]


class ReleaseFileAdmin(admin.ModelAdmin):
    list_display = ["release", "type", "python_version", "downloads", "comment", "created", "modified"]
    list_filter = ["type", "created", "modified"]
    search_fields = ["release__package__name", "filename", "digest"]
    raw_id_fields = ["release"]


class DownloadDeltaAdmin(admin.ModelAdmin):
    list_display = ["file", "date", "delta"]
    list_filter = ["date"]
    search_fields = ["file__release__package__name", "file__filename"]
    raw_id_fields = ["file"]


class ChangeLogAdmin(admin.ModelAdmin):
    list_display = ["package", "release", "type", "created", "modified"]
    list_filter = ["type", "created", "modified"]
    search_fields = ["package__name"]
    raw_id_fields = ["package", "release"]


class ReadTheDocsPackageSlugAdmin(admin.ModelAdmin):
    list_display = ["package", "slug"]
    search_fields = ["package__name", "slug"]
    raw_id_fields = ["package"]


admin.site.register(Package, PackageAdmin)
admin.site.register(Release, ReleaseAdmin)
admin.site.register(ReleaseFile, ReleaseFileAdmin)
admin.site.register(TroveClassifier, TroveClassifierAdmin)
admin.site.register(DownloadDelta, DownloadDeltaAdmin)
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(ReadTheDocsPackageSlug, ReadTheDocsPackageSlugAdmin)
from django.contrib import admin

from packages.models import Package, Release, ReleaseFile, TroveClassifier, PackageURI
from packages.models import ReleaseRequire, ReleaseProvide, ReleaseObsolete, ReleaseURI, ChangeLog
from packages.models import ReadTheDocsPackageSlug


class PackageURIAdmin(admin.TabularInline):
    model = PackageURI
    extra = 0


class PackageAdmin(admin.ModelAdmin):
    inlines = [PackageURIAdmin]
    list_display = ["name", "created", "modified", "downloads_synced_on"]
    list_filter = ["created", "modified", "downloads_synced_on"]
    search_fields = ["name"]


class ReleaseRequireInline(admin.TabularInline):
    model = ReleaseRequire
    extra = 0


class ReleaseProvideInline(admin.TabularInline):
    model = ReleaseProvide
    extra = 0


class ReleaseObsoleteInline(admin.TabularInline):
    model = ReleaseObsolete
    extra = 0


class ReleaseFileInline(admin.TabularInline):
    model = ReleaseFile
    extra = 0


class ReleaseURIInline(admin.TabularInline):
    model = ReleaseURI
    extra = 0


class ReleaseAdmin(admin.ModelAdmin):
    inlines = [ReleaseURIInline, ReleaseFileInline, ReleaseRequireInline, ReleaseProvideInline, ReleaseObsoleteInline]
    list_display = ["__unicode__", "package", "version", "summary", "author", "author_email", "maintainer", "maintainer_email", "created", "modified"]
    list_filter = ["created", "modified", "hidden"]
    search_fields = ["package__name", "version", "summary", "author", "author_email", "maintainer", "maintainer_email"]
    raw_id_fields = ["package"]


class TroveClassifierAdmin(admin.ModelAdmin):
    list_display = ["trove"]
    search_fields = ["trove"]


class ReleaseFileAdmin(admin.ModelAdmin):
    list_display = ["release", "type", "python_version", "downloads", "comment", "created", "modified"]
    list_filter = ["type", "created", "modified"]
    search_fields = ["release__package__name", "filename", "digest"]
    raw_id_fields = ["release"]


class ChangeLogAdmin(admin.ModelAdmin):
    list_display = ["package", "release", "type", "created", "modified"]
    list_filter = ["type", "created", "modified"]
    search_fields = ["package__name"]
    raw_id_fields = ["package", "release"]


class ReadTheDocsPackageSlugAdmin(admin.ModelAdmin):
    list_display = ["package", "slug"]
    search_fields = ["package__name", "slug"]
    raw_id_fields = ["package"]


admin.site.register(Package, PackageAdmin)
admin.site.register(Release, ReleaseAdmin)
admin.site.register(ReleaseFile, ReleaseFileAdmin)
admin.site.register(TroveClassifier, TroveClassifierAdmin)
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(ReadTheDocsPackageSlug, ReadTheDocsPackageSlugAdmin)
bsd-2-clause
Python
4c703480fe395ddef5faa6d388a472b7053f26af
Add debug command line option.
osks/jskom,osks/jskom,osks/jskom,osks/jskom
jskom/__main__.py
jskom/__main__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import asyncio
import logging

from hypercorn.asyncio import serve
from hypercorn.config import Config

from jskom import app, init_app


log = logging.getLogger("jskom.main")


def run(host, port):
    # use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
    # http://werkzeug.pocoo.org/docs/serving/#troubleshooting
    init_app()
    config = Config()
    config.bind = ["{}:{}".format(host, port)]
    asyncio.run(serve(app, config), debug=True)


def main():
    logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-15s %(message)s',
                        level=logging.INFO)

    parser = argparse.ArgumentParser(description='Jskom')
    parser.add_argument(
        '--debug', help='Enable debug logging',
        default=False, action='store_true')
    # use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
    # http://werkzeug.pocoo.org/docs/serving/#troubleshooting
    parser.add_argument(
        '--host', help='Hostname or IP to listen on', default='127.0.0.1')
    parser.add_argument(
        '--port', help='Port to listen on', type=int, default=5000)
    args = parser.parse_args()

    loglevel = logging.DEBUG if args.debug else logging.INFO
    logging.getLogger().setLevel(loglevel)
    if not args.debug:
        # asyncio logs quite verbose also on INFO level, so set to WARNING.
        logging.getLogger('asyncio').setLevel(logging.WARNING)

    log.info("Using args: %s", args)
    run(args.host, args.port)


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import asyncio
import logging

from hypercorn.asyncio import serve
from hypercorn.config import Config

from jskom import app, init_app


log = logging.getLogger("jskom.main")


def run(host, port):
    # use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
    # http://werkzeug.pocoo.org/docs/serving/#troubleshooting
    init_app()
    config = Config()
    config.bind = ["{}:{}".format(host, port)]
    asyncio.run(serve(app, config), debug=True)


def main():
    logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-15s %(message)s',
                        level=logging.DEBUG)

    parser = argparse.ArgumentParser(description='Jskom')
    # use 127.0.0.1 instead of localhost to avoid delays related to ipv6.
    # http://werkzeug.pocoo.org/docs/serving/#troubleshooting
    parser.add_argument('--host', help='Hostname or IP to listen on', default='127.0.0.1')
    parser.add_argument('--port', help='Port to listen on', type=int, default=5000)
    args = parser.parse_args()

    log.info("Using args: %s", args)
    run(args.host, args.port)


if __name__ == "__main__":
    main()
mit
Python
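The pattern in the record above (an opt-in --debug flag that raises the root log level and quiets a chatty third-party logger) distilled into a standalone sketch, not code from the commit:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('--debug', help='Enable debug logging',
                    default=False, action='store_true')
args = parser.parse_args(['--debug'])  # sample argv for the demo

logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.DEBUG if args.debug else logging.INFO)
if not args.debug:
    # Third-party loggers can be noisy even at INFO.
    logging.getLogger('asyncio').setLevel(logging.WARNING)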
cdcc807ecd7126f533bbc01721276d62a4a72732
fix all_docs dbs to work after flip
dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq
corehq/couchapps/__init__.py
corehq/couchapps/__init__.py
from corehq.preindex import CouchAppsPreindexPlugin
from django.conf import settings

CouchAppsPreindexPlugin.register('couchapps', __file__, {
    'form_question_schema': 'meta',
    'users_extra': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
    'noneulized_users': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
    'all_docs': (None, settings.NEW_USERS_GROUPS_DB),
})
from corehq.preindex import CouchAppsPreindexPlugin
from django.conf import settings

CouchAppsPreindexPlugin.register('couchapps', __file__, {
    'form_question_schema': 'meta',
    'users_extra': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
    'noneulized_users': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
    'all_docs': (settings.USERS_GROUPS_DB, settings.NEW_USERS_GROUPS_DB),
})
bsd-3-clause
Python
a27e667dedeaaa0aefadc3328149f311bb277c45
Update bottlespin.py
kallerdaller/Cogs-Yorkfield
bottlespin/bottlespin.py
bottlespin/bottlespin.py
import discord
from discord.ext import commands
from random import choice


class Bottlespin:
    """Spins a bottle and lands on a random user."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
    async def spin(self, ctx, role):
        """Spin the bottle"""
        await self.bot.say(str(role))
        roles = [ctx.message.server.roles]
        await self.bot.say(str(roles[1]))
        if role in roles:
            await self.bot.say(str(role))
            await self.bot.say(str(roles))
        author = ctx.message.author
        server = ctx.message.server
        if len(server.members) < 2:
            await self.bot.say("`Not enough people are around to spin the bottle`")
            return
        if role in roles:
            roleexist = True
        else:
            await self.bot.say("`{} is not a exising role`".format(role))
            return
        if roleexist:
            target = [m for m in server.members if m != author and role in [
                s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
        else:
            target = [m for m in server.members if m != author and str(
                m.status) == "online" or str(m.status) == "idle"]
        if not target:
            if role:
                await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
            else:
                await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
            return
        else:
            target = choice(list(target))
        await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))


def setup(bot):
    n = Bottlespin(bot)
    bot.add_cog(n)
import discord
from discord.ext import commands
from random import choice


class Bottlespin:
    """Spins a bottle and lands on a random user."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
    async def spin(self, ctx, role):
        """Spin the bottle"""
        roles = [ctx.message.server.roles]
        role = discord.Role.name
        if role in roles:
            await self.bot.say(str(role))
            await self.bot.say(str(roles))
        author = ctx.message.author
        server = ctx.message.server
        if len(server.members) < 2:
            await self.bot.say("`Not enough people are around to spin the bottle`")
            return
        if role in roles:
            roleexist = True
        else:
            await self.bot.say("`{} is not a exising role`".format(role))
            return
        if roleexist:
            target = [m for m in server.members if m != author and role in [
                s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
        else:
            target = [m for m in server.members if m != author and str(
                m.status) == "online" or str(m.status) == "idle"]
        if not target:
            if role:
                await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
            else:
                await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
            return
        else:
            target = choice(list(target))
        await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))


def setup(bot):
    n = Bottlespin(bot)
    bot.add_cog(n)
mit
Python
c63463ff040f79c605d6c0414261527dda3ed00a
Switch to new babel version in require test.
amol-/dukpy,amol-/dukpy,amol-/dukpy
tests/test_jsinterpreter.py
tests/test_jsinterpreter.py
import unittest

from dukpy._dukpy import JSRuntimeError
import dukpy

from diffreport import report_diff


class TestJSInterpreter(unittest.TestCase):
    def test_interpreter_keeps_context(self):
        interpreter = dukpy.JSInterpreter()
        ans = interpreter.evaljs("var o = {'value': 5}; o")
        assert ans == {'value': 5}
        ans = interpreter.evaljs("o.value += 1; o")
        assert ans == {'value': 6}

    def test_call_python(self):
        def _say_hello(num, who):
            return 'Hello ' + ' '.join([who]*num)

        interpreter = dukpy.JSInterpreter()
        interpreter.export_function('say_hello', _say_hello)
        res = interpreter.evaljs("call_python('say_hello', 3, 'world')")
        assert res == 'Hello world world world', res

    def test_module_loader(self):
        interpreter = dukpy.JSInterpreter()
        res = interpreter.evaljs('''
            babel = require('babel-6.26.0.min');
            babel.transform(dukpy.es6code, {presets: ["es2015"]}).code;
        ''', es6code='let i=5;')
        expected = '''"use strict";

var i = 5;'''
        assert res == expected, report_diff(expected, res)

    def test_module_loader_unexisting(self):
        interpreter = dukpy.JSInterpreter()
        with self.assertRaises(JSRuntimeError) as err:
            interpreter.evaljs("require('missing_module');")
        assert 'cannot find module: missing_module' in str(err.exception)
import unittest

from dukpy._dukpy import JSRuntimeError
import dukpy

from diffreport import report_diff


class TestJSInterpreter(unittest.TestCase):
    def test_interpreter_keeps_context(self):
        interpreter = dukpy.JSInterpreter()
        ans = interpreter.evaljs("var o = {'value': 5}; o")
        assert ans == {'value': 5}
        ans = interpreter.evaljs("o.value += 1; o")
        assert ans == {'value': 6}

    def test_call_python(self):
        def _say_hello(num, who):
            return 'Hello ' + ' '.join([who]*num)

        interpreter = dukpy.JSInterpreter()
        interpreter.export_function('say_hello', _say_hello)
        res = interpreter.evaljs("call_python('say_hello', 3, 'world')")
        assert res == 'Hello world world world', res

    def test_module_loader(self):
        interpreter = dukpy.JSInterpreter()
        res = interpreter.evaljs('''
            babel = require('babel-6.14.0.min');
            babel.transform(dukpy.es6code, {presets: ["es2015"]}).code;
        ''', es6code='let i=5;')
        expected = '''"use strict";

var i = 5;'''
        assert res == expected, report_diff(expected, res)

    def test_module_loader_unexisting(self):
        interpreter = dukpy.JSInterpreter()
        with self.assertRaises(JSRuntimeError) as err:
            interpreter.evaljs("require('missing_module');")
        assert 'cannot find module: missing_module' in str(err.exception)
mit
Python
3681ada3917d5811e1e959270e1df0edea7ebf55
Update __init__.py
rchristie/mapclientplugins.smoothfitstep
mapclientplugins/smoothfitstep/__init__.py
mapclientplugins/smoothfitstep/__init__.py
'''
MAP Client Plugin
'''

__version__ = '0.1.0'
__author__ = 'Richard Christie'
__stepname__ = 'smoothfit'
__location__ = ''

# import class that derives itself from the step mountpoint.
from mapclientplugins.smoothfitstep import step

# Import the resource file when the module is loaded,
# this enables the framework to use the step icon.
from . import resources_rc
'''
MAP Client Plugin
'''

__version__ = '0.1.0'
__author__ = 'Richard Christie'
__stepname__ = 'smoothfit'
__location__ = ''

# import class that derives itself from the step mountpoint.
from mapclientplugins.smoothfitstep import step

# Import the resource file when the module is loaded,
# this enables the framework to use the step icon.
from . import resources_rc
apache-2.0
Python
379d2df1041605d3c8a21d543f9955601ee07558
Add threading to syncer
creativecommons/open-ledger,creativecommons/open-ledger,creativecommons/open-ledger
imageledger/management/commands/syncer.py
imageledger/management/commands/syncer.py
from collections import namedtuple
import itertools
import logging
from multiprocessing.dummy import Pool as ThreadPool

from elasticsearch import helpers

from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction

from imageledger import models, search

console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

MAX_CONNECTION_RETRIES = 10
RETRY_WAIT = 5  # Number of sections to wait before retrying

DEFAULT_CHUNK_SIZE = 1000


class Command(BaseCommand):
    can_import_settings = True
    requires_migrations_checks = True

    def add_arguments(self, parser):
        parser.add_argument("--verbose",
                            action="store_true",
                            default=False,
                            help="Be very chatty and run logging at DEBUG")
        parser.add_argument("--chunk-size",
                            dest="chunk_size",
                            default=DEFAULT_CHUNK_SIZE,
                            type=int,
                            help="The number of records to batch process at once")
        parser.add_argument("--with-fingerprinting",
                            dest="with_fingerprinting",
                            action="store_true",
                            help="Whether to run the expensive perceptual hash routine as part of syncing")

    def handle(self, *args, **options):
        if options['verbose']:
            log.addHandler(console)
            log.setLevel(logging.DEBUG)
        self.sync_all_images(chunk_size=options['chunk_size'],
                             with_fingerprinting=options['with_fingerprinting'])

    def sync_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, with_fingerprinting=False, num_iterations=5):
        """Sync all of the images, sorting from least-recently-synced"""
        pool = ThreadPool(4)
        starts = [i * chunk_size for i in range(0, num_iterations)]
        pool.starmap(do_sync, zip(starts,
                                  itertools.repeat(chunk_size, num_iterations),
                                  itertools.repeat(with_fingerprinting, num_iterations)))
        pool.close()
        pool.join()


def do_sync(start, chunk_size, with_fingerprinting):
    end = start + chunk_size
    log.info("Starting sync in range from %d to %d...", start, end)
    imgs = models.Image.objects.all().order_by('-last_synced_with_source')[start:end]
    for img in imgs:
        img.sync(attempt_perceptual_hash=with_fingerprinting)
from collections import namedtuple
import itertools
import logging

from elasticsearch import helpers

from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction

from imageledger import models, search

console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

MAX_CONNECTION_RETRIES = 10
RETRY_WAIT = 5  # Number of sections to wait before retrying

DEFAULT_CHUNK_SIZE = 1000


class Command(BaseCommand):
    can_import_settings = True
    requires_migrations_checks = True

    def add_arguments(self, parser):
        parser.add_argument("--verbose",
                            action="store_true",
                            default=False,
                            help="Be very chatty and run logging at DEBUG")
        parser.add_argument("--chunk-size",
                            dest="chunk_size",
                            default=DEFAULT_CHUNK_SIZE,
                            type=int,
                            help="The number of records to batch process at once")
        parser.add_argument("--with-fingerprinting",
                            dest="with_fingerprinting",
                            action="store_true",
                            help="Whether to run the expensive perceptual hash routine as part of syncing")

    def handle(self, *args, **options):
        if options['verbose']:
            log.addHandler(console)
            log.setLevel(logging.DEBUG)
        self.sync_all_images(chunk_size=options['chunk_size'],
                             with_fingerprinting=options['with_fingerprinting'])

    def sync_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, with_fingerprinting=False, num_iterations=1000):
        """Sync all of the images, sorting from least-recently-synced"""
        count = 0
        while count < num_iterations:
            imgs = models.Image.objects.all().order_by('-last_synced_with_source')[0:chunk_size]
            for img in imgs:
                img.sync(attempt_perceptual_hash=with_fingerprinting)
            count += 1
mit
Python
33b7e9371305c4171594c21c154cd5724ea013cb
allow segment and overlap be specified as a parameter
jts/nanopolish,jts/nanopolish,jts/nanopolish,mateidavid/nanopolish,mateidavid/nanopolish,mateidavid/nanopolish,jts/nanopolish,mateidavid/nanopolish,mateidavid/nanopolish,jts/nanopolish
scripts/nanopolish_makerange.py
scripts/nanopolish_makerange.py
import sys
import argparse
from Bio import SeqIO

parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
    sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]

recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]

SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length

for name, length in recs:
    n_segments = (length / SEGMENT_LENGTH) + 1

    for n in xrange(0, length, SEGMENT_LENGTH):
        if ( n + SEGMENT_LENGTH) > length:
            print "%s:%d-%d" % (name, n, length - 1)
        else:
            print "%s:%d-%d" % (name, n, n + SEGMENT_LENGTH + OVERLAP_LENGTH)
import sys
from Bio import SeqIO

recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(sys.argv[1]), "fasta")]

SEGMENT_LENGTH = 50000
OVERLAP_LENGTH = 200

for name, length in recs:
    n_segments = (length / SEGMENT_LENGTH) + 1

    for n in xrange(0, length, SEGMENT_LENGTH):
        if ( n + SEGMENT_LENGTH) > length:
            print "%s:%d-%d" % (name, n, length - 1)
        else:
            print "%s:%d-%d" % (name, n, n + SEGMENT_LENGTH + OVERLAP_LENGTH)
mit
Python
105a413b18456f9a505dd1ed4bf515987b4792d2
add --force option to management command to force all files to be pushed
mntan/django-mediasync,sunlightlabs/django-mediasync,sunlightlabs/django-mediasync,mntan/django-mediasync,mntan/django-mediasync,sunlightlabs/django-mediasync
mediasync/management/commands/syncmedia.py
mediasync/management/commands/syncmedia.py
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import mediasync


class Command(BaseCommand):

    help = "Sync local media with S3"
    args = '[options]'

    requires_model_validation = False

    option_list = BaseCommand.option_list + (
        make_option("-F", "--force",
                    dest="force",
                    help="force files to sync",
                    action="store_true"),
    )

    def handle(self, *args, **options):

        force = options.get('force') or False

        try:
            mediasync.sync(force=force)
        except ValueError, ve:
            raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args))
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import mediasync


class Command(BaseCommand):

    help = "Sync local media with S3"
    args = '[options]'

    requires_model_validation = False

    option_list = BaseCommand.option_list + (
        make_option("-f", "--force",
                    dest="force",
                    help="force files to sync",
                    action="store_true"),
    )

    def handle(self, *args, **options):

        force = options.get('force') or False

        try:
            mediasync.sync(force=force)
        except ValueError, ve:
            raise CommandError('%s\nUsage is mediasync %s' % (ve.message, self.args))
bsd-3-clause
Python
0be6bddf8c92c461af57e7c61c2378c817fb0143
Make oppetarkiv work with --all-episodes again
dalgr/svtplay-dl,selepo/svtplay-dl,leakim/svtplay-dl,leakim/svtplay-dl,dalgr/svtplay-dl,qnorsten/svtplay-dl,qnorsten/svtplay-dl,olof/svtplay-dl,spaam/svtplay-dl,iwconfig/svtplay-dl,olof/svtplay-dl,selepo/svtplay-dl,iwconfig/svtplay-dl,leakim/svtplay-dl,spaam/svtplay-dl
lib/svtplay_dl/service/oppetarkiv.py
lib/svtplay_dl/service/oppetarkiv.py
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re

from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.log import log


class OppetArkiv(Svtplay):
    supported_domains = ['oppetarkiv.se']

    def find_all_episodes(self, options):
        page = 1
        data = self.get_urldata()
        match = re.search(r'"/etikett/titel/([^"/]+)', data)
        if match is None:
            match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
            if match is None:
                log.error("Couldn't find title")
                return
        program = match.group(1)
        episodes = []

        n = 0
        if options.all_last > 0:
            sort = "tid_fallande"
        else:
            sort = "tid_stigande"

        while True:
            url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
            data = self.http.request("get", url)
            if data.status_code == 404:
                break

            data = data.text
            regex = re.compile(r'href="(/video/[^"]+)"')
            for match in regex.finditer(data):
                if n == options.all_last:
                    break
                episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
                n += 1
            page += 1

        return episodes
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re

from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.log import log


class OppetArkiv(Svtplay):
    supported_domains = ['oppetarkiv.se']

    def find_all_episodes(self, options):
        page = 1
        data = self.get_urldata()
        match = re.search(r'"/etikett/titel/([^"/]+)', data)
        if match is None:
            match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
            if match is None:
                log.error("Couldn't find title")
                return
        program = match.group(1)
        more = True
        episodes = []

        n = 0
        if options.all_last > 0:
            sort = "tid_fallande"
        else:
            sort = "tid_stigande"
        while more:
            url = "http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=%s&embed=true" % (program, page, sort)
            data = self.http.request("get", url).text
            visa = re.search(r'svtXColorDarkLightGrey', data)
            if not visa:
                more = False
            regex = re.compile(r'href="(/video/[^"]+)"')
            for match in regex.finditer(data):
                if n == options.all_last:
                    break
                episodes.append("http://www.oppetarkiv.se%s" % match.group(1))
                n += 1
            page += 1
        return episodes
mit
Python
a52b4097dfcb9fea26af0bc994426baecb97efc1
update image if streetview url
justinwp/croplands,justinwp/croplands
croplands_api/views/api/locations.py
croplands_api/views/api/locations.py
from croplands_api import api
from croplands_api.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from croplands_api.utils.s3 import upload_image
import requests
import uuid
import cStringIO


def process_records(result=None, **kwargs):
    """
    This processes all records that may have been posted as a relation of the location.
    :param result:
    :param kwargs:
    :return: None
    """
    for record in result['records']:
        save_record_state_to_history(record)


def merge_same_location_lat_long(data=None, **kwargs):
    """
    This preprocessor checks if the location already exists.
    :param data:
    :param kwargs:
    :return:
    """
    # TODO
    pass


def change_field_names(data=None, **kwargs):
    if 'photos' in data:
        data['images'] = data['photos']
        del data['photos']


def check_for_street_view_image(data=None, **kwargs):
    if 'images' not in data:
        return
    for image in data['images']:
        if 'source' in image and image['source'] == 'streetview':
            try:
                r = requests.get(image['url'])
                if r.status_code == 200:
                    url = 'images/streetview/' + str(uuid.uuid4()) + '.jpg'
                    image['url'] = url
            except Exception as e:
                print(e)


def create(app):
    api.create_api(Location,
                   app=app,
                   collection_name='locations',
                   methods=['GET', 'POST', 'PATCH', 'DELETE'],
                   preprocessors={
                       'POST': [change_field_names, add_user_to_posted_data, debug_post,
                                check_for_street_view_image],
                       'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']), remove_relations],
                       'PATCH_MANY': [api_roles('admin'), remove_relations],
                       'DELETE': [api_roles('admin')]
                   },
                   postprocessors={
                       'POST': [process_records],
                       'PATCH_SINGLE': [],
                       'PATCH_MANY': [],
                       'DELETE': []
                   },
                   results_per_page=10)
from croplands_api import api
from croplands_api.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from croplands_api.tasks.records import get_ndvi


def process_records(result=None, **kwargs):
    """
    This processes all records that may have been posted as a relation of the location.
    :param result:
    :param kwargs:
    :return: None
    """
    for record in result['records']:
        save_record_state_to_history(record)


def merge_same_location_lat_long(data=None, **kwargs):
    """
    This preprocessor checks if the location already exists.
    :param data:
    :param kwargs:
    :return:
    """
    # TODO
    pass


def change_field_names(data=None, **kwargs):
    if 'photos' in data:
        data['images'] = data['photos']
        del data['photos']


def create(app):
    api.create_api(Location,
                   app=app,
                   collection_name='locations',
                   methods=['GET', 'POST', 'PATCH', 'DELETE'],
                   preprocessors={
                       'POST': [change_field_names, add_user_to_posted_data, debug_post],
                       'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']), remove_relations],
                       'PATCH_MANY': [api_roles('admin'), remove_relations],
                       'DELETE': [api_roles('admin')]
                   },
                   postprocessors={
                       'POST': [process_records],
                       'PATCH_SINGLE': [],
                       'PATCH_MANY': [],
                       'DELETE': []
                   },
                   results_per_page=10)
mit
Python
928d498b5f67970f9ec75d62068e8cbec0fdc352
Update python3, flake8
NORDUnet/niscanner
ni_scanner.py
ni_scanner.py
from ConfigParser import SafeConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging

FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')


def process_host(queue, nerds_api):
    item = queue.next("Host")
    while item:
        try:
            queue.processing(item)
            scanner = HostScanner(item)
            nerds = scanner.process()
            if not nerds:
                # Error occured :(
                logger.error("Unable to scan item %s", str(item))
                queue.failed(item)
            else:
                logger.debug("Posting nerds data")
                nerds_api.send(nerds)
                queue.done(item)
        except ScannerExeption as e:
            logger.error("%s", e)
            failed(queue, item)
        except Exception as e:
            logger.error("Unable to process host %s got error: %s", item, str(e))
            failed(queue, item)
        item = queue.next("Host")


def failed(queue, item):
    try:
        queue.failed(item)
    except Exception as e:
        logger.error("Problem with reaching NI, got error: %s", e)


def main():
    args = CLI().options()
    try:
        config = SafeConfigParser()
        config.readfp(open(args.config))
    except IOError:
        logger.error("Config file '%s' is missing", args.config)
        return None

    # ready :)
    api_user = config.get("NI", "api_user")
    api_key = config.get("NI", "api_key")

    queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
    queue = Queue(queue_url, api_user, api_key)

    nerds_url = url_concat(config.get("NI", "url"), "nerds/")
    nerds_api = NerdsApi(nerds_url, api_user, api_key)

    process_host(queue, nerds_api)


if __name__ == "__main__":
    main()
from ConfigParser import SafeConfigParser
from utils.cli import CLI
from api.queue import Queue
from api.nerds import NerdsApi
from scanner.host import HostScanner
from scanner.exceptions import ScannerExeption
from utils.url import url_concat
import logging

FORMAT = '%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('ni_scanner')


def process_host(queue, nerds_api):
    item = queue.next("Host")
    while item:
        try:
            queue.processing(item)
            scanner = HostScanner(item)
            nerds = scanner.process()
            if not nerds:
                # Error occured :(
                logger.error("Unable to scan item "+str(item))
                queue.failed(item)
            else:
                logger.debug("Posting nerds data")
                nerds_api.send(nerds)
                queue.done(item)
        except ScannerExeption as e:
            logger.error("%s",e)
            failed(queue,item)
        except Exception as e:
            logger.error("Unable to process host %s got error: %s",item,str(e))
            failed(queue,item)
        item = queue.next("Host")


def failed(queue,item):
    try:
        queue.failed(item)
    except Exception as e:
        logger.error("Problem with reaching NI, got error: %s", e)


def main():
    args = CLI().options()
    try:
        config = SafeConfigParser()
        config.readfp(open(args.config))
    except IOError as (errno, strerror):
        logger.error("Config file '%s' is missing", args.config)
        return None

    ## ready :)
    api_user = config.get("NI", "api_user")
    api_key = config.get("NI", "api_key")

    queue_url = url_concat(config.get("NI", "url"), "scan_queue/")
    queue = Queue(queue_url, api_user, api_key)

    nerds_url = url_concat(config.get("NI", "url"), "nerds/")
    nerds_api = NerdsApi(nerds_url, api_user, api_key)

    process_host(queue, nerds_api)


if __name__ == "__main__":
    main()
bsd-3-clause
Python
496007543f941bb3ca46c011383f2673b9362e47
Bump development version
lpomfrey/django-debreach,lpomfrey/django-debreach
debreach/__init__.py
debreach/__init__.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from distutils import version

__version__ = '1.4.1'
version_info = version.StrictVersion(__version__).version

default_app_config = 'debreach.apps.DebreachConfig'
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from distutils import version

__version__ = '1.4.0'
version_info = version.StrictVersion(__version__).version

default_app_config = 'debreach.apps.DebreachConfig'
bsd-2-clause
Python
31d6ce09382035458eca2a310f99cb3c958ea604
Use main template environment for rendering document content
jreese/nib
nib/render.py
nib/render.py
import jinja2
from jinja2 import Environment, FileSystemLoader, Template
from os import path
import time

jinja_filters = {}


def jinja(name):
    def decorator(f):
        jinja_filters[name] = f
        return f
    return decorator


class Render(object):
    def __init__(self, options, documents):
        self.options = options
        self.documents = documents

        self.loader = FileSystemLoader(path.abspath(options['template_path']))
        self.env = Environment(loader=self.loader)

        for name in jinja_filters:
            self.env.filters[name] = jinja_filters[name]

        self.site = dict(options['site'], documents=documents)
        self.now = time.time()

    def render_content(self, document):
        params = {
            'now': self.now,
            'site': self.site,
            'page': document,
        }
        params.update(document)

        document.short = self.env.from_string(document.short).render(**params)
        document.content = self.env.from_string(document.content).render(**params)

    def render_template(self, document):
        if 'template' in document:
            template = self.env.get_template(document['template'])
            params = {
                'now': self.now,
                'site': self.options['site'],
                'page': document,
                'content': document.content,
                'short': document.short,
            }
            params.update(document)

            return template.render(**params)

        else:
            return document.content
import jinja2
from jinja2 import Environment, FileSystemLoader, Template
from os import path
import time

jinja_filters = {}


def jinja(name):
    def decorator(f):
        jinja_filters[name] = f
        return f
    return decorator


class Render(object):
    def __init__(self, options, documents):
        self.options = options
        self.documents = documents

        self.loader = FileSystemLoader(path.abspath(options['template_path']))
        self.env = Environment(loader=self.loader)

        for name in jinja_filters:
            self.env.filters[name] = jinja_filters[name]

        self.site = dict(options['site'], documents=documents)
        self.now = time.time()

    def render_content(self, document):
        params = {
            'now': self.now,
            'site': self.site,
            'page': document,
        }
        params.update(document)

        document.short = Template(document.short).render(**params)
        document.content = Template(document.content).render(**params)

    def render_template(self, document):
        if 'template' in document:
            template = self.env.get_template(document['template'])
            params = {
                'now': self.now,
                'site': self.options['site'],
                'page': document,
                'content': document.content,
                'short': document.short,
            }
            params.update(document)

            return template.render(**params)

        else:
            return document.content
mit
Python
8a03a3fbcfdb22dc21e5539462a2b235e744abba
change open/close to with
raspearsy/bme590hrm
output.py
output.py
def summarizeECG(instHR, avgHR, brady, tachy):
    """Create txt file summarizing ECG analysis

    :param instHR: (int)
    :param avgHR: (int)
    :param brady: (int)
    :param tachy: (int)
    """

    #Calls hrdetector() to get instantaneous heart rate
    #instHR = findInstHR()

    #Calls findAvgHR() to get average heart rate
    #avgHR = findAvgHR()

    #Calls bradyTimes() to get times when bradycardia occurred
    #brady = bradyTimes()

    #Calls tachtimes() to get times when tachycardia occurred
    #tachy = tachyTimes()

    #Writes the output of the ECG analysis to an output file named ecgOutput.txt
    with open('ecgOutput.txt','w') as ecgResults:
        instHRstr = "Estimated instantaneous heart rate: %s" % str(instHR)
        avgHRstr = "Estimated average heart rate: %s" % str(avgHR)
        bradystr = "Bradycardia occurred at: %s" % str(brady)
        tachystr = "Tachycardia occurred at: %s" % str(tachy)

        ecgResults.write(instHRstr + ' BPM\n' + avgHRstr + ' BPM\n' + bradystr + ' sec\n' + tachystr + ' sec')
def summarizeECG(instHR, avgHR, brady, tachy):
    """Create txt file summarizing ECG analysis

    :param instHR: (int)
    :param avgHR: (int)
    :param brady: (int)
    :param tachy: (int)
    """

    #Calls hrdetector() to get instantaneous heart rate
    #instHR = findInstHR()

    #Calls findAvgHR() to get average heart rate
    #avgHR = findAvgHR()

    #Calls bradyTimes() to get times when bradycardia occurred
    #brady = bradyTimes()

    #Calls tachtimes() to get times when tachycardia occurred
    #tachy = tachyTimes()

    #Writes the output of the ECG analysis to an output file named ecgOutput.txt
    ecgResults = open('ecgOutput.txt','w')

    instHRstr = "Estimated instantaneous heart rate: %s" % str(instHR)
    avgHRstr = "Estimated average heart rate: %s" % str(avgHR)
    bradystr = "Bradycardia occurred at: %s" % str(brady)
    tachystr = "Tachycardia occurred at: %s" % str(tachy)

    ecgResults.write(instHRstr + ' BPM\n' + avgHRstr + ' BPM\n' + bradystr + ' sec\n' + tachystr + ' sec')
    ecgResults.close()
mit
Python
b0a1f10d60abc6c9fc7751e3bae492976d3f3306
Update version 1.0.0.dev3 -> 1.0.0.dev4
oneklc/dimod,oneklc/dimod
dimod/package_info.py
dimod/package_info.py
__version__ = '1.0.0.dev4'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'A shared API for binary quadratic model samplers.'
__version__ = '1.0.0.dev3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'A shared API for binary quadratic model samplers.'
apache-2.0
Python
518443854f7ef4466885d88cf7b379c626692da1
Add PlannedBudgetLimits to Budgets::Budget BudgetData
cloudtools/troposphere,ikben/troposphere,ikben/troposphere,cloudtools/troposphere
troposphere/budgets.py
troposphere/budgets.py
# Copyright (c) 2012-2019, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 8.0.0


from . import AWSObject
from . import AWSProperty
from .validators import boolean
from .validators import double


class CostTypes(AWSProperty):
    props = {
        'IncludeCredit': (boolean, False),
        'IncludeDiscount': (boolean, False),
        'IncludeOtherSubscription': (boolean, False),
        'IncludeRecurring': (boolean, False),
        'IncludeRefund': (boolean, False),
        'IncludeSubscription': (boolean, False),
        'IncludeSupport': (boolean, False),
        'IncludeTax': (boolean, False),
        'IncludeUpfront': (boolean, False),
        'UseAmortized': (boolean, False),
        'UseBlended': (boolean, False),
    }


class Spend(AWSProperty):
    props = {
        'Amount': (double, True),
        'Unit': (basestring, True),
    }


class TimePeriod(AWSProperty):
    props = {
        'End': (basestring, False),
        'Start': (basestring, False),
    }


class BudgetData(AWSProperty):
    props = {
        'BudgetLimit': (Spend, False),
        'BudgetName': (basestring, False),
        'BudgetType': (basestring, True),
        'CostFilters': (dict, False),
        'CostTypes': (CostTypes, False),
        'PlannedBudgetLimits': (dict, False),
        'TimePeriod': (TimePeriod, False),
        'TimeUnit': (basestring, True),
    }


class Notification(AWSProperty):
    props = {
        'ComparisonOperator': (basestring, True),
        'NotificationType': (basestring, True),
        'Threshold': (double, True),
        'ThresholdType': (basestring, False),
    }


class Subscriber(AWSProperty):
    props = {
        'Address': (basestring, True),
        'SubscriptionType': (basestring, True),
    }


class NotificationWithSubscribers(AWSProperty):
    props = {
        'Notification': (Notification, True),
        'Subscribers': ([Subscriber], True),
    }


class Budget(AWSObject):
    resource_type = "AWS::Budgets::Budget"

    props = {
        'Budget': (BudgetData, True),
        'NotificationsWithSubscribers': ([NotificationWithSubscribers], False),
    }
# Copyright (c) 2012-2018, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.

from . import AWSObject, AWSProperty
from .validators import boolean


class Spend(AWSProperty):
    props = {
        'Amount': (float, True),
        'Unit': (basestring, True),
    }


class CostTypes(AWSProperty):
    props = {
        'IncludeCredit': (boolean, False),
        'IncludeDiscount': (boolean, False),
        'IncludeOtherSubscription': (boolean, False),
        'IncludeRecurring': (boolean, False),
        'IncludeRefund': (boolean, False),
        'IncludeSubscription': (boolean, False),
        'IncludeSupport': (boolean, False),
        'IncludeTax': (boolean, False),
        'IncludeUpfront': (boolean, False),
        'UseAmortized': (boolean, False),
        'UseBlended': (boolean, False),
    }


class TimePeriod(AWSProperty):
    props = {
        'End': (basestring, False),
        'Start': (basestring, False),
    }


class BudgetData(AWSProperty):
    props = {
        'BudgetLimit': (Spend, False),
        'BudgetName': (basestring, False),
        'BudgetType': (basestring, True),
        'CostFilters': (dict, False),
        'CostTypes': (CostTypes, False),
        'TimePeriod': (TimePeriod, False),
        'TimeUnit': (basestring, True),
    }


class Notification(AWSProperty):
    props = {
        'ComparisonOperator': (basestring, True),
        'NotificationType': (basestring, True),
        'Threshold': (float, True),
        'ThresholdType': (basestring, False),
    }


class Subscriber(AWSProperty):
    props = {
        'Address': (basestring, True),
        'SubscriptionType': (basestring, True),
    }


class NotificationWithSubscribers(AWSProperty):
    props = {
        'Notification': (Notification, True),
        'Subscribers': ([Subscriber], True),
    }


class Budget(AWSObject):
    resource_type = "AWS::Budgets::Budget"

    props = {
        'Budget': (BudgetData, True),
        'NotificationsWithSubscribers': ([NotificationWithSubscribers], False),
    }
bsd-2-clause
Python
ba84f4a1b11f486d211254721397be43f8c9b07a
update __manifest__.py
thinkopensolutions/tkobr-addons,thinkopensolutions/tkobr-addons,thinkopensolutions/tkobr-addons,thinkopensolutions/tkobr-addons
tko_coexiste_coa/__manifest__.py
tko_coexiste_coa/__manifest__.py
# -*- coding: utf-8 -*-
# © 2017 TKO <http://tko.tko-br.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

{
    'name': 'Plano de Contas Brasileiro',
    'summary': '',
    'description': 'Plano de contas brasileiro adaptável a qualquer segmento.',
    'author': 'TKO',
    'category': 'l10n_br',
    'license': 'AGPL-3',
    'website': 'http://tko.tko-br.com',
    'version': '10.0.0.0.0',
    'application': False,
    'installable': True,
    'auto_install': False,
    'depends': [
        'account',
        'br_account',
        'account_parent',
    ],
    'external_dependencies': {
        'python': [],
        'bin': [],
    },
    'init_xml': [],
    'update_xml': [],
    'css': [],
    'demo_xml': [],
    'test': [],
    'data': [
        'data/chart_data_properties.xml',
        'data/chart_data.xml',
        'data/account.account.template.csv',
        # TODO Separate proprities for products vs. services (enhance data/chart_data_properties.xml)
        # TODO Criar Contas Pai
        # TODO Create & Import l10n_br Taxes
    ],
}
# -*- coding: utf-8 -*-
# © 2017 TKO <http://tko.tko-br.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

{
    'name': 'Plano de Contas Brasileiro',
    'summary': '',
    'description': 'Plano de contas brasileiro adaptável a qualquer segmento.',
    'author': 'TKO',
    'category': 'l10n_br',
    'license': 'AGPL-3',
    'website': 'http://tko.tko-br.com',
    'version': '10.0.0.0.0',
    'application': False,
    'installable': True,
    'auto_install': False,
    'depends': [
        'account',
        'br_account',
        'account_parent',
    ],
    'external_dependencies': {
        'python': [],
        'bin': [],
    },
    'init_xml': [],
    'update_xml': [],
    'css': [],
    'demo_xml': [],
    'test': [],
    'data': [
        'data/chart_data.xml',
        'data/account.account.template.csv',
        'data/chart_data_properties.xml',
        # TODO Separate proprities for products vs. services (enhance data/chart_data_properties.xml)
        # TODO Criar Contas Pai
        # TODO Create & Import l10n_br Taxes
    ],
}
agpl-3.0
Python
febb2e9369a706d7319d89851cac3dc9a1fd167e
add source of kyoko image
neynt/tsundiary,neynt/tsundiary,neynt/tsundiary,neynt/tsundiary
tsundiary/jinja_env.py
tsundiary/jinja_env.py
from tsundiary import app

app.jinja_env.globals.update(theme_nicename = {
    'classic': 'Classic Orange',
    'minimal': 'Minimal Black/Grey',
    'misato-tachibana': 'Misato Tachibana',
    'rei-ayanami': 'Rei Ayanami',
    'saya': 'Saya',
    'yuno': 'Yuno Gasai',
    'kyoko-sakura': 'Kyoko Sakura',
    'colorful': 'Based on favorite color'
})

app.jinja_env.globals.update(themes = ['classic', 'minimal', 'misato-tachibana', 'rei-ayanami', 'saya', 'yuno', 'colorful'])

app.jinja_env.globals.update(theme_creds = {
    'misato-tachibana': '<a href="http://konachan.com/post/show/102801">Misato Tachibana vector source</a>',
    'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei vector source</a>',
    'saya': '<a href="http://www.zerochan.net/671274">Saya source</a>',
    'kyoko-sakura': '<a href="http://3071527.deviantart.com/art/kyoko-sakura-376238110">Kyoko source</a>'
})

app.jinja_env.globals.update(theme_colors = [
    ('Red', '0,100,100'),
    ('Orange', '35,100,100'),
    ('Yellow', '50,100,100'),
    ('Green', '120,100,80'),
    ('Cyan', '180,100,80'),
    ('Blue', '215,100,100'),
    ('Purple', '270,100,100'),
    ('Black', '0,0,0'),
    ('Grey', '0,0,70'),
    ('White', '0,0,100'),
    ('Saya Green', '152,100,100'),
    ('Tsundiary Orange', '17,100,100'),
])
from tsundiary import app

app.jinja_env.globals.update(theme_nicename = {
    'classic': 'Classic Orange',
    'minimal': 'Minimal Black/Grey',
    'misato-tachibana': 'Misato Tachibana',
    'rei-ayanami': 'Rei Ayanami',
    'saya': 'Saya',
    'yuno': 'Yuno Gasai',
    'kyoko-sakura': 'Kyoko Sakura',
    'colorful': 'Based on favorite color'
})

app.jinja_env.globals.update(themes = ['classic', 'minimal', 'misato-tachibana', 'rei-ayanami', 'saya', 'yuno', 'colorful'])

app.jinja_env.globals.update(theme_creds = {
    'misato-tachibana': '<a href="http://konachan.com/post/show/102801">Misato Tachibana vector source</a>',
    'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei vector source</a>',
    'saya': '<a href="http://www.zerochan.net/671274">Saya source</a>',
    'kyoko-sakura': "An artist drew this Kyoko, I'm sure."
})

app.jinja_env.globals.update(theme_colors = [
    ('Red', '0,100,100'),
    ('Orange', '35,100,100'),
    ('Yellow', '50,100,100'),
    ('Green', '120,100,80'),
    ('Cyan', '180,100,80'),
    ('Blue', '215,100,100'),
    ('Purple', '270,100,100'),
    ('Black', '0,0,0'),
    ('Grey', '0,0,70'),
    ('White', '0,0,100'),
    ('Saya Green', '152,100,100'),
    ('Tsundiary Orange', '17,100,100'),
])
mit
Python
3cf93f7f640ef04a1be31d515c19cffec19cec45
Remove logging import unused
openstack/python-searchlightclient,openstack/python-searchlightclient
searchlightclient/osc/plugin.py
searchlightclient/osc/plugin.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import logging

from osc_lib import utils

DEFAULT_SEARCH_API_VERSION = '1'
API_VERSION_OPTION = 'os_search_api_version'
API_NAME = 'search'
API_VERSIONS = {
    '1': 'searchlightclient.v1.client.Client',
}


def make_client(instance):
    """Returns a search service client"""
    search_client = utils.get_client_class(
        API_NAME,
        instance._api_version[API_NAME],
        API_VERSIONS)

    # Set client http_log_debug to True if verbosity level is high enough
    http_log_debug = utils.get_effective_log_level() <= logging.DEBUG

    # Remember interface only if it is set
    kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)

    client = search_client(
        session=instance.session,
        http_log_debug=http_log_debug,
        region_name=instance._region_name,
        **kwargs
    )

    return client


def build_option_parser(parser):
    """Hook to add global options"""
    parser.add_argument(
        '--os-search-api-version',
        metavar='<search-api-version>',
        default=utils.env(
            'OS_SEARCH_API_VERSION',
            default=DEFAULT_SEARCH_API_VERSION),
        help='Search API version, default=' +
             DEFAULT_SEARCH_API_VERSION +
             ' (Env: OS_SEARCH_API_VERSION)')
    return parser
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import logging

from osc_lib import utils

LOG = logging.getLogger(__name__)

DEFAULT_SEARCH_API_VERSION = '1'
API_VERSION_OPTION = 'os_search_api_version'
API_NAME = 'search'
API_VERSIONS = {
    '1': 'searchlightclient.v1.client.Client',
}


def make_client(instance):
    """Returns a search service client"""
    search_client = utils.get_client_class(
        API_NAME,
        instance._api_version[API_NAME],
        API_VERSIONS)

    # Set client http_log_debug to True if verbosity level is high enough
    http_log_debug = utils.get_effective_log_level() <= logging.DEBUG

    # Remember interface only if it is set
    kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface)

    client = search_client(
        session=instance.session,
        http_log_debug=http_log_debug,
        region_name=instance._region_name,
        **kwargs
    )

    return client


def build_option_parser(parser):
    """Hook to add global options"""
    parser.add_argument(
        '--os-search-api-version',
        metavar='<search-api-version>',
        default=utils.env(
            'OS_SEARCH_API_VERSION',
            default=DEFAULT_SEARCH_API_VERSION),
        help='Search API version, default=' +
             DEFAULT_SEARCH_API_VERSION +
             ' (Env: OS_SEARCH_API_VERSION)')
    return parser
apache-2.0
Python
57cb5546d0e832bae8b2171d42fc4428ebc6dc74
add try for imports
benjspriggs/tumb-borg
tumb_borg/authorize.py
tumb_borg/authorize.py
#!/usr/bin/python
from tumblpy import Tumblpy as T
try:
    from urllib.parse import urlparse, parse_qs
except ImportError:
    from urlparse import urlparse, parse_qs

def authorize(KEY, SECRET, CALLBACK):
    def get_authorization_properties():
        t = T(KEY, SECRET)
        return t \
            .get_authentication_tokens(
                callback_url=CALLBACK)

    auth_p = get_authorization_properties()

    def get_auth_url():
        print('Please connect with Tumblr via: \n%s' \
                % auth_p['auth_url'])
        result_url = \
            raw_input("Copy and paste the accepting url: ")
        return result_url

    def query_string(url):
        return {
            k: v[0] for k, v in
            parse_qs(urlparse(url).query).items()
        }

    def query_string_auth():
        return query_string(get_auth_url())

    def authorized_tokens():
        q = query_string_auth()
        t = T(KEY, SECRET,
              q['oauth_token'],
              auth_p['oauth_token_secret'])
        return t.get_authorized_tokens(q['oauth_verifier'])

    def authorized_t():
        a = authorized_tokens()
        return T(KEY, SECRET,
                 a['oauth_token'],
                 a['oauth_token_secret'])

    return authorized_t()
#!/usr/bin/python
from tumblpy import Tumblpy as T
from urlparse import urlparse, parse_qs

def authorize(KEY, SECRET, CALLBACK):
    def get_authorization_properties():
        t = T(KEY, SECRET)
        return t \
            .get_authentication_tokens(
                callback_url=CALLBACK)

    auth_p = get_authorization_properties()

    def get_auth_url():
        print('Please connect with Tumblr via: \n%s' \
                % auth_p['auth_url'])
        result_url = \
            raw_input("Copy and paste the accepting url: ")
        return result_url

    def query_string(url):
        return {
            k: v[0] for k, v in
            parse_qs(urlparse(url).query).items()
        }

    def query_string_auth():
        return query_string(get_auth_url())

    def authorized_tokens():
        q = query_string_auth()
        t = T(KEY, SECRET,
              q['oauth_token'],
              auth_p['oauth_token_secret'])
        return t.get_authorized_tokens(q['oauth_verifier'])

    def authorized_t():
        a = authorized_tokens()
        return T(KEY, SECRET,
                 a['oauth_token'],
                 a['oauth_token_secret'])

    return authorized_t()
apache-2.0
Python
722de274d3ee9866c7580a7f95e32de1777e6a3b
Add note
christabor/csscms,christabor/csscms,christabor/csscms
csscms/properties_scraper.py
csscms/properties_scraper.py
from pyquery import PyQuery as pq

"""
A quick and dirty scraper for w3c's css properties list.
See css_properties.py for the example output.
This is meant to be run once, except when
new properties need to be scraped.
"""


def strip_all_prefixes(string):
    bad_prefixes = [
        'text-text-', 'pos-', 'font-font-', 'nav-', 'class-', 'gen-', 'tab-'
    ]
    for prefix in bad_prefixes:
        string = string.replace(prefix, '')
    return string


def normalize_w3c_link(url):
    url = strip_all_prefixes(url)
    return '-'.join(url.replace(
        '.asp', '').replace('css3_pr_', '').replace('pr_', '').split('_'))


def load_all_w3c_props(root_url, max_open=None):
    table_class = '.reference.notranslate'
    data = {}
    urls = []
    doc = pq(url=root_url)
    links = pq(doc).find(table_class).find('a')

    def _process(_, selector):
        if selector is not None:
            prop = pq(selector).find('td').eq(0).text().strip()
            if len(prop) > 0:
                return urls.append(prop)
        else:
            return ''

    for k, link in enumerate(links):
        if max_open is not None:
            if k >= max_open:
                break
        url = pq(link).attr('href')
        follow_doc = pq(url='{}/{}'.format(root_url, url))
        pq(follow_doc).find(table_class).find('tr').each(_process)
        # Normalize property from w3c's url structure
        url = normalize_w3c_link(url)
        # Push all current options
        data[url] = {'dropdown': True, 'props': urls}
        # Mutable container, empty it out for reuse
        urls = []
    return data

print(load_all_w3c_props('http://www.w3schools.com/cssref/'))
from pyquery import PyQuery as pq

"""A quick and dirty scraper for w3c's css properties list."""


def strip_all_prefixes(string):
    bad_prefixes = [
        'text-text-', 'pos-', 'font-font-', 'nav-', 'class-', 'gen-', 'tab-'
    ]
    for prefix in bad_prefixes:
        string = string.replace(prefix, '')
    return string


def normalize_w3c_link(url):
    url = strip_all_prefixes(url)
    return '-'.join(url.replace(
        '.asp', '').replace('css3_pr_', '').replace('pr_', '').split('_'))


def load_all_w3c_props(root_url, max_open=None):
    table_class = '.reference.notranslate'
    data = {}
    urls = []
    doc = pq(url=root_url)
    links = pq(doc).find(table_class).find('a')

    def _process(_, selector):
        if selector is not None:
            prop = pq(selector).find('td').eq(0).text().strip()
            if len(prop) > 0:
                return urls.append(prop)
        else:
            return ''

    for k, link in enumerate(links):
        if max_open is not None:
            if k >= max_open:
                break
        url = pq(link).attr('href')
        follow_doc = pq(url='{}/{}'.format(root_url, url))
        pq(follow_doc).find(table_class).find('tr').each(_process)
        # Normalize property from w3c's url structure
        url = normalize_w3c_link(url)
        # Push all current options
        data[url] = {'dropdown': True, 'props': urls}
        # Mutable container, empty it out for reuse
        urls = []
    return data

print(load_all_w3c_props('http://www.w3schools.com/cssref/'))
mit
Python
d13204abb2cf5d341eff78416dd442c303042697
Modify add_occupant method to raise exception in case of a duplicate
peterpaints/room-allocator
classes/room.py
classes/room.py
class Room(object):
    def __init__(self, room_name, room_type, max_persons):
        self.room_name = room_name
        self.room_type = room_type
        self.max_persons = max_persons
        self.persons = []

    def add_occupant(self, person):
        if person not in self.persons:
            if len(self.persons) < self.max_persons:
                self.persons.append(person)
                print (person.person_type.title() + " " +
                       person.person_name.title() + " " +
                       person.person_surname.title() +
                       " has been allocated " + self.room_type +
                       " " + self.room_name.title())
            else:
                raise Exception(self.room_type.title() + " " +
                                self.room_name.title() +
                                " is at full capacity")
        else:
            raise Exception(person.person_type.title() + " " +
                            person.person_name.title() + " " +
                            person.person_surname.title() +
                            " is already among the occupants in " +
                            self.room_type + " " + self.room_name.title())
class Room(object):
    def __init__(self, room_name, room_type, max_persons):
        self.room_name = room_name
        self.room_type = room_type
        self.max_persons = max_persons
        self.persons = []

    def add_occupant(self, person):
        if len(self.persons) < self.max_persons:
            self.persons.append(person)
            print (person.person_type.title() + " " +
                   person.person_name.title() + " " +
                   person.person_surname.title() +
                   " has been allocated " + self.room_type +
                   " " + self.room_name.title())
        else:
            raise Exception(self.room_type.title() + " " +
                            self.room_name.title() +
                            " is at full capacity")
mit
Python
90d3f00cd8fea8fab9274069ac06ea461f8e4dfd
Send only pics and gifs to OOO_B_R.
nsiregar/reddit2telegram,Fillll/reddit2telegram,nsiregar/reddit2telegram,Fillll/reddit2telegram
channels/ooo_b_r/app.py
channels/ooo_b_r/app.py
#encoding:utf-8

from utils import get_url, weighted_random_subreddit

# Group chat https://yal.sh/dvdahoy
t_channel = '-1001065558871'

subreddit = weighted_random_subreddit({
    'ANormalDayInRussia': 1.0,
    'ANormalDayInAmerica': 0.1,
    'ANormalDayInJapan': 0.01
})


def send_post(submission, r2t):
    what, url, ext = get_url(submission)
    title = submission.title
    link = submission.shortlink
    text = '{}\n{}'.format(title, link)
    return r2t.send_gif_img(what, url, ext, text)
#encoding:utf-8

from utils import get_url, weighted_random_subreddit

# Group chat https://yal.sh/dvdahoy
t_channel = '-1001065558871'

subreddit = weighted_random_subreddit({
    'ANormalDayInRussia': 1.0,
    'ANormalDayInAmerica': 0.1,
    'ANormalDayInJapan': 0.01
})


def send_post(submission, r2t):
    what, url, ext = get_url(submission)
    title = submission.title
    link = submission.shortlink
    text = '{}\n{}'.format(title, link)
    if what == 'text':
        return False
    elif what == 'other':
        return False
    elif what == 'album':
        r2t.send_album(url)
        return True
    elif what in ('gif', 'img'):
        return r2t.send_gif_img(what, url, ext, text)
    else:
        return False
mit
Python
d966b0973da71f5c883697ddd12c2728b2a04cce
Improve git tag to version conversion
autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim,autozimu/LanguageClient-neovim
ci/cleanup-binary-tags.py
ci/cleanup-binary-tags.py
#!/usr/bin/env python3

import os
import subprocess
import re

import semver


def tag_to_version(tag):
    return tag.split('-')[1].lstrip('v')


subprocess.check_call('git pull --tags', shell=True)

tags = subprocess.check_output(
    'git tag --list | grep binary', shell=True).decode('UTF-8').splitlines()
versions = sorted(list(set([tag_to_version(tag) for tag in tags])),
                  key=semver.parse_version_info)
versions_to_delete = versions[:-3]

cmd_delete_local = 'git tag --delete'
cmd_delete_remote = 'git push --delete '
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
if GITHUB_TOKEN:
    cmd_delete_remote += (
        'https://{}@github.com/autozimu/LanguageClient-neovim.git'
        .format(GITHUB_TOKEN))
else:
    cmd_delete_remote += 'origin'

for tag in tags:
    if tag_to_version(tag) in versions_to_delete:
        cmd_delete_local += ' ' + tag
        cmd_delete_remote += ' ' + tag

if not cmd_delete_local.endswith('delete'):
    subprocess.check_call(cmd_delete_local, shell=True)
if not (cmd_delete_remote.endswith('origin') or
        cmd_delete_remote.endswith('.git')):
    subprocess.check_call(cmd_delete_remote, shell=True)
#!/usr/bin/env python3

import os
import subprocess
import re

import semver


def tag_to_version(tag):
    version = re.sub(r'binary-', '', tag)
    version = re.sub(r'-[x86|i686].*', '', version)
    return version


subprocess.check_call('git pull --tags', shell=True)

tags = subprocess.check_output(
    'git tag --list | grep binary', shell=True).decode('UTF-8').splitlines()
versions = sorted(list(set([tag_to_version(tag) for tag in tags])),
                  key=semver.parse_version_info)
versions_to_delete = versions[:-3]

cmd_delete_local = 'git tag --delete'
cmd_delete_remote = 'git push --delete '
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
if GITHUB_TOKEN:
    cmd_delete_remote += (
        'https://{}@github.com/autozimu/LanguageClient-neovim.git'
        .format(GITHUB_TOKEN))
else:
    cmd_delete_remote += 'origin'

for tag in tags:
    if tag_to_version(tag) in versions_to_delete:
        cmd_delete_local += ' ' + tag
        cmd_delete_remote += ' ' + tag

if not cmd_delete_local.endswith('delete'):
    subprocess.check_call(cmd_delete_local, shell=True)
if not (cmd_delete_remote.endswith('origin') or
        cmd_delete_remote.endswith('.git')):
    subprocess.check_call(cmd_delete_remote, shell=True)
mit
Python
94aed149fd39ba9a6dd6fcf5dcc44c6e4f2a09b9
fix imports
it-projects-llc/website-addons,it-projects-llc/website-addons,it-projects-llc/website-addons
website_sale_search_clear/controllers.py
website_sale_search_clear/controllers.py
# -*- coding: utf-8 -*-
from odoo import http
from odoo.addons.website_sale.controllers.main import WebsiteSale as controller


class WebsiteSale(controller):

    @http.route()
    def shop(self, page=0, category=None, search='', **post):
        if category and search:
            category = None
        return super(WebsiteSale, self).shop(page, category, search, **post)
# -*- coding: utf-8 -*-
from openerp import http
from openerp.addons.website_sale.controllers.main import website_sale as controller


class WebsiteSale(controller):

    @http.route()
    def shop(self, page=0, category=None, search='', **post):
        if category and search:
            category = None
        return super(WebsiteSale, self).shop(page, category, search, **post)
mit
Python
683ccc69c51a64146dda838ad01674ca3b95fccd
Remove useless hearing comments router
City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi
democracy/urls_v1.py
democracy/urls_v1.py
from django.conf.urls import include, url
from rest_framework_nested import routers

from democracy.views import (
    CommentViewSet, ContactPersonViewSet, HearingViewSet, ImageViewSet, LabelViewSet, ProjectViewSet,
    RootSectionViewSet, SectionCommentViewSet, SectionViewSet, UserDataViewSet, FileViewSet, ServeFileView
)

router = routers.DefaultRouter()
router.register(r'hearing', HearingViewSet, base_name='hearing')
router.register(r'users', UserDataViewSet, base_name='users')
router.register(r'comment', CommentViewSet, base_name='comment')
router.register(r'image', ImageViewSet, base_name='image')
router.register(r'section', RootSectionViewSet, base_name='section')
router.register(r'label', LabelViewSet, base_name='label')
router.register(r'contact_person', ContactPersonViewSet, base_name='contact_person')
router.register(r'project', ProjectViewSet, base_name='project')
router.register(r'file', FileViewSet, base_name='file')

hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')

section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')

urlpatterns = [
    url(r'^', include(router.urls, namespace='v1')),
    url(r'^', include(hearing_child_router.urls, namespace='v1')),
    url(r'^', include(section_comments_router.urls, namespace='v1')),
    url(r'^download/(?P<filetype>sectionfile|sectionimage)/(?P<pk>\d+)/$', ServeFileView.as_view(), name='serve_file'),
]
from django.conf.urls import include, url
from rest_framework_nested import routers

from democracy.views import (
    CommentViewSet, ContactPersonViewSet, HearingViewSet, ImageViewSet, LabelViewSet, ProjectViewSet,
    RootSectionViewSet, SectionCommentViewSet, SectionViewSet, UserDataViewSet, FileViewSet, ServeFileView
)

router = routers.DefaultRouter()
router.register(r'hearing', HearingViewSet, base_name='hearing')
router.register(r'users', UserDataViewSet, base_name='users')
router.register(r'comment', CommentViewSet, base_name='comment')
router.register(r'image', ImageViewSet, base_name='image')
router.register(r'section', RootSectionViewSet, base_name='section')
router.register(r'label', LabelViewSet, base_name='label')
router.register(r'contact_person', ContactPersonViewSet, base_name='contact_person')
router.register(r'project', ProjectViewSet, base_name='project')
router.register(r'file', FileViewSet, base_name='file')

hearing_comments_router = routers.NestedSimpleRouter(router, r'hearing', lookup='comment_parent')

hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')

section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')

urlpatterns = [
    url(r'^', include(router.urls, namespace='v1')),
    url(r'^', include(hearing_comments_router.urls, namespace='v1')),
    url(r'^', include(hearing_child_router.urls, namespace='v1')),
    url(r'^', include(section_comments_router.urls, namespace='v1')),
    url(r'^download/(?P<filetype>sectionfile|sectionimage)/(?P<pk>\d+)/$', ServeFileView.as_view(), name='serve_file'),
]
mit
Python
ad153499a3982182533033acfa17971a35d7a587
implement __eq__
mandiant/capa,mandiant/capa
capa/features/address.py
capa/features/address.py
import abc

from dncil.clr.token import Token


class Address(abc.ABC):
    @abc.abstractmethod
    def __eq__(self, other):
        ...

    @abc.abstractmethod
    def __lt__(self, other):
        # implement < so that addresses can be sorted from low to high
        ...

    @abc.abstractmethod
    def __hash__(self):
        # implement hash so that addresses can be used in sets and dicts
        ...

    @abc.abstractmethod
    def __repr__(self):
        # implement repr to help during debugging
        ...


class AbsoluteVirtualAddress(int, Address):
    """an absolute memory address"""

    def __new__(cls, v):
        assert v >= 0
        return int.__new__(cls, v)

    def __repr__(self):
        return f"absolute(0x{self:x})"


class RelativeVirtualAddress(int, Address):
    """a memory address relative to a base address"""

    def __repr__(self):
        return f"relative(0x{self:x})"


class FileOffsetAddress(int, Address):
    """an address relative to the start of a file"""

    def __new__(cls, v):
        assert v >= 0
        return int.__new__(cls, v)

    def __repr__(self):
        return f"file(0x{self:x})"


class DNTokenAddress(Address):
    """a .NET token"""

    def __init__(self, token: Token):
        self.token = token

    def __eq__(self, other):
        return self.token.value == other.token.value

    def __lt__(self, other):
        return self.token.value < other.token.value

    def __hash__(self):
        return hash(self.token.value)

    def __repr__(self):
        return f"token(0x{self.token.value:x})"


class DNTokenOffsetAddress(Address):
    """an offset into an object specified by a .NET token"""

    def __init__(self, token: Token, offset: int):
        assert offset >= 0
        self.token = token
        self.offset = offset

    def __eq__(self, other):
        return (self.token.value, self.offset) == (other.token.value, other.offset)

    def __lt__(self, other):
        return (self.token.value, self.offset) < (other.token.value, other.offset)

    def __hash__(self):
        return hash((self.token.value, self.offset))

    def __repr__(self):
        return f"token(0x{self.token.value:x})+(0x{self.offset:x})"


class _NoAddress(Address):
    def __eq__(self, other):
        return True

    def __lt__(self, other):
        return False

    def __hash__(self):
        return hash(0)

    def __repr__(self):
        return "no address"


NO_ADDRESS = _NoAddress()
import abc

from dncil.clr.token import Token


class Address(abc.ABC):
    @abc.abstractmethod
    def __lt__(self, other):
        # implement < so that addresses can be sorted from low to high
        ...

    @abc.abstractmethod
    def __hash__(self):
        # implement hash so that addresses can be used in sets and dicts
        ...

    @abc.abstractmethod
    def __repr__(self):
        # implement repr to help during debugging
        ...


class AbsoluteVirtualAddress(int, Address):
    """an absolute memory address"""

    def __new__(cls, v):
        assert v >= 0
        return int.__new__(cls, v)

    def __repr__(self):
        return f"absolute(0x{self:x})"


class RelativeVirtualAddress(int, Address):
    """a memory address relative to a base address"""

    def __repr__(self):
        return f"relative(0x{self:x})"


class FileOffsetAddress(int, Address):
    """an address relative to the start of a file"""

    def __new__(cls, v):
        assert v >= 0
        return int.__new__(cls, v)

    def __repr__(self):
        return f"file(0x{self:x})"


class DNTokenAddress(Address):
    """a .NET token"""

    def __init__(self, token: Token):
        self.token = token

    def __lt__(self, other):
        return self.token.value < other.token.value

    def __hash__(self):
        return hash(self.token.value)

    def __repr__(self):
        return f"token(0x{self.token.value:x})"


class DNTokenOffsetAddress(Address):
    """an offset into an object specified by a .NET token"""

    def __init__(self, token: Token, offset: int):
        assert offset >= 0
        self.token = token
        self.offset = offset

    def __lt__(self, other):
        return (self.token.value, self.offset) < (other.token.value, other.offset)

    def __hash__(self):
        return hash((self.token.value, self.offset))

    def __repr__(self):
        return f"token(0x{self.token.value:x})+(0x{self.offset:x})"


class _NoAddress(Address):
    def __lt__(self, other):
        return False

    def __hash__(self):
        return hash(0)

    def __repr__(self):
        return "no address"


NO_ADDRESS = _NoAddress()
apache-2.0
Python