commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---
fe14781a46a60a4fdd0101468ae487a691a2154a | Add ontap_command.py Module (#44190) | thaim/ansible,thaim/ansible | lib/ansible/modules/storage/netapp/na_ontap_command.py | lib/ansible/modules/storage/netapp/na_ontap_command.py | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
author: NetApp Ansible Team ([email protected])
description:
- "Run system-cli commands on ONTAP"
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_command
short_description: "NetApp ONTAP Run any cli command"
version_added: "2.7"
options:
command:
description:
- a comma-separated list containing the command and arguments.
'''
EXAMPLES = """
- name: run ontap cli command
na_ontap_command:
hostname: "{{ hostname }}"
username: "{{ admin username }}"
password: "{{ admin password }}"
command: ['version']
- name: run ontap cli command
na_ontap_command:
hostname: "{{ hostname }}"
username: "{{ admin username }}"
password: "{{ admin password }}"
command: ['network', 'interface', 'show']
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPCommand(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
command=dict(required=True, type='list')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
parameters = self.module.params
# set up state variables
self.command = parameters['command']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def run_command(self):
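# Build a ZAPI "system-cli" request: each CLI token in self.command becomes an <arg> child of the <args> element.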
command_obj = netapp_utils.zapi.NaElement("system-cli")
args_obj = netapp_utils.zapi.NaElement("args")
for arg in self.command:
args_obj.add_new_child('arg', arg)
command_obj.add_child_elem(args_obj)
try:
output = self.server.invoke_successfully(command_obj, True)
return output.to_string()
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error running command %s: %s' %
(self.command, to_native(error)),
exception=traceback.format_exc())
def apply(self):
changed = True
output = self.run_command()
self.module.exit_json(changed=changed, msg=output)
def main():
"""
Execute action from playbook
"""
command = NetAppONTAPCommand()
command.apply()
if __name__ == '__main__':
main()
| mit | Python |
|
295afe540c24ded86353402d87c42e072f7a64fa | Initialize makePublicPrivateKeys | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/CrackingCodesWithPython/Chapter23/makePublicPrivateKeys.py | books/CrackingCodesWithPython/Chapter23/makePublicPrivateKeys.py | # Public Key Generator
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import random, sys, os, primeNum, cryptomath
def main():
# Create a public/private keypair with 1024-bit keys:
print('Making key files...')
makeKeyFiles('al_sweigart', 1024)
print('Key files made.')
def generateKey(keySize):
# Creates public/private keys keySize bits in size.
p = 0
q = 0
# Step 1: Create two prime numbers, p and q. Calculate n = p * q:
print('Generating p and q primes...')
while p == q:
p = primeNum.generateLargePrime(keySize)
q = primeNum.generateLargePrime(keySize)
n = p * q
# Step 2: Create a number e that is relatively prime to (p-1)*(q-1):
print('Generating e that is relatively prime to (p-1)*(q-1)...')
while True:
# Keep trying random numbers for e until one is valid:
e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
if cryptomath.gcd(e, (p - 1) * (q - 1)) == 1:
break
# Step 3: Calculate d, the mod inverse of e:
print('Calculating d that is mod inverse of e...')
d = cryptomath.findModInverse(e, (p - 1) * (q - 1))
publicKey = (n, e)
privateKey = (n, d)
print('Public key:', publicKey)
print('Private key:', privateKey)
return (publicKey, privateKey)
def makeKeyFiles(name, keySize):
# Creates two files 'x_pubkey.txt' and 'x_privkey.txt' (where x
# is the value in name) with the n,e and d,e integers written in
# them, delimited by a comma.
# Our safety check will prevent us from overwriting our old key files:
if os.path.exists('%s_pubkey.txt' % (name)) or os.path.exists('%s_privkey.txt' % (name)):
sys.exit('WARNING: The file %s_pubkey.txt or %s_privkey.txt already exists! Use a different name or delete these files and rerun this program.' % (name, name))
publicKey, privateKey = generateKey(keySize)
print()
print('The public key is a %s and a %s digit number.' % (len(str(publicKey[0])), len(str(publicKey[1]))))
print('Writing public key to file %s_pubkey.txt...' % (name))
fo = open('%s_pubkey.txt' % (name), 'w')
fo.write('%s,%s,%s' % (keySize, publicKey[0], publicKey[1]))
fo.close()
print()
print('The private key is a %s and a %s digit number.' % (len(str(privateKey[0])), len(str(privateKey[1]))))
print('Writing private key to file %s_privkey.txt...' % (name))
fo = open('%s_privkey.txt' % (name), 'w')
fo.write('%s,%s,%s' % (keySize, privateKey[0], privateKey[1]))
fo.close()
# If makePublicPrivateKeys.py is run (instead of imported as a module),
# call the main() function:
if __name__ == '__main__':
main() | mit | Python |
|
3e97731449027e5ac0d3a047e1b872956feac528 | Create cracking-the-safe.py | tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015 | Python/cracking-the-safe.py | Python/cracking-the-safe.py | # Time: O(k^n)
# Space: O(k^n)
# There is a box protected by a password.
# The password is n digits, where each letter can be one of the first k digits 0, 1, ..., k-1.
#
# You can keep inputting the password,
# the password will automatically be matched against the last n digits entered.
#
# For example, assuming the password is "345",
# I can open it when I type "012345", but I enter a total of 6 digits.
#
# Please return any string of minimum length that is guaranteed to open the box after the entire string is inputted.
#
# Example 1:
# Input: n = 1, k = 2
# Output: "01"
# Note: "10" will be accepted too.
#
# Example 2:
# Input: n = 2, k = 2
# Output: "00110"
# Note: "01100", "10011", "11001" will be accepted too.
#
# Note:
# - n will be in the range [1, 4].
# - k will be in the range [1, 10].
# - k^n will be at most 4096.
# https://en.wikipedia.org/wiki/De_Bruijn_sequence
class Solution(object):
def crackSafe(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
M = k**(n-1)
P = [q*k+i for i in xrange(k) for q in xrange(M)]
result = []
for i in xrange(k**n):
j = i
while P[j] >= 0:
result.append(str(j//M))
P[j], j = -1, P[j]
return "".join(result) + "0"*(n-1)
# Time: O(n * k^n)
# Space: O(n * k^n)
class Solution2(object):
def crackSafe(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
def dfs(k, node, lookup, result):
for i in xrange(k):
neighbor = node + str(i)
if neighbor not in lookup:
lookup.add(neighbor)
dfs(k, neighbor[1:], lookup, result)
result.append(str(i))
lookup = set()
result = []
dfs(k, "0"*(n-1), lookup, result)
return "".join(result) + "0"*(n-1)
| mit | Python |
|
9c2487ab2c3b8d12e5a5f0f179b2a1fd79496b17 | add tests | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | doajtest/unit/event_consumers/test_application_publisher_in_progress_notify.py | doajtest/unit/event_consumers/test_application_publisher_in_progress_notify.py | from portality import models
from portality import constants
from portality.bll import exceptions
from doajtest.helpers import DoajTestCase
from doajtest.fixtures import ApplicationFixtureFactory
import time
from portality.events.consumers.application_publisher_inprogress_notify import ApplicationPublisherInprogresNotify
class TestApplicationPublisherInProgressNotify(DoajTestCase):
def setUp(self):
super(TestApplicationPublisherInProgressNotify, self).setUp()
def tearDown(self):
super(TestApplicationPublisherInProgressNotify, self).tearDown()
def test_consumes(self):
source = ApplicationFixtureFactory.make_application_source()
event = models.Event(constants.EVENT_APPLICATION_STATUS, context={"application": {}, "old_status": "pending", "new_status": "in progress"})
assert ApplicationPublisherInprogresNotify.consumes(event)
event = models.Event(constants.EVENT_APPLICATION_STATUS,
context={"application": {}, "old_status": "in progress", "new_status": "in progress"})
assert not ApplicationPublisherInprogresNotify.consumes(event)
event = models.Event("test:event", context={"application" : {}})
assert not ApplicationPublisherInprogresNotify.consumes(event)
event = models.Event(constants.EVENT_APPLICATION_STATUS)
assert not ApplicationPublisherInprogresNotify.consumes(event)
def test_consume_success(self):
self._make_and_push_test_context("/")
acc = models.Account()
acc.set_id("publisher")
acc.set_email("[email protected]")
acc.save()
source = ApplicationFixtureFactory.make_application_source()
event = models.Event(constants.EVENT_APPLICATION_STATUS,
context={"application": source, "old_status": "pending",
"new_status": "in progress"})
# event = models.Event(constants.EVENT_APPLICATION_STATUS, context={"application": "abcdefghijk", "old_status": "in progress", "new_status": "revisions_required"})
ApplicationPublisherInprogresNotify.consume(event)
time.sleep(2)
ns = models.Notification.all()
assert len(ns) == 1
n = ns[0]
assert n.who == "publisher", "Expected: {}, Received: {}".format("publisher", n.who)
assert n.created_by == ApplicationPublisherInprogresNotify.ID, "Expected: {}, Received: {}".format(ApplicationPublisherInprogresNotify.ID, n.created_by)
assert n.classification == constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE, "Expected: {}, Received: {}".format(constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE, n.classification)
assert n.message is not None
assert n.action is None
assert not n.is_seen()
def test_consume_fail(self):
event = models.Event(constants.EVENT_APPLICATION_ASSED_ASSIGNED, context={"application": {"dummy" : "data"}})
with self.assertRaises(exceptions.NoSuchObjectException):
ApplicationPublisherInprogresNotify.consume(event)
| apache-2.0 | Python |
|
73ae4839941b802870eaba29b67c8b8a89e43c71 | add backend_service_migration script to call the migration handler | googleinterns/vm-network-migration | backend_service_migration.py | backend_service_migration.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The script takes the arguments and run the backend service migration handler.
Before running:
1. If not already done, enable the Compute Engine API
and check the quota for your project at
https://console.developers.google.com/apis/api/compute
2. This sample uses Application Default Credentials for authentication.
If not already done, install the gcloud CLI from
https://cloud.google.com/sdk and run
`gcloud beta auth application-default login`.
For more information, see
https://developers.google.com/identity/protocols/application-default-credentials
3. Install the Python client library for Google APIs by running
`pip install --upgrade google-api-python-client`
Run the script from the terminal, for example:
python3 backend_service_migration.py --project_id=test-project
--region=us-central1 --backend_service_name=test-backend-service --network=test-network
--subnetwork=test-network --preserve_external_ip=False
"""
import warnings
import argparse
from vm_network_migration.handlers.backend_service_migration import BackendServiceMigration
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--project_id',
help='The project ID of the backend service.')
parser.add_argument('--region', default=None,
help='The region of the the backend service.')
parser.add_argument('--backend_service_name',
help='The name of the the backend service')
parser.add_argument('--network', help='The name of the new network')
parser.add_argument(
'--subnetwork',
default=None,
help='The name of the subnetwork. For auto mode networks,'
' this field is optional')
parser.add_argument(
'--preserve_external_ip',
default=False,
help='Preserve the external IP address')
args = parser.parse_args()
if args.preserve_external_ip == 'True':
args.preserve_external_ip = True
else:
args.preserve_external_ip = False
if args.preserve_external_ip:
warnings.warn(
'You choose to preserve the external IP. If the original instance '
'has an ephemeral IP, it will be reserved as a static external IP after the '
'execution.',
Warning)
continue_execution = input(
'Do you still want to preserve the external IP? y/n: ')
if continue_execution == 'n':
args.preserve_external_ip = False
backend_service_migration = BackendServiceMigration(args.project_id,
args.backend_service_name,
args.network,
args.subnetwork,
args.preserve_external_ip,
args.region)
backend_service_migration.network_migration()
| apache-2.0 | Python |
|
a1b88f50edf9f30f3840c50067545f2d315596aa | create compare.py | kumalee/python-101 | part-1/compare.py | part-1/compare.py | # coding: utf8
print '''
CPython implementation detail: Objects of different types except numbers are ordered by their type names; objects of the same types that don’t support proper comparison are ordered by their address.
>>> 5 < 'foo' # <type 'int'> < <type 'str'>
True
>>> 5 < (1, 2)
True
>>> 5 < {}
True
>>> 5 < [1, 2]
True
>>> [1, 2] > 'foo' # 'list' < 'str'
False
>>> (1, 2) > 'foo' # 'tuple' > 'str'
True
'''
| mit | Python |
|
c7c4a3c68e4950049db4b113576cfa3b2f6748f5 | add test data | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/reports/tests/data/case_list_report_data.py | corehq/apps/reports/tests/data/case_list_report_data.py | dummy_user_list = [
{
'domain': 'case-list-test',
'username': 'active-worker-1',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': '[email protected]',
'uuid': 'active1',
'is_active': True,
'doc_type': 'CommcareUser'
},
{
'domain': 'case-list-test',
'username': 'active-worker-2',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': '[email protected]',
'uuid': 'active2',
'is_active': True,
'doc_type': 'CommcareUser'
},
{
'domain': 'case-list-test',
'username': 'deactivated-worker-1',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': '[email protected]',
'uuid': 'deactive1',
'is_active': False,
'doc_type': 'CommcareUser'
},
{
'domain': 'case-list-test',
'username': 'web-worker-1',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': '[email protected]',
'uuid': 'web1',
'is_active': True,
"timezone": "UTC",
'doc_type': 'WebUser'
},
]
dummy_case_list = [
{
'_id': 'id-1',
'domain': 'case-list-test',
'name': 'Deactivated Owner case 1',
'owner_id': 'deactive1',
'user_id': '[email protected]',
'type': 'case',
'opened_on': '2021-02-24T00:00:00.000000Z',
'modified_on': None,
'closed_on': None,
},
{
'_id': 'id-2',
'domain': 'case-list-test',
'name': 'Active Owner case 1',
'owner_id': 'active1',
'user_id': '[email protected]',
'type': 'case',
'opened_on': '2021-02-24T00:00:00.000000Z',
'modified_on': None,
'closed_on': None,
},
{
'_id': 'id-3',
'domain': 'case-list-test',
'name': 'Active Owner case 2',
'owner_id': 'active1',
'user_id': '[email protected]',
'type': 'case',
'opened_on': '2021-02-24T00:00:00.000000Z',
'modified_on': None,
'closed_on': None,
},
{
'_id': 'id-4',
'domain': 'case-list-test',
'name': 'Web Owner case 1',
'owner_id': 'web1',
'user_id': '[email protected]',
'type': 'case',
'opened_on': '2021-02-24T00:00:00.000000Z',
'modified_on': None,
'closed_on': None,
},
{
'_id': 'id-5',
'domain': 'case-list-test',
'name': 'Active Owner case 2',
'owner_id': 'active2',
'user_id': '[email protected]',
'type': 'case',
'opened_on': '2021-02-24T00:00:00.000000Z',
'modified_on': None,
'closed_on': None,
},
]
| bsd-3-clause | Python |
|
3f24e7b51281031fa9713b737a9647b305105a89 | Write unittest for parse_file() in ConfigReader.py | johnmarcampbell/twircBot | src/unittests.py | src/unittests.py | from ConfigReader import ConfigReader as cr
import unittest
import os
class testConfigReader(unittest.TestCase):
"""Test cases for configReader"""
def setUp(self):
"""Set up some important variables"""
self.example_config_filename = 'testConfig.config'
# Set some values
oauth_string = 'xxxxxxxxxxx'
nick_string = 'justinfan4242'
channels_string = 'channel1 channel2'
channels_list = ['channel1', 'channel2']
log_string = 'default.log'
time_format_string = "'[%Y-%m-%d %H:%M:%S]'"
time_format_value = '[%Y-%m-%d %H:%M:%S]'
host_string = 'irc.twitch.tv'
port_string = '6667'
port_int = 6667
block_size_string = '4096'
block_size_int = 4096
reconnect_timer_string = '600'
reconnect_timer_int = 600
stayalive_timer_string = '0'
stayalive_timer_int = 0
connect_timeout_string = '10'
connect_timeout_float = 10
receive_timeout_string = '0.1'
receive_timeout_float = 0.1
# Write a config file
config_file_string = 'oauth: ' + oauth_string + '\n'
config_file_string += 'nick: ' + nick_string + '\n'
config_file_string += 'channels: ' + channels_string + '\n'
config_file_string += 'log: ' + log_string + '\n'
config_file_string += 'time_format: ' + time_format_string + '\n'
config_file_string += 'host: ' + host_string + '\n'
config_file_string += 'port: ' + port_string + '\n'
config_file_string += 'block_size: ' + block_size_string + '\n'
config_file_string += 'reconnect_timer: ' + reconnect_timer_string + '\n'
config_file_string += 'stayalive_timer: ' + stayalive_timer_string + '\n'
config_file_string += 'connect_timeout: ' + connect_timeout_string + '\n'
config_file_string += 'receive_timeout: ' + receive_timeout_string + '\n'
config_example = open(self.example_config_filename,'w')
config_example.write(config_file_string)
config_example.close()
self.exemplar_config = {
'oauth': oauth_string,
'nick': nick_string,
'channels': channels_list,
'log': log_string,
'time_format': time_format_value,
'host': host_string,
'port': port_int,
'block_size': block_size_int,
'reconnect_timer': reconnect_timer_int,
'stayalive_timer': stayalive_timer_int,
'connect_timeout': connect_timeout_float,
'receive_timeout': receive_timeout_float
}
def test_parse_file(self):
"""Test parse_file()"""
reader = cr()
reader.parse_file(self.example_config_filename)
self.assertEqual(reader.configuration, self.exemplar_config)
def tearDown(self):
"""Delete the example config file, etc"""
os.remove(self.example_config_filename)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
bbd6e538ec45c3650b7b3b7d520613fb4967236a | Print 4x4 grid | bandarji/lekhan | python/reddit/think_python_grid.py | python/reddit/think_python_grid.py | def grid():
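# Each box is four '- ' dashes wide and four blank rows tall; '+' marks corners and '|' the sides.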
delimiter_row = ('{}{}'.format('+ ', '- ' * 4) * 4) + '+'
openspace_row = ('{}{}'.format('|', ' ' * 9) * 4) + '|'
for box_row in range(4 * 4):
if box_row % 4 == 0:
print(delimiter_row)
print(openspace_row)
else:
print(openspace_row)
print(delimiter_row)
grid()
| apache-2.0 | Python |
|
6e535a2d597f172d9342fb8a547335890c474b49 | Add a sample config file | byanofsky/playa-vista-neighborhood,byanofsky/playa-vista-neighborhood,byanofsky/playa-vista-neighborhood | src/config-sample.py | src/config-sample.py | FLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
| mit | Python |
|
7b545e210aa534b5d76e30769a125285cb40bfa8 | Create PrintFunctionBancorFormula.py | enjin/contracts | solidity/python/constants/PrintFunctionBancorFormula.py | solidity/python/constants/PrintFunctionBancorFormula.py | from math import factorial
MIN_PRECISION = 32
MAX_PRECISION = 127
NUM_OF_PRECISIONS = 128
NUM_OF_COEFS = 34
maxFactorial = factorial(NUM_OF_COEFS)
coefficients = [maxFactorial/factorial(i) for i in range(NUM_OF_COEFS)]
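# Fixed-point Taylor approximation of e^x with 'precision' fractional bits;
# safeMul/safeAdd assert that no intermediate value exceeds 256 bits.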
def fixedExpUnsafe(x,precision):
xi = x
res = safeMul(coefficients[0],1 << precision)
for i in range(1,NUM_OF_COEFS-1):
res = safeAdd(res,safeMul(xi,coefficients[i]))
xi = safeMul(xi,x) >> precision
res = safeAdd(res,safeMul(xi,coefficients[-1]))
return res / coefficients[0]
def safeMul(x,y):
assert(x * y < (1 << 256))
return x * y
def safeAdd(x,y):
assert(x + y < (1 << 256))
return x + y
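# Binary-search the largest input in [1, 2**256) for which func(input, args)
# completes without tripping an overflow assertion.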
def binarySearch(func,args):
lo = 1
hi = 1 << 256
while lo+1 < hi:
mid = (lo+hi)/2
try:
func(mid,args)
lo = mid
except Exception,error:
hi = mid
try:
func(hi,args)
return hi
except Exception,error:
func(lo,args)
return lo
maxExpArray = [0]*NUM_OF_PRECISIONS
for precision in range(NUM_OF_PRECISIONS):
maxExpArray[precision] = binarySearch(fixedExpUnsafe,precision)
print ' function BancorFormula() {'
for precision in range(NUM_OF_PRECISIONS):
prefix = ' ' if MIN_PRECISION <= precision <= MAX_PRECISION else '//'
print ' {} maxExpArray[{:3d}] = 0x{:x};'.format(prefix,precision,maxExpArray[precision])
print ' }'
| apache-2.0 | Python |
|
8b0130ccb318f7f04daf8e8fa7532c88afb9f7c2 | convert eexec doctests into eexec_test.py | fonttools/fonttools,googlefonts/fonttools | Tests/misc/eexec_test.py | Tests/misc/eexec_test.py | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.eexec import decrypt, encrypt
def test_decrypt():
testStr = b"\0\0asdadads asds\265"
decryptedStr, R = decrypt(testStr, 12321)
assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
assert R == 36142
def test_encrypt():
testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
encryptedStr, R = encrypt(testStr, 12321)
assert encryptedStr == b"\0\0asdadads asds\265"
assert R == 36142
| mit | Python |
|
9ea8b1ea9eaf7906abaf9cfe73bbe19b581fa562 | Add TVA. | divergentdave/inspectors-general,lukerosiak/inspectors-general | inspectors/tva.py | inspectors/tva.py | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://oig.tva.gov
# Oldest report: 1998
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://oig.tva.gov/reports/{year}.html"
SEMIANNUAL_REPORTS_URL = "http://oig.tva.gov/reports/oig-reports.xml"
PDF_REPORT_FORMAT = "http://oig.tva.gov/reports/node/semi/{report_number}/semi{report_number}.pdf"
def run(options):
year_range = inspector.year_range(options)
# Pull the reports
for year in year_range:
if year < 2005: # This is the earliest audits go back
continue
url = AUDIT_REPORTS_URL.format(year=year)
doc = BeautifulSoup(utils.download(url))
results = doc.select("div.content")
for result in results:
report = report_from(result, url, year_range)
if report:
inspector.save_report(report)
doc = BeautifulSoup(utils.download(SEMIANNUAL_REPORTS_URL))
results = doc.select("report")
for result in results:
report = semiannual_report_from(result, year_range)
if report:
inspector.save_report(report)
def report_from(result, landing_url, year_range):
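# Audit headings read "<published date> - <title> - <report id>", so split on the first two hyphens only.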
header = result.find_previous("p", class_="heading")
published_on_text, title, report_id = header.text.split("-", 2)
title = title.strip()
report_id = report_id.strip().replace("/", "-")
if "summary only" in result.text.lower():
unreleased = True
report_url = None
else:
unreleased = False
report_url = urljoin(landing_url, result.find("a").get('href'))
# Skip the last 'p' since it is just the report link
summary_text = [paragraph.text for paragraph in result.findAll("p")[:-1]]
summary = "\n".join(summary_text)
# Some reports list multiple dates. Split on '&' to get the latter.
published_on_text = published_on_text.split("&")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'tva',
'inspector_url': 'http://oig.tva.gov',
'agency': 'tva',
'agency_name': 'Tennessee Valley Authority',
'report_id': report_id,
'url': report_url,
'title': title,
'summary': summary,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if unreleased:
report['unreleased'] = unreleased
report['landing_url'] = landing_url
return report
def semiannual_report_from(result, year_range):
report_url = urljoin(SEMIANNUAL_REPORTS_URL, result.get('pdfurl'))
if report_url.endswith("index.html"):
# Sometime they link to the landing page instead of the report. We convert
# the url to get the actual report.
report_number = report_url.split("/")[-2]
report_url = PDF_REPORT_FORMAT.format(report_number=report_number)
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
published_on_text = result.find("date").text
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
title = "Semiannual Report {}".format(published_on_text)
alternative_title = result.find("title").text.strip()
if alternative_title:
title = "{} ({})".format(alternative_title, title)
summary = result.find("summary").text.strip()
report = {
'inspector': 'tva',
'inspector_url': 'http://oig.tva.gov',
'agency': 'tva',
'agency_name': 'Tennessee Valley Authority',
'report_id': report_id,
'url': report_url,
'title': title,
'summary': summary,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python |
|
6515e45e6d717ed2c84789a5d0941533465496b7 | update test | h2oai/h2o-3,mathemage/h2o-3,YzPaul3/h2o-3,michalkurka/h2o-3,YzPaul3/h2o-3,spennihana/h2o-3,mathemage/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,YzPaul3/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,spennihana/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,YzPaul3/h2o-3,h2oai/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,YzPaul3/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,spennihana/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,michalkurka/h2o-3 | h2o-py/tests/testdir_munging/pyunit_insert_missing.py | h2o-py/tests/testdir_munging/pyunit_insert_missing.py | from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def insert_missing():
# Connect to a pre-existing cluster
data = [[1, 2, 3, 1, 'a', 1, 9],
[1, 6, 4, 2, 'a', 1, 9],
[2, 3, 8, 6, 'b', 1, 9],
[3, 4, 3, 2, 'b', 3, 8],
[4, 5, 9, 5, 'c', 2, 8],
[5, 7, 10,7, 'b', 8, 8]]
h2o_data = h2o.H2OFrame(data)
h2o_data.insert_missing_values(fraction = 0.0)
print(h2o_data)
num_nas = sum([v.isna().sum() for v in h2o_data])
assert num_nas == 0, "Expected no missing values inserted, but got {0}".format(num_nas)
h2o_data.insert_missing_values(fraction = 1.0)
print(h2o_data)
num_nas = sum([v.isna().sum() for v in h2o_data])
assert num_nas == h2o_data.nrow*h2o_data.ncol, "Expected all missing values inserted, but got {0}".format(num_nas)
if __name__ == "__main__":
pyunit_utils.standalone_test(insert_missing)
else:
insert_missing()
| from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def insert_missing():
# Connect to a pre-existing cluster
data = [[1, 2, 3, 1, 'a', 1, 9],
[1, 6, 4, 2, 'a', 1, 9],
[2, 3, 8, 6, 'b', 1, 9],
[3, 4, 3, 2, 'b', 3, 8],
[4, 5, 9, 5, 'c', 2, 8],
[5, 7, 10,7, 'b', 8, 8]]
h2o_data = h2o.H2OFrame(data)
h2o_data.insert_missing_values(fraction = 0.0)
num_nas = sum([h2o_data[c].isna().sum() for c in range(h2o_data.ncol)])
assert num_nas == 0, "Expected no missing values inserted, but got {0}".format(num_nas)
h2o_data.insert_missing_values(fraction = 1.0)
num_nas = sum([h2o_data[c].isna().sum() for c in range(h2o_data.ncol)])
assert num_nas == h2o_data.nrow*h2o_data.ncol, "Expected all missing values inserted, but got {0}".format(num_nas)
if __name__ == "__main__":
pyunit_utils.standalone_test(insert_missing)
else:
insert_missing()
| apache-2.0 | Python |
1de77375a12e26693c89f5fe824df82719bc8632 | Add dummy directory | prophile/jacquard,prophile/jacquard | jacquard/directory/dummy.py | jacquard/directory/dummy.py | from .base import Directory
class DummyDirectory(Directory):
def __init__(self, users=()):
self.users = {x.id: x for x in users}
def lookup(self, user_id):
return self.users[user_id]
def all_users(self):
return self.users.values()
| mit | Python |
|
90eda86a7bbd1dc28023a6c5df1f964add3ddf55 | add client test for oaipmh endpoint. | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | test/oaipmh_client_test.py | test/oaipmh_client_test.py | import requests
from lxml import etree
NS = "{http://www.openarchives.org/OAI/2.0/}"
JOURNAL_BASE_URL = "http://localhost:5004/oai"
ARTICLE_BASE_URL = "http://localhost:5004/oai.article"
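# Issue a single ListRecords request and return the resumption token, or None once the list is complete.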
def harvest(base_url, resToken=None):
url = base_url + "?verb=ListRecords"
if resToken is not None:
url += "&resumptionToken=" + resToken
else:
url += "&metadataPrefix=oai_dc"
print "harvesting " + url
resp = requests.get(url)
assert resp.status_code == 200, resp.text
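# Skip the 39-character XML declaration: lxml refuses to parse unicode strings that carry an encoding declaration.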
xml = etree.fromstring(resp.text[39:])
rtel = xml.find(".//" + NS + "resumptionToken")
if rtel is not None and (rtel.text is not None and rtel.text != ""):
print "resumption token", rtel.text, "cursor", rtel.get("cursor") + "/" + rtel.get("completeListSize")
return rtel.text
print "no resumption token, complete"
return None
# journals
rt = None
while True:
rt = harvest(JOURNAL_BASE_URL, rt)
if rt is None:
break
# articles
rt = None
while True:
rt = harvest(ARTICLE_BASE_URL, rt)
if rt is None:
break | apache-2.0 | Python |
|
294f8721799f6562b7d7f3f31a68f25cb24c964f | Add Spanish Código Cuenta Corriente (CCC) | holvi/python-stdnum,holvi/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,arthurdejong/python-stdnum,arthurdejong/python-stdnum | stdnum/es/ccc.py | stdnum/es/ccc.py | # ccc.py - functions for handling Spanish CCC bank account code
# coding: utf-8
#
# Copyright (C) 2016 David García Garzón
# Copyright (C) 2016 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""CCC (Código Cuenta Corriente, Spanish Bank Account Code)
CCC code is the country-specific part in Spanish IBAN codes. In order to
fully validate an Spanish IBAN you have to validate as well the country
specific part as a valid CCC. It was used for home banking transactions until
February 1st 2014 when IBAN codes started to be used as an account ID.
The CCC has 20 digits, all being numbers: EEEE OOOO DD NNNNNNNNNN
* EEEE: banking entity
* OOOO: office
* DD: check digits
* NNNNN NNNNN: account identifier
This module does not check if the bank code to exist. Existing bank codes are
published on the 'Registro de Entidades' by 'Banco de España' (Spanish
Central Bank).
More information:
* https://es.wikipedia.org/wiki/Código_cuenta_cliente
* http://www.bde.es/bde/es/secciones/servicios/Particulares_y_e/Registros_de_Ent/
>>> validate('1234-1234-16 1234567890')
'12341234161234567890'
>>> validate('134-1234-16 1234567890') # wrong length
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('12X4-1234-16 1234567890') # non numbers
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('1234-1234-00 1234567890') # invalid check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('12341234161234567890')
'1234 1234 16 12345 67890'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip().upper()
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return ' '.join([
number[0:4],
number[4:8],
number[8:10],
number[10:15],
number[15:20],
])
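# Check digits use a weighted mod-11 scheme: digit i is weighted 2**i;
# remainders 0 and 1 map to themselves, anything else to 11 minus the remainder.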
def _calc_check_digit(number):
"""Calculate a single check digit on the provided part of the number."""
check = sum(int(n) * 2 ** i for i, n in enumerate(number)) % 11
return str(check if check < 2 else 11 - check)
def calc_check_digits(number):
"""Calculate the check digits for the number. The supplied number should
have check digits included but are ignored."""
number = compact(number)
return (
_calc_check_digit('00' + number[:8]) + _calc_check_digit(number[10:]))
def validate(number):
"""Checks to see if the number provided is a valid CCC."""
number = compact(number)
if len(number) != 20:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
if number[8:10] != calc_check_digits(number):
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid CCC."""
try:
return bool(validate(number))
except ValidationError:
return False
| lgpl-2.1 | Python |
|
0ba3dff1e150d534e4eda086ebbd53ec3789d82c | Add alg_balance_symbols.py | bowen0701/algorithms_data_structures | alg_max_connected_colors.py | alg_max_connected_colors.py | def max_connected_colors():
pass
def main():
# A grid of connected colors: 5 (of 2's).
grid = [[1, 1, 2, 3],
[1, 2, 3, 2],
[3, 2, 2, 2]]
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
|
ce6052ee9df9ca83ac2da691eb51a8eaea0ab603 | Comment model migration | v1k45/blogghar,v1k45/blogghar,v1k45/blogghar | comments/migrations/0001_initial.py | comments/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-10 22:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0007_post_tags'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(max_length=500)),
('is_removed', models.BooleanField(default=False)),
('is_public', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
| mit | Python |
|
74260fbf266628d4f8afbbab61bbd6de0ddfe7fe | Remove unused constant | openstack/dragonflow,openstack/dragonflow,openstack/dragonflow | dragonflow/neutron/common/constants.py | dragonflow/neutron/common/constants.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DF_REMOTE_PORT_TYPE = 'remote_port'
DF_BINDING_PROFILE_PORT_KEY = 'port_key'
DF_BINDING_PROFILE_HOST_IP = 'host_ip'
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
DF_REMOTE_PORT_TYPE = 'remote_port'
DF_BINDING_PROFILE_PORT_KEY = 'port_key'
DF_BINDING_PROFILE_HOST_IP = 'host_ip'
DF_PORT_BINDING_PROFILE = portbindings.PROFILE
| apache-2.0 | Python |
22298d91fff788c37395cdad9245b3e7ed20cfdf | Add a snippet (Python OpenCV). | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/opencv/opencv_2/images/display_image_with_matplotlib.py | python/opencv/opencv_2/images/display_image_with_matplotlib.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
# Parse the programm options (get the path of the image file to display)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np = cv.imread(infile_str, imread_flags)
plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
plt.xticks([]) # to hide tick values on X axis
plt.yticks([]) # to hide tick values on Y axis
plt.show()
if __name__ == '__main__':
main()
| mit | Python |
|
ba9e4c6b003cc002e5bc7216da960e47f9fe5424 | Print information about all nitrogens. | berquist/orcaparse | copper_imidazole_csv_allnitrogen.py | copper_imidazole_csv_allnitrogen.py | #!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
| mpl-2.0 | Python |
|
eb0772fc6c30d98b83bf1c8e7d83af21066ae45b | Add peek method and implementation | Deepak345/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms | data_structures/Stack/Python/Stack.py | data_structures/Stack/Python/Stack.py | # Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1] | # Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element) | cc0-1.0 | Python |
825c4d613915d43aea2e6ee0bc5d5b49ed0a4500 | Create a simple method to segment a trip into sections | shankari/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,e-mission/e-mission-server | emission/analysis/classification/segmentation/section_segmentation.py | emission/analysis/classification/segmentation/section_segmentation.py | # Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
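# Split a trip into sections: scan its non-tilting activity points and start a new
# section whenever the detected activity changes with enough confidence.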
def segment_into_sections(trip):
points_df = lq.get_activities_for_section(trip)
no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
section_list = []
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": trip.start_ts, "start_time": trip.start_time,
"activity": no_tilting_points_df.iloc[0].activity})
for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
if row["activity"] != curr_section.activity:
# Let's add a second check here for confidence and types of activities
if (row['agc'] > 60 and
row['activity'] != lq.Activities.UNKNOWN and
row['activity'] != lq.Activities.STILL):
# Because the first section is initialized with the first activity.
# So when idx == 0, the activities will be equal and this is
# guaranteed to not be invoked
assert(idx > 0)
prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
row["activity"], curr_section.activity,
str(pydt.datetime.fromtimestamp(prev_ts/1000))))
# complete this section
curr_section.end_ts = prev_ts
curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
section_list.append(curr_section)
# make a new section
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": prev_ts,
"start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
"activity": row["activity"]})
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
print("Detected trip end! Ending section at %s" % trip.end_time)
# End the last section at the same time as the trip
curr_section.end_ts = trip.end_ts
curr_section.end_time = trip.end_time
section_list.append(curr_section)
return section_list
| bsd-3-clause | Python |
|
5f2cd26054adff5a1fbf9ba5d56766b972f46670 | Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading. | mhils/pyopenssl,kediacorporation/pyopenssl,reaperhulk/pyopenssl,r0ro/pyopenssl,mhils/pyopenssl,daodaoliang/pyopenssl,elitest/pyopenssl,kjav/pyopenssl,kediacorporation/pyopenssl,alex/pyopenssl,lvh/pyopenssl,samv/pyopenssl,Lukasa/pyopenssl,mitghi/pyopenssl,msabramo/pyOpenSSL,reaperhulk/pyopenssl,hynek/pyopenssl,pyca/pyopenssl,rackerlabs/pyopenssl,rackerlabs/pyopenssl,EnerNOC/pyopenssl,lvh/pyopenssl,adamwolf/pyopenssl,Lukasa/pyopenssl,mitghi/pyopenssl,sorenh/pyopenssl,EnerNOC/pyopenssl,msabramo/pyOpenSSL,EnerNOC/pyopenssl,hynek/pyopenssl,msabramo/pyOpenSSL,aalba6675/pyopenssl,mschmo/pyopenssl,aalba6675/pyopenssl,JensTimmerman/pyopenssl,r0ro/pyopenssl,sholsapp/pyopenssl,alex/pyopenssl,JensTimmerman/pyopenssl | leakcheck/thread-key-gen.py | leakcheck/thread-key-gen.py | # Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
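# Each worker generates 100 keys; with the GIL released during key generation these threads can run concurrently.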
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
| apache-2.0 | Python |
|
c87be0f98295d64addc01529999996b566c80f2c | add sent notification status | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/00xx_add_sent_notification_status.py | migrations/versions/00xx_add_sent_notification_status.py | """empty message
Revision ID: 00xx_add_sent_notification_status
Revises: 0075_create_rates_table
Create Date: 2017-04-24 16:55:20.731069
"""
# revision identifiers, used by Alembic.
revision = '00xx_sent_notification_status'
down_revision = '0075_create_rates_table'
from alembic import op
import sqlalchemy as sa
enum_name = 'notify_status_type'
tmp_name = 'tmp_' + enum_name
old_options = (
'created',
'sending',
'delivered',
'pending',
'failed',
'technical-failure',
'temporary-failure',
'permanent-failure'
)
new_options = old_options + ('sent',)
old_type = sa.Enum(*old_options, name=enum_name)
new_type = sa.Enum(*new_options, name=enum_name)
alter_str = 'ALTER TABLE {table} ALTER COLUMN status TYPE {enum} USING status::text::notify_status_type '
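# Enum swap pattern: rename the old type, create the replacement, re-type the status columns through text, then drop the old type.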
def upgrade():
op.execute('ALTER TYPE {enum} RENAME TO {tmp_name}'.format(enum=enum_name, tmp_name=tmp_name))
new_type.create(op.get_bind())
op.execute(alter_str.format(table='notifications', enum=enum_name))
op.execute(alter_str.format(table='notification_history', enum=enum_name))
op.execute('DROP TYPE ' + tmp_name)
def downgrade():
op.execute('ALTER TYPE {enum} RENAME TO {tmp_name}'.format(enum=enum_name, tmp_name=tmp_name))
# Convert 'sent' status into 'sending'
update_str = "UPDATE {table} SET status='sending' WHERE status='sent'"
op.execute(update_str.format(table='notifications'))
op.execute(update_str.format(table='notification_history'))
old_type.create(op.get_bind())
op.execute(alter_str.format(table='notifications', enum=enum_name))
op.execute(alter_str.format(table='notification_history', enum=enum_name))
op.execute('DROP TYPE ' + tmp_name)
| mit | Python |
|
7597e834288c21065703bcdc86530a0ad5414a95 | backup strategy tasks | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/backup/tasks.py | nodeconductor/backup/tasks.py | from celery import shared_task
@shared_task
def backup_task(backupable_instance):
backupable_instance.get_backup_strategy.backup()
@shared_task
def restore_task(backupable_instance):
backupable_instance.get_backup_strategy.restore()
@shared_task
def delete_task(backupable_instance):
backupable_instance.get_backup_strategy.delete()
| mit | Python |
|
4820013e207947fe7ff94777cd8dcf1ed474eab1 | Add migration for account lockout fields on User | richgieg/flask-now,richgieg/flask-now | migrations/versions/fb6a6554b21_add_account_lockout_fields_to_user.py | migrations/versions/fb6a6554b21_add_account_lockout_fields_to_user.py | """Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'locked_out')
op.drop_column('users', 'last_failed_login_attempt')
op.drop_column('users', 'failed_login_attempts')
### end Alembic commands ###
| mit | Python |
|
cee7f23df93f4a09550348e30709aa1e6e6969fc | use net ip availability api def from neutron-lib | noironetworks/neutron,noironetworks/neutron,eayunstack/neutron,eayunstack/neutron,openstack/neutron,mahak/neutron,huntxu/neutron,huntxu/neutron,openstack/neutron,mahak/neutron,openstack/neutron,mahak/neutron | neutron/extensions/network_ip_availability.py | neutron/extensions/network_ip_availability.py | # Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import network_ip_availability as apidef
from neutron_lib.api import extensions as api_extensions
import neutron.api.extensions as extensions
import neutron.api.v2.base as base
import neutron.services.network_ip_availability.plugin as plugin
class Network_ip_availability(api_extensions.APIExtensionDescriptor):
"""Extension class supporting network ip availability information."""
api_definition = apidef
@classmethod
def get_resources(cls):
"""Returns Extended Resource for service type management."""
resource_attributes = apidef.RESOURCE_ATTRIBUTE_MAP[
apidef.RESOURCE_PLURAL]
controller = base.create_resource(
apidef.RESOURCE_PLURAL,
apidef.RESOURCE_NAME,
plugin.NetworkIPAvailabilityPlugin.get_instance(),
resource_attributes)
return [extensions.ResourceExtension(apidef.COLLECTION_NAME,
controller,
attr_map=resource_attributes)]
| # Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api import extensions as api_extensions
import neutron.api.extensions as extensions
import neutron.api.v2.base as base
import neutron.services.network_ip_availability.plugin as plugin
RESOURCE_NAME = "network_ip_availability"
RESOURCE_PLURAL = "network_ip_availabilities"
COLLECTION_NAME = RESOURCE_PLURAL.replace('_', '-')
EXT_ALIAS = RESOURCE_NAME.replace('_', '-')

RESOURCE_ATTRIBUTE_MAP = {
    RESOURCE_PLURAL: {
        'network_id': {'allow_post': False, 'allow_put': False,
                       'is_visible': True},
        'network_name': {'allow_post': False, 'allow_put': False,
                         'is_visible': True},
        'tenant_id': {'allow_post': False, 'allow_put': False,
                      'is_visible': True},
        'total_ips': {'allow_post': False, 'allow_put': False,
                      'is_visible': True},
        'used_ips': {'allow_post': False, 'allow_put': False,
                     'is_visible': True},
        'subnet_ip_availability': {'allow_post': False, 'allow_put': False,
                                   'is_visible': True},
        # TODO(wwriverrat) Make composite attribute for subnet_ip_availability
    }
}


class Network_ip_availability(api_extensions.ExtensionDescriptor):
    """Extension class supporting network ip availability information."""

    @classmethod
    def get_name(cls):
        return "Network IP Availability"

    @classmethod
    def get_alias(cls):
        return EXT_ALIAS

    @classmethod
    def get_description(cls):
        return "Provides IP availability data for each network and subnet."

    @classmethod
    def get_updated(cls):
        return "2015-09-24T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resource for service type management."""
        resource_attributes = RESOURCE_ATTRIBUTE_MAP[RESOURCE_PLURAL]
        controller = base.create_resource(
            RESOURCE_PLURAL,
            RESOURCE_NAME,
            plugin.NetworkIPAvailabilityPlugin.get_instance(),
            resource_attributes)
        return [extensions.ResourceExtension(COLLECTION_NAME,
                                             controller,
                                             attr_map=resource_attributes)]

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
| apache-2.0 | Python |
6fabbe85bb74788641897daf8b162eac3d47b0aa | Add script for downloading Indonesia price data | FAB4D/humanitas,FAB4D/humanitas,FAB4D/humanitas | data_crunching/indonesia_timeseries/download_indonesia_prices.py | data_crunching/indonesia_timeseries/download_indonesia_prices.py | #!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This script downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""

def download_table(date):
    """Download price table for a given date"""
    main_url = 'http://m.pip.kementan.org/index.php'
    params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
    req = urllib2.Request(main_url, params)
    response = urllib2.urlopen(req)
    html_code = response.read()
    regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
    match = regex.search(html_code)
    if not match:
        print "ERROR: table not detected"
        sys.exit(1)
    table_html = match.group(1)
    # Remove thousands separators inside numbers (e.g. 1,234 -> 1234)
    table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
    table = etree.XML(table_html)
    rows = iter(table)
    actual_headers = [col.text for col in next(rows)]
    # English names for the Indonesian column headers scraped above
    headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
    print "; ".join(headers), "\n"
    # Print table
    for row in rows:
        if all(v.text is None for v in row):
            continue
        print ('''"%s"''') % row[0].text,
        for col in row[1:]:
            print col.text,
        print


def parse_date(date_string):
    """Check date"""
    match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
    if not match:
        sys.exit("ERROR: invalid date")
    day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
    return datetime.date(year, month, day)


def usage():
    print usage_str


if __name__ == "__main__":
    if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
        usage()
        sys.exit(0)
    date_string = sys.argv[1]
    date = parse_date(date_string)
    download_table(date)
| bsd-3-clause | Python |
|
4fdf2c32bcd937ba2fc21dbaad8a81620c02fb17 | Fix part of #5134: Add test for core.storage.config.gae_models (#5565) | kevinlee12/oppia,prasanna08/oppia,prasanna08/oppia,oppia/oppia,kevinlee12/oppia,souravbadami/oppia,souravbadami/oppia,souravbadami/oppia,prasanna08/oppia,brianrodri/oppia,oppia/oppia,oppia/oppia,oppia/oppia,brianrodri/oppia,souravbadami/oppia,kevinlee12/oppia,prasanna08/oppia,prasanna08/oppia,oppia/oppia,brianrodri/oppia,kevinlee12/oppia,souravbadami/oppia,kevinlee12/oppia,brianrodri/oppia,brianrodri/oppia | core/storage/config/gae_models_test.py | core/storage/config/gae_models_test.py | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.platform import models
from core.tests import test_utils
import feconf
(config_models,) = models.Registry.import_models([models.NAMES.config])

class ConfigPropertyModelUnitTests(test_utils.GenericTestBase):
    """Test ConfigPropertyModel class."""

    def test_create_model(self):
        config_model = config_models.ConfigPropertyModel(
            value='b')
        self.assertEqual(config_model.value, 'b')

    def test_commit(self):
        config_model1 = config_models.ConfigPropertyModel(
            id='config_model1', value='c')
        config_model1.commit(feconf.SYSTEM_COMMITTER_ID, [])
        retrieved_model1 = config_models.ConfigPropertyModel.get_version(
            'config_model1', 1)
        self.assertEqual(retrieved_model1.value, 'c')
        retrieved_model1.value = 'd'
        retrieved_model1.commit(feconf.SYSTEM_COMMITTER_ID, [])
        retrieved_model2 = config_models.ConfigPropertyModel.get_version(
            'config_model1', 2)
        self.assertEqual(retrieved_model2.value, 'd')
| apache-2.0 | Python |
|
65969d0251dc5031328132cf2043f1f76ee90d72 | Include the demo as a separate file | CylonicRaider/cwidgets | demo.py | demo.py |
import sys, curses
from cwidgets import *
from cwidgets import _LOG

def demo(window):
    # Create the root of the widget hierarchy.
    root = WidgetRoot(window)
    # Wrap the UI in a Viewport to avoid crashes at small resolutions.
    vp = root.add(Viewport())
    # Push the UI together to avoid spreading everything over the screen.
    cont = vp.add(AlignContainer())
    # The user-visible "window"; with a border and the bottom line pushed
    # inside by one line height.
    win = cont.add(MarginContainer(border=True, insets=(0, 0, 1, 0)))
    # Decoratively enclose the title
    title_wrapper = win.add(TeeContainer(), slot=MarginContainer.POS_TOP)
    # Add the title
    title = title_wrapper.add(Label('cwidgets demo'))
    # Add the content. This could also be a nested Viewport containing
    # a more complex UI.
    # When text is typed into the entry box, it will increase smoothly (along
    # with the remaining UI) until it's 70 columns or 20 rows (because of the
    # multiline setting, it can have multiple lines) large, then, it will not
    # grow further (along the corresponding axis), and scroll instead.
    content = win.add(EntryBox('Lorem ipsum dolor sit amet', multiline=True,
                               cmaxsize=(70, 20)))
    # Bind a vertical scrollbar to the content
    sbv = win.add(content.bind(Scrollbar(Scrollbar.DIR_VERTICAL)),
                  slot=MarginContainer.POS_RIGHT)
    # The bottom contains a line of buttons stacked below a scrollbar.
    bottom = win.add(VerticalContainer(), slot=MarginContainer.POS_BOTTOM)
    # Add the horizontal scrollbar.
    sbh = bottom.add(content.bind(Scrollbar(Scrollbar.DIR_HORIZONTAL)))
    # The buttons are laid out horizontally.
    buttons = bottom.add(HorizontalContainer())
    # A bare Widget as "glue" to fill the space. An AlignContainer would
    # have been possible as well.
    buttons.add(Widget(), weight=1)
    # The first button
    buttons.add(Button('OK', sys.exit))
    # A little spacer between the buttons
    buttons.add(Widget(cminsize=(1, 1)))
    # The second button
    buttons.add(Button('Cancel', lambda: sys.exit(1)))
    # Another glue
    buttons.add(Widget(), weight=1)
    # Run it.
    root.main()


try:
    init()
    curses.wrapper(demo)
finally:
    if _LOG:
        _LOG.append('')
        sys.stderr.write('\n'.join(map(str, _LOG)))
        sys.stderr.flush()
| mit | Python |
|
3968c53c4577b2efe9ef3cd2de76b688a26517d9 | Add gpio example | designiot/code,designiot/code,designiot/code,designiot/code,phodal/iot-code,phodal/iot-code,phodal/iot-code,phodal/iot-code | chapter2/gpio.py | chapter2/gpio.py | import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(5, GPIO.OUT)
GPIO.output(5, GPIO.HIGH)
GPIO.output(5, GPIO.LOW)
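# Notes (not part of the original example): GPIO.setmode(GPIO.BOARD) selects
# physical pin numbering, so "5" is board pin 5. As written the pin goes high
# and then low with no delay; a time.sleep() between the two writes makes the
# change observable. Releasing the pins when done is good practice:
#   GPIO.cleanup()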
| mit | Python |
|
dc0bb07da52fd11a7980b9f36c38fcdb7f9c6ba5 | Add `edit.py` to be able to edit a view asynchronously | tbfisher/sublimetext-Pandoc | edit.py | edit.py | # edit.py
# buffer editing for both ST2 and ST3 that "just works"
import sublime
import sublime_plugin
from collections import defaultdict
try:
    sublime.edit_storage
except AttributeError:
    sublime.edit_storage = {}


class EditStep:
    def __init__(self, cmd, *args):
        self.cmd = cmd
        self.args = args

    def run(self, view, edit):
        if self.cmd == 'callback':
            return self.args[0](view, edit)
        funcs = {
            'insert': view.insert,
            'erase': view.erase,
            'replace': view.replace,
        }
        func = funcs.get(self.cmd)
        if func:
            func(edit, *self.args)


class Edit:
    defer = defaultdict(dict)

    def __init__(self, view):
        self.view = view
        self.steps = []

    def step(self, cmd, *args):
        step = EditStep(cmd, *args)
        self.steps.append(step)

    def insert(self, point, string):
        self.step('insert', point, string)

    def erase(self, region):
        self.step('erase', region)

    def replace(self, region, string):
        self.step('replace', region, string)

    def callback(self, func):
        self.step('callback', func)

    def run(self, view, edit):
        for step in self.steps:
            step.run(view, edit)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        view = self.view
        if sublime.version().startswith('2'):
            edit = view.begin_edit()
            # run() takes the view as well as the edit token.
            self.run(view, edit)
            view.end_edit(edit)
        else:
            key = str(hash(tuple(self.steps)))
            sublime.edit_storage[key] = self.run
            view.run_command('apply_edit', {'key': key})


class apply_edit(sublime_plugin.TextCommand):
    def run(self, edit, key):
        sublime.edit_storage.pop(key)(self.view, edit)
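# Example usage (a minimal sketch, not part of the original file; assumes it
# runs inside a sublime_plugin.TextCommand where `self.view` is the active
# sublime.View):
#
#   with Edit(self.view) as edit:
#       edit.insert(0, 'Hello\n')
#       edit.replace(sublime.Region(0, 5), 'Howdy')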
| mit | Python |
|
a795d94a9c885b97ab5bffc313524ae46626d556 | Add simple function-size analysis tool. | pypyjs/pypyjs,perkinslr/pypyjs,perkinslr/pypyjs,perkinslr/pypyjs,albertjan/pypyjs,pombredanne/pypyjs,albertjan/pypyjs,perkinslr/pypyjs,pombredanne/pypyjs,pypyjs/pypyjs,trinketapp/pypyjs,pypyjs/pypyjs,pypyjs/pypyjs,perkinslr/pypyjs,trinketapp/pypyjs,albertjan/pypyjs | tools/analyze_code_size.py | tools/analyze_code_size.py |
import os
import re
import sys
import optparse
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"
FUNCTION_CODE_RE = re.compile(
    r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)


def analyze_code_size(fileobj, opts):
    funcs = {}
    name_re = None
    if opts.grep is not None:
        name_re = re.compile(opts.grep, re.I)
    # Split out and analyze the code for each individual function.
    # XXX TODO: read incrementally to reduce memory usage.
    data = fileobj.read()
    pre_code, data = data.split(MARKER_START_FUNCS, 1)
    data, post_code = data.split(MARKER_END_FUNCS, 1)
    for match in FUNCTION_CODE_RE.finditer(data):
        name = match.group("name")
        defn = match.group("defn")
        if name_re and not name_re.search(name):
            continue
        funcs[name] = FunctionMetrics(name, defn)
    # Print summary metrics.
    total = 0
    funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
    for (size, name) in sorted(funcs_by_size, reverse=True):
        print size, name, human_readable(size)
        total += size
    print "Total size:", total, human_readable(total)


class FunctionMetrics(object):
    def __init__(self, name, defn):
        self.name = name
        self.defn = defn
        self.size = len(defn)


def human_readable(size):
    units = ((1024*1024, "M"), (1024, "k"))
    for (scale, unit) in units:
        scale = float(scale)
        if size / scale > 0.1:
            return "(%.2f%s)" % (size / scale, unit)
    return ""


def main(args=None):
    usage = "usage: %prog [options] file"
    descr = "Analyze code size and complexity for emscripten-compiled output"
    parser = optparse.OptionParser(usage=usage, description=descr)
    parser.add_option("-g", "--grep", metavar="REGEXP",
                      help="only analyze functions matching this regexp")
    opts, args = parser.parse_args(args)
    with open(args[0], "r") as infile:
        analyze_code_size(infile, opts)
    return 0


if __name__ == "__main__":
    try:
        exitcode = main()
    except KeyboardInterrupt:
        exitcode = 1
    sys.exit(exitcode)
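# Example invocation (illustrative, not part of the original tool;
# `module.js` is a hypothetical emscripten-generated file):
#   python analyze_code_size.py --grep "^_main" module.js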
| mit | Python |
|
2ce7bcdd6606cb1590febf6430a7635462b09d74 | fix #61: prefer configuration files under script dir | wangjun/xunlei-lixian,sndnvaps/xunlei-lixian,xieyanhao/xunlei-lixian,davies/xunlei-lixian,ccagg/xunlei,windygu/xunlei-lixian,wogong/xunlei-lixian,liujianpc/xunlei-lixian,sdgdsffdsfff/xunlei-lixian,iambus/xunlei-lixian,myself659/xunlei-lixian,GeassDB/xunlei-lixian | lixian_config.py | lixian_config.py |
import os
import os.path

def get_config_path(filename):
    if os.path.exists(filename):
        return filename
    import sys
    local_path = os.path.join(sys.path[0], filename)
    if os.path.exists(local_path):
        return local_path
    user_home = os.getenv('USERPROFILE') or os.getenv('HOME')
    lixian_home = os.getenv('LIXIAN_HOME') or user_home
    return os.path.join(lixian_home, filename)

LIXIAN_DEFAULT_CONFIG = get_config_path('.xunlei.lixian.config')
LIXIAN_DEFAULT_COOKIES = get_config_path('.xunlei.lixian.cookies')


def load_config(path):
    values = {}
    if os.path.exists(path):
        with open(path) as x:
            for line in x.readlines():
                line = line.strip()
                if line:
                    if line.startswith('--'):
                        line = line.lstrip('-')
                        if line.startswith('no-'):
                            values[line[3:]] = False
                        elif '=' in line:
                            k, v = line.split('=', 1)
                            values[k] = v
                        else:
                            values[line] = True
                    else:
                        raise NotImplementedError(line)
    return values


def dump_config(path, values):
    with open(path, 'w') as x:
        for k in values:
            v = values[k]
            if v is True:
                x.write('--%s\n'%k)
            elif v is False:
                x.write('--no-%s\n'%k)
            else:
                x.write('--%s=%s\n'%(k, v))


class Config:
    def __init__(self, path=LIXIAN_DEFAULT_CONFIG):
        self.path = path
        self.values = load_config(path)

    def put(self, k, v=True):
        self.values[k] = v
        dump_config(self.path, self.values)

    def get(self, k, v=None):
        return self.values.get(k, v)

    def delete(self, k):
        if k in self.values:
            del self.values[k]
            dump_config(self.path, self.values)

    def source(self):
        if os.path.exists(self.path):
            with open(self.path) as x:
                return x.read()

    def __str__(self):
        return '<Config{%s}>' % self.values


global_config = Config()


def put_config(k, v=True):
    if k.startswith('no-') and v is True:
        k = k[3:]
        v = False
    global_config.put(k, v)


def get_config(k, v=None):
    return global_config.get(k, v)


def delete_config(k):
    return global_config.delete(k)


def source_config():
    return global_config.source()
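# Example usage (a minimal sketch, not part of the original file):
#   put_config('limit', '5')    # persisted as --limit=5
#   get_config('limit')         # -> '5'
#   put_config('no-colors')     # persisted as --no-colors
#   delete_config('limit')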
|
import os
import os.path

def get_config_path(filename):
    if os.path.exists(filename):
        return filename
    user_home = os.getenv('USERPROFILE') or os.getenv('HOME')
    lixian_home = os.getenv('LIXIAN_HOME') or user_home
    return os.path.join(lixian_home, filename)

LIXIAN_DEFAULT_CONFIG = get_config_path('.xunlei.lixian.config')
LIXIAN_DEFAULT_COOKIES = get_config_path('.xunlei.lixian.cookies')


def load_config(path):
    values = {}
    if os.path.exists(path):
        with open(path) as x:
            for line in x.readlines():
                line = line.strip()
                if line:
                    if line.startswith('--'):
                        line = line.lstrip('-')
                        if line.startswith('no-'):
                            values[line[3:]] = False
                        elif '=' in line:
                            k, v = line.split('=', 1)
                            values[k] = v
                        else:
                            values[line] = True
                    else:
                        raise NotImplementedError(line)
    return values


def dump_config(path, values):
    with open(path, 'w') as x:
        for k in values:
            v = values[k]
            if v is True:
                x.write('--%s\n'%k)
            elif v is False:
                x.write('--no-%s\n'%k)
            else:
                x.write('--%s=%s\n'%(k, v))


class Config:
    def __init__(self, path=LIXIAN_DEFAULT_CONFIG):
        self.path = path
        self.values = load_config(path)

    def put(self, k, v=True):
        self.values[k] = v
        dump_config(self.path, self.values)

    def get(self, k, v=None):
        return self.values.get(k, v)

    def delete(self, k):
        if k in self.values:
            del self.values[k]
            dump_config(self.path, self.values)

    def source(self):
        if os.path.exists(self.path):
            with open(self.path) as x:
                return x.read()

    def __str__(self):
        return '<Config{%s}>' % self.values


global_config = Config()


def put_config(k, v=True):
    if k.startswith('no-') and v is True:
        k = k[3:]
        v = False
    global_config.put(k, v)


def get_config(k, v=None):
    return global_config.get(k, v)


def delete_config(k):
    return global_config.delete(k)


def source_config():
    return global_config.source()
| mit | Python |
2ba9fb77ddcf1a5cc8b923ab46e50c4b17c36447 | add readme update tool | buptlxb/hihoCoder,buptlxb/hihoCoder | hiho.py | hiho.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import codecs
import sys
import argparse

class Entry:
    URL = r'http://hihocoder.com/problemset/problem/'

    def __init__(self, name=None, number=0):
        self.name = name
        self.number = number

    def parse(self, line):
        m = re.match(r'\|.*?\|\[(?P<name>.*?)\].*?\|\[(?P<number>.*?)\].*?\|', line, re.U)
        assert m, u'Malformed line: {}'.format(line).encode('utf-8')
        self.name = m.group('name')
        self.number = m.group('number')

    def __str__(self):
        return u'[{name}]({url}{number})|[{number}](solutions/{number})'.format(name=self.name, url=Entry.URL, number=self.number).encode('utf-8')


class Table:
    def __init__(self):
        self.entries = {}

    def parse(self, lines):
        for line in lines:
            e = Entry()
            e.parse(line)
            self.entries[e.number] = e

    def add(self, entry):
        if self.entries.get(entry.number, None):
            return False
        self.entries[entry.number] = entry
        return True

    def __str__(self):
        order = 1
        ret = []
        for k in sorted(self.entries):
            ret.append('|{order}|{content}|'.format(order=order, content=str(self.entries[k])))
            order += 1
        return '\n'.join(ret)


class ReadMe:
    def __init__(self, path):
        self.path = path
        self.header = []
        self.table = Table()
        self.trailor = []
        self.parse()

    def parse(self):
        table_start = False
        table_end = True
        with codecs.open(self.path, mode='r', encoding='utf-8') as fin:
            for line in fin.readlines():
                if not table_end and line == '\n':
                    table_end = True
                    line = '\n\n'
                if not table_start:
                    self.header.append(line)
                elif not table_end:
                    e = Entry()
                    e.parse(line)
                    self.table.add(e)
                else:
                    self.trailor.append(line)
                if not table_start and line.startswith('|---|-------|----------|'):
                    table_start = True
                    table_end = False

    def write(self, path=None):
        if not path:
            path = self.path
        with open(path, mode='w') as fout:
            fout.write(str(self))

    def add_solution(self, title, number):
        return self.table.add(Entry(title, number))

    def __str__(self):
        ret = []
        ret.append(''.join(self.header).encode('utf-8'))
        ret.append(str(self.table))
        ret.extend(''.join(self.trailor).encode('utf-8'))
        return ''.join(ret)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""description:
    hiho facilitates updating the README
""",
        epilog="""examples:
""")
    parser.add_argument("-n", "--number", type=int, metavar="<number>", required=True, help="Specify the question number")
    parser.add_argument("-t", "--title", type=str, metavar="<title>", required=True, help="Specify the question title")
    args = parser.parse_args(sys.argv[1:])
    r = ReadMe(r'./README.md')
    if r.add_solution(args.title, args.number):
        print 'Add success'
        r.write()
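# Example invocation (illustrative, not part of the original tool; the
# number/title pair shown is hypothetical):
#   ./hiho.py -n 1001 -t "A + B"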
| apache-2.0 | Python |
|
dfe2bd52fd2e561a79c91d4ff34fbead8a26c1c3 | Create init.py | andrermartins/xgh | init.py | init.py | #!/usr/bin/env python
import sys
import os
import psycopg2

def dump_table(table_name, conn):
    query = "SELECT * FROM "+table_name+" LIMIT 1"
    cur = conn.cursor()
    cur.execute(query)
    rows = cur.fetchall()
    description = cur.description
    columns = "'INSERT INTO "+table_name+" VALUES ('"
    for desc in description:
        columns += "||CASE WHEN "+desc.name+" IS NULL THEN 'NULL' ELSE ''''||"+desc.name+"::VARCHAR||'''' END ||','"
    columns = columns[0:len(columns)-3]
    columns += "')'"
    print "SELECT "+columns+" FROM "+table_name


def update_flex_version(vl_flex_version, hostname, conn):
    if (hostname == "alpha"):
        hostname = "alpha-asset.valebroker.com.br"
    else:
        hostname = "alpha-asset-"+hostname+".valebroker.com.br"
    cur = conn.cursor()
    cur.execute("UPDATE tb_contract_host SET vl_flex_version = %s WHERE hostname = %s", (vl_flex_version, hostname))
    conn.commit()
    print "Host "+hostname+" updated to Flex version "+vl_flex_version


def show_error(conn):
    cur = conn.cursor()
    cur.execute("SELECT stack_trace, detail FROM tb_log_error WHERE id_investor = 5801 ORDER BY dt_error DESC LIMIT 1")
    rows = cur.fetchall()
    print rows[0][0]
    print rows[0][1]


def get_connection():
    postgres_database = os.environ['postgres_database']
    postgres_user = os.environ['postgres_user']
    postgres_password = os.environ['postgres_password']
    postgres_host = os.environ['postgres_host']
    postgres_port = os.environ['postgres_port']
    return psycopg2.connect(database=postgres_database, user=postgres_user, password=postgres_password, host=postgres_host, port=postgres_port)


# def set_enviroment_vars():
#     f = open('/tmp/envs.conf')
#     for line in f:


def init(args):
    conn = get_connection()
    # docker-compose up
    if (os.environ['action'] == "dump_table"):
        # docker-compose run dump_table tb_asset_operation
        dump_table(args[0], conn)
    if (os.environ['action'] == "update_flex_version"):
        # docker-compose run update_flex_version 4324 alpha/rf/support
        update_flex_version(args[0], args[1], conn)
    if (os.environ['action'] == "show_error"):
        # docker-compose run show_error
        show_error(conn)
    conn.close()


if __name__ == "__main__":
    init(sys.argv[1:])
| mit | Python |
|
bfaeeec3f5f5582822e2918491090815a606ba44 | Add test to make sure imports and __all__ matches | smarter-travel-media/warthog | test/test_api.py | test/test_api.py | # -*- coding: utf-8 -*-
import warthog.api

def test_public_exports():
    exports = set([item for item in dir(warthog.api) if not item.startswith('_')])
    declared = set(warthog.api.__all__)
    assert exports == declared, 'Exports and __all__ members should match'
| mit | Python |
|
48857638694ceca08c64d7b9c6825e2178c53279 | Add function decorator to improve functools.wraps | goodfeli/pylearn2,JesseLivezey/pylearn2,TNick/pylearn2,fulmicoton/pylearn2,pkainz/pylearn2,Refefer/pylearn2,woozzu/pylearn2,kastnerkyle/pylearn2,CIFASIS/pylearn2,mclaughlin6464/pylearn2,hyqneuron/pylearn2-maxsom,aalmah/pylearn2,bartvm/pylearn2,JesseLivezey/pylearn2,nouiz/pylearn2,lamblin/pylearn2,CIFASIS/pylearn2,junbochen/pylearn2,ddboline/pylearn2,junbochen/pylearn2,ddboline/pylearn2,alexjc/pylearn2,w1kke/pylearn2,abergeron/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,fyffyt/pylearn2,fishcorn/pylearn2,fyffyt/pylearn2,matrogers/pylearn2,matrogers/pylearn2,aalmah/pylearn2,lunyang/pylearn2,skearnes/pylearn2,mkraemer67/pylearn2,theoryno3/pylearn2,kose-y/pylearn2,se4u/pylearn2,aalmah/pylearn2,daemonmaker/pylearn2,jeremyfix/pylearn2,hyqneuron/pylearn2-maxsom,lancezlin/pylearn2,Refefer/pylearn2,ddboline/pylearn2,lancezlin/pylearn2,kose-y/pylearn2,JesseLivezey/plankton,hantek/pylearn2,goodfeli/pylearn2,woozzu/pylearn2,ashhher3/pylearn2,bartvm/pylearn2,shiquanwang/pylearn2,TNick/pylearn2,hantek/pylearn2,lancezlin/pylearn2,TNick/pylearn2,daemonmaker/pylearn2,pkainz/pylearn2,fyffyt/pylearn2,alexjc/pylearn2,lunyang/pylearn2,ddboline/pylearn2,hantek/pylearn2,woozzu/pylearn2,pombredanne/pylearn2,TNick/pylearn2,jeremyfix/pylearn2,hyqneuron/pylearn2-maxsom,fishcorn/pylearn2,lisa-lab/pylearn2,kastnerkyle/pylearn2,mkraemer67/pylearn2,abergeron/pylearn2,alexjc/pylearn2,pombredanne/pylearn2,hyqneuron/pylearn2-maxsom,daemonmaker/pylearn2,KennethPierce/pylearnk,cosmoharrigan/pylearn2,aalmah/pylearn2,shiquanwang/pylearn2,JesseLivezey/plankton,KennethPierce/pylearnk,jamessergeant/pylearn2,caidongyun/pylearn2,fishcorn/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,CIFASIS/pylearn2,chrish42/pylearn,lunyang/pylearn2,fulmicoton/pylearn2,bartvm/pylearn2,mclaughlin6464/pylearn2,lunyang/pylearn2,se4u/pylearn2,fyffyt/pylearn2,Refefer/pylearn2,matrogers/pylearn2,mclaughlin6464/pylearn2,se4u/pylearn2,nouiz/pylearn2,jamessergeant/pylearn2,JesseLivezey/pylearn2,chrish42/pylearn,KennethPierce/pylearnk,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,theoryno3/pylearn2,w1kke/pylearn2,goodfeli/pylearn2,JesseLivezey/pylearn2,Refefer/pylearn2,caidongyun/pylearn2,msingh172/pylearn2,kastnerkyle/pylearn2,skearnes/pylearn2,KennethPierce/pylearnk,abergeron/pylearn2,kastnerkyle/pylearn2,skearnes/pylearn2,jeremyfix/pylearn2,pombredanne/pylearn2,ashhher3/pylearn2,lisa-lab/pylearn2,lamblin/pylearn2,junbochen/pylearn2,ashhher3/pylearn2,lamblin/pylearn2,CIFASIS/pylearn2,lisa-lab/pylearn2,pkainz/pylearn2,pombredanne/pylearn2,fishcorn/pylearn2,se4u/pylearn2,pkainz/pylearn2,junbochen/pylearn2,cosmoharrigan/pylearn2,goodfeli/pylearn2,theoryno3/pylearn2,ashhher3/pylearn2,alexjc/pylearn2,jeremyfix/pylearn2,JesseLivezey/plankton,chrish42/pylearn,msingh172/pylearn2,mclaughlin6464/pylearn2,sandeepkbhat/pylearn2,shiquanwang/pylearn2,w1kke/pylearn2,caidongyun/pylearn2,skearnes/pylearn2,matrogers/pylearn2,fulmicoton/pylearn2,shiquanwang/pylearn2,chrish42/pylearn,lisa-lab/pylearn2,kose-y/pylearn2,hantek/pylearn2,kose-y/pylearn2,msingh172/pylearn2,sandeepkbhat/pylearn2,w1kke/pylearn2,msingh172/pylearn2,lancezlin/pylearn2,woozzu/pylearn2,daemonmaker/pylearn2,JesseLivezey/plankton,fulmicoton/pylearn2,caidongyun/pylearn2,bartvm/pylearn2,nouiz/pylearn2,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,nouiz/pylearn2,lamblin/pylearn2,theoryno3/pylearn2,abergeron/pylearn2 | pylearn2/utils/doc.py | pylearn2/utils/doc.py | """
Documentation-related helper classes/functions
"""

class soft_wraps:
    """
    A Python decorator which concatenates two functions' docstrings: one
    function is defined at initialization and the other one is defined when
    soft_wraps is called.

    This helps reduce the amount of documentation to write: one can use
    this decorator on child classes' functions when their implementation is
    similar to the one of the parent class. Conversely, if a function defined
    in a child class departs from its parent's implementation, one can simply
    explain the differences in a 'Notes' section without re-writing the whole
    docstring.

    Examples
    --------
    >>> class Parent(object):
    ...     def f(x):
    ...         '''
    ...         Adds 1 to x
    ...
    ...         Parameters
    ...         ----------
    ...         x : int
    ...             Variable to increment by 1
    ...
    ...         Returns
    ...         -------
    ...         rval : int
    ...             x incremented by 1
    ...         '''
    ...         rval = x + 1
    ...         return rval
    ...
    >>> class Child(Parent):
    ...     @soft_wraps(Parent.f)
    ...     def f(x):
    ...         '''
    ...         Notes
    ...         -----
    ...         Also prints the incremented value
    ...         '''
    ...         rval = x + 1
    ...         print rval
    ...         return rval
    ...
    >>> c = Child()
    >>> print c.f.__doc__
    Adds 1 to x

    Parameters
    ----------
    x : int
        Variable to increment by 1

    Returns
    -------
    rval : int
        x incremented by 1

    Notes
    -----
    Also prints the incremented value
    """

    def __init__(self, f, append=False):
        """
        Parameters
        ----------
        f : function
            Function whose docstring will be concatenated with the decorated
            function's docstring
        append : bool, optional
            If True, appends f's docstring to the decorated function's
            docstring instead of prepending it. Defaults to False.
        """
        self.f = f
        self.append = append

    def __call__(self, f):
        """
        Prepend self.f's docstring to f's docstring (or append it if
        `self.append == True`).

        Parameters
        ----------
        f : function
            Function to decorate

        Returns
        -------
        f : function
            Function f passed as argument with self.f's docstring
            {pre,ap}pended to it
        """
        if self.append:
            f.__doc__ += self.f.__doc__
        else:
            f.__doc__ = self.f.__doc__ + f.__doc__
        return f
| bsd-3-clause | Python |
|
dfca9c3d7dbbe97516a24bea89b917f7282c7dc7 | Add problem rotate image | guozengxin/myleetcode,guozengxin/myleetcode | python/rotateImage.py | python/rotateImage.py | # https://leetcode.com/problems/rotate-image/
class Solution(object):
    def rotate(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.
        """
        size = len(matrix)
        for i in xrange(0, size/2):
            for j in xrange(i, size-1-i):
                t = matrix[i][j]
                matrix[i][j] = matrix[size-j-1][i]
                matrix[size-j-1][i] = matrix[size-i-1][size-j-1]
                matrix[size-i-1][size-j-1] = matrix[j][size-i-1]
                matrix[j][size-i-1] = t


matrix = [
    [1, 2, 3, 4, 5],
    [6, 7, 8, 9, 10],
    [11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20],
    [21, 22, 23, 24, 25],
]
s = Solution()
s.rotate(matrix)
print matrix

matrix = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]
s.rotate(matrix)
print matrix
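# How the in-place rotation works (explanatory note, not part of the original
# solution): each inner-loop pass moves a cycle of four cells clockwise. For a
# 4x4 matrix with (i, j) = (0, 0): (3,0) -> (0,0), (3,3) -> (3,0),
# (0,3) -> (3,3), and the saved (0,0) value -> (0,3).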
| mit | Python |
|
cce6a4c2efe62c267b04f6ce75019d577428e2c9 | add sensu_check_dict module | twaldrop/ursula,panxia6679/ursula,narengan/ursula,blueboxgroup/ursula,channus/ursula,wupeiran/ursula,ryshah/ursula,fancyhe/ursula,rongzhus/ursula,persistent-ursula/ursula,panxia6679/ursula,panxia6679/ursula,blueboxgroup/ursula,channus/ursula,knandya/ursula,edtubillara/ursula,blueboxgroup/ursula,persistent-ursula/ursula,channus/ursula,narengan/ursula,pgraziano/ursula,pgraziano/ursula,twaldrop/ursula,nirajdp76/ursula,masteinhauser/ursula,wupeiran/ursula,rongzhus/ursula,nirajdp76/ursula,persistent-ursula/ursula,knandya/ursula,nirajdp76/ursula,persistent-ursula/ursula,ryshah/ursula,edtubillara/ursula,wupeiran/ursula,twaldrop/ursula,ddaskal/ursula,lihkin213/ursula,pgraziano/ursula,narengan/ursula,blueboxgroup/ursula,zrs233/ursula,ddaskal/ursula,edtubillara/ursula,ryshah/ursula,knandya/ursula,panxia6679/ursula,lihkin213/ursula,wupeiran/ursula,zrs233/ursula,fancyhe/ursula,ddaskal/ursula,ryshah/ursula,lihkin213/ursula,channus/ursula,masteinhauser/ursula,pgraziano/ursula,edtubillara/ursula,masteinhauser/ursula,twaldrop/ursula,masteinhauser/ursula,fancyhe/ursula,ddaskal/ursula,rongzhus/ursula,zrs233/ursula,rongzhus/ursula,nirajdp76/ursula,fancyhe/ursula,knandya/ursula,narengan/ursula,zrs233/ursula,lihkin213/ursula | library/sensu_check_dict.py | library/sensu_check_dict.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Blue Box Group, Inc.
# Copyright 2014, Craig Tracey <[email protected]>
# Copyright 2016, Paul Czarkowski <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import traceback
from hashlib import md5
from jinja2 import Environment

def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(default=None, required=True),
            check_dir=dict(default='/etc/sensu/conf.d/checks', required=False),
            state=dict(default='present', required=False, choices=['present', 'absent']),
            check=dict(type='dict', required=True)
        )
    )

    if module.params['state'] == 'present':
        try:
            changed = False
            check_path = '%s/%s.json' % (module.params['check_dir'], module.params['name'])
            check = dict({
                'checks': {
                    module.params['name']: module.params['check']
                }
            })
            if os.path.isfile(check_path):
                with open(check_path) as fh:
                    if json.load(fh) == check:
                        module.exit_json(changed=False, result="ok")
                    else:
                        with open(check_path, "w") as fh:
                            fh.write(json.dumps(check, indent=4))
                        module.exit_json(changed=True, result="changed")
            else:
                with open(check_path, "w") as fh:
                    fh.write(json.dumps(check, indent=4))
                module.exit_json(changed=True, result="created")
        except Exception as e:
            formatted_lines = traceback.format_exc()
            module.fail_json(msg="creating the check failed: %s %s" % (e, formatted_lines))
    else:
        try:
            changed = False
            check_path = '%s/%s.json' % (module.params['check_dir'], module.params['name'])
            if os.path.isfile(check_path):
                os.remove(check_path)
                module.exit_json(changed=True, result="changed")
            else:
                module.exit_json(changed=False, result="ok")
        except Exception as e:
            formatted_lines = traceback.format_exc()
            module.fail_json(msg="removing the check failed: %s %s" % (e, formatted_lines))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
main()
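# Example playbook task (a minimal sketch, not part of the original module;
# the fields under `check` are ordinary Sensu check attributes and are
# written verbatim into /etc/sensu/conf.d/checks/<name>.json):
# - sensu_check_dict:
#     name: check-disk
#     check:
#       command: check-disk-usage.rb
#       interval: 60
#       subscribers:
#         - all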
| mit | Python |
|
577b84cf124a35b49311e39ab4d40ef0f8af59ed | introduce proso.analysis module | adaptive-learning/proso-apps,adaptive-learning/proso-apps,adaptive-learning/proso-apps | proso/analysis.py | proso/analysis.py | import json
import hashlib
import os

def get_experiment_data(name, compute_fun, cache_dir, cached=True, **kwargs):
    kwargs_hash = hashlib.sha1(json.dumps(kwargs, sort_keys=True)).hexdigest()
    filename = '{}/{}.json'.format(cache_dir, name)
    if cached and os.path.exists(filename):
        with open(filename, 'r') as f:
            return _convert_json_keys(json.loads(f.read()))
    result = compute_fun(**kwargs)
    if cached:
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        with open(filename, 'w') as f:
            f.write(json.dumps(result, sort_keys=True))
    return result


def _convert_json_keys(json_struct):
    if isinstance(json_struct, list):
        return map(_convert_json_keys, json_struct)
    elif isinstance(json_struct, dict):
        return {_maybe_convert_str(key): val for (key, val) in json_struct.iteritems()}
    else:
        return json_struct


def _maybe_convert_str(x):
    if x.isdigit():
        try:
            return int(x)
        except ValueError:
            pass
    try:
        return float(x)
    except ValueError:
        return x
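# Example usage (a minimal sketch, not part of the original module;
# `compute` is a hypothetical experiment function):
#   def compute(n):
#       return {'n': n, 'values': range(n)}
#   data = get_experiment_data('toy-experiment', compute, 'cache', n=10)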
| mit | Python |
|
45cb940db74d99b0dac31a2aace3d8505e4a9046 | Add empty file to contain main part of module | jrsmith3/datac,jrsmith3/datac | datac/main.py | datac/main.py | # -*- coding: utf-8 -*-
import copy
| mit | Python |
|
323fb80744e63a322fe5ed70d86130aa61aa3c19 | Remove unused imports | yonglehou/scikit-learn,russel1237/scikit-learn,thientu/scikit-learn,rrohan/scikit-learn,procoder317/scikit-learn,yunfeilu/scikit-learn,Obus/scikit-learn,MatthieuBizien/scikit-learn,jakirkham/scikit-learn,q1ang/scikit-learn,shyamalschandra/scikit-learn,jpautom/scikit-learn,mattgiguere/scikit-learn,Adai0808/scikit-learn,depet/scikit-learn,fzalkow/scikit-learn,Akshay0724/scikit-learn,victorbergelin/scikit-learn,zihua/scikit-learn,florian-f/sklearn,PatrickOReilly/scikit-learn,sumspr/scikit-learn,ngoix/OCRF,0x0all/scikit-learn,jorge2703/scikit-learn,untom/scikit-learn,olologin/scikit-learn,thilbern/scikit-learn,spallavolu/scikit-learn,lucidfrontier45/scikit-learn,anurag313/scikit-learn,clemkoa/scikit-learn,abhishekkrthakur/scikit-learn,cdegroc/scikit-learn,ivannz/scikit-learn,AlexandreAbraham/scikit-learn,AIML/scikit-learn,jorik041/scikit-learn,potash/scikit-learn,davidgbe/scikit-learn,joernhees/scikit-learn,smartscheduling/scikit-learn-categorical-tree,roxyboy/scikit-learn,ningchi/scikit-learn,glennq/scikit-learn,hitszxp/scikit-learn,manhhomienbienthuy/scikit-learn,petosegan/scikit-learn,sergeyf/scikit-learn,B3AU/waveTree,466152112/scikit-learn,pratapvardhan/scikit-learn,aflaxman/scikit-learn,icdishb/scikit-learn,NelisVerhoef/scikit-learn,nikitasingh981/scikit-learn,cwu2011/scikit-learn,carrillo/scikit-learn,sarahgrogan/scikit-learn,JsNoNo/scikit-learn,ogrisel/scikit-learn,treycausey/scikit-learn,aabadie/scikit-learn,mikebenfield/scikit-learn,huobaowangxi/scikit-learn,jkarnows/scikit-learn,tdhopper/scikit-learn,henrykironde/scikit-learn,pnedunuri/scikit-learn,3manuek/scikit-learn,massmutual/scikit-learn,vshtanko/scikit-learn,pkruskal/scikit-learn,IndraVikas/scikit-learn,scikit-learn/scikit-learn,pv/scikit-learn,lucidfrontier45/scikit-learn,henridwyer/scikit-learn,mwv/scikit-learn,costypetrisor/scikit-learn,idlead/scikit-learn,ogrisel/scikit-learn,rohanp/scikit-learn,fzalkow/scikit-learn,Lawrence-Liu/scikit-learn,wlamond/scikit-learn,davidgbe/scikit-learn,OshynSong/scikit-learn,jorik041/scikit-learn,mjudsp/Tsallis,arjoly/scikit-learn,LohithBlaze/scikit-learn,ishanic/scikit-learn,hugobowne/scikit-learn,vibhorag/scikit-learn,hainm/scikit-learn,gclenaghan/scikit-learn,mlyundin/scikit-learn,lin-credible/scikit-learn,bikong2/scikit-learn,fzalkow/scikit-learn,roxyboy/scikit-learn,simon-pepin/scikit-learn,petosegan/scikit-learn,dingocuster/scikit-learn,rvraghav93/scikit-learn,andrewnc/scikit-learn,ominux/scikit-learn,yonglehou/scikit-learn,bnaul/scikit-learn,djgagne/scikit-learn,sanketloke/scikit-learn,waterponey/scikit-learn,Barmaley-exe/scikit-learn,rishikksh20/scikit-learn,nrhine1/scikit-learn,evgchz/scikit-learn,wanggang3333/scikit-learn,pianomania/scikit-learn,walterreade/scikit-learn,manashmndl/scikit-learn,rsivapr/scikit-learn,yyjiang/scikit-learn,jblackburne/scikit-learn,murali-munna/scikit-learn,aflaxman/scikit-learn,bhargav/scikit-learn,B3AU/waveTree,Sentient07/scikit-learn,pythonvietnam/scikit-learn,lbishal/scikit-learn,ClimbsRocks/scikit-learn,madjelan/scikit-learn,andrewnc/scikit-learn,fengzhyuan/scikit-learn,jkarnows/scikit-learn,jlegendary/scikit-learn,hdmetor/scikit-learn,TomDLT/scikit-learn,shusenl/scikit-learn,themrmax/scikit-learn,vermouthmjl/scikit-learn,YinongLong/scikit-learn,etkirsch/scikit-learn,dingocuster/scikit-learn,lazywei/scikit-learn,zorojean/scikit-learn,hitszxp/scikit-learn,yask123/scikit-learn,meduz/scikit-learn,NunoEdgarGub1/scikit-learn,nrhine1/scikit-learn,ngoix/OCRF,tdhopper/scikit-learn,sergeyf/sci
kit-learn,qifeigit/scikit-learn,zaxtax/scikit-learn,procoder317/scikit-learn,ilyes14/scikit-learn,RachitKansal/scikit-learn,Sentient07/scikit-learn,evgchz/scikit-learn,MechCoder/scikit-learn,mblondel/scikit-learn,mblondel/scikit-learn,djgagne/scikit-learn,yunfeilu/scikit-learn,HolgerPeters/scikit-learn,abhishekkrthakur/scikit-learn,jakobworldpeace/scikit-learn,ycaihua/scikit-learn,elkingtonmcb/scikit-learn,simon-pepin/scikit-learn,yask123/scikit-learn,ndingwall/scikit-learn,ishanic/scikit-learn,Garrett-R/scikit-learn,xuewei4d/scikit-learn,wanggang3333/scikit-learn,liangz0707/scikit-learn,shahankhatch/scikit-learn,PrashntS/scikit-learn,loli/sklearn-ensembletrees,icdishb/scikit-learn,iismd17/scikit-learn,Sentient07/scikit-learn,deepesch/scikit-learn,HolgerPeters/scikit-learn,fabioticconi/scikit-learn,ChanderG/scikit-learn,nomadcube/scikit-learn,macks22/scikit-learn,sergeyf/scikit-learn,jereze/scikit-learn,Myasuka/scikit-learn,nesterione/scikit-learn,liyu1990/sklearn,saiwing-yeung/scikit-learn,AlexanderFabisch/scikit-learn,NelisVerhoef/scikit-learn,maheshakya/scikit-learn,voxlol/scikit-learn,costypetrisor/scikit-learn,depet/scikit-learn,dhruv13J/scikit-learn,russel1237/scikit-learn,pv/scikit-learn,vybstat/scikit-learn,krez13/scikit-learn,trungnt13/scikit-learn,rohanp/scikit-learn,hainm/scikit-learn,treycausey/scikit-learn,NunoEdgarGub1/scikit-learn,kjung/scikit-learn,jjx02230808/project0223,potash/scikit-learn,hitszxp/scikit-learn,B3AU/waveTree,jzt5132/scikit-learn,stylianos-kampakis/scikit-learn,kmike/scikit-learn,appapantula/scikit-learn,glouppe/scikit-learn,henrykironde/scikit-learn,rexshihaoren/scikit-learn,vshtanko/scikit-learn,sonnyhu/scikit-learn,xzh86/scikit-learn,PatrickChrist/scikit-learn,Titan-C/scikit-learn,Barmaley-exe/scikit-learn,glennq/scikit-learn,loli/sklearn-ensembletrees,LohithBlaze/scikit-learn,victorbergelin/scikit-learn,cauchycui/scikit-learn,hsiaoyi0504/scikit-learn,0x0all/scikit-learn,schets/scikit-learn,mjudsp/Tsallis,ilo10/scikit-learn,alexsavio/scikit-learn,hsuantien/scikit-learn,ankurankan/scikit-learn,ChanChiChoi/scikit-learn,ishanic/scikit-learn,aminert/scikit-learn,xwolf12/scikit-learn,ogrisel/scikit-learn,fredhusser/scikit-learn,btabibian/scikit-learn,robbymeals/scikit-learn,HolgerPeters/scikit-learn,MartinSavc/scikit-learn,IndraVikas/scikit-learn,sinhrks/scikit-learn,IndraVikas/scikit-learn,mugizico/scikit-learn,3manuek/scikit-learn,nomadcube/scikit-learn,ldirer/scikit-learn,bigdataelephants/scikit-learn,xavierwu/scikit-learn,mwv/scikit-learn,lbishal/scikit-learn,abhishekgahlot/scikit-learn,andrewnc/scikit-learn,arahuja/scikit-learn,jm-begon/scikit-learn,huobaowangxi/scikit-learn,lbishal/scikit-learn,Nyker510/scikit-learn,cwu2011/scikit-learn,shusenl/scikit-learn,liyu1990/sklearn,altairpearl/scikit-learn,pompiduskus/scikit-learn,madjelan/scikit-learn,clemkoa/scikit-learn,vibhorag/scikit-learn,mfjb/scikit-learn,treycausey/scikit-learn,arabenjamin/scikit-learn,giorgiop/scikit-learn,russel1237/scikit-learn,meduz/scikit-learn,gotomypc/scikit-learn,DonBeo/scikit-learn,ngoix/OCRF,shikhardb/scikit-learn,rishikksh20/scikit-learn,jlegendary/scikit-learn,hlin117/scikit-learn,etkirsch/scikit-learn,terkkila/scikit-learn,f3r/scikit-learn,mugizico/scikit-learn,JPFrancoia/scikit-learn,zhenv5/scikit-learn,Vimos/scikit-learn,vortex-ape/scikit-learn,yyjiang/scikit-learn,robbymeals/scikit-learn,idlead/scikit-learn,mehdidc/scikit-learn,jereze/scikit-learn,ashhher3/scikit-learn,jaidevd/scikit-learn,alexeyum/scikit-learn,theoryno3/scikit-learn,wanggang3333/scikit-learn,JeanKossaifi
/scikit-learn,Achuth17/scikit-learn,rahuldhote/scikit-learn,hugobowne/scikit-learn,loli/sklearn-ensembletrees,pnedunuri/scikit-learn,tosolveit/scikit-learn,JPFrancoia/scikit-learn,hainm/scikit-learn,ogrisel/scikit-learn,ssaeger/scikit-learn,nelson-liu/scikit-learn,mlyundin/scikit-learn,jzt5132/scikit-learn,eickenberg/scikit-learn,JsNoNo/scikit-learn,appapantula/scikit-learn,kashif/scikit-learn,ahoyosid/scikit-learn,jm-begon/scikit-learn,HolgerPeters/scikit-learn,samzhang111/scikit-learn,lesteve/scikit-learn,cl4rke/scikit-learn,RayMick/scikit-learn,giorgiop/scikit-learn,schets/scikit-learn,equialgo/scikit-learn,elkingtonmcb/scikit-learn,clemkoa/scikit-learn,shenzebang/scikit-learn,kmike/scikit-learn,IshankGulati/scikit-learn,arjoly/scikit-learn,thilbern/scikit-learn,potash/scikit-learn,plissonf/scikit-learn,zorroblue/scikit-learn,nelson-liu/scikit-learn,DonBeo/scikit-learn,sonnyhu/scikit-learn,mojoboss/scikit-learn,kaichogami/scikit-learn,terkkila/scikit-learn,wzbozon/scikit-learn,robin-lai/scikit-learn,glemaitre/scikit-learn,samuel1208/scikit-learn,MartinSavc/scikit-learn,DSLituiev/scikit-learn,robin-lai/scikit-learn,mjudsp/Tsallis,heli522/scikit-learn,arjoly/scikit-learn,eg-zhang/scikit-learn,PatrickOReilly/scikit-learn,PatrickChrist/scikit-learn,fabianp/scikit-learn,krez13/scikit-learn,akionakamura/scikit-learn,DonBeo/scikit-learn,ssaeger/scikit-learn,CVML/scikit-learn,MartinDelzant/scikit-learn,frank-tancf/scikit-learn,kmike/scikit-learn,Nyker510/scikit-learn,bthirion/scikit-learn,cauchycui/scikit-learn,zihua/scikit-learn,beepee14/scikit-learn,rsivapr/scikit-learn,lazywei/scikit-learn,kaichogami/scikit-learn,Titan-C/scikit-learn,chrsrds/scikit-learn,xiaoxiamii/scikit-learn,kjung/scikit-learn,shusenl/scikit-learn,ElDeveloper/scikit-learn,anirudhjayaraman/scikit-learn,RPGOne/scikit-learn,mhue/scikit-learn,liberatorqjw/scikit-learn,phdowling/scikit-learn,carrillo/scikit-learn,themrmax/scikit-learn,luo66/scikit-learn,jblackburne/scikit-learn,jakirkham/scikit-learn,betatim/scikit-learn,NelisVerhoef/scikit-learn,lenovor/scikit-learn,themrmax/scikit-learn,procoder317/scikit-learn,pythonvietnam/scikit-learn,joshloyal/scikit-learn,ningchi/scikit-learn,hugobowne/scikit-learn,untom/scikit-learn,shahankhatch/scikit-learn,depet/scikit-learn,ChanChiChoi/scikit-learn,IshankGulati/scikit-learn,amueller/scikit-learn,rahul-c1/scikit-learn,xwolf12/scikit-learn,elkingtonmcb/scikit-learn,etkirsch/scikit-learn,mjgrav2001/scikit-learn,billy-inn/scikit-learn,anurag313/scikit-learn,ankurankan/scikit-learn,ndingwall/scikit-learn,fredhusser/scikit-learn,xubenben/scikit-learn,IssamLaradji/scikit-learn,evgchz/scikit-learn,mayblue9/scikit-learn,LiaoPan/scikit-learn,mrshu/scikit-learn,michigraber/scikit-learn,CVML/scikit-learn,mfjb/scikit-learn,xyguo/scikit-learn,robin-lai/scikit-learn,nmayorov/scikit-learn,sanketloke/scikit-learn,bnaul/scikit-learn,huzq/scikit-learn,thientu/scikit-learn,espg/scikit-learn,ZenDevelopmentSystems/scikit-learn,aminert/scikit-learn,dingocuster/scikit-learn,rahul-c1/scikit-learn,MatthieuBizien/scikit-learn,JosmanPS/scikit-learn,tosolveit/scikit-learn,xiaoxiamii/scikit-learn,vigilv/scikit-learn,aewhatley/scikit-learn,sonnyhu/scikit-learn,gclenaghan/scikit-learn,Garrett-R/scikit-learn,macks22/scikit-learn,AIML/scikit-learn,zuku1985/scikit-learn,AnasGhrab/scikit-learn,Myasuka/scikit-learn,toastedcornflakes/scikit-learn,zorojean/scikit-learn,shangwuhencc/scikit-learn,olologin/scikit-learn,jpautom/scikit-learn,jlegendary/scikit-learn,anirudhjayaraman/scikit-learn,wzbozon/scikit-learn,toastedcorn
flakes/scikit-learn,eg-zhang/scikit-learn,andrewnc/scikit-learn,davidgbe/scikit-learn,MechCoder/scikit-learn,RachitKansal/scikit-learn,cybernet14/scikit-learn,ahoyosid/scikit-learn,fengzhyuan/scikit-learn,tmhm/scikit-learn,potash/scikit-learn,aetilley/scikit-learn,ltiao/scikit-learn,hsuantien/scikit-learn,sonnyhu/scikit-learn,marcocaccin/scikit-learn,f3r/scikit-learn,mrshu/scikit-learn,eg-zhang/scikit-learn,jmetzen/scikit-learn,adamgreenhall/scikit-learn,adamgreenhall/scikit-learn,gotomypc/scikit-learn,mjudsp/Tsallis,arabenjamin/scikit-learn,loli/semisupervisedforests,kagayakidan/scikit-learn,andaag/scikit-learn,ahoyosid/scikit-learn,frank-tancf/scikit-learn,equialgo/scikit-learn,gotomypc/scikit-learn,xiaoxiamii/scikit-learn,quheng/scikit-learn,mhdella/scikit-learn,lucidfrontier45/scikit-learn,nrhine1/scikit-learn,ndingwall/scikit-learn,huzq/scikit-learn,AnasGhrab/scikit-learn,spallavolu/scikit-learn,macks22/scikit-learn,petosegan/scikit-learn,nmayorov/scikit-learn,akionakamura/scikit-learn,tdhopper/scikit-learn,vshtanko/scikit-learn,cainiaocome/scikit-learn,gotomypc/scikit-learn,r-mart/scikit-learn,glemaitre/scikit-learn,manhhomienbienthuy/scikit-learn,ngoix/OCRF,djgagne/scikit-learn,plissonf/scikit-learn,mhue/scikit-learn,Sentient07/scikit-learn,vibhorag/scikit-learn,rishikksh20/scikit-learn,sumspr/scikit-learn,moutai/scikit-learn,belltailjp/scikit-learn,billy-inn/scikit-learn,0asa/scikit-learn,hsuantien/scikit-learn,chrsrds/scikit-learn,466152112/scikit-learn,idlead/scikit-learn,fbagirov/scikit-learn,imaculate/scikit-learn,rahuldhote/scikit-learn,jmschrei/scikit-learn,henridwyer/scikit-learn,rohanp/scikit-learn,Djabbz/scikit-learn,3manuek/scikit-learn,mattgiguere/scikit-learn,ElDeveloper/scikit-learn,terkkila/scikit-learn,altairpearl/scikit-learn,untom/scikit-learn,aabadie/scikit-learn,liangz0707/scikit-learn,qifeigit/scikit-learn,thilbern/scikit-learn,ycaihua/scikit-learn,lazywei/scikit-learn,NunoEdgarGub1/scikit-learn,ChanChiChoi/scikit-learn,trankmichael/scikit-learn,anntzer/scikit-learn,quheng/scikit-learn,heli522/scikit-learn,alvarofierroclavero/scikit-learn,ilo10/scikit-learn,hlin117/scikit-learn,zuku1985/scikit-learn,PrashntS/scikit-learn,anntzer/scikit-learn,poryfly/scikit-learn,mikebenfield/scikit-learn,rahul-c1/scikit-learn,michigraber/scikit-learn,murali-munna/scikit-learn,nesterione/scikit-learn,ilyes14/scikit-learn,carrillo/scikit-learn,vortex-ape/scikit-learn,poryfly/scikit-learn,manhhomienbienthuy/scikit-learn,lucidfrontier45/scikit-learn,evgchz/scikit-learn,jmetzen/scikit-learn,tomlof/scikit-learn,massmutual/scikit-learn,siutanwong/scikit-learn,pythonvietnam/scikit-learn,dsullivan7/scikit-learn,ltiao/scikit-learn,wlamond/scikit-learn,jmschrei/scikit-learn,LiaoPan/scikit-learn,ClimbsRocks/scikit-learn,mlyundin/scikit-learn,Lawrence-Liu/scikit-learn,abhishekgahlot/scikit-learn,kjung/scikit-learn,larsmans/scikit-learn,AlexanderFabisch/scikit-learn,dhruv13J/scikit-learn,jzt5132/scikit-learn,frank-tancf/scikit-learn,UNR-AERIAL/scikit-learn,pnedunuri/scikit-learn,rrohan/scikit-learn,YinongLong/scikit-learn,herilalaina/scikit-learn,PatrickChrist/scikit-learn,Fireblend/scikit-learn,chrisburr/scikit-learn,siutanwong/scikit-learn,adamgreenhall/scikit-learn,belltailjp/scikit-learn,costypetrisor/scikit-learn,voxlol/scikit-learn,MatthieuBizien/scikit-learn,cdegroc/scikit-learn,maheshakya/scikit-learn,wlamond/scikit-learn,abimannans/scikit-learn,xyguo/scikit-learn,yyjiang/scikit-learn,poryfly/scikit-learn,hrjn/scikit-learn,ilo10/scikit-learn,xzh86/scikit-learn,JosmanPS/scikit-learn,Rom
ainBrault/scikit-learn,lenovor/scikit-learn,shangwuhencc/scikit-learn,fyffyt/scikit-learn,waterponey/scikit-learn,vinayak-mehta/scikit-learn,devanshdalal/scikit-learn,hugobowne/scikit-learn,jmschrei/scikit-learn,mhue/scikit-learn,maheshakya/scikit-learn,mugizico/scikit-learn,MechCoder/scikit-learn,Obus/scikit-learn,AlexRobson/scikit-learn,manashmndl/scikit-learn,ashhher3/scikit-learn,siutanwong/scikit-learn,arahuja/scikit-learn,IssamLaradji/scikit-learn,huobaowangxi/scikit-learn,zihua/scikit-learn,toastedcornflakes/scikit-learn,kashif/scikit-learn,belltailjp/scikit-learn,ilo10/scikit-learn,glemaitre/scikit-learn,Windy-Ground/scikit-learn,altairpearl/scikit-learn,ankurankan/scikit-learn,dhruv13J/scikit-learn,bikong2/scikit-learn,bigdataelephants/scikit-learn,ephes/scikit-learn,Adai0808/scikit-learn,yanlend/scikit-learn,Akshay0724/scikit-learn,cdegroc/scikit-learn,zaxtax/scikit-learn,chrisburr/scikit-learn,B3AU/waveTree,MechCoder/scikit-learn,herilalaina/scikit-learn,kevin-intel/scikit-learn,shenzebang/scikit-learn,cwu2011/scikit-learn,nvoron23/scikit-learn,h2educ/scikit-learn,fbagirov/scikit-learn,quheng/scikit-learn,michigraber/scikit-learn,BiaDarkia/scikit-learn,rsivapr/scikit-learn,elkingtonmcb/scikit-learn,PrashntS/scikit-learn,MohammedWasim/scikit-learn,shenzebang/scikit-learn,xyguo/scikit-learn,bthirion/scikit-learn,Myasuka/scikit-learn,lesteve/scikit-learn,tmhm/scikit-learn,nhejazi/scikit-learn,rahuldhote/scikit-learn,IshankGulati/scikit-learn,liangz0707/scikit-learn,Obus/scikit-learn,jorge2703/scikit-learn,trungnt13/scikit-learn,vigilv/scikit-learn,aewhatley/scikit-learn,shusenl/scikit-learn,Jimmy-Morzaria/scikit-learn,anirudhjayaraman/scikit-learn,smartscheduling/scikit-learn-categorical-tree,aetilley/scikit-learn,jseabold/scikit-learn,Garrett-R/scikit-learn,JosmanPS/scikit-learn,wanggang3333/scikit-learn,imaculate/scikit-learn,trankmichael/scikit-learn,devanshdalal/scikit-learn,Clyde-fare/scikit-learn,xwolf12/scikit-learn,AIML/scikit-learn,alvarofierroclavero/scikit-learn,trankmichael/scikit-learn,JPFrancoia/scikit-learn,kaichogami/scikit-learn,btabibian/scikit-learn,vibhorag/scikit-learn,Nyker510/scikit-learn,hitszxp/scikit-learn,jblackburne/scikit-learn,nomadcube/scikit-learn,meduz/scikit-learn,mjgrav2001/scikit-learn,yanlend/scikit-learn,samuel1208/scikit-learn,florian-f/sklearn,ningchi/scikit-learn,fabianp/scikit-learn,harshaneelhg/scikit-learn,ky822/scikit-learn,cainiaocome/scikit-learn,ClimbsRocks/scikit-learn,victorbergelin/scikit-learn,zuku1985/scikit-learn,spallavolu/scikit-learn,maheshakya/scikit-learn,Achuth17/scikit-learn,ElDeveloper/scikit-learn,liangz0707/scikit-learn,amueller/scikit-learn,r-mart/scikit-learn,arabenjamin/scikit-learn,mojoboss/scikit-learn,Garrett-R/scikit-learn,shahankhatch/scikit-learn,ltiao/scikit-learn,ivannz/scikit-learn,lin-credible/scikit-learn,justincassidy/scikit-learn,abhishekgahlot/scikit-learn,wazeerzulfikar/scikit-learn,0asa/scikit-learn,liyu1990/sklearn,ycaihua/scikit-learn,hdmetor/scikit-learn,mwv/scikit-learn,LiaoPan/scikit-learn,Akshay0724/scikit-learn,fyffyt/scikit-learn,glemaitre/scikit-learn,AIML/scikit-learn,yanlend/scikit-learn,ChanChiChoi/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jm-begon/scikit-learn,LiaoPan/scikit-learn,Clyde-fare/scikit-learn,ilyes14/scikit-learn,ZenDevelopmentSystems/scikit-learn,alexeyum/scikit-learn,henridwyer/scikit-learn,loli/semisupervisedforests,phdowling/scikit-learn,manashmndl/scikit-learn,vigilv/scikit-learn,RayMick/scikit-learn,victorbergelin/scikit-learn,Srisai85/scikit-learn,jseab
old/scikit-learn,spallavolu/scikit-learn,RachitKansal/scikit-learn,Titan-C/scikit-learn,kmike/scikit-learn,nmayorov/scikit-learn,eickenberg/scikit-learn,mblondel/scikit-learn,alvarofierroclavero/scikit-learn,anurag313/scikit-learn,rexshihaoren/scikit-learn,mxjl620/scikit-learn,fbagirov/scikit-learn,Fireblend/scikit-learn,xuewei4d/scikit-learn,cwu2011/scikit-learn,arahuja/scikit-learn,pianomania/scikit-learn,nvoron23/scikit-learn,yunfeilu/scikit-learn,JeanKossaifi/scikit-learn,B3AU/waveTree,3manuek/scikit-learn,joernhees/scikit-learn,nelson-liu/scikit-learn,massmutual/scikit-learn,stylianos-kampakis/scikit-learn,scikit-learn/scikit-learn,h2educ/scikit-learn,vermouthmjl/scikit-learn,zuku1985/scikit-learn,liberatorqjw/scikit-learn,poryfly/scikit-learn,OshynSong/scikit-learn,Fireblend/scikit-learn,Windy-Ground/scikit-learn,Aasmi/scikit-learn,rajat1994/scikit-learn,sanketloke/scikit-learn,tawsifkhan/scikit-learn,depet/scikit-learn,voxlol/scikit-learn,fyffyt/scikit-learn,AlexandreAbraham/scikit-learn,TomDLT/scikit-learn,fabioticconi/scikit-learn,yyjiang/scikit-learn,costypetrisor/scikit-learn,trungnt13/scikit-learn,kevin-intel/scikit-learn,clemkoa/scikit-learn,luo66/scikit-learn,Barmaley-exe/scikit-learn,loli/sklearn-ensembletrees,xubenben/scikit-learn,robin-lai/scikit-learn,harshaneelhg/scikit-learn,xyguo/scikit-learn,terkkila/scikit-learn,0asa/scikit-learn,alvarofierroclavero/scikit-learn,chrisburr/scikit-learn,jkarnows/scikit-learn,ishanic/scikit-learn,pianomania/scikit-learn,bikong2/scikit-learn,mhdella/scikit-learn,pianomania/scikit-learn,jayflo/scikit-learn,MartinDelzant/scikit-learn,beepee14/scikit-learn,Barmaley-exe/scikit-learn,phdowling/scikit-learn,ZenDevelopmentSystems/scikit-learn,olologin/scikit-learn,ycaihua/scikit-learn,tomlof/scikit-learn,DSLituiev/scikit-learn,IndraVikas/scikit-learn,arahuja/scikit-learn,yunfeilu/scikit-learn,nvoron23/scikit-learn,ningchi/scikit-learn,lenovor/scikit-learn,scikit-learn/scikit-learn,olologin/scikit-learn,jakobworldpeace/scikit-learn,joshloyal/scikit-learn,vermouthmjl/scikit-learn,bigdataelephants/scikit-learn,DSLituiev/scikit-learn,betatim/scikit-learn,tawsifkhan/scikit-learn,toastedcornflakes/scikit-learn,theoryno3/scikit-learn,nesterione/scikit-learn,mattilyra/scikit-learn,madjelan/scikit-learn,deepesch/scikit-learn,LohithBlaze/scikit-learn,YinongLong/scikit-learn,rvraghav93/scikit-learn,iismd17/scikit-learn,nmayorov/scikit-learn,xubenben/scikit-learn,wzbozon/scikit-learn,f3r/scikit-learn,AlexRobson/scikit-learn,chrsrds/scikit-learn,pv/scikit-learn,wazeerzulfikar/scikit-learn,Aasmi/scikit-learn,CVML/scikit-learn,ilyes14/scikit-learn,chrisburr/scikit-learn,mxjl620/scikit-learn,samuel1208/scikit-learn,deepesch/scikit-learn,kylerbrown/scikit-learn,zhenv5/scikit-learn,pratapvardhan/scikit-learn,sanketloke/scikit-learn,giorgiop/scikit-learn,kevin-intel/scikit-learn,DSLituiev/scikit-learn,nrhine1/scikit-learn,etkirsch/scikit-learn,plissonf/scikit-learn,ephes/scikit-learn,bhargav/scikit-learn,sinhrks/scikit-learn,kjung/scikit-learn,liyu1990/sklearn,aetilley/scikit-learn,thientu/scikit-learn,belltailjp/scikit-learn,fbagirov/scikit-learn,lesteve/scikit-learn,bigdataelephants/scikit-learn,fengzhyuan/scikit-learn,chrsrds/scikit-learn,mfjb/scikit-learn,hlin117/scikit-learn,mattilyra/scikit-learn,appapantula/scikit-learn,RachitKansal/scikit-learn,huzq/scikit-learn,zhenv5/scikit-learn,sgenoud/scikit-learn,zaxtax/scikit-learn,nhejazi/scikit-learn,AlexRobson/scikit-learn,0asa/scikit-learn,beepee14/scikit-learn,mhdella/scikit-learn,justincassidy/scikit-learn,hli
n117/scikit-learn,larsmans/scikit-learn,zorroblue/scikit-learn,meduz/scikit-learn,LohithBlaze/scikit-learn,plissonf/scikit-learn,jjx02230808/project0223,murali-munna/scikit-learn,fyffyt/scikit-learn,florian-f/sklearn,vybstat/scikit-learn,NelisVerhoef/scikit-learn,YinongLong/scikit-learn,depet/scikit-learn,equialgo/scikit-learn,mxjl620/scikit-learn,vybstat/scikit-learn,mattilyra/scikit-learn,equialgo/scikit-learn,jaidevd/scikit-learn,JsNoNo/scikit-learn,stylianos-kampakis/scikit-learn,jlegendary/scikit-learn,theoryno3/scikit-learn,theoryno3/scikit-learn,h2educ/scikit-learn,anurag313/scikit-learn,nomadcube/scikit-learn,larsmans/scikit-learn,simon-pepin/scikit-learn,devanshdalal/scikit-learn,AlexandreAbraham/scikit-learn,kylerbrown/scikit-learn,andaag/scikit-learn,gclenaghan/scikit-learn,marcocaccin/scikit-learn,glennq/scikit-learn,cl4rke/scikit-learn,Jimmy-Morzaria/scikit-learn,MatthieuBizien/scikit-learn,treycausey/scikit-learn,pkruskal/scikit-learn,madjelan/scikit-learn,schets/scikit-learn,hsuantien/scikit-learn,waterponey/scikit-learn,cybernet14/scikit-learn,ahoyosid/scikit-learn,giorgiop/scikit-learn,pypot/scikit-learn,vinayak-mehta/scikit-learn,shikhardb/scikit-learn,florian-f/sklearn,pompiduskus/scikit-learn,mugizico/scikit-learn,sergeyf/scikit-learn,r-mart/scikit-learn,kagayakidan/scikit-learn,hainm/scikit-learn,jayflo/scikit-learn,yonglehou/scikit-learn,carrillo/scikit-learn,shyamalschandra/scikit-learn,alexsavio/scikit-learn,frank-tancf/scikit-learn,yonglehou/scikit-learn,OshynSong/scikit-learn,aabadie/scikit-learn,abhishekkrthakur/scikit-learn,dhruv13J/scikit-learn,shahankhatch/scikit-learn,vortex-ape/scikit-learn,wzbozon/scikit-learn,mehdidc/scikit-learn,fabianp/scikit-learn,cauchycui/scikit-learn,anirudhjayaraman/scikit-learn,lenovor/scikit-learn,samzhang111/scikit-learn,jpautom/scikit-learn,trungnt13/scikit-learn,imaculate/scikit-learn,betatim/scikit-learn,scikit-learn/scikit-learn,stylianos-kampakis/scikit-learn,larsmans/scikit-learn,tmhm/scikit-learn,0x0all/scikit-learn,q1ang/scikit-learn,AlexanderFabisch/scikit-learn,jakirkham/scikit-learn,phdowling/scikit-learn,espg/scikit-learn,mattgiguere/scikit-learn,dsullivan7/scikit-learn,vivekmishra1991/scikit-learn,lazywei/scikit-learn,Vimos/scikit-learn,jjx02230808/project0223,voxlol/scikit-learn,h2educ/scikit-learn,lin-credible/scikit-learn,nvoron23/scikit-learn,vigilv/scikit-learn,466152112/scikit-learn,nikitasingh981/scikit-learn,AlexanderFabisch/scikit-learn,UNR-AERIAL/scikit-learn,tmhm/scikit-learn,dsquareindia/scikit-learn,Achuth17/scikit-learn,jakirkham/scikit-learn,BiaDarkia/scikit-learn,ClimbsRocks/scikit-learn,ominux/scikit-learn,rahuldhote/scikit-learn,moutai/scikit-learn,mhdella/scikit-learn,Djabbz/scikit-learn,lin-credible/scikit-learn,RPGOne/scikit-learn,eg-zhang/scikit-learn,ivannz/scikit-learn,mattilyra/scikit-learn,wlamond/scikit-learn,ldirer/scikit-learn,billy-inn/scikit-learn,AnasGhrab/scikit-learn,marcocaccin/scikit-learn,RomainBrault/scikit-learn,Nyker510/scikit-learn,evgchz/scikit-learn,OshynSong/scikit-learn,luo66/scikit-learn,MartinDelzant/scikit-learn,jblackburne/scikit-learn,tawsifkhan/scikit-learn,mwv/scikit-learn,loli/sklearn-ensembletrees,mehdidc/scikit-learn,untom/scikit-learn,lesteve/scikit-learn,huobaowangxi/scikit-learn,mayblue9/scikit-learn,sarahgrogan/scikit-learn,PatrickChrist/scikit-learn,xavierwu/scikit-learn,ltiao/scikit-learn,jseabold/scikit-learn,ephes/scikit-learn,vermouthmjl/scikit-learn,mblondel/scikit-learn,quheng/scikit-learn,samuel1208/scikit-learn,sinhrks/scikit-learn,BiaDarkia/scikit-le
arn,ky822/scikit-learn,akionakamura/scikit-learn,zhenv5/scikit-learn,abimannans/scikit-learn,gclenaghan/scikit-learn,fredhusser/scikit-learn,cl4rke/scikit-learn,pnedunuri/scikit-learn,JPFrancoia/scikit-learn,huzq/scikit-learn,vshtanko/scikit-learn,abimannans/scikit-learn,khkaminska/scikit-learn,ChanderG/scikit-learn,TomDLT/scikit-learn,billy-inn/scikit-learn,saiwing-yeung/scikit-learn,ldirer/scikit-learn,raghavrv/scikit-learn,ZenDevelopmentSystems/scikit-learn,deepesch/scikit-learn,altairpearl/scikit-learn,kylerbrown/scikit-learn,joshloyal/scikit-learn,Srisai85/scikit-learn,zihua/scikit-learn,alexsavio/scikit-learn,manhhomienbienthuy/scikit-learn,CforED/Machine-Learning,fredhusser/scikit-learn,rrohan/scikit-learn,dingocuster/scikit-learn,shikhardb/scikit-learn,luo66/scikit-learn,nesterione/scikit-learn,RPGOne/scikit-learn,thilbern/scikit-learn,0x0all/scikit-learn,mrshu/scikit-learn,siutanwong/scikit-learn,raghavrv/scikit-learn,xzh86/scikit-learn,Vimos/scikit-learn,pv/scikit-learn,procoder317/scikit-learn,JeanKossaifi/scikit-learn,UNR-AERIAL/scikit-learn,Lawrence-Liu/scikit-learn,zorroblue/scikit-learn,MartinSavc/scikit-learn,RayMick/scikit-learn,r-mart/scikit-learn,loli/semisupervisedforests,pompiduskus/scikit-learn,shangwuhencc/scikit-learn,manashmndl/scikit-learn,hdmetor/scikit-learn,eickenberg/scikit-learn,mrshu/scikit-learn,Srisai85/scikit-learn,IshankGulati/scikit-learn,rahul-c1/scikit-learn,larsmans/scikit-learn,appapantula/scikit-learn,nikitasingh981/scikit-learn,0asa/scikit-learn,nhejazi/scikit-learn,liberatorqjw/scikit-learn,moutai/scikit-learn,henridwyer/scikit-learn,macks22/scikit-learn,khkaminska/scikit-learn,xavierwu/scikit-learn,aminert/scikit-learn,justincassidy/scikit-learn,CforED/Machine-Learning,nikitasingh981/scikit-learn,mojoboss/scikit-learn,jereze/scikit-learn,nelson-liu/scikit-learn,mayblue9/scikit-learn,bthirion/scikit-learn,jorik041/scikit-learn,xiaoxiamii/scikit-learn,andaag/scikit-learn,mhue/scikit-learn,jaidevd/scikit-learn,kashif/scikit-learn,Aasmi/scikit-learn,rajat1994/scikit-learn,sinhrks/scikit-learn,mayblue9/scikit-learn,raghavrv/scikit-learn,rohanp/scikit-learn,espg/scikit-learn,alexeyum/scikit-learn,roxyboy/scikit-learn,arjoly/scikit-learn,pkruskal/scikit-learn,ZENGXH/scikit-learn,mikebenfield/scikit-learn,rsivapr/scikit-learn,arabenjamin/scikit-learn,kagayakidan/scikit-learn,massmutual/scikit-learn,ky822/scikit-learn,Vimos/scikit-learn,eickenberg/scikit-learn,tomlof/scikit-learn,rajat1994/scikit-learn,cainiaocome/scikit-learn,xavierwu/scikit-learn,vivekmishra1991/scikit-learn,ndingwall/scikit-learn,IssamLaradji/scikit-learn,saiwing-yeung/scikit-learn,pompiduskus/scikit-learn,kmike/scikit-learn,pypot/scikit-learn,jpautom/scikit-learn,alexeyum/scikit-learn,wazeerzulfikar/scikit-learn,yask123/scikit-learn,robbymeals/scikit-learn,aflaxman/scikit-learn,marcocaccin/scikit-learn,ankurankan/scikit-learn,loli/semisupervisedforests,pypot/scikit-learn,iismd17/scikit-learn,roxyboy/scikit-learn,ldirer/scikit-learn,aetilley/scikit-learn,akionakamura/scikit-learn,hdmetor/scikit-learn,0x0all/scikit-learn,wazeerzulfikar/scikit-learn,bthirion/scikit-learn,kagayakidan/scikit-learn,anntzer/scikit-learn,aminert/scikit-learn,ElDeveloper/scikit-learn,MartinSavc/scikit-learn,shangwuhencc/scikit-learn,mjgrav2001/scikit-learn,Garrett-R/scikit-learn,abimannans/scikit-learn,Adai0808/scikit-learn,pratapvardhan/scikit-learn,idlead/scikit-learn,djgagne/scikit-learn,jereze/scikit-learn,rvraghav93/scikit-learn,vinayak-mehta/scikit-learn,ngoix/OCRF,sarahgrogan/scikit-learn,icdishb/scikit
-learn,AlexandreAbraham/scikit-learn,lucidfrontier45/scikit-learn,rishikksh20/scikit-learn,rvraghav93/scikit-learn,mjgrav2001/scikit-learn,samzhang111/scikit-learn,BiaDarkia/scikit-learn,jmetzen/scikit-learn,bikong2/scikit-learn,davidgbe/scikit-learn,qifeigit/scikit-learn,krez13/scikit-learn,harshaneelhg/scikit-learn,jaidevd/scikit-learn,waterponey/scikit-learn,ngoix/OCRF,cainiaocome/scikit-learn,florian-f/sklearn,NunoEdgarGub1/scikit-learn,yanlend/scikit-learn,espg/scikit-learn,jorik041/scikit-learn,bnaul/scikit-learn,rexshihaoren/scikit-learn,walterreade/scikit-learn,thientu/scikit-learn,Jimmy-Morzaria/scikit-learn,jmschrei/scikit-learn,walterreade/scikit-learn,rajat1994/scikit-learn,henrykironde/scikit-learn,khkaminska/scikit-learn,q1ang/scikit-learn,Akshay0724/scikit-learn,petosegan/scikit-learn,themrmax/scikit-learn,glouppe/scikit-learn,mehdidc/scikit-learn,bhargav/scikit-learn,jorge2703/scikit-learn,Djabbz/scikit-learn,saiwing-yeung/scikit-learn,hrjn/scikit-learn,jseabold/scikit-learn,vybstat/scikit-learn,q1ang/scikit-learn,Fireblend/scikit-learn,ZENGXH/scikit-learn,cauchycui/scikit-learn,abhishekkrthakur/scikit-learn,JosmanPS/scikit-learn,ssaeger/scikit-learn,ChanderG/scikit-learn,abhishekgahlot/scikit-learn,f3r/scikit-learn,moutai/scikit-learn,ZENGXH/scikit-learn,AnasGhrab/scikit-learn,mrshu/scikit-learn,lbishal/scikit-learn,ChanderG/scikit-learn,kaichogami/scikit-learn,heli522/scikit-learn,xuewei4d/scikit-learn,joernhees/scikit-learn,khkaminska/scikit-learn,CforED/Machine-Learning,jkarnows/scikit-learn,MohammedWasim/scikit-learn,qifeigit/scikit-learn,jayflo/scikit-learn,mattgiguere/scikit-learn,466152112/scikit-learn,abhishekgahlot/scikit-learn,fabioticconi/scikit-learn,IssamLaradji/scikit-learn,murali-munna/scikit-learn,JeanKossaifi/scikit-learn,ashhher3/scikit-learn,jayflo/scikit-learn,AlexRobson/scikit-learn,aewhatley/scikit-learn,amueller/scikit-learn,sgenoud/scikit-learn,zaxtax/scikit-learn,rsivapr/scikit-learn,ashhher3/scikit-learn,fzalkow/scikit-learn,hsiaoyi0504/scikit-learn,Obus/scikit-learn,sarahgrogan/scikit-learn,sgenoud/scikit-learn,robbymeals/scikit-learn,shyamalschandra/scikit-learn,bhargav/scikit-learn,CforED/Machine-Learning,Adai0808/scikit-learn,sumspr/scikit-learn,cdegroc/scikit-learn,Titan-C/scikit-learn,trankmichael/scikit-learn,dsquareindia/scikit-learn,ankurankan/scikit-learn,UNR-AERIAL/scikit-learn,joshloyal/scikit-learn,MohammedWasim/scikit-learn,sgenoud/scikit-learn,xzh86/scikit-learn,fabianp/scikit-learn,PrashntS/scikit-learn,mattilyra/scikit-learn,jakobworldpeace/scikit-learn,mjudsp/Tsallis,dsquareindia/scikit-learn,vinayak-mehta/scikit-learn,tawsifkhan/scikit-learn,vortex-ape/scikit-learn,treycausey/scikit-learn,mxjl620/scikit-learn,schets/scikit-learn,RomainBrault/scikit-learn,dsullivan7/scikit-learn,Clyde-fare/scikit-learn,Myasuka/scikit-learn,hsiaoyi0504/scikit-learn,RomainBrault/scikit-learn,eickenberg/scikit-learn,adamgreenhall/scikit-learn,hrjn/scikit-learn,hitszxp/scikit-learn,pypot/scikit-learn,fengzhyuan/scikit-learn,aabadie/scikit-learn,jm-begon/scikit-learn,sgenoud/scikit-learn,joernhees/scikit-learn,glouppe/scikit-learn,ivannz/scikit-learn,krez13/scikit-learn,PatrickOReilly/scikit-learn,jjx02230808/project0223,andaag/scikit-learn,henrykironde/scikit-learn,mlyundin/scikit-learn,shikhardb/scikit-learn,kashif/scikit-learn,shenzebang/scikit-learn,vivekmishra1991/scikit-learn,liberatorqjw/scikit-learn,tdhopper/scikit-learn,mfjb/scikit-learn,jakobworldpeace/scikit-learn,ominux/scikit-learn,hsiaoyi0504/scikit-learn,betatim/scikit-learn,kylerbrown/s
cikit-learn,herilalaina/scikit-learn,beepee14/scikit-learn,raghavrv/scikit-learn,Lawrence-Liu/scikit-learn,dsullivan7/scikit-learn,icdishb/scikit-learn,nhejazi/scikit-learn,rrohan/scikit-learn,zorojean/scikit-learn,jorge2703/scikit-learn,michigraber/scikit-learn,russel1237/scikit-learn,anntzer/scikit-learn,hrjn/scikit-learn,pkruskal/scikit-learn,Windy-Ground/scikit-learn,ssaeger/scikit-learn,Djabbz/scikit-learn,aewhatley/scikit-learn,cybernet14/scikit-learn,fabioticconi/scikit-learn,pratapvardhan/scikit-learn,Aasmi/scikit-learn,jzt5132/scikit-learn,mikebenfield/scikit-learn,Windy-Ground/scikit-learn,zorojean/scikit-learn,Clyde-fare/scikit-learn,btabibian/scikit-learn,imaculate/scikit-learn,PatrickOReilly/scikit-learn,sumspr/scikit-learn,MartinDelzant/scikit-learn,herilalaina/scikit-learn,samzhang111/scikit-learn,Jimmy-Morzaria/scikit-learn,heli522/scikit-learn,tomlof/scikit-learn,ominux/scikit-learn,maheshakya/scikit-learn,justincassidy/scikit-learn,harshaneelhg/scikit-learn,ZENGXH/scikit-learn,cybernet14/scikit-learn,Achuth17/scikit-learn,alexsavio/scikit-learn,glouppe/scikit-learn,cl4rke/scikit-learn,devanshdalal/scikit-learn,simon-pepin/scikit-learn,pythonvietnam/scikit-learn,jmetzen/scikit-learn,tosolveit/scikit-learn,RPGOne/scikit-learn,JsNoNo/scikit-learn,amueller/scikit-learn,xuewei4d/scikit-learn,vivekmishra1991/scikit-learn,DonBeo/scikit-learn,ky822/scikit-learn,Srisai85/scikit-learn,xubenben/scikit-learn,iismd17/scikit-learn,ephes/scikit-learn,shyamalschandra/scikit-learn,walterreade/scikit-learn,ycaihua/scikit-learn,tosolveit/scikit-learn,zorroblue/scikit-learn,bnaul/scikit-learn,TomDLT/scikit-learn,xwolf12/scikit-learn,CVML/scikit-learn,dsquareindia/scikit-learn,MohammedWasim/scikit-learn,aflaxman/scikit-learn,kevin-intel/scikit-learn,yask123/scikit-learn,RayMick/scikit-learn,rexshihaoren/scikit-learn,mojoboss/scikit-learn,btabibian/scikit-learn,smartscheduling/scikit-learn-categorical-tree,glennq/scikit-learn | examples/manifold/plot_swissroll.py | examples/manifold/plot_swissroll.py | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD, (C) INRIA 2011
print __doc__
import pylab as pl
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from scikits.learn import manifold, datasets
X, color = datasets.samples_generator.swiss_roll(1500)
print "Computing LLE embedding"
X_r, err = manifold.locally_linear_embedding(X, 12, 2)
print "Done. Reconstruction error: %g" % err
#----------------------------------------------------------------------
# Plot result
fig = pl.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color)
pl.xticks([]), pl.yticks([])
pl.show()
| """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD, (C) INRIA 2011
print __doc__
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from scikits.learn import manifold, datasets
X, color = datasets.samples_generator.swiss_roll(1500)
print "Computing LLE embedding"
X_r, err = manifold.locally_linear_embedding(X, 12, 2)
print "Done. Reconstruction error: %g" % err
#----------------------------------------------------------------------
# Plot result
fig = pl.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color)
pl.xticks([]), pl.yticks([])
pl.show()
| bsd-3-clause | Python |
e8c75e84a158876e71a926bec244af43ad93cbc4 | add imu class | codingfoo/overo_python_examples,codingfoo/overo_python_examples | imu.py | imu.py | import serial
import math
import struct
class IMU:
"""Class for working with a Microstrain IMU"""
def __init__(self):
self.IMU_PORT = '/dev/ttyS0'
self.IMU_BAUD = 115200
self.CMD_ACCEL_ANG_ORIENT = '\xC8'
self.CMD_ACCEL_ANG_ORIENT_SIZE = 67
self.IMU_COMMAND = self.CMD_ACCEL_ANG_ORIENT
self.IMU_MESSAGE_SIZE = self.CMD_ACCEL_ANG_ORIENT_SIZE
def open_imu(self):
self.imu = serial.Serial(self.IMU_PORT, self.IMU_BAUD)
def close_imu(self):
self.imu.close()
def read_imu(self):
self.imu.write(self.IMU_COMMAND)
#TODO check IMU write
data = []
data = self.imu.read(self.IMU_MESSAGE_SIZE)
#TODO check read status, check first char, checksum
#conversion to numbers
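# sensor values are big-endian ('>f') 4-byte floats in the 0xC8 reply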
accel_x = struct.unpack('>f', data[1:5])[0]
accel_y = struct.unpack('>f', data[5:9])[0]
accel_z = struct.unpack('>f', data[9:13])[0]
ang_rate_x = struct.unpack('>f', data[13:17])[0]
ang_rate_y = struct.unpack('>f', data[17:21])[0]
ang_rate_z = struct.unpack('>f', data[21:25])[0]
#orientation matrix
m_1 = struct.unpack('>f', data[33:37])[0]
m_2 = struct.unpack('>f', data[45:49])[0]
m_3 = struct.unpack('>f', data[57:61])[0]
#handle clock rollover outside of function
t = 0
t = struct.unpack('>I', data[61:65])[0]
time = 0.0
time = t / 62500.0 # convert time to seconds
return accel_x, accel_y, accel_z, m_1, m_2, m_3, ang_rate_x, ang_rate_y, ang_rate_z, time, data
def main():
imu = IMU()
imu.open_imu()
accel_x, accel_y, accel_z, m_1, m_2, m_3, ang_rate_x, ang_rate_y, ang_rate_z, time, data = imu.read_imu()
print accel_x
print accel_y
print accel_z
print ang_rate_x
print ang_rate_y
print ang_rate_z
print time
imu.close_imu()
if __name__ == "__main__":
main()
| mit | Python |
|
fb37af691d63ab8a43d50701d6b1f8ae027e2e1b | Create dfirwizard.py | dlcowen/dfirwizard | dfirwizard.py | dfirwizard.py | #!/usr/bin/python
# Sample program or step 1 in becoming a DFIR Wizard!
# No license as this code is simple and free!
import sys
import pytsk3
imagefile = "Stage2.vhd"
imagehandle = pytsk3.Img_Info(imagefile)
partitionTable = pytsk3.Volume_Info(imagehandle)
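# partition.start is in 512-byte sectors; the value in parentheses below is the byte offset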
for partition in partitionTable:
print partition.addr, partition.desc, "%ss(%s)" % (partition.start, partition.start * 512), partition.len
| apache-2.0 | Python |
|
ac83a8bbef2c61021c39c77ef3c14675383edc62 | Fix a typo. | pidah/st2contrib,pearsontechnology/st2contrib,meirwah/st2contrib,digideskio/st2contrib,StackStorm/st2contrib,meirwah/st2contrib,psychopenguin/st2contrib,psychopenguin/st2contrib,tonybaloney/st2contrib,StackStorm/st2contrib,armab/st2contrib,armab/st2contrib,digideskio/st2contrib,tonybaloney/st2contrib,lmEshoo/st2contrib,pidah/st2contrib,pearsontechnology/st2contrib,pearsontechnology/st2contrib,tonybaloney/st2contrib,pidah/st2contrib,pearsontechnology/st2contrib,armab/st2contrib,lmEshoo/st2contrib,StackStorm/st2contrib | packs/st2/actions/lib/action.py | packs/st2/actions/lib/action.py | from st2actions.runners.pythonrunner import Action
from st2client.client import Client
from st2client.models.keyvalue import KeyValuePair # pylint: disable=no-name-in-module
from lib.utils import filter_none_values
__all__ = [
'St2BaseAction'
]
class St2BaseAction(Action):
def __init__(self, config):
super(St2BaseAction, self).__init__(config)
self._client = Client
self._kvp = KeyValuePair
self.client = self._get_client()
def _get_client(self):
host = self.config['base_url']
try:
return self._client(base_url=host)
except Exception as e:
return e
def _run_client_method(self, method, method_kwargs, format_func):
"""
Run the provided client method and format the result.
:param method: Client method to run.
:type method: ``func``
:param method_kwargs: Keyword arguments passed to the client method.
:type method_kwargs: ``dict``
:param format_func: Function for formatting the result.
:type format_func: ``func``
:rtype: ``list`` of ``dict``
"""
# Filter out parameters with string value of "None"
# This is a work around since the default values can only be strings
method_kwargs = filter_none_values(method_kwargs)
method_name = method.__name__
self.logger.debug('Calling client method "%s" with kwargs "%s"' % (method_name,
method_kwargs))
result = method(**method_kwargs)
result = format_func(result)
return result
| from st2actions.runners.pythonrunner import Action
from st2client.client import Client
from st2client.models.datastore import KeyValuePair # pylint: disable=no-name-in-module
from lib.utils import filter_none_values
__all__ = [
'St2BaseAction'
]
class St2BaseAction(Action):
def __init__(self, config):
super(St2BaseAction, self).__init__(config)
self._client = Client
self._kvp = KeyValuePair
self.client = self._get_client()
def _get_client(self):
host = self.config['base_url']
try:
return self._client(base_url=host)
except Exception as e:
return e
def _run_client_method(self, method, method_kwargs, format_func):
"""
Run the provided client method and format the result.
:param method: Client method to run.
:type method: ``func``
:param method_kwargs: Keyword arguments passed to the client method.
:type method_kwargs: ``dict``
:param format_func: Function for formatting the result.
:type format_func: ``func``
:rtype: ``list`` of ``dict``
"""
# Filter out parameters with string value of "None"
# This is a workaround since the default values can only be strings
method_kwargs = filter_none_values(method_kwargs)
method_name = method.__name__
self.logger.debug('Calling client method "%s" with kwargs "%s"' % (method_name,
method_kwargs))
result = method(**method_kwargs)
result = format_func(result)
return result
| apache-2.0 | Python |
7ff614950163b1fb6a8fe0fef5b8de9bfa3a9d85 | Add a test for the hard-coded re() partial frac form | ergs/transmutagen,ergs/transmutagen | transmutagen/tests/test_partialfrac.py | transmutagen/tests/test_partialfrac.py | from sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
theta, alpha = symbols('theta, alpha')
# Check that the hard-coded re() partial fraction form doesn't change
re_form = together(expand_complex(re(alpha/(t - theta))))
assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
| bsd-3-clause | Python |
|
352e2d053b8880e1e1a951be4338c188fee925d1 | order book testing first iteration | PierreRochard/coinbase-exchange-order-book | orderbooktest.py | orderbooktest.py | import time
try:
import ujson as json
except ImportError:
import json
from orderbook.book import Book
def dict_compare(new_dictionary, old_dictionary, price_map=False, order_map=False):
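# returns (added keys, removed keys, modified entries, unchanged keys) between the two dicts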
d1_keys = set(new_dictionary.keys())
d2_keys = set(old_dictionary.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = []
# for key in intersect_keys:
# if price_map:
# try:
# print(len(new_dictionary[key]))
# print(len(old_dictionary[key]))
# assert len(new_dictionary[key]) == len(old_dictionary[key])
# assert len(new_dictionary[key]) == old_dictionary[key]
# assert new_dictionary[key].length == old_dictionary[key].length
# assert new_dictionary[key].volume == old_dictionary[key].volume
#
# assert new_dictionary[key].head_order.order_id == old_dictionary[key].head_order.order_id
# assert new_dictionary[key].head_order.size == old_dictionary[key].head_order.size
# assert new_dictionary[key].head_order.price == old_dictionary[key].head_order.price
#
# assert new_dictionary[key].tail_order.order_id == old_dictionary[key].tail_order.order_id
# assert new_dictionary[key].tail_order.size == old_dictionary[key].tail_order.size
# assert new_dictionary[key].tail_order.price == old_dictionary[key].tail_order.price
# except AssertionError:
# pass
# raise Exception()
# modified += (new_dictionary[key], old_dictionary[key])
modified = {o: (new_dictionary[o], old_dictionary[o]) for o in intersect_keys if new_dictionary[o] != old_dictionary[o]}
same = set(o for o in intersect_keys if new_dictionary[o] == old_dictionary[o])
return added, removed, modified, same
def test_orderbook():
variable_order_book = Book()
control_order_book = Book()
with open('testdata/messages.json') as messages_json_file:
messages = json.load(messages_json_file)
with open('testdata/beginning_level_3.json') as begin_json_file:
beginning_level_3 = json.load(begin_json_file)
with open('testdata/ending_level_3.json') as end_json_file:
ending_level_3 = json.load(end_json_file)
try:
assert beginning_level_3['sequence'] + 1 == messages[0]['sequence']
assert ending_level_3['sequence'] == messages[-1]['sequence']
except AssertionError:
print("Problem with sample data sequences")
variable_order_book.get_level3(beginning_level_3)
start = time.time()
[variable_order_book.process_message(message) for message in messages]
end = time.time()
print('messages per sec: {0}'.format(int(len(messages)/(end-start))))
control_order_book.get_level3(ending_level_3)
# assert variable_order_book.asks.price_map == control_order_book.asks.price_map
added, removed, modified, same = dict_compare(variable_order_book.asks.price_map, control_order_book.asks.price_map,
price_map=True)
if added:
print('superfluous entries: {0}'.format(added))
if removed:
print('missing entries: {0}'.format(removed))
# if modified:
# print('modified entries: {0}'.format(modified))
#
if __name__ == '__main__':
test_orderbook()
| bsd-2-clause | Python |
|
7522ffb9f6934de02d5d326d5f798d42a2da800d | add script to find old experimental apis | vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam | pdfium/find_old_experimental.py | pdfium/find_old_experimental.py | #!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""Finds my old + experimental APIs."""
import subprocess
import time
def main() -> None:
"""Commandline interface to this module."""
apis_bytes = subprocess.check_output(["git", "grep", "-n", "Experimental API", "public/"])
apis = apis_bytes.decode("utf-8").strip().split("\n")
author_date_loc = []
for api in apis:
tokens = api.split(":")
path = tokens[0]
line_num = tokens[1]
blame_bytes = subprocess.check_output(["git", "blame", "--porcelain", "-L", line_num + "," + line_num, path])
blame_lines = blame_bytes.decode("utf-8").strip().split("\n")
date = 0
author = ""
for line in blame_lines:
if line.startswith("author-time"):
tokens = line.split(" ")
date = int(tokens[1])
elif line.startswith("author "):
tokens = line.split(" ")
author = tokens[1]
author_date_loc.append((author, date, path + ":" + line_num))
author_date_loc = sorted(author_date_loc, key=lambda x: x[1])
today = time.time()
for author, date, loc in author_date_loc:
if author != "Miklos":
continue
# 31536000 seconds = one year; keep only APIs older than three years
if date >= today - 3 * 31536000:
continue
parsed_date = time.localtime(date)
date_string = time.strftime("%Y-%m-%d", parsed_date)
print("date: '"+date_string+"', loc: "+loc+"")
if __name__ == "__main__":
main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| mit | Python |
|
05dd8bdfeab63b3096e8f7d98032088133d1f0e5 | Add function provider to get osm data | meomancer/field-campaigner,meomancer/field-campaigner,meomancer/field-campaigner | campaign_manager/provider.py | campaign_manager/provider.py | import json
import hashlib
import os
from reporter import config
from reporter.utilities import (
split_bbox,
)
from reporter.osm import (
load_osm_document
)
from urllib.parse import quote
from reporter.queries import TAG_MAPPING, OVERPASS_QUERY_MAP
def get_osm_data(bbox, feature):
"""Get osm data.
:param bbox: String describing a bbox e.g. '106.78674459457397,
-6.141301491467023,106.80691480636597,-6.133834354201348'
:param feature: The type of feature to extract:
buildings, building-points, roads, potential-idp, boundary-[1,11]
:type feature: str
:returns: A dict from retrieved OSM dataset.
:rtype: dict
"""
server_url = 'http://overpass-api.de/api/interpreter?data='
tag_name = feature
overpass_verbosity = 'body'
try:
coordinates = split_bbox(bbox)
except ValueError:
# invalid bbox string: fall back to the default area defined in config
coordinates = split_bbox(config.BBOX)
feature_type = TAG_MAPPING[tag_name]
parameters = coordinates
parameters['print_mode'] = overpass_verbosity
query = OVERPASS_QUERY_MAP[feature_type].format(**parameters)
# prefix '[out:json];' so the Overpass query returns JSON
query = '[out:json];' + query
encoded_query = quote(query)
url_path = '%s%s' % (server_url, encoded_query)
safe_name = hashlib.md5(query.encode('utf-8')).hexdigest() + '.osm'
file_path = os.path.join(config.CACHE_DIR, safe_name)
osm_document = load_osm_document(file_path, url_path)
osm_data = json.loads(osm_document.read())
return osm_data
| bsd-3-clause | Python |
|
9305f158b71f65923ee37de2805324db362e0db6 | Add DRF LocalDateTimeField | PSU-OIT-ARC/django-arcutils,wylee/django-arcutils,wylee/django-arcutils,PSU-OIT-ARC/django-arcutils | arcutils/drf/serializers.py | arcutils/drf/serializers.py | from django.utils import timezone
from rest_framework import serializers
class LocalDateTimeField(serializers.DateTimeField):
"""Converts datetime to local time before serialization."""
def to_representation(self, value):
value = timezone.localtime(value)
return super().to_representation(value)
| mit | Python |
|
3f9aae149dba5c9b68ff6f7fd83cadf3fd6b1d7d | Add automorphic number implementation (#7978) | TheAlgorithms/Python | maths/automorphic_number.py | maths/automorphic_number.py | """
== Automorphic Numbers ==
A number n is said to be a Automorphic number if
the square of n "ends" in the same digits as n itself.
Examples of Automorphic Numbers: 0, 1, 5, 6, 25, 76, 376, 625, 9376, 90625, ...
https://en.wikipedia.org/wiki/Automorphic_number
"""
# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
# Time Complexity : O(log10n)
def is_automorphic_number(number: int) -> bool:
"""
# doctest: +NORMALIZE_WHITESPACE
This function takes an integer number as input.
returns True if the number is automorphic.
>>> is_automorphic_number(-1)
False
>>> is_automorphic_number(0)
True
>>> is_automorphic_number(5)
True
>>> is_automorphic_number(6)
True
>>> is_automorphic_number(7)
False
>>> is_automorphic_number(25)
True
>>> is_automorphic_number(259918212890625)
True
>>> is_automorphic_number(259918212890636)
False
>>> is_automorphic_number(740081787109376)
True
>>> is_automorphic_number(5.0)
Traceback (most recent call last):
...
TypeError: Input value of [number=5.0] must be an integer
"""
if not isinstance(number, int):
raise TypeError(f"Input value of [number={number}] must be an integer")
if number < 0:
return False
number_square = number * number
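# strip matching trailing digits from n and n*n; any mismatch means not automorphic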
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | Python |
|
34391723f44c81ceab77fd3200ee34c9f1b2d4b2 | add plugin factory | PalNilsson/pilot2,mlassnig/pilot2,mlassnig/pilot2,PalNilsson/pilot2 | pilot/common/pluginfactory.py | pilot/common/pluginfactory.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, [email protected], 2018
import logging
logger = logging.getLogger(__name__)
"""
A factory to manage plugins
"""
class PluginFactory(object):
def __init__(self, *args, **kwargs):
self.classMap = {}
def get_plugin(self, confs):
"""
Load plugin class
:param confs: a dict of configurations.
"""
class_name = confs.get('class')
if class_name is None:
logger.error("[class] is not defined in confs: %s" % confs)
return None
if class_name not in self.classMap:
logger.info("Trying to import %s" % class_name)
components = class_name.split('.')
mod = __import__('.'.join(components[:-1]))
for comp in components[1:]:
mod = getattr(mod, comp)
self.classMap[class_name] = mod
args = {}
for key in confs:
if key in ['class']:
continue
args[key] = confs[key]
cls = self.classMap[class_name]
logger.info("Importing %s with args: %s" % (cls, args))
impl = cls(**args)
return impl
| apache-2.0 | Python |
|
9346ca997d723cbfedf383eb78db2f62552f8a7c | Fix empty image list test. | Freestila/dosage,mbrandis/dosage,Freestila/dosage,blade2005/dosage,peterjanes/dosage,wummel/dosage,webcomics/dosage,wummel/dosage,mbrandis/dosage,peterjanes/dosage,webcomics/dosage,blade2005/dosage | tests/test_comics.py | tests/test_comics.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012 Bastian Kleineidam
import tempfile
import shutil
from itertools import islice
from unittest import TestCase
from dosagelib import scraper
class _ComicTester(TestCase):
"""Basic comic test class."""
scraperclass=None
def setUp(self):
self.name = self.scraperclass.get_name()
def test_comic(self):
# Test a scraper. It must be able to traverse backward for
# at least 5 pages from the start, and find strip images
# on at least 4 pages.
scraperobj = self.scraperclass()
num = empty = 0
for strip in islice(scraperobj.getAllStrips(), 0, 5):
images = 0
for image in strip.getImages():
images += 1
self.save(image)
if not images:
empty += 1
num += 1
self.check(num >= 4, 'traversal failed after %d strips.' % num)
self.check(empty <= 1, 'failed to find images on %d pages.' % empty)
def save(self, image):
# create a temporary directory
tmpdir = tempfile.mkdtemp()
try:
image.save(tmpdir)
except Exception, msg:
self.check(False, 'could not save to %s: %s' % (tmpdir, msg))
finally:
shutil.rmtree(tmpdir)
def check(self, condition, msg):
self.assertTrue(condition, "%s: %s" % (self.name, msg))
def generate_comic_testers():
"""For each comic scraper, create a test class."""
# Limit number of scraper tests for now
max_scrapers = 10
for scraperclass in islice(scraper.get_scrapers(), 0, max_scrapers):
name = 'Test'+scraperclass.__name__
globals()[name] = type(name,
(_ComicTester,),
dict(scraperclass=scraperclass)
)
generate_comic_testers()
| # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012 Bastian Kleineidam
import tempfile
import shutil
from itertools import islice
from unittest import TestCase
from dosagelib import scraper
class _ComicTester(TestCase):
"""Basic comic test class."""
scraperclass=None
def setUp(self):
self.name = self.scraperclass.get_name()
def test_comic(self):
# Test a scraper. It must be able to traverse backward for
# at least 5 pages from the start, and find strip images
# on at least 4 pages.
scraperobj = self.scraperclass()
num = empty = 0
for strip in islice(scraperobj.getAllStrips(), 0, 5):
images = strip.getImages()
if len(images) == 0:
empty += 1
for image in images:
self.save(image)
num += 1
self.check(num >= 4, 'traversal failed after %d strips.' % num)
self.check(empty <= 1, 'failed to find images on %d pages.' % empty)
def save(self, image):
# create a temporary directory
tmpdir = tempfile.mkdtemp()
try:
image.save(tmpdir)
except Exception, msg:
self.check(False, 'could not save to %s: %s' % (tmpdir, msg))
finally:
shutil.rmtree(tmpdir)
def check(self, condition, msg):
self.assertTrue(condition, "%s: %s" % (self.name, msg))
def generate_comic_testers():
"""For each comic scraper, create a test class."""
# Limit number of scraper tests for now
max_scrapers = 10
for scraperclass in islice(scraper.get_scrapers(), 0, max_scrapers):
name = 'Test'+scraperclass.__name__
globals()[name] = type(name,
(_ComicTester,),
dict(scraperclass=scraperclass)
)
generate_comic_testers()
| mit | Python |
acd33bdffb3302d2130505873a062fae39dcd976 | Add WikiText103 and WikiText2 Mocked Unit Tests (#1592) | pytorch/text,pytorch/text,pytorch/text,pytorch/text | test/datasets/test_wikitexts.py | test/datasets/test_wikitexts.py | import os
import random
import string
import zipfile
from collections import defaultdict
from unittest.mock import patch
from ..common.parameterized_utils import nested_params
from torchtext.datasets.wikitext103 import WikiText103
from torchtext.datasets.wikitext2 import WikiText2
from ..common.case_utils import TempDirMixin, zip_equal
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_dataset(root_dir, base_dir_name):
"""
root_dir: directory to the mocked dataset
base_dir_name: WikiText103 or WikiText2
"""
base_dir = os.path.join(root_dir, base_dir_name)
temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir")
os.makedirs(temp_dataset_dir, exist_ok=True)
seed = 1
mocked_data = defaultdict(list)
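# maps split name -> list of expected lines for that split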
file_names = ("wiki.train.tokens", "wiki.valid.tokens", "wiki.test.tokens")
for file_name in file_names:
csv_file = os.path.join(temp_dataset_dir, file_name)
mocked_lines = mocked_data[file_name.split(".")[1]]  # key by split name ("train"/"valid"/"test")
with open(csv_file, "w") as f:
for i in range(5):
rand_string = " ".join(
random.choice(string.ascii_letters) for i in range(seed)
)
dataset_line = rand_string
f.write(f'{rand_string}\n')
# append line to correct dataset split
mocked_lines.append(dataset_line)
seed += 1
if base_dir_name == WikiText103.__name__:
compressed_file = "wikitext-103-v1"
else:
compressed_file = "wikitext-2-v1"
compressed_dataset_path = os.path.join(base_dir, compressed_file + ".zip")
# create zip file from dataset folder
with zipfile.ZipFile(compressed_dataset_path, "w") as zip_file:
for file_name in file_names:
txt_file = os.path.join(temp_dataset_dir, file_name)
# each member needs a distinct archive name (assumed layout: <archive>/<file>)
zip_file.write(txt_file, arcname=os.path.join(compressed_file, file_name))
return mocked_data
class TestWikiTexts(TempDirMixin, TorchtextTestCase):
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.root_dir = cls.get_base_temp_dir()
cls.patcher = patch(
"torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
)
cls.patcher.start()
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
super().tearDownClass()
@nested_params([WikiText103, WikiText2], ["train", "valid", "test"])
def test_wikitexts(self, wikitext_dataset, split):
expected_samples = _get_mock_dataset(self.root_dir, base_dir_name=wikitext_dataset.__name__)[split]
dataset = wikitext_dataset(root=self.root_dir, split=split)
samples = list(dataset)
for sample, expected_sample in zip_equal(samples, expected_samples):
self.assertEqual(sample, expected_sample)
@nested_params([WikiText103, WikiText2], ["train", "valid", "test"])
def test_wikitexts_split_argument(self, wikitext_dataset, split):
# call `_get_mock_dataset` to create mock dataset files
_ = _get_mock_dataset(self.root_dir, wikitext_dataset.__name__)
dataset1 = wikitext_dataset(root=self.root_dir, split=split)
(dataset2,) = wikitext_dataset(root=self.root_dir, split=(split,))
for d1, d2 in zip_equal(dataset1, dataset2):
self.assertEqual(d1, d2)
| bsd-3-clause | Python |
|
61cd24aef4c9c8ef72527e75991c23873892ec3b | Change listener module file | gelnior/newebe,gelnior/newebe,gelnior/newebe,gelnior/newebe | platform/listener/__init__.py | platform/listener/__init__.py | '''
Module to handle data synchronization with contacts.
'''
| agpl-3.0 | Python |
|
18378b201cae7e23889031044fa6ddbaf50946c5 | check langauge detecting for lett files where we know the expetected language from the URL | ModernMT/DataCollection,ModernMT/DataCollection,ModernMT/DataCollection,ModernMT/DataCollection,ModernMT/DataCollection | baseline/check_lett_lang.py | baseline/check_lett_lang.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
doc2lang = {}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('referencepairs', type=argparse.FileType('r'))
parser.add_argument('-slang', help='Source language', default='en')
parser.add_argument('-tlang', help='Non-english language', default='fr')
parser.add_argument('-prefix', help='prefix added to make filenames',
default="/fs/syn0/pkoehn/crawl/data/site-crawls")
args = parser.parse_args(sys.argv[1:])
# read all the .lett files from stdin
for line in sys.stdin:
line = line.split("\t")
if len(line) != 6:
# sys.stderr.write("broken format: %s\n" % line[0])
continue
lang = line[0]
filename = line[3].strip()
if filename in doc2lang:
sys.stderr.write("Duplicate entry: %s:%s\n" % (filename, lang))
doc2lang[filename] = lang
# print filename, lang
correct = 0
total = 0
unknown = 0
unknown_but_file = 0
wrong_lang_pair = 0
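# each reference line is tab-separated: domain, source document, target document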
for line in args.referencepairs:
total += 1
domain, a, b = line.split("\t")
a = a.strip()
b = b.strip()
found = True
for f in (a, b):
if f not in doc2lang:
sys.stderr.write("unknown file %s\n" % (f))
unknown += 1
filename = os.path.join(args.prefix, f.split("/")[0], f)
if os.path.isfile(filename):
sys.stderr.write("but file %s exists\n" % (filename))
unknown_but_file += 1
found = False
elif doc2lang[f] not in (args.slang, args.tlang):
sys.stderr.write("%s detected as neither %s or %s\n"
% (f, args.slang, args.tland))
wrong_lang_pair += 1
found = False
if not found:
continue
if doc2lang[a] == doc2lang[b]:
sys.stderr.write("Found both %s and %s to be in %s\n"
% (a, b, doc2lang[b]))
wrong_lang_pair += 1
continue
correct += 1
print "Total: ", total
print "Possible: ", correct
print "Unknown: ", unknown
print "Unknown but file exists: ", unknown_but_file
print "Wrong_lang_pair: ", wrong_lang_pair
| apache-2.0 | Python |
|
1bbfb6fe5080de9326bd7a35afe893bf59744bdf | add ASGI plugin/middleware tests. | honeybadger-io/honeybadger-python,honeybadger-io/honeybadger-python | honeybadger/tests/contrib/test_asgi.py | honeybadger/tests/contrib/test_asgi.py | import pprint
import unittest
from async_asgi_testclient import TestClient
import aiounittest
import mock
from honeybadger import contrib
class SomeError(Exception):
pass
def asgi_app():
"""Example ASGI App."""
async def app(scope, receive, send):
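# any path containing "error" raises; every other request echoes the ASGI scope back as HTML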
if "error" in scope["path"]:
raise SomeError("Some Error.")
headers = [(b"content-type", b"text/html")]
body = f"<pre>{pprint.PrettyPrinter(indent=2, width=256).pformat(scope)}</pre>"
await send({"type": "http.response.start", "status": 200, "headers": headers})
await send({"type": "http.response.body", "body": body})
return app
class ASGIPluginTestCase(unittest.TestCase):
def setUp(self):
self.client = TestClient(contrib.ASGIHoneybadger(asgi_app(), api_key="abcd"))
@mock.patch("honeybadger.contrib.asgi.honeybadger")
def test_should_support_asgi(self, hb):
asgi_context = {"asgi": {"version": "3.0"}}
non_asgi_context = {}
self.assertTrue(self.client.application.supports(hb.config, asgi_context))
self.assertFalse(self.client.application.supports(hb.config, non_asgi_context))
@aiounittest.async_test
@mock.patch("honeybadger.contrib.asgi.honeybadger")
async def test_should_notify_exception(self, hb):
with self.assertRaises(SomeError):
await self.client.get("/error")
hb.notify.assert_called_once()
self.assertEqual(type(hb.notify.call_args.kwargs["exception"]), SomeError)
@aiounittest.async_test
@mock.patch("honeybadger.contrib.asgi.honeybadger")
async def test_should_not_notify_exception(self, hb):
response = self.client.get("/")
hb.notify.assert_not_called()
| mit | Python |
|
10dd7a4a70fe639b806e004bc0a0d6fb791279a3 | Add a utility script: | llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb | utils/misc/grep-svn-log.py | utils/misc/grep-svn-log.py | #!/usr/bin/env python
"""
Greps and returns the first svn log entry containing a line matching the regular
expression pattern passed as the only arg.
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h$'
"""
import fileinput, re, sys, StringIO
# Separator string for "svn log -v" output.
separator = '-' * 72
usage = """Usage: grep-svn-log.py line-pattern
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h'"""
class Log(StringIO.StringIO):
"""Simple facade to keep track of the log content."""
def __init__(self):
self.reset()
def add_line(self, a_line):
"""Add a line to the content, if there is a previous line, commit it."""
global separator
if self.prev_line is not None:
print >> self, self.prev_line
self.prev_line = a_line
self.separator_added = (a_line == separator)
def del_line(self):
"""Forget about the previous line, do not commit it."""
self.prev_line = None
def reset(self):
"""Forget about the previous lines entered."""
StringIO.StringIO.__init__(self)
self.prev_line = None
def finish(self):
"""Call this when you're finished with populating content."""
if self.prev_line is not None:
print >> self, self.prev_line
self.prev_line = None
def grep(regexp):
# The log content to be written out once a match is found.
log = Log()
LOOKING_FOR_MATCH = 0
FOUND_LINE_MATCH = 1
state = LOOKING_FOR_MATCH
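# two-state loop: scan for a matching line, then collect until the next separator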
while 1:
line = sys.stdin.readline()
if not line:
return
line = line.splitlines()[0]
if state == FOUND_LINE_MATCH:
# At this state, we keep on accumulating lines until the separator
# is encountered. At which point, we can return the log content.
if line == separator:
print log.getvalue()
return
log.add_line(line)
elif state == LOOKING_FOR_MATCH:
if line == separator:
log.reset()
log.add_line(line)
# Update next state if necessary.
if regexp.search(line):
state = FOUND_LINE_MATCH
def main():
if len(sys.argv) != 2:
print usage
sys.exit(0)
regexp = re.compile(sys.argv[1])
grep(regexp)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
|
8dc7a1e239dc22dd4eb69cfe1754586e3a1690dc | Test javascript using the "js" | qsnake/py2js,mattpap/py2js,chrivers/pyjaco,buchuki/pyjaco,mattpap/py2js,chrivers/pyjaco,chrivers/pyjaco,buchuki/pyjaco,buchuki/pyjaco,qsnake/py2js | tests/test_run_js.py | tests/test_run_js.py | import os
from py2js import JavaScript
def f(x):
return x
def test(func, run):
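# translate func to JavaScript, append the assertion snippet, and execute it with the js shell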
func_source = str(JavaScript(func))
run_file = "/tmp/run.js"
with open(run_file, "w") as f:
f.write(func_source)
f.write("\n")
f.write(run)
r = os.system('js -f defs.js -f %s' % run_file)
assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
| mit | Python |
|
bbed1fc6d144571f5cb69d1c1a54904857646d74 | Create redis-graphite.py | DerMitch/redis-graphite | redis-graphite.py | redis-graphite.py | """
Redis Graphite Publisher
~~~~~~~~~~~~~~~~~~~~~~~~
Publishes stats from a redis server to a carbon server.
These stats include:
- Generic server stats (INFO command)
- Length of lists (useful for monitoring queues)
Requires the redis client library:
https://pypi.python.org/pypi/redis
Example for a carbon storage schema:
[redis]
pattern = ^redis\.
retentions = 10s:24d,1m:30d,10m:1y
:license: MIT License
:author: Michael Mayr <[email protected]>
"""
import time
import socket
import logging
from argparse import ArgumentParser
from redis import Redis
log = logging.getLogger("redis-graphite")
stats_keys = [
# Clients
('connected_clients', int),
('client_longest_output_list', int),
('client_biggest_input_buf', int),
('blocked_clients', int),
# Memory
('used_memory', int),
('used_memory_rss', int),
('used_memory_peak', int),
('used_memory_lua', int),
('mem_fragmentation_ratio', lambda x: int(float(x) * 100)),
# Persistence
('rdb_bgsave_in_progress', int), # Nice for graphites render 0 as inf
('aof_rewrite_in_progress', int), # Nice for graphites render 0 as inf
('aof_base_size', int),
('aof_current_size', int),
# Stats
('total_connections_received', int),
('total_commands_processed', int),
]
parser = ArgumentParser()
# Connections
parser.add_argument('--redis-server', default="localhost")
parser.add_argument('--redis-port', type=int, default=6379)
parser.add_argument('--carbon-server', default="localhost")
parser.add_argument('--carbon-port', type=int, default=2003)
# Options
parser.add_argument('--no-server-stats', '-s', help="Disable graphing of server stats", action="store_true")
parser.add_argument('--lists', '-l', help="Watch the length of one or more lists", nargs="+")
parser.add_argument('--once', '-o', help="Run only once, then quit", action="store_true")
parser.add_argument('--interval', '-i', help="Check interval in seconds", type=int, default=10)
parser.add_argument('--verbose', '-v', help="Debug output", action="store_true")
def main():
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
base_key = "redis.{}:{}.".format(args.redis_server, args.redis_port)
log.debug("Base key:{}".format(base_key))
log.debug("Connecting to redis")
client = Redis(args.redis_server, args.redis_port)
sock = socket.socket()
sock.connect((args.carbon_server, args.carbon_port))
def send(key, value):
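# plaintext carbon protocol: "<metric path> <value> <unix timestamp>\n"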
cmd = "{} {} {}\n".format(key, value, int(time.time()))
sock.sendall(cmd)
log.debug("Starting mainloop")
while True:
info = client.info()
log.debug("Got {} info keys from redis".format(len(info)))
if not args.no_server_stats:
for key, keytype in stats_keys:
if key not in info:
log.debug("WARN:Key not supported by redis: {}".format(key))
continue
value = keytype(info[key])
log.debug("gauge {}{} -> {}".format(base_key, key, value))
send(base_key + key, value)
if args.lists:
lists_key = base_key + "list."
for key in args.lists:
length = client.llen(key)
log.debug("Length of list {}: {}".format(key, length))
send(lists_key + key, length)
if args.once:
break
log.debug("Sleeping {} seconds".format(args.interval))
time.sleep(args.interval)
sock.close()
if __name__ == '__main__':
main()
| mit | Python |
|
dee49a5e023907d77e2598560d25480bc7f56e34 | Add k40 batch script | tamasgal/km3pipe,tamasgal/km3pipe | examples/offline_analysis/qk40calib.py | examples/offline_analysis/qk40calib.py | """
================================
K40 Calibration Batch Processing
================================
Standalone job submitter for K40 offline calibrations with KM3Pipe.
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: qk40calib.py
# Author: Tamas Gal <[email protected]>
"""
Standalone job submitter for K40 offline calibrations with KM3Pipe.
Usage:
qk40calib.py OUTPUT_PATH [options]
qk40calib.py (-h | --help)
Options:
OUTPUT_PATH Folder to store the calibration data.
-d DET_ID Detector ID [default: 29].
-t TMAX Coincidence time window [default: 10].
-n N_RUNS Number of runs to process per job [default: 10].
-e ET Estimated walltime per run in minutes [default: 8].
-m VMEM Estimated vmem for a job [default: 8G].
-s RUNSETUP Runsetup match [default: PHYS.1710v5-TUNED.HRV19.3D_T_S_MX.NBMODULE].
-j JOBNAME The name of the submitted jobs [default: k40calib].
-l LOG_PATH Path of the job log files [default: qlogs].
-q Dryrun: don't submit jobs, just print the first job script.
-h --help Show this screen.
"""
import os
import re
from glob import glob
import time
from km3pipe.shell import qsub
import km3pipe as kp
from docopt import docopt
def main():
args = docopt(__doc__)
DET_ID = int(args['-d'])
TMAX = int(args['-t'])
ET_PER_RUN = int(args['-e'])*60 # [s]
RUNS_PER_JOB = int(args['-n']) # runs per job
VMEM = args['-m']
CWD = os.getcwd()
LOG_PATH = args['-l']
JOB_NAME = args['-j']
CALIB_PATH = os.path.join(CWD, args['OUTPUT_PATH'])
RUN_SUBSTR = args['-s']
DRYRUN = args['-q']
if not os.path.exists(CALIB_PATH):
os.makedirs(CALIB_PATH)
db = kp.db.DBManager()
run_table = db.run_table(det_id=DET_ID)
phys_run_table = run_table[run_table.RUNSETUPNAME.str.contains(RUN_SUBSTR)]
phys_runs = set(phys_run_table.RUN)
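# runs already calibrated, recovered from existing .k40_cal.p file names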
processed_runs = set(int(re.search("_\\d{8}_(\\d{8})", s).group(1))
for s in
glob(os.path.join(CALIB_PATH, '*.k40_cal.p')))
remaining_runs = list(phys_runs - processed_runs)
print("Remaining runs: {}".format(remaining_runs))
cmds = []
for job_id, runs_chunk in enumerate(kp.tools.chunks(remaining_runs,
RUNS_PER_JOB)):
n_runs = len(runs_chunk)
print("Preparing batch script for a chunk of {} runs."
.format(len(runs_chunk)))
cmds.append("cd $TMPDIR; mkdir -p $USER; cd $USER")
for run in runs_chunk:
cmds.append("echo Processing {}:".format(run))
irods_path = kp.tools.irods_filepath(DET_ID, run)
root_filename = os.path.basename(irods_path)
calib_filename = root_filename + '.k40_cal.p'
cmds.append("iget -v {}".format(irods_path))
cmds.append("CTMIN=$(JPrint -f {}|grep '^ctMin'|awk '{{print $2}}')"
.format(root_filename))
cmds.append("k40calib {} {} -t {} -c $CTMIN -o {}"
.format(root_filename, DET_ID, TMAX, calib_filename))
cmds.append("cp {} {}".format(calib_filename, CALIB_PATH))
cmds.append("rm -f {}".format(root_filename))
cmds.append("rm -f {}".format(calib_filename))
cmds.append("echo Run {} processed.".format(run))
cmds.append("echo " + 42*"=")
walltime = time.strftime('%H:%M:%S', time.gmtime(ET_PER_RUN * n_runs))
script = '\n'.join(cmds)
qsub(script, '{}_{}'.format(JOB_NAME, job_id), walltime=walltime,
vmem=VMEM, log_path=LOG_PATH, irods=True, platform='sl6',
dryrun=DRYRUN)
if DRYRUN:
break
cmds = []
if __name__ == '__main__':
main()
| mit | Python |
|
e44bd0b5a5db15b99a06b7561b8146554b1419d2 | Add genesisbalance class #217 | xeroc/python-bitshares | bitshares/genesisbalance.py | bitshares/genesisbalance.py | # -*- coding: utf-8 -*-
from .account import Account
from .instance import BlockchainInstance
from graphenecommon.genesisbalance import (
GenesisBalance as GrapheneGenesisBalance,
GenesisBalances as GrapheneGenesisBalances,
)
from bitsharesbase.account import Address, PublicKey
from bitsharesbase import operations
@BlockchainInstance.inject
class GenesisBalance(GrapheneGenesisBalance):
""" Read data about a Genesis Balances from the chain
:param str identifier: identifier of the balance
:param bitshares blockchain_instance: bitshares() instance to use when
accessing an RPC
"""
type_id = 15
def define_classes(self):
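# wire the BitShares-specific types into the generic Graphene implementation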
self.account_class = Account
self.operations = operations
self.address_class = Address
self.publickey_class = PublicKey
@BlockchainInstance.inject
class GenesisBalances(GrapheneGenesisBalances):
""" List genesis balances that can be claimed from the
keys in the wallet
"""
def define_classes(self):
self.genesisbalance_class = GenesisBalance
self.publickey_class = PublicKey
self.address_class = Address
| mit | Python |
|
3dd71c02ea1fa9e39054bd82bf9e8657ec77d6b9 | Add a script to recover the chat_id | a2ohm/ProgressBot | tools/get_chat_id.py | tools/get_chat_id.py | #! /usr/bin/python3
# -*- coding:utf-8 -*-
# by [email protected]
import sys
import time
import telepot
def handle(msg):
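# telepot calls this for every incoming message; glance extracts type, chat type and chat_id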
content_type, chat_type, chat_id = telepot.glance(msg)
print("\tchat_id: {}".format(chat_id))
if content_type == 'text' and msg['text'] == '/start':
ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
""".format(first_name = msg['from']['first_name'],
chat_id = chat_id)
bot.sendMessage(chat_id, ans, parse_mode = "HTML")
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')
# Keep the program running.
while 1:
try:
time.sleep(10)
except KeyboardInterrupt:
print()
sys.exit()
| apache-2.0 | Python |
|
e950a53b2a392014fbfd7b9827a9f3f0b12a377b | add connector test class | brucelau-github/raspberry-pi-proj | connectortest.py | connectortest.py | import unittest
import threading
import re
import message
import StringIO
from connector import Connector, AppConnector
import SocketServer
from threadserver import DetailServer
from datetime import datetime
from PIL import Image
class App:
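# minimal stand-in app: echoes text messages and displays received images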
def update_msg(self, txtmsg):
print txtmsg.get_body()
return txtmsg
def update_image(self, imgmsg):
img = imgmsg.get_image()
img.show()
return imgmsg
class ConnectorTest(unittest.TestCase):
def setUp(self):
self.app = App()
self.c = AppConnector(app=self.app)
def test_header(self):
c = self.c
c.serve_forever()
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
f97868b89da50532413465250d84308b84276296 | add script | adamewing/tebreak,ValentinaPeona/tebreak,adamewing/tebreak,ValentinaPeona/tebreak | scripts/getliblist.py | scripts/getliblist.py | #!/usr/bin/env python
import sys
import os
def getlibs(invcf):
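# read-group names live in the last colon-separated field of the sample column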
rgs = {}
with open(invcf, 'r') as vcf:
for line in vcf:
if not line.startswith('#'):
chrom, pos, id, ref, alt, qual, filter, info, format, sample = line.strip().split('\t')
for rg in sample.split(':')[-1].split(','):
rgs[rg] = True
return rgs.keys()
if len(sys.argv) == 2:
rgs = []
with open(sys.argv[1], 'r') as vcflist:
for vcf in vcflist:
vcf = vcf.strip()
assert os.path.exists(vcf), "VCF not found: " + vcf
for rg in getlibs(vcf):
rgs.append(rg)
print '\n'.join(sorted(list(set(rgs))))
else:
print "usage:", sys.argv[0], "<tebreak output vcf list in a file>"
| mit | Python |
|
fda7d76e4b10a1b43e3612742585d9abcc7b27da | Rename tags.py to search.py | cdent/tank,cdent/tank,cdent/tank | tiddlywebplugins/tank/search.py | tiddlywebplugins/tank/search.py | """
Routines associated with finding and listing tags.
An experiment for now.
"""
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import PermissionsError
from tiddlywebplugins.whoosher import get_searcher, query_parse
def list_tags(environ, start_response):
"""
Plain text list of tags in a certain context.
If a q query parameter is provided, then that is used to limit
the search space for tags. For example q=modifier:cdent bag:foobar
would return tags only from tiddlers in the bag foobar with most
recent modifier of cdent.
"""
config = environ['tiddlyweb.config']
query = environ['tiddlyweb.query'].get('q', [None])[0]
searcher = get_searcher(config)
if query:
# XXX this is not robust in the face of wacky inputs
# (including quoted inputs), for now we ride.
kwargs = dict([entry.split(':') for entry in query.split()])
documents = searcher.documents(**kwargs)
else:
documents = searcher.documents()
# As yet unknown if this will be slow or not.
set_tags = set()
for stored_fields in documents:
set_tags.update(stored_fields['tags'].split(','))
start_response('200 OK', [('Content-Type', 'text/plain; charset=UTF-8')])
return '\n'.join(set_tags)
def get_comp_bags(store, config, usersign):
"""
Saving for later. Return a list of bags that can be used in
comps.
"""
comp_bags = []
for result in full_search(config, 'title:app'):
bag, _ = result['id'].split(':', 1)
bag = store.get(Bag(bag))
try:
bag.policy.allows(usersign, 'read')
comp_bags.append(bag)
except PermissionsError:
pass
return comp_bags
def full_search(config, query):
query = query_parse(config, query)
searcher = get_searcher(config)
return searcher.search(query)
| bsd-3-clause | Python |
|
eac6545d0700d2a6c3de43db5ea8d46cfea12464 | Update link.py | Halibot/haltoys | link.py | link.py | from module import XMPPModule
import halutils
import re, requests
class Link(XMPPModule):
def handleMessage(self, msg):
obj = re.match('.*(http[s]?://.*)+', msg['body'])
if obj:
addr = obj.group(1)
webpage = requests.get(addr).content
title = re.match('.*<title>(.*)</title>', str(webpage)).group(1).strip()
self.xmpp.reply(msg, "Website: " + title)
| bsd-3-clause | Python |
|
c29e430301dc854dc7bd83ebc2a588cea70589a6 | Fix has_perm issue in get_project_list | mvaled/sentry,korealerts1/sentry,mvaled/sentry,JamesMura/sentry,BuildingLink/sentry,ngonzalvez/sentry,mvaled/sentry,alexm92/sentry,kevinlondon/sentry,SilentCircle/sentry,ewdurbin/sentry,kevinastone/sentry,argonemyth/sentry,boneyao/sentry,mvaled/sentry,llonchj/sentry,chayapan/django-sentry,JTCunning/sentry,rdio/sentry,mitsuhiko/sentry,SilentCircle/sentry,drcapulet/sentry,JamesMura/sentry,alexm92/sentry,gencer/sentry,felixbuenemann/sentry,Kryz/sentry,boneyao/sentry,fuziontech/sentry,gencer/sentry,TedaLIEz/sentry,camilonova/sentry,chayapan/django-sentry,zenefits/sentry,alexm92/sentry,wujuguang/sentry,argonemyth/sentry,ifduyue/sentry,kevinastone/sentry,SilentCircle/sentry,songyi199111/sentry,beeftornado/sentry,argonemyth/sentry,drcapulet/sentry,fuziontech/sentry,BuildingLink/sentry,BuildingLink/sentry,wong2/sentry,NickPresta/sentry,ifduyue/sentry,songyi199111/sentry,alex/sentry,drcapulet/sentry,boneyao/sentry,imankulov/sentry,jean/sentry,NickPresta/sentry,mvaled/sentry,JamesMura/sentry,mitsuhiko/sentry,hongliang5623/sentry,pauloschilling/sentry,zenefits/sentry,ifduyue/sentry,JTCunning/sentry,wujuguang/sentry,korealerts1/sentry,looker/sentry,daevaorn/sentry,JamesMura/sentry,alex/sentry,gg7/sentry,gencer/sentry,mvaled/sentry,nicholasserra/sentry,chayapan/django-sentry,looker/sentry,pauloschilling/sentry,llonchj/sentry,fotinakis/sentry,gg7/sentry,ngonzalvez/sentry,NickPresta/sentry,Kronuz/django-sentry,wujuguang/sentry,ifduyue/sentry,llonchj/sentry,BayanGroup/sentry,rdio/sentry,JackDanger/sentry,felixbuenemann/sentry,nicholasserra/sentry,hongliang5623/sentry,pauloschilling/sentry,nicholasserra/sentry,gencer/sentry,jokey2k/sentry,1tush/sentry,ewdurbin/sentry,fuziontech/sentry,looker/sentry,JamesMura/sentry,jean/sentry,camilonova/sentry,Kronuz/django-sentry,zenefits/sentry,daevaorn/sentry,jean/sentry,vperron/sentry,jokey2k/sentry,Natim/sentry,1tush/sentry,1tush/sentry,zenefits/sentry,fotinakis/sentry,gencer/sentry,TedaLIEz/sentry,BuildingLink/sentry,Kryz/sentry,beeftornado/sentry,camilonova/sentry,rdio/sentry,daevaorn/sentry,daevaorn/sentry,beni55/sentry,fotinakis/sentry,kevinlondon/sentry,gg7/sentry,imankulov/sentry,fotinakis/sentry,Kryz/sentry,jean/sentry,korealerts1/sentry,hongliang5623/sentry,NickPresta/sentry,BayanGroup/sentry,beni55/sentry,JTCunning/sentry,ewdurbin/sentry,wong2/sentry,imankulov/sentry,jokey2k/sentry,ifduyue/sentry,BuildingLink/sentry,songyi199111/sentry,beeftornado/sentry,kevinlondon/sentry,BayanGroup/sentry,Natim/sentry,looker/sentry,alex/sentry,kevinastone/sentry,SilentCircle/sentry,ngonzalvez/sentry,felixbuenemann/sentry,beni55/sentry,Kronuz/django-sentry,TedaLIEz/sentry,jean/sentry,wong2/sentry,vperron/sentry,JackDanger/sentry,Natim/sentry,zenefits/sentry,vperron/sentry,JackDanger/sentry,rdio/sentry,looker/sentry | sentry/web/helpers.py | sentry/web/helpers.py | """
sentry.web.views
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.conf import settings as dj_settings
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponse
from django.template import loader
from sentry.conf import settings
from sentry.models import ProjectMember, Project
def get_project_list(user=None, flag=None):
"""
Returns a set of all projects a user has some level of access to.
"""
projects = dict((p.pk, p) for p in Project.objects.filter(public=True))
if user.is_authenticated():
projects.update(dict(
(pm.project_id, pm.project)
for pm in ProjectMember.objects.filter(user=user).select_related('project')
if (not flag or pm.has_perm(flag))))
return projects
_LOGIN_URL = None
def get_login_url(reset=False):
global _LOGIN_URL
if _LOGIN_URL is None or reset:
# if LOGIN_URL resolves force login_required to it instead of our own
# XXX: this must be done as late as possible to avoid idempotent requirements
try:
resolve(dj_settings.LOGIN_URL)
except:
_LOGIN_URL = settings.LOGIN_URL
else:
_LOGIN_URL = dj_settings.LOGIN_URL
if _LOGIN_URL is None:
_LOGIN_URL = reverse('sentry-login')
return _LOGIN_URL
def iter_data(obj):
for k, v in obj.data.iteritems():
if k.startswith('_') or k in ['url']:
continue
yield k, v
def render_to_string(template, context={}):
context.update({
'has_search': False,
'MESSAGES_PER_PAGE': settings.MESSAGES_PER_PAGE,
})
return loader.render_to_string(template, context)
def render_to_response(template, context={}, status=200):
response = HttpResponse(render_to_string(template, context))
response.status_code = status
return response
| """
sentry.web.views
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.conf import settings as dj_settings
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponse
from django.template import loader
from sentry.conf import settings
from sentry.models import Project
def get_project_list(user=None, flag=None):
"""
Returns a set of all projects a user has some level of access to.
"""
projects = dict((p.pk, p) for p in Project.objects.filter(public=True))
if user.is_authenticated():
projects.update(dict((p.pk, p) for p in Project.objects.filter(member_set__user=user) if (not flag or p.has_perm(flag))))
return projects
_LOGIN_URL = None
def get_login_url(reset=False):
global _LOGIN_URL
if _LOGIN_URL is None or reset:
# if LOGIN_URL resolves force login_required to it instead of our own
# XXX: this must be done as late as possible to avoid idempotent requirements
try:
resolve(dj_settings.LOGIN_URL)
except:
_LOGIN_URL = settings.LOGIN_URL
else:
_LOGIN_URL = dj_settings.LOGIN_URL
if _LOGIN_URL is None:
_LOGIN_URL = reverse('sentry-login')
return _LOGIN_URL
def iter_data(obj):
for k, v in obj.data.iteritems():
if k.startswith('_') or k in ['url']:
continue
yield k, v
def render_to_string(template, context={}):
context.update({
'has_search': False,
'MESSAGES_PER_PAGE': settings.MESSAGES_PER_PAGE,
})
return loader.render_to_string(template, context)
def render_to_response(template, context={}, status=200):
response = HttpResponse(render_to_string(template, context))
response.status_code = status
return response
| bsd-3-clause | Python |
1510a0faeff91f6f6ed7a1c5929628d430cb0506 | Update file identification tools | artefactual/archivematica-fpr-admin,artefactual/archivematica-fpr-admin,artefactual/archivematica-fpr-admin,artefactual/archivematica-fpr-admin | fpr/migrations/0010_update_fido_136.py | fpr/migrations/0010_update_fido_136.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def data_migration(apps, schema_editor):
IDTool = apps.get_model('fpr', 'IDTool')
IDTool.objects.filter(description='Fido', version='1.3.5').update(version='1.3.6')
IDTool.objects.filter(description='Siegfried', version='1.6.7').update(version='1.7.3')
def reverse_migration(apps, schema_editor):
IDTool = apps.get_model('fpr', 'IDTool')
IDTool.objects.filter(description='Fido', version='1.3.6').update(version='1.3.5')
IDTool.objects.filter(description='Siegfried', version='1.7.3').update(version='1.6.7')
class Migration(migrations.Migration):
dependencies = [
('fpr', '0009_pronom_90'),
]
operations = [
migrations.RunPython(data_migration, reverse_migration),
]
| agpl-3.0 | Python |
|
93548efe9eb04dd9659e3cc76c711d967e8770df | Create filereader.py | timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo,timjinx/Sample-Repo | filereader.py | filereader.py | #!/usr/bin/python
import os
import re
from optparse import OptionParser
SUFFIX=".out"
def main () :
global filename
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="the file to update", metavar="FILE")
parser.add_option("-n", "--name", dest="name",
help="the name to replace the original name with", metavar="NAME")
parser.add_option("-c", "--fromname", dest="fromname",
help="the name be replaced", metavar="FROMNAME")
(options, args) = parser.parse_args()
if not options.filename :
print "You must specify the file to modify"
exit(-1)
if not options.name :
print "You must specify the name to replace Tim with"
exit(-1)
if not options.fromname :
print "You must specify the name to be replaced"
exit(-1)
fin = open(options.filename, 'r')
fout = open(options.filename + SUFFIX, 'w')
for line in fin :
fout.write(re.sub(options.fromname, options.name, line))
fin.close()
fout.close()
main()
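# Illustrative run (file and names are made up): replaces every "Tim" in
# notes.txt with "Bob", writing the result to notes.txt.out:
#   python filereader.py -f notes.txt -c Tim -n Bob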
| apache-2.0 | Python |
|
23ab301f4773892f6db7321105f79ba0c48404a3 | add urls | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/doc/expedient/source/developer/sshaggregate/urls.py | src/doc/expedient/source/developer/sshaggregate/urls.py | from django.conf.urls.defaults import *
urlpatterns = patterns('sshaggregate.views',
url(r'^aggregate/create/$', 'aggregate_crud', name='sshaggregate_aggregate_create'),
url(r'^aggregate/(?P<agg_id>\d+)/edit/$', 'aggregate_crud', name='sshaggregate_aggregate_edit'),
url(r'^aggregate/(?P<agg_id>\d+)/servers/$', 'aggregate_add_servers', name='sshaggregate_aggregate_servers'),
)
| bsd-3-clause | Python |
|
fed2e3f9bdb3a00b077b5e7df1aed4d927b77b6c | Add test for Clifford drudge by quaternions | tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge | tests/clifford_test.py | tests/clifford_test.py | """Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
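    # A further property one could check under the same assumptions (each
    # generator squares to -1 with this negative inner product), e.g.:
    # assert dr.sum(e_[1] * e_[1]).simplify() == -1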
| mit | Python |
|
09a0689b8e521c1d5c0ea68ac448dc9ae7abcff5 | Read the header of a fits file and/or look up a single key (case insensitive). | DanielAndreasen/astro_scripts | fitsHeader.py | fitsHeader.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
    # lower-case all keys so the lookups below are case insensitive
    h = dict((key.lower(), value) for key, value in h.items())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
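# Illustrative usage (file name is hypothetical):
#   python fitsHeader.py spectrum.fits            # page the whole header
#   python fitsHeader.py spectrum.fits -key DATE  # case-insensitive lookup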
| mit | Python |
|
b674f921a8e5cffb2d3e320f564c61ca01455a9f | Add command to generate a csv of talk titles and video reviewers | CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer | wafer/management/commands/wafer_talk_video_reviewers.py | wafer/management/commands/wafer_talk_video_reviewers.py | import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
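# Illustrative invocation (the command name comes from this file's name):
#   ./manage.py wafer_talk_video_reviewers > video_reviewers.csv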
| isc | Python |
|
3db3c22d83071550d8bbd70062f957cf43c5e54a | Add a compatibility module, because of Python 2/3 compatibility issues. | davidhalter-archive/shopping_cart_example | cart/_compatibility.py | cart/_compatibility.py | import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
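# Illustrative behaviour (examples are not from the original module):
#   utf8('abc') -> u'abc' on Python 2, 'abc' on Python 3
#   utf8(42)    -> u'42' (always a text type, never bytes)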
| mit | Python |
|
156b7dfc11f24a7d77d2280e8ddade3cb7a474b7 | Add a script for listing all Elasticsearch indexes | wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api | misc/list_all_es_indexes.py | misc/list_all_es_indexes.py | #!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
| mit | Python |
|
006a921f19f6c4f64d694c86346ad85ada2c8bb8 | Add tests for subclass support | pycurl/pycurl,pycurl/pycurl,pycurl/pycurl | tests/subclass_test.py | tests/subclass_test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
                    assert super(MyCurlClass, self).setopt != self.setopt
                    # base method must be callable, setopt must return None
                    assert super(MyCurlClass, self).setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
| lgpl-2.1 | Python |
|
c8816f509a661ed53c166d843ebfb7dcb6b8d75a | use only single threaded svrlight | lisitsyn/shogun,Saurabh7/shogun,besser82/shogun,shogun-toolbox/shogun,Saurabh7/shogun,besser82/shogun,besser82/shogun,sorig/shogun,sorig/shogun,shogun-toolbox/shogun,karlnapf/shogun,karlnapf/shogun,sorig/shogun,shogun-toolbox/shogun,karlnapf/shogun,Saurabh7/shogun,karlnapf/shogun,besser82/shogun,Saurabh7/shogun,shogun-toolbox/shogun,lisitsyn/shogun,besser82/shogun,Saurabh7/shogun,lambday/shogun,geektoni/shogun,geektoni/shogun,lisitsyn/shogun,lambday/shogun,lisitsyn/shogun,Saurabh7/shogun,geektoni/shogun,sorig/shogun,geektoni/shogun,shogun-toolbox/shogun,Saurabh7/shogun,Saurabh7/shogun,geektoni/shogun,karlnapf/shogun,sorig/shogun,lambday/shogun,shogun-toolbox/shogun,lambday/shogun,geektoni/shogun,sorig/shogun,lambday/shogun,Saurabh7/shogun,lisitsyn/shogun,lisitsyn/shogun,karlnapf/shogun,lambday/shogun,besser82/shogun | examples/undocumented/python_modular/regression_svrlight_modular.py | examples/undocumented/python_modular/regression_svrlight_modular.py | ###########################################################################
# svm light based support vector regression
###########################################################################
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,1.2,1,1e-5,1e-2,1],[traindat,testdat,label_traindat,2.3,0.5,1e-5,1e-6,1]]
def regression_svrlight_modular(fm_train=traindat,fm_test=testdat,label_train=label_traindat, \
width=1.2,C=1,epsilon=1e-5,tube_epsilon=1e-2,num_threads=3):
from shogun.Features import Labels, RealFeatures
from shogun.Kernel import GaussianKernel
try:
from shogun.Regression import SVRLight
except ImportError:
print 'No support for SVRLight available.'
return
feats_train=RealFeatures(fm_train)
feats_test=RealFeatures(fm_test)
kernel=GaussianKernel(feats_train, feats_train, width)
labels=Labels(label_train)
svr=SVRLight(C, epsilon, kernel, labels)
svr.set_tube_epsilon(tube_epsilon)
svr.parallel.set_num_threads(num_threads)
svr.train()
kernel.init(feats_train, feats_test)
out = svr.classify().get_labels()
return out, kernel
if __name__=='__main__':
print 'SVRLight'
regression_svrlight_modular(*parameter_list[0])
| ###########################################################################
# svm light based support vector regression
###########################################################################
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,1.2,1,1e-5,1e-2,3],[traindat,testdat,label_traindat,2.3,0.5,1e-5,1e-6,1]]
def regression_svrlight_modular(fm_train=traindat,fm_test=testdat,label_train=label_traindat, \
width=1.2,C=1,epsilon=1e-5,tube_epsilon=1e-2,num_threads=3):
from shogun.Features import Labels, RealFeatures
from shogun.Kernel import GaussianKernel
try:
from shogun.Regression import SVRLight
except ImportError:
print 'No support for SVRLight available.'
return
feats_train=RealFeatures(fm_train)
feats_test=RealFeatures(fm_test)
kernel=GaussianKernel(feats_train, feats_train, width)
labels=Labels(label_train)
svr=SVRLight(C, epsilon, kernel, labels)
svr.set_tube_epsilon(tube_epsilon)
svr.parallel.set_num_threads(num_threads)
svr.train()
kernel.init(feats_train, feats_test)
out = svr.classify().get_labels()
return out, kernel
if __name__=='__main__':
print 'SVRLight'
regression_svrlight_modular(*parameter_list[0])
| bsd-3-clause | Python |
7327250621dc34a1e7c2f1998333d65024583168 | add simple test | zerovm/zpm,zerovm/zpm,zerovm/zpm,zerovm/zerovm-cli,zerovm/zerovm-cli,zerovm/zpm,zerovm/zerovm-cli,zerovm/zerovm-cli,zerovm/zpm,zerovm/zerovm-cli,zerovm/zpm,zerovm/zerovm-cli | tests/test_commands.py | tests/test_commands.py | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zpmlib import commands
def test_all_commands_sorted():
cmd_names = [cmd.__name__ for cmd in commands.all_commands()]
assert cmd_names == sorted(cmd_names)
| apache-2.0 | Python |
|
2b2f11cc7650fc5c40cd21a6e8ad671656fc9b21 | add quicksort | gnhuy91/python-utils | quicksort.py | quicksort.py | '''
QuickSort implementation
'''
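# Hoare-style scheme: the middle element is the pivot; i scans from the left
# and j from the right, out-of-place pairs are swapped, then both halves are
# sorted recursively in place.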
def quick_sort(arr, l, r):
i = l
j = r
x = arr[(l + r) / 2]
if len(arr) == 0:
return arr
else:
while True:
while arr[i] < x:
i += 1
while arr[j] > x:
j -= 1
if i <= j:
tmp = arr[i]
arr[i] = arr[j]
arr[j] = tmp
i += 1
j -= 1
if i >= j:
break
if l < j:
quick_sort(arr, l, j)
if i < r:
quick_sort(arr, i, r)
if __name__ == '__main__':
arr = [12, 4, 5, 6, 7, 3, 1, 15]
quick_sort(arr, 0, len(arr) - 1)
print arr
| mit | Python |
|
ef76498542aec046c2307562db01e4764ae68b50 | Add gce_resize | tfuentes/cloudtool | gce_resize.py | gce_resize.py | #!/usr/bin/env python
# import section
import argparse, os, time
from googleapiclient import discovery
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client.client import GoogleCredentials
from pprint import pprint
# functions
def get_instanceGroup(service, project,zone, instanceGroup):
"""
Returns instance group object.
"""
try:
result = service.instanceGroups().get(project=project, zone=zone, instanceGroup=instanceGroup).execute()
except Exception as error:
print("Error getting instance group: %s." % str(error.message))
exit(1)
return result
def get_instanceGroupManager(service, project,zone, instanceGroup):
"""
Return instance group manager object.
"""
try:
result = service.instanceGroupManagers().get(project=project, zone=zone, instanceGroupManager=instanceGroup).execute()
except Exception as error:
print("Error getting instance group manager: %s." % str(error.message))
exit(1)
return result
def resize_instanceGroup(service, project, zone, instanceGroup, instances_num):
"""
Resize instanceGroup manager to instances_num. Usually returns immediatly.
"""
operation = service.instanceGroupManagers().resize(project=project, zone=zone, instanceGroupManager=instanceGroup, size=instances_num).execute()
try:
result = wait_for_operation(service, project, zone, operation)
except Exception as error:
print("Error executing resize: %s." % str(error.message))
exit(1)
return result
def wait_instanceGroupManager(service, project, zone, instanceGroup, timeout=None):
"""
Checks and waits for any operation on an instance group until complete. Consider use of timeout.
"""
n = 0
all_actions = 1
while all_actions > 0:
result = get_instanceGroupManager(service, project, zone, instanceGroup)
all_actions = sum(result['currentActions'].values()) - result['currentActions']['none']
if timeout != None and n > timeout:
print("Timeout while checking for finish actions on instance group manager")
exit(1)
n+=1
time.sleep(1)
def wait_for_operation(service, project, zone, operation):
"""
Keep waiting for an operation object to finish on gcp to complete.
"""
print('Waiting for operation to finish...')
while True:
result = service.zoneOperations().get(
project=project,
zone=zone,
operation=operation['name']).execute()
if result['status'] == 'DONE':
print("done.")
if 'error' in result:
                raise Exception(result['error'])
return result
print("progress: %i" % (operation.progress))
time.sleep(1)
# main
def main(project_id, zone, credentials_file, instance_group, instances_num):
# start credentials, service
scopes = ['https://www.googleapis.com/auth/compute']
if credentials_file is not None:
credentials = ServiceAccountCredentials.from_json_keyfile_name(credentials_file, scopes)
else:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', 'v1', credentials=credentials)
# do resize
instancegroup = get_instanceGroup(service, project_id, zone, instance_group)
print("Got instance group")
instancegroup_resize = resize_instanceGroup(service, project_id, zone, instance_group, instances_num)
wait_instanceGroupManager(service, project_id, zone, instance_group, 100)
print("Instance group resize successfuly. %s intances on %s group." % (instances_num, instance_group))
if __name__ == '__main__':
""" Script for resizing an Instance Group on GCP.
    Example: gce_resize.py --project_id=<project> --instance_group=<instance_group_name> --instances_num=<int> [--zone=<gce_zone>] [--credentials_file=<json_gcp_application_credentials>]
Arguments:
--project_id
--instance_group
    --instances_num
[--zone]
[--credentials_file]
"""
parser = argparse.ArgumentParser( description =__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-p', '--project_id', help='Your Google Cloud project ID.', required=True)
parser.add_argument('-i', '--instance_group', help='Instance Group to resize.', required=True)
parser.add_argument('-n', '--instances_num', help='Number of instances to grow or shrink instance group to.', required=True)
parser.add_argument('-z', '--zone', default='europe-west1-d', help='Compute Engine zone to deploy to.', required=False)
parser.add_argument('-c', '--credentials_file', default=None, help='Optional service credentials from json file.', required=False)
args = parser.parse_args()
main(project_id=args.project_id, zone=args.zone, credentials_file=args.credentials_file, instance_group=args.instance_group, instances_num=args.instances_num)
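# Illustrative invocation (all values are made up):
#   ./gce_resize.py -p my-project -i web-workers -n 5 -z europe-west1-d \
#       -c service-account.json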
| agpl-3.0 | Python |
|
b102a2769dc70deb2055a2d4ae0bf11f48c13f9d | add game window | MadaooQuake/Pinko | core/core.py | core/core.py | # -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
class App:
def __init__(self):
self._running = True
self._display_surf = None
        self.size = self.width, self.height = 1024, 576
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
self._running = True
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
def on_loop(self):
pass
def on_render(self):
pass
def on_cleanup(self):
pygame.quit()
def on_execute(self):
if self.on_init() == False:
self._running = False
while( self._running ):
for event in pygame.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.on_cleanup()
if __name__ == "__main__" :
theApp = App()
theApp.on_execute() | bsd-3-clause | Python |
|
fb6dd1a92471697b8665364dfaa7fedc519d00ed | Create properties.py | Zain117/Rogue | data/properties.py | data/properties.py | import libtcodpy as libtcod
class Object():
def __init__(self, x, y, char, color, screen):
self.x = x
self.y = y
self.char = char
self.color = color
self.screen = screen
def draw_object(self):
#Set the color of the character and draw it
libtcod.console_set_default_foreground(self.screen, self.color)
libtcod.console_put_char(self.screen, self.x, self.y, self.char, libtcod.BKGND_NONE)
def delete(self):
        #Erase the char by drawing a blank space over it
        libtcod.console_put_char(self.screen, self.x, self.y, ' ', libtcod.BKGND_NONE)
class Tile():
    #Properties of a map's tiles, there's not much to it like there is to Object
    def __init__(self, blocked, blocked_sight=None):
        self.blocked = blocked
        #blocked_sight defaults to blocked when not given
        if blocked_sight is None: blocked_sight = blocked
        self.blocked_sight = blocked_sight
| mit | Python |
|
a2ba0c1658850064f55de1a99c3c2a49ef847b8d | Add join_by draft | Suor/funcy | drafts/join_by.py | drafts/join_by.py | def join_by(op, dicts, start=EMPTY):
dicts = list(dicts)
if not dicts:
return {}
elif len(dicts) == 1:
return dicts[0]
result = {}
for d in dicts:
for k, v in iteritems(d):
if k in result:
result[k] = op(result[k], v)
else:
result[k] = v if start is EMPTY else op(start, v)
# result[k] = v if start is EMPTY else start(v)
# result[k] = v if start is EMPTY else op(start(), v)
# result[k] = v if start is EMPTY else op(start() if callable(start) else start, v)
return result
join_by(operator.__add__, dnfs, start=list)
join_with(cat, dnfs)
join_by(list.extend, dnfs, start=list)
join_by(lambda c, _: c + 1, dnfs, start=lambda _: 1)
join_by(lambda l, v: l + len(v), dnfs, start=len)
# join_by(list.append, dnfs, initial=[])
join_by(lambda l, v: l + len(v), dnfs, 0)
| bsd-3-clause | Python |
|
235cc3a7529b36e11a7935e15c90f496210d7c31 | implement method for generating request signature | gdmachado/scup-python | scup/auth.py | scup/auth.py | import hashlib
import time
def get_request_signature(private_key):
current_time = int(time.time())
message = '{}{}'.format(current_time, private_key)
digest = hashlib.md5(message).hexdigest()
return current_time, digest
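# Illustrative use (key is made up): callers send both values so the server
# can recompute md5("<timestamp><private_key>") and compare digests.
#   ts, sig = get_request_signature('my-private-key')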
| mit | Python |
|
5834f2e259834b325cf076b36af634dc6b64f442 | Add info if not parsed | Phantasus/intelmq,s4n7h0/intelmq | intelmq/bots/parsers/generic/parser.py | intelmq/bots/parsers/generic/parser.py | from intelmq.lib.bot import Bot, sys
from intelmq.lib.message import Event
from intelmq.bots import utils
import re
class GenericBot(Bot):
# Generic parser, will simply parse and add named group to event
# for example if you have the regex :
# '^\s*(?P<ip>(?:(?:\d){1,3}\.){3}\d{1,3})'
# You will have an item 'ip' in your event.
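    # For example (hypothetical feed line): the regex
    # '^(?P<ip>[^,]+),(?P<fqdn>.+)$' applied to "203.0.113.7,bad.example.com"
    # would add 'ip' and 'fqdn' items to the event.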
def process(self):
report = self.receive_message()
self.logger.debug("Will apply regex %s" % self.parameters.regex)
if report:
rowcount = 0
for row in report.split('\n'): # For each line
self.logger.debug(self.parameters.regex)
event = Event()
match = re.search(self.parameters.regex, row)
if match:
for key in match.groupdict():
                        event.add(key, match.groupdict()[key])
else:
continue # skip lines without matching regex
rowcount += 1
                # Get details from parser parameters; would be nice to have
                # them per source. Avoid overriding fields already parsed.
if not 'feed' in match.groupdict():
event.add('feed', self.parameters.feed)
if not 'feed_url' in match.groupdict():
event.add('feed_url', self.parameters.feed_url)
if not 'type' in match.groupdict():
event.add('type', self.parameters.type)
event = utils.parse_source_time(event, "source_time")
event = utils.generate_observation_time(event,
"observation_time")
event = utils.generate_reported_fields(event)
self.send_message(event)
self.logger.info("Processed %d event" % rowcount)
self.acknowledge_message()
if __name__ == "__main__":
bot = GenericBot(sys.argv[1])
bot.start()
| from intelmq.lib.bot import Bot, sys
from intelmq.lib.message import Event
from intelmq.bots import utils
import re
class GenericBot(Bot):
# Generic parser, will simply parse and add named group to event
# for example if you have the regex :
# '^\s*(?P<ip>(?:(?:\d){1,3}\.){3}\d{1,3})'
# You will have an item 'ip' in your event.
def process(self):
report = self.receive_message()
if report:
rowcount = 0
for row in report.split('\n'): # For each line
self.logger.debug(row)
self.logger.debug(self.parameters.regex)
event = Event()
match = re.search(self.parameters.regex, row)
if match:
for key in match.groupdict():
                        event.add(key, match.groupdict()[key])
else:
continue # skip lines without matching regex
rowcount += 1
# Get detail from parser parameters, will be nice to have it by
# source parameters..
event.add('feed', self.parameters.feed)
event.add('feed_url', self.parameters.feed_url)
event.add('type', self.parameters.type)
event = utils.parse_source_time(event, "source_time")
event = utils.generate_observation_time(event,
"observation_time")
event = utils.generate_reported_fields(event)
self.send_message(event)
self.logger.info("Processed %d event" % rowcount)
self.acknowledge_message()
if __name__ == "__main__":
bot = GenericBot(sys.argv[1])
bot.start()
| agpl-3.0 | Python |
7490c39f958291cc99913d0f36581439d8efdf77 | Add a command to fix candidate image metadata | DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,openstate/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative | candidates/management/commands/candidates_fix_image_metadata.py | candidates/management/commands/candidates_fix_image_metadata.py | from PIL import Image
from hashlib import md5
import re
import requests
import sys
from StringIO import StringIO
from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from candidates.update import fix_dates
from moderation_queue.views import PILLOW_FORMAT_MIME_TYPES
from django.core.management.base import BaseCommand
from slumber.exceptions import HttpClientError
def fix_image_mime_type(image):
mime_type = image.get('mime_type')
if mime_type:
return
try:
image_url = image['url']
r = requests.get(image_url)
pillow_image = Image.open(StringIO(r.content))
except IOError as e:
if 'cannot identify image file' in unicode(e):
print "Unknown image format in {0}".format(image_url)
return
raise
new_mime_type = PILLOW_FORMAT_MIME_TYPES[pillow_image.format]
image['mime_type'] = new_mime_type
print " Setting mime_type to", new_mime_type
def fix_image_metadata(image):
notes = image.get('notes', '')
# If the notes field has an MD5sum in it, then it was from the
# import PPC script, so move that to an md5sum field (as
# organization images have) and set the moderator_why_allowed to
# 'profile-photo'
m = re.search(r'^md5sum:([a-f0-9]+)', notes)
if m:
image['md5sum'] = m.group(1)
image['moderator_why_allowed'] = 'profile-photo'
image['notes'] = 'Scraped from the official party PPC page'
print " Migrated old PPC scraped image"
# If there is a 'why_allowed' and 'justification_for_use' field,
# this was from before we switched to separating the user's and
# moderator's reason for allowing the photo, so migrate those
# fields.
if image.get('why_allowed') and image.get('justification_for_use'):
why_allowed = image.pop('why_allowed')
justification_for_use = image.pop('justification_for_use')
image['moderator_why_allowed'] = why_allowed
image['user_why_allowed'] = why_allowed
image['user_justification_for_use'] = justification_for_use
print " Migrated from old why_allowed", why_allowed
print " Migrated from old justification_for_use", justification_for_use
def ensure_md5sum_present(image):
if image.get('md5sum'):
return
image_url = image['url']
# Otherwise get the file and calculate its MD5sum
r = requests.get(image_url)
md5sum = md5(r.content).hexdigest()
image['md5sum'] = md5sum
print " Setting md5sum field to", md5sum
def fix_image(image):
fix_image_mime_type(image)
fix_image_metadata(image)
ensure_md5sum_present(image)
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
for person in popit_unwrap_pagination(
self.api.persons,
embed='',
per_page=100
):
msg = "Person {0}persons/{1}"
print msg.format(self.get_base_url(), person['id'])
for image in person.get('images', []):
print " Image with URL:", image['url']
fix_image(image)
image.pop('_id', None)
# Some images have an empty 'created' field, which
# causes an Elasticsearch indexing error, so remove
# that if it's the case:
if not image.get('created'):
image.pop('created', None)
fix_dates(person)
try:
self.api.persons(person['id']).put(person)
except HttpClientError as e:
print "HttpClientError", e.content
sys.exit(1)
| agpl-3.0 | Python |
|
d046968c5b16239b4ce3fbe17b6359339f3e7b9b | Add vcf convertor | ihciah/AndroidSMSRelay,ihciah/AndroidSMSRelay | utils/vcf_convertor.py | utils/vcf_convertor.py | #! -*- coding: utf-8 -*-
import re
import json
person_patten = re.compile(r'BEGIN:VCARD(.*?)END:VCARD', re.DOTALL)
fullname_patten = re.compile(r'FN:(.*?)\n')
mobile_patten = re.compile(r':\+*?(\d{9}\d*?)\n')
f = open(r'iCloud vCard.vcf')
fc = f.read()
people = person_patten.findall(fc)
names = {}
for p in people:
for i in fullname_patten.findall(p):
name = i
p = p.replace("-", "")
for i in mobile_patten.findall(p):
if len(i) == 13 and i[:2] == "86":
i = i[2:]
names[i] = name
fl = open("dump", "w")
fl.write(json.dumps(names))
fl.close()
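# Illustrative content of the resulting "dump" file (numbers are made up):
#   {"13800138000": "Alice", "2125551234": "Bob"}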
| mit | Python |
|
3a1b4ceb2ae989495d2453c612ac6645fdf59726 | Create cisco_vlan_extract.py | JamesKBowler/networking_scripts | cisco/cisco_vlan_extract.py | cisco/cisco_vlan_extract.py | from ciscoconfparse import CiscoConfParse as ccp
def extract_vlan(vlans):
"""
Will convert ACTIVE vlans in the 'show vlan' command .....
switch#show vlan
VLAN Name Status Ports
---- -------------------------------- --------- -------------------------------
1 default active Fa0/48
2 AAAAA active
3 BBBBB active
4 CCCCC active Fa0/1, Fa0/2, Fa0/3, Fa0/4, Fa0/5, Fa0/6, Fa0/7
5 DDDDD active
6 EEEEE active
7 FFFFF active Fa0/25, Fa0/26, Fa0/27, Fa0/28, Fa0/29, Fa0/30
1002 fddi-default act/unsup
1003 token-ring-default act/unsup
1004 fddinet-default act/unsup
1005 trnet-default act/unsup
To configuration like this .....
vlan 2
name AAAAA
vlan 3
name BBBBB
vlan 4
name CCCCC
vlan 5
name DDDDD
vlan 6
name EEEEE
vlan 7
name FFFFF
"""
active_vlans = vlans.find_objects("active")
for i in active_vlans:
if not " ".join(i.text.split()[0:1]) == "1":
print("vlan", " ".join(i.text.split()[0:1]))
print(" name"," ".join(i.text.split()[1:2]))
extract_vlan(ccp("show_vlan.txt"))
| mit | Python |
|
08d66a82ea47832654aa17f0323df6ce57691fcb | add setup.py | expertanalytics/fagkveld | verdenskart/setup.py | verdenskart/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="bokeh-worldmap",
version="0.1.0",
packages=find_packages("src"),
package_data={},
package_dir={"": "src"},
entry_points={"console_scripts": []},
)
| bsd-2-clause | Python |
|
d33bd223ec35712d0aa9e4ab3da83a19cf1a1120 | Create httpclient.py | hoogles/CMPUT404-assignment-web-client | httpclient.py | httpclient.py | #!/usr/bin/env python
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib
def help():
print "httpclient.py [GET/POST] [URL]\n"
class HTTPRequest(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
def get_host_port(self,url):
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
hostname = url.split('://')[1]
url2 = hostname.split('/')[0]
host = socket.gethostbyname(url2)
return (host,80)
# creates a socket connected to host via port
# REMEMBER TO CLOSE THE SOCKETS WHEN YOU USE THEM
def connect(self, host, port):
# use sockets!
# sew the sock
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
# put on the sock
sock.connect((host,port))
print 'connected to ' + host, port
return sock
def get_code(self, data):
return data.split()[1]
def get_headers(self,data):
        # headers are everything before the first blank line
        return data.split('\r\n\r\n')[0]
def get_body(self, data):
return data.split('\r\n\r\n')[-1]
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
try:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
except:
return str(buffer)
return str(buffer)
# Perform an HTTP GET request
def GET(self, url, args=None):
code = 200
(http, uri) = re.split('://',url)
target = ""
hostname = ""
try:
hostname = uri.split('/')[0]
            target = uri.split('/', 1)[1]
except:
hostname = uri
target = ""
body = "GET /"+target+" HTTP/1.1 \r\nHost: "+hostname+" \r\n\r\n"
host = ""
port = 80
try:
(host,port) = self.get_host_port(url)
sock = self.connect(host,port)
sock.sendall(body)
buff = self.recvall(sock)
code = self.get_code(buff)
body = self.get_body(buff)
if len(buff) == 0:
code = 404
sock.close()
except:
code = 404
return HTTPRequest(code, body)
# Perform an HTTP POST request
def POST(self, url, args=None):
code = 200
(http, uri) = re.split('://',url)
target = ""
hostname = ""
try:
hostname = uri.split('/')[0]
            target = uri.split('/', 1)[1]
except:
hostname = uri
target = ""
body = "POST "+ target +" / HTTP/1.1 \r\n content-type:application/x-www-form-urlencoded;charset=utf-8 \r\n Host: www."+hostname+" \r\n "
try:
query = re.split('\?', target)
query = query[1]
            body += "Content-Length: " + str(len(query)) + "\r\n\r\n" + query
except:
body += "\r\n"
#sock_host = ""
host = ""
port = 80
try:
(host,port) = self.get_host_port(url)
sock = self.connect(host,port)
sock.sendall(body)
buff = self.recvall(sock)
code = self.get_code(buff)
body = self.get_body(buff)
if len(buff) == 0:
code = 404
sock.close()
except:
code = 404
return HTTPRequest(code, body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
        print client.command( sys.argv[2], sys.argv[1] )
else:
        print client.command( sys.argv[1], command )
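# Illustrative runs (URLs are placeholders):
#   python httpclient.py http://example.com/          # defaults to GET
#   python httpclient.py POST http://example.com/form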
| apache-2.0 | Python |
|
5bb7d25765655f83c42b5e7abc1093f7f85f7950 | bump version to 0.8.16 | MycroftAI/mycroft-core,aatchison/mycroft-core,MycroftAI/mycroft-core,Dark5ide/mycroft-core,linuxipho/mycroft-core,Dark5ide/mycroft-core,linuxipho/mycroft-core,forslund/mycroft-core,aatchison/mycroft-core,forslund/mycroft-core | mycroft/version/__init__.py | mycroft/version/__init__.py | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
from genericpath import exists, isfile
from mycroft.util.log import getLogger
__author__ = 'augustnmonteiro'
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 16
# END_VERSION_BLOCK
CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
str(CORE_VERSION_MINOR) + "." +
str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)
class VersionManager(object):
__location = "/opt/mycroft/version.json"
@staticmethod
def get():
if (exists(VersionManager.__location) and
isfile(VersionManager.__location)):
try:
with open(VersionManager.__location) as f:
return json.load(f)
except:
LOG.error("Failed to load version from '%s'"
% VersionManager.__location)
return {"coreVersion": None, "enclosureVersion": None}
| # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
from genericpath import exists, isfile
from mycroft.util.log import getLogger
__author__ = 'augustnmonteiro'
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 15
# END_VERSION_BLOCK
CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
str(CORE_VERSION_MINOR) + "." +
str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)
class VersionManager(object):
__location = "/opt/mycroft/version.json"
@staticmethod
def get():
if (exists(VersionManager.__location) and
isfile(VersionManager.__location)):
try:
with open(VersionManager.__location) as f:
return json.load(f)
except:
LOG.error("Failed to load version from '%s'"
% VersionManager.__location)
return {"coreVersion": None, "enclosureVersion": None}
| apache-2.0 | Python |
32a1781bb5ba4f143e5910fbd841ca6aeeebc8fe | Add test script for color histogram matcher | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_2015_05_baxter_apc/node_scripts/test_color_histogram_matcher.py | jsk_2015_05_baxter_apc/node_scripts/test_color_histogram_matcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""This script is to test color histogram & its matcher
Usage
-----
$ # to extract color histogram
$ roslaunch jsk_2014_picking_challenge extract_color_histogram.launch
input_image:=/test_color_histogram/train_image
$ rosrun jsk_2014_picking_challenge test_color_histogram.py --extract
$ # to test color histogram matcher
$ roslaunch jsk_2014_picking_challenge \
test_color_histogram_matching.launch
$ rosrun jsk_2014_picking_challenge test_color_histogram.py --test
"""
from __future__ import division
import os
import argparse
import numpy as np
import rospy
from jsk_2014_picking_challenge.srv import ObjectMatch, StringEmpty
from extract_color_histogram import ExtractColorHistogram
from matcher_common import listdir_for_img
from test_object_matching import TestObjectMatching
def get_nations():
data_dir = os.path.join(os.path.dirname(__file__),
'../data/national_flags')
return os.listdir(data_dir)
def get_data_dirs():
data_dir = os.path.join(os.path.dirname(__file__),
'../data/national_flags')
for nation in get_nations():
yield os.path.join(data_dir, nation)
def prepare_train_data():
for data_dir in get_data_dirs():
nation_nm = os.path.basename(data_dir)
raw_paths = map(lambda x: os.path.join(data_dir, x),
listdir_for_img(data_dir))
for color in ['red', 'blue', 'green']:
extractor = ExtractColorHistogram(object_nm=nation_nm,
color=color, raw_paths=raw_paths)
extractor.extract_and_save()
def test():
client_of_matcher = rospy.ServiceProxy('/semi/color_histogram_matcher',
ObjectMatch)
client_of_img = rospy.ServiceProxy('/image_publish_server', StringEmpty)
nations = np.array(get_nations())
for i, target_obj in enumerate(nations):
# request to publish image
rospy.loginfo('target: {}'.format(target_obj))
imgpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../data/national_flags/{0}/{0}.png'.format(target_obj))
client_of_img(string=imgpath)
rospy.sleep(3)
# request to object matcher
probs = client_of_matcher(objects=nations).probabilities
probs = np.array(probs)
rospy.loginfo('correct?: {}'.format(probs.argmax() == i))
rospy.loginfo('similar: {}'.format(
nations[probs.argsort()][::-1][:3]))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--extract', action='store_true',
help='flag to extract color histogram')
parser.add_argument('-t', '--test', action='store_true',
help='flag to test color histogram matcher')
args = parser.parse_args(rospy.myargv()[1:])
flags = dict(args._get_kwargs()).values()
if not any(flags) or all(flags):
print('either -e or -t should be set (both is not allowed)')
parser.print_help()
parser.exit()
return args
def main():
args = parse_args()
if args.extract:
prepare_train_data()
elif args.test:
test()
else:
rospy.logerr('Unknown args')
if __name__ == '__main__':
rospy.init_node('test_color_histogram_matcher')
main()
| bsd-3-clause | Python |
|
3088fcd2d42b4e59601c103cc01cec1d949f6f57 | Improve OldPersian | lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public | ielex/lexicon/migrations/0093_fix_oldPersian.py | ielex/lexicon/migrations/0093_fix_oldPersian.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
'''
OldPersian doesn't have lexemes for some meanings.
This migration generates them.
'''
# Models to work with:
Language = apps.get_model('lexicon', 'Language')
MeaningList = apps.get_model('lexicon', 'MeaningList')
Lexeme = apps.get_model('lexicon', 'Lexeme')
# Data to work with:
target = Language.objects.get(ascii_name='OldPersian')
# Mapping meaning.id -> Lexeme
mIdLexemeMap = {}
for l in Lexeme.objects.filter(language=target).all():
mIdLexemeMap[l.meaning_id] = l
# Searching for missing lexemes:
mList = MeaningList.objects.get(name='Jena200')
for m in mList.meanings.all():
if m.id not in mIdLexemeMap:
Lexeme.objects.create(
meaning=m,
language=target)
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0092_set_cjc_reliabilities_high')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| bsd-2-clause | Python |
|
6516b73210a575376bc78005ae28c0e843303b24 | add theano how-to-perform | rianrajagede/iris-python,rianrajagede/simplesamplecode | Theano/how-to-perform-stencil-computations-element-wise-on-a-matrix-in-theano.py | Theano/how-to-perform-stencil-computations-element-wise-on-a-matrix-in-theano.py | import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
# original image 3D (3x3x4) (RGB Channel, height, width)
img = [[[1, 2, 3, 4],
[1, 1, 3, 1],
[1, 3, 1, 1]],
[[2, 2, 3, 4],
[2, 2, 3, 2],
[2, 3, 2, 2]],
[[3, 2, 3, 4],
[3, 3, 3, 3],
[3, 3, 3, 3]]]
# separate and reshape each channel to 4D
# separated because convolution works on each channel only
R = np.asarray([[img[0]]], dtype='float32')
G = np.asarray([[img[1]]], dtype='float32')
B = np.asarray([[img[2]]], dtype='float32')
# 4D kernel from the original : [1,0,1]
# the row [1,0,1] is laid out as a 3x1 column so conv2d applies it along that axis
kernel = np.asarray([[[[1],[0],[1]]]], dtype='float32')
# theano convolution
t_img = T.ftensor4("t_img")
t_kernel = T.ftensor4("t_kernel")
result = conv2d(
input = t_img,
filters=t_kernel,
filter_shape=(1,1,1,3),
border_mode = 'half')
f = theano.function([t_img,t_kernel],result)
# compute each channel
R = f(R,kernel)
G = f(G,kernel)
B = f(B,kernel)
# merge and reshape again
img = np.asarray([R,G,B])
img = np.reshape(img,(3,3,4))
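# With 'half' (same) padding, each output pixel is the sum of its two
# neighbours along the kernel's 3-tap axis (zeros beyond the border),
# computed independently per channel.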
print img | mit | Python |
|
a99f0678815c2e998c25a0aaf9f2c79ad0d18610 | Add package 'ui' | AntumDeluge/desktop_recorder,AntumDeluge/desktop_recorder | source/ui/__init__.py | source/ui/__init__.py | # -*- coding: utf-8 -*-
## \package ui
# MIT licensing
# See: LICENSE.txt
| mit | Python |
|
00b995719aaf11c2d7c3126e29b94b74f0edf8d2 | add test | brianjgeiger/osf.io,adlius/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,pattisdr/osf.io,saradbowman/osf.io,felliott/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,caseyrollins/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,mattclark/osf.io,felliott/osf.io,saradbowman/osf.io,HalcyonChimera/osf.io,adlius/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,baylee-d/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,caseyrollins/osf.io,adlius/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,pattisdr/osf.io,cslzchen/osf.io,mfraezz/osf.io,cslzchen/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,felliott/osf.io,felliott/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,aaxelb/osf.io | osf_tests/test_downloads_summary.py | osf_tests/test_downloads_summary.py | # encoding: utf-8
import mock
import pytest
import pytz
import datetime
from django.utils import timezone
from addons.osfstorage import utils
from addons.osfstorage.tests.utils import StorageTestCase
from osf_tests.factories import ProjectFactory
from scripts.analytics.download_count_summary import DownloadCountSummary
@pytest.mark.django_db
class TestDownloadCount(StorageTestCase):
def test_download_count(self):
# Keen does not allow same day requests so we have to do some time traveling to my birthday
timezone.now = mock.Mock(return_value=datetime.datetime(1991, 9, 25).replace(tzinfo=pytz.utc))
node = ProjectFactory()
utils.update_analytics(node, 'fake id', {'contributors': node.contributors})
# Now back to the future, querying old date.
timezone.now = mock.Mock(return_value=datetime.datetime.now().replace(tzinfo=pytz.utc))
query_date = datetime.date(1991, 9, 25)
event = DownloadCountSummary().get_events(query_date)
assert event[0]['files']['total'] == 1
| apache-2.0 | Python |