commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
49712dd43a2376c913e66cac7b52fc7247912e44 | Make disable_builtins schema a property | tobinjt/Flexget,spencerjanssen/Flexget,JorisDeRieck/Flexget,v17al/Flexget,oxc/Flexget,jacobmetrick/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,crawln45/Flexget,Danfocus/Flexget,Danfocus/Flexget,offbyone/Flexget,qk4l/Flexget,cvium/Flexget,qvazzler/Flexget,jawilson/Flexget,ZefQ/Flexget,ibrahimkarahan/Flexget,lildadou/Flexget,drwyrm/Flexget,vfrc2/Flexget,camon/Flexget,xfouloux/Flexget,Pretagonist/Flexget,jawilson/Flexget,malkavi/Flexget,v17al/Flexget,poulpito/Flexget,voriux/Flexget,cvium/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,asm0dey/Flexget,v17al/Flexget,tvcsantos/Flexget,LynxyssCZ/Flexget,Pretagonist/Flexget,Flexget/Flexget,ianstalk/Flexget,ZefQ/Flexget,patsissons/Flexget,offbyone/Flexget,ianstalk/Flexget,antivirtel/Flexget,OmgOhnoes/Flexget,tobinjt/Flexget,crawln45/Flexget,grrr2/Flexget,gazpachoking/Flexget,patsissons/Flexget,ratoaq2/Flexget,tarzasai/Flexget,Pretagonist/Flexget,tobinjt/Flexget,thalamus/Flexget,dsemi/Flexget,X-dark/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,ibrahimkarahan/Flexget,tvcsantos/Flexget,jacobmetrick/Flexget,poulpito/Flexget,ZefQ/Flexget,camon/Flexget,voriux/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,sean797/Flexget,tarzasai/Flexget,grrr2/Flexget,lildadou/Flexget,xfouloux/Flexget,qvazzler/Flexget,dsemi/Flexget,X-dark/Flexget,asm0dey/Flexget,poulpito/Flexget,jawilson/Flexget,malkavi/Flexget,drwyrm/Flexget,sean797/Flexget,drwyrm/Flexget,qvazzler/Flexget,tsnoam/Flexget,tarzasai/Flexget,vfrc2/Flexget,asm0dey/Flexget,spencerjanssen/Flexget,oxc/Flexget,tsnoam/Flexget,X-dark/Flexget,LynxyssCZ/Flexget,ratoaq2/Flexget,ibrahimkarahan/Flexget,OmgOhnoes/Flexget,dsemi/Flexget,sean797/Flexget,Flexget/Flexget,antivirtel/Flexget,tsnoam/Flexget,tobinjt/Flexget,oxc/Flexget,malkavi/Flexget,ratoaq2/Flexget,antivirtel/Flexget,qk4l/Flexget,qk4l/Flexget,patsissons/Flexget,OmgOhnoes/Flexget,jawilson/Flexget,lildadou/Flexget,vfrc2/Flexget,offbyone/Flexget,cr
awln45/Flexget,thalamus/Flexget,ianstalk/Flexget,JorisDeRieck/Flexget,cvium/Flexget,xfouloux/Flexget,Flexget/Flexget,spencerjanssen/Flexget,gazpachoking/Flexget,grrr2/Flexget,thalamus/Flexget,jacobmetrick/Flexget | flexget/plugins/operate/disable_builtins.py | flexget/plugins/operate/disable_builtins.py | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
    """Yield every registered plugin whose ``builtin`` flag is currently set."""
    for candidate in plugins.itervalues():
        if candidate.builtin:
            yield candidate
class PluginDisableBuiltins(object):
    """Disables all (or specific) builtin plugins from a task."""

    def __init__(self):
        # on_task_start may not have run yet, so start with a clean slate
        self.disabled = []

    # TODO: If this plugin is loaded before any builtin plugins, they are not
    # allowed in the schema. Plugin loading needs to defer schema access until
    # all plugins are loaded.
    @property
    def schema(self):
        builtin_names = [p.name for p in all_builtins()]
        return {
            'oneOf': [
                {'type': 'boolean'},
                {'type': 'array', 'items': {'type': 'string', 'enum': builtin_names}}
            ]
        }

    def debug(self):
        log.debug('Builtin plugins: %s' % ', '.join(p.name for p in all_builtins()))

    @priority(255)
    def on_task_start(self, task, config):
        # Reset first: instance state cannot be trusted across tasks.
        self.disabled = []
        if not config:
            return
        for builtin_plugin in all_builtins():
            # config is either True (disable everything) or a list of names.
            if config is True or builtin_plugin.name in config:
                builtin_plugin.builtin = False
                self.disabled.append(builtin_plugin.name)
        log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))

    @priority(-255)
    def on_task_exit(self, task, config):
        if not self.disabled:
            return
        # Restore the builtin flag on everything we turned off.
        for name in self.disabled:
            plugin.plugins[name].builtin = True
        log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
        self.disabled = []

    # Re-enable builtins even when the task aborts.
    on_task_abort = on_task_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
    """Helper function to return an iterator over all builtin plugins."""
    # Lazy generator over the global plugin registry, builtin plugins only.
    return (plugin for plugin in plugins.itervalues() if plugin.builtin)
class PluginDisableBuiltins(object):
    """Disables all (or specific) builtin plugins from a task."""

    def __init__(self):
        # cannot trust that on_task_start would have been executed
        self.disabled = []

    # TODO: Shit, how was this ever working? If this plugin is loaded before any builtin plugins, they are not allowed
    # in the schema.
    # NOTE(review): this class attribute is evaluated once at import time, so
    # the enum only lists builtins registered before this module loaded.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}}
        ]
    }

    def debug(self):
        # Log the names of all currently-registered builtin plugins.
        log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))

    @priority(255)
    def on_task_start(self, task, config):
        # Reset; instance state cannot be trusted across tasks.
        self.disabled = []
        if not config:
            return
        # config is either True (disable all builtins) or a list of names.
        for plugin in all_builtins():
            if config is True or plugin.name in config:
                plugin.builtin = False
                self.disabled.append(plugin.name)
        log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))

    @priority(-255)
    def on_task_exit(self, task, config):
        if not self.disabled:
            return
        # Restore the builtin flag on every plugin disabled at task start.
        for name in self.disabled:
            plugin.plugins[name].builtin = True
        log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
        self.disabled = []

    # Restore builtins even when the task aborts.
    on_task_abort = on_task_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
| mit | Python |
790be842b1c2e752210d5328dad05acb05d337bb | add minimal test for serial.theaded | DavidHowlett/pyserial-1 | test/test_threaded.py | test/test_threaded.py | #!/usr/bin/env python
#
# This file is part of pySerial - Cross platform serial port support for Python
# (C) 2016 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Test serial.threaded related functionality.
"""
import os
import unittest
import serial
import serial.threaded
import time
# on which port should the tests be performed:
PORT = 'loop://'
class Test_asyncio(unittest.TestCase):
    """Test asyncio related functionality"""
    # NOTE(review): despite the name, this exercises serial.threaded
    # (ReaderThread/LineReader), not asyncio -- consider renaming.

    def test_line_reader(self):
        """simple test of line reader class"""
        class TestLines(serial.threaded.LineReader):
            # Collect each decoded line so the test can inspect them later.
            def __init__(self):
                super(TestLines, self).__init__()
                self.received_lines = []

            def handle_line(self, data):
                self.received_lines.append(data)

        # PORT defaults to 'loop://', which echoes written data back.
        ser = serial.serial_for_url(PORT, baudrate=115200, timeout=1)
        with serial.threaded.ReaderThread(ser, TestLines) as protocol:
            protocol.write_line('hello')
            # Give the background reader thread time to process the echo.
            time.sleep(1)
            self.assertEqual(protocol.received_lines, ['hello'])
if __name__ == '__main__':
    import sys
    sys.stdout.write(__doc__)
    # An optional first argument overrides the default (loopback) port.
    if len(sys.argv) > 1:
        PORT = sys.argv[1]
        sys.stdout.write("Testing port: {!r}\n".format(PORT))
        # Replace the consumed argument so unittest only sees '-v'.
        sys.argv[1:] = ['-v']
    # When this module is executed from the command-line, it runs all its tests
    unittest.main()
| bsd-3-clause | Python |
|
70683aabe3cebda02db62fc254b7ec7532a50618 | Add test_config.sample.py. | previa/dictsheet | test_config.sample.py | test_config.sample.py | CREDENTIAL_FILE = ''
SHEET_NAME = ''
| mit | Python |
|
499adce8b5c23d60073d4c92259e611609ee0c61 | Add initial draft script to analyse Maven deps | uvsmtid/common-salt-states,uvsmtid/common-salt-states,uvsmtid/common-salt-states,uvsmtid/common-salt-states | states/common/maven/artifacts/check_dependencies.py | states/common/maven/artifacts/check_dependencies.py | #!/usr/bin/env python
import subprocess as sub
import yaml
import re
distrib_pom_path = '/home/uvsmtid/Works/maritime-singapore.git/clearsea-distribution/pom.xml'
# Resolve (download) all dependencies locally so that next command
# can work offline.
sub.check_call(
[
'mvn',
'-f',
distrib_pom_path,
'dependency:resolve',
],
)
# Get list of all dependencies.
p = sub.Popen(
[
'mvn',
'-f',
distrib_pom_path,
'dependency:list',
],
stdout = sub.PIPE,
)
# Select lines with dependency items.
artifact_regex = re.compile(')
for line in p.stdout:
| apache-2.0 | Python |
|
1a065a251c3337ae7741af1916c51f2edcb9180f | add db.py | shuchangwen/awesome-python-webapp,shuchangwen/awesome-python-webapp | www/transwarp/db.py | www/transwarp/db.py | #/usr/bin/python
#_*_ coding:utf-8 _*_
import threading
class _Engine(object):
"""
数据库引擎对象
"""
def __init__(self, connect):
self._connect = connect
def connect(self):
return self._connect()
engine = None
class _DbCtx(threading.local):
"""
持有数据库连接的上下文对象
"""
def __init__(self):
self.connection = None
self.transactions = 0
def is_init(self):
return not self.connection is None
def init(self):
self.connection = _LasyConnection()
self.transactions = 0
def cleanup(self):
self.connection.cleanup()
self.connection = None
def cursor(self):
return self.connection.cursor()
_db_ctx = _DbCtx()
class _ConnectionCtx(object):
    """Context manager guaranteeing a database connection is available.

    On entry it initialises the thread-local connection if one does not exist
    yet, and on exit it only releases what it created -- so nested
    ``_ConnectionCtx`` blocks share the outermost connection.
    """

    def __enter__(self):
        global _db_ctx
        self.should_cleanup = False
        if not _db_ctx.is_init():
            # BUG FIX: this previously called _db_ctx.is_init() again (a pure
            # predicate), so the connection was never actually initialised.
            _db_ctx.init()
            self.should_cleanup = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        global _db_ctx
        # Only release the connection if this block was the one that opened it.
        if self.should_cleanup:
            _db_ctx.cleanup()
def connection():
    """Return a context manager ensuring a DB connection for this thread."""
    return _ConnectionCtx()
| apache-2.0 | Python |
|
fed635826be361c4748f13bca09ed411c59ca352 | Add test for OpenStackServer API to increase test coverage | open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,open-craft/opencraft | instance/tests/api/test_openstack_server.py | instance/tests/api/test_openstack_server.py | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2016 OpenCraft <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Views - Tests
"""
# Imports #####################################################################
import ddt
from django.conf import settings
from rest_framework import status
from instance.tests.api.base import APITestCase
from instance.tests.models.factories.server import OpenStackServerFactory
# Tests #######################################################################
@ddt.ddt  # NOTE(review): no @ddt.data-driven tests yet; decorator is unused
class OpenStackServerAPITestCase(APITestCase):
    """
    Test cases for OpenStackServer API calls
    """
    def test_get_unauthenticated(self):
        """
        GET - Require to be authenticated
        """
        response = self.api_client.get('/api/v1/openstackserver/')
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(response.data, {"detail": "Authentication credentials were not provided."})

    def test_get_authenticated(self):
        """
        GET - Authenticated access
        """
        self.api_client.login(username='user1', password='pass')
        # Empty list before any server exists.
        response = self.api_client.get('/api/v1/openstackserver/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, [])
        # A newly created server must appear in the list response.
        server = OpenStackServerFactory()
        response = self.api_client.get('/api/v1/openstackserver/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.check_serialized_server(response.data[0], server)

    def test_get_details(self):
        """
        GET - Detailed attributes
        """
        self.api_client.login(username='user3', password='pass')
        test_openstack_id = 'test-openstack-id'
        server = OpenStackServerFactory(openstack_id=test_openstack_id)
        response = self.api_client.get('/api/v1/openstackserver/{pk}/'.format(pk=server.id))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.check_serialized_server(response.data, server)
        self.assertEqual(response.data['openstack_id'], test_openstack_id)

    def check_serialized_server(self, data, server):
        """
        Assert that the server data is what we expect
        """
        self.assertEqual(data['id'], server.id)
        self.assertEqual(
            data['api_url'],
            'http://testserver/api/v1/openstackserver/{pk}/'.format(pk=server.id)
        )
        self.assertEqual(data['name'], server.name)
        self.assertEqual(data['openstack_region'], settings.OPENSTACK_REGION)
        # Fields whose values vary per-run: only presence is checked.
        self.assertIn('created', data)
        self.assertIn('modified', data)
        self.assertIn('openstack_id', data)
        self.assertIn('public_ip', data)
        self.assertIn('status', data)
|
6fd75772efac321517a1d8c01addfa5cbbf7caf0 | Add test file for user functions. | IvanMalison/okcupyd,okuser/okcupyd,IvanMalison/okcupyd,okuser/okcupyd | tests/db/user_test.py | tests/db/user_test.py | from okcupyd.db import user
def test_have_messaged_before(T):
message_thread_model = T.factory.message_thread()
assert user.have_messaged_by_username(
message_thread_model.initiator.handle,
message_thread_model.respondent.handle
)
assert user.have_messaged_by_username(
message_thread_model.respondent.handle,
message_thread_model.initiator.handle
)
assert not user.have_messaged_by_username('a', 'b')
assert not user.have_messaged_by_username(
message_thread_model.respondent.handle, 'a'
)
T.factory.user('b')
assert not user.have_messaged_by_username(
'b', message_thread_model.initiator.handle
)
| mit | Python |
|
6d2480c5817a8ba7a4a810378ce8fabe0ede3cbf | check YAML examples | vpsfreecz/brutus,vpsfreecz/brutus,vpsfreecz/brutus | tests/testexamples.py | tests/testexamples.py | #!/usr/bin/python
import os
import yaml
def test_examples():
for filename in os.listdir("examples/"):
with open(os.path.join("examples", filename)) as stream:
print(yaml.load(stream))
| bsd-2-clause | Python |
|
a9bb7c7c929b0e182160a700e0a3f23dc3e81765 | Update and rename exercises to exercises/12.py | krzyszti/my_projects,krzyszti/my_projects,krzyszti/my_projects,krzyszti/my_projects | exercises/12.py | exercises/12.py | '''
Define a procedure histogram() that takes a list of
integers and prints a histogram to the screen.
For example, histogram([4, 9, 7]) should print the following:
****
*********
*******
'''
def histogram(lst):
    """Print one line of '*' characters for each integer in ``lst``."""
    for count in lst:
        print('*' * count)
| mit | Python |
|
a0d2e58a6eecf3427646f311e638c359706e806a | Add Energenie example code | RPi-Distro/python-energenie,rjw57/energenie,bennuttall/energenie | energenie.py | energenie.py | import RPi.GPIO as GPIO
from time import sleep

# BOARD-numbered GPIO pins driving the four code/data lines.
bit1 = 11
bit2 = 15
bit3 = 16
bit4 = 13

GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(bit1, GPIO.OUT)
GPIO.setup(bit2, GPIO.OUT)
GPIO.setup(bit3, GPIO.OUT)
GPIO.setup(bit4, GPIO.OUT)
# NOTE(review): pins 18 and 22 are presumably the modulation-select and
# transmit-enable lines of the Energenie add-on board -- confirm against
# the board documentation.
GPIO.setup(18, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)

# Start with the transmitter disabled and all data lines low.
GPIO.output(22, False)
GPIO.output(18, False)
GPIO.output(bit1, False)
GPIO.output(bit2, False)
GPIO.output(bit3, False)
GPIO.output(bit4, False)

# 4-bit codes per socket.
# NOTE(review): index-to-socket mapping is not documented here -- confirm
# which entry targets which socket (and "all sockets").
on = ['1011', '0111', '0110', '0101', '0100']
off = ['0011', '1111', '1110', '1101', '1100']
def change_plug_state(socket, on_or_off):
    """Transmit the 4-bit code for ``socket`` from the given code table.

    ``on_or_off`` is one of the module-level ``on``/``off`` lists; each entry
    is a 4-character bit string, read here from its rightmost character.
    """
    # Drive the four data pins from the bit string (last char -> bit1, etc.).
    state = on_or_off[socket][-1] == '1'
    GPIO.output(bit1, state)
    state = on_or_off[socket][-2] == '1'
    GPIO.output(bit2, state)
    state = on_or_off[socket][-3] == '1'
    GPIO.output(bit3, state)
    state = on_or_off[socket][-4] == '1'
    GPIO.output(bit4, state)
    # Let the data lines settle, then pulse pin 22 to transmit.
    sleep(0.1)
    GPIO.output(22, True)
    sleep(0.25)
    GPIO.output(22, False)

# Interactive demo loop: toggle sockets from the keyboard.
# NOTE(review): raw_input makes this Python 2 only; also 'on' uses index 2
# while 'off' uses index 0 -- looks inconsistent, confirm the target socket.
while True:
    raw_input('Hit any key to turn on: ')
    print('turning on')
    change_plug_state(2, on)
    raw_input('Hit any key to turn off: ')
    print('turning off')
    change_plug_state(0, off)
| bsd-3-clause | Python |
|
8646929a913b77438bf58e48e672ea68492d3ac1 | Mark third_party/accessibility-developer-tools as a known license info issue. | dednal/chromium.src,timopulkkinen/BubbleFish,ondra-novak/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,littlstar/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,ChromiumWebApps/chromium,zcbenz/cefode-chromium,dednal/chromium.src,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,ltilve/chromium,jaruba/chromium.src,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,Just-D/chromium-1,Jonekee/chromium.src,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,dednal/chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk,anirudhSK/chromium,anirudhSK/chromium,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,zcbenz/cefode-chromium,anirudhSK/chro
mium,Jonekee/chromium.src,nacl-webkit/chrome_deps,krieger-od/nwjs_chromium.src,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,krieger-od/nwjs_chromium.src,Chilledheart/chromium,ltilve/chromium,nacl-webkit/chrome_deps,M4sse/chromium.src,patrickm/chromium.src,mogoweb/chromium-crosswalk,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,ChromiumWebApps/chromium,Jonekee/chromium.src,timopulkkinen/BubbleFish,mogoweb/chromium-crosswalk,M4sse/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,littlstar/chromium.src,ChromiumWebApps/chromium,hujiajie/pa-chromium,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,hgl888/chromium-crosswalk,markYoungH/chromium.src,ltilve/chromium,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,Just-D/chromium-1,zcbenz/cefode-chromium,bright-sparks/chromium-spacewalk,patrickm/chromium.src,ChromiumWebApps/chromium,Just-D/chromium-1,anirudhSK/chromium,dushu1203/chromium.src,anirudhSK/chromium,Fireblend/chromium-crosswalk,M4sse/chromium.src,hgl888/chromium-crosswalk,timopulkkinen/Bubbl
eFish,anirudhSK/chromium,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,markYoungH/chromium.src,axinging/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,zcbenz/cefode-chromium,littlstar/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,dushu1203/chromium.src,ltilve/chromium,Just-D/chromium-1,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,jaruba/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,dednal/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,ltilve/chromium,Just-D/chromium-1,jaruba/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,dushu1203/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,chuan9/chromium-crosswalk,M4sse/chromium.src,mogoweb/chromium-crosswalk,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,patrickm/chromium.src,ChromiumWebApps/chromium,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,axinging/chromium-crosswalk,zcbenz/cefode-chromium,ltilve/chromium,fujunwei/chromium-crosswalk,dus
hu1203/chromium.src,jaruba/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,mogoweb/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,timopulkkinen/BubbleFish,Chilledheart/chromium,patrickm/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,hujiajie/pa-chromium,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,hujiajie/pa-chromium,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,hujiajie/pa-chromium,markYoungH/chromium.src,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,anirudhSK/chromium,M4sse/chromium.src,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,anirudhSK/chromium,bright-sparks/chromium-spacewalk,patrickm/chromium.src,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,ChromiumWebApps/chromium,pozdnyakov/
chromium-crosswalk,hgl888/chromium-crosswalk-efl,timopulkkinen/BubbleFish,ondra-novak/chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,anirudhSK/chromium,Chilledheart/chromium,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,Fireblend/chromium-crosswalk,nacl-webkit/chrome_deps,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,hujiajie/pa-chromium,timopulkkinen/BubbleFish,chuan9/chromium-crosswalk | android_webview/tools/known_issues.py | android_webview/tools/known_issues.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""List of known-incompatibly-licensed directories for Android WebView.
This is not used by the webview_licenses tool itself; it is effectively a
"cache" of the output of webview_licenses.GetIncompatibleDirectories() for the
subset of repositories that WebView needs.
We store a copy here because GetIncompatibleDirectories() doesn't work properly
after things have been removed from the tree - it can no longer see the
README.chromium files for previously-removed directories, but they may have
newly added files in them. As long as this list is up to date, we can remove the
things listed first, and then just run the tool afterwards to validate that it
was sufficient. If the tool returns any extra directories then the snapshotting
process will stop and this list must be updated.
"""
# If there is a temporary license-related issue with a particular third_party
# directory, please put it here, with a comment linking to the bug entry.
KNOWN_ISSUES = [
    'third_party/accessibility-developer-tools', # crbug.com/165901
]

# Maps a repository (relative path) to the directories inside it whose
# licenses are incompatible with the Android WebView snapshot.
KNOWN_INCOMPATIBLE = {
    # Incompatible code in the main chromium repository.
    '.': [
        'base/third_party/xdg_mime',
        'breakpad',
        'chrome/installer/mac/third_party/xz',
        'chrome/test/data',
        'third_party/active_doc',
        'third_party/apple_apsl',
        'third_party/apple_sample_code',
        'third_party/bsdiff',
        'third_party/bspatch',
        'third_party/sudden_motion_sensor',
        'third_party/swiftshader',
        'third_party/talloc',
        'third_party/webdriver',
        'third_party/wtl',
        'tools/telemetry/third_party/websocket-client',
    ],
    # Incompatible code in ICU.
    'third_party/icu': [
        'source/data/brkitr',
    ],
}

# Temporary issues are treated like any other incompatible path in the root.
KNOWN_INCOMPATIBLE['.'].extend(KNOWN_ISSUES)
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""List of known-incompatibly-licensed directories for Android WebView.
This is not used by the webview_licenses tool itself; it is effectively a
"cache" of the output of webview_licenses.GetIncompatibleDirectories() for the
subset of repositories that WebView needs.
We store a copy here because GetIncompatibleDirectories() doesn't work properly
after things have been removed from the tree - it can no longer see the
README.chromium files for previously-removed directories, but they may have
newly added files in them. As long as this list is up to date, we can remove the
things listed first, and then just run the tool afterwards to validate that it
was sufficient. If the tool returns any extra directories then the snapshotting
process will stop and this list must be updated.
"""
# If there is a temporary license-related issue with a particular third_party
# directory, please put it here, with a comment linking to the bug entry.
KNOWN_ISSUES = []

# Maps a repository (relative path) to the directories inside it whose
# licenses are incompatible with the Android WebView snapshot.
KNOWN_INCOMPATIBLE = {
    # Incompatible code in the main chromium repository.
    '.': [
        'base/third_party/xdg_mime',
        'breakpad',
        'chrome/installer/mac/third_party/xz',
        'chrome/test/data',
        'third_party/active_doc',
        'third_party/apple_apsl',
        'third_party/apple_sample_code',
        'third_party/bsdiff',
        'third_party/bspatch',
        'third_party/sudden_motion_sensor',
        'third_party/swiftshader',
        'third_party/talloc',
        'third_party/webdriver',
        'third_party/wtl',
        'tools/telemetry/third_party/websocket-client',
    ],
    # Incompatible code in ICU.
    'third_party/icu': [
        'source/data/brkitr',
    ],
}

# Temporary issues are treated like any other incompatible path in the root.
KNOWN_INCOMPATIBLE['.'].extend(KNOWN_ISSUES)
| bsd-3-clause | Python |
a5440305173c218ec785b0d5a2dfa8b02bb0b731 | Add package: py-fava (#21275) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-fava/package.py | var/spack/repos/builtin/packages/py-fava/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFava(PythonPackage):
    """Fava is a web interface for the double-entry bookkeeping software
    Beancount with a focus on features and usability."""

    homepage = "https://beancount.github.io/fava/"
    pypi = "fava/fava-1.18.tar.gz"

    version('1.18', sha256='21336b695708497e6f00cab77135b174c51feb2713b657e0e208282960885bf5')

    # For some reason Fava adds a whole bunch of executables to
    # its bin directory, and this causes clashes when loading
    # the module.
    extends('python', ignore='bin/^(?!fava).*')

    # Some of the dependencies are not listed as required at
    # build or run time, but actually are.
    # - py-setuptools
    # - py-importlib
    # - py-pytest
    # NOTE(review): version pins below presumably mirror fava 1.18's
    # setup.py requirements -- confirm when bumping the package version.
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-setuptools-scm', type=('build'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-cheroot', type=('build', 'run'))
    depends_on('py-click', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-importlib', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-ply', type=('build', 'run'))
    depends_on('py-pytest', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
|
31342e58f914c057404fd35edfff42b95e5fb051 | Test #2 (with the current GitLab API syntax) | egnyte/gitlabform,egnyte/gitlabform | gitlabform/gitlabform/test/test_project_settings.py | gitlabform/gitlabform/test/test_project_settings.py | import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME
PROJECT_NAME = 'project_settings_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
    """Create the test group/project once per module; yield a GitLab client."""
    create_group(GROUP_NAME)
    create_project_in_group(GROUP_NAME, PROJECT_NAME)
    gl = get_gitlab()
    # Clean up the test project after all tests in the module have run.
    def fin():
        gl.delete_project(GROUP_AND_PROJECT_NAME)
    request.addfinalizer(fin)
    return gl  # provide fixture value
config_builds_for_private_projects = """
gitlab:
api_version: 4
project_settings:
project_settings:
builds_access_level: private
visibility: private
"""
class TestProjectSettings:

    def test__builds_for_private_projects(self, gitlab):
        """Applying the config must set both project settings to 'private'."""
        gf = GitLabForm(config_string=config_builds_for_private_projects,
                        project_or_group=GROUP_AND_PROJECT_NAME)
        gf.main()
        settings = gitlab.get_project_settings(GROUP_AND_PROJECT_NAME)
        # BUG FIX: ``is 'private'`` compared object identity, which is not
        # guaranteed for equal strings; compare by value instead.
        assert settings['builds_access_level'] == 'private'
        assert settings['visibility'] == 'private'
| mit | Python |
|
caf135f6c94146038ac9d9e77a808e30ea52f900 | make pyroma a runnable module (#62) | regebro/pyroma | pyroma/__main__.py | pyroma/__main__.py | from . import main
if __name__ == "__main__":
main()
| mit | Python |
|
25a1d94b45980fbc78c162af2c81ad807ee954c9 | add test_vpr.py, add test functions and stubs | heistermann/wradlib,kmuehlbauer/wradlib,kmuehlbauer/wradlib,heistermann/wradlib,wradlib/wradlib,wradlib/wradlib | wradlib/tests/test_vpr.py | wradlib/tests/test_vpr.py | #!/usr/bin/env python
# -------------------------------------------------------------------------------
# Name: test_vpr.py
# Purpose: testing file for the wradlib.vpr module
#
# Authors: wradlib developers
#
# Created: 26.02.2016
# Copyright: (c) wradlib developers
# Licence: The MIT License
# -------------------------------------------------------------------------------
import unittest
import wradlib.vpr as vpr
import wradlib.georef as georef
import numpy as np
class VPRHelperFunctionsTest(unittest.TestCase):
    """Smoke tests and stubs for the helper functions in wradlib.vpr."""

    def setUp(self):
        # Radar site as (lon, lat, alt) and a projection built from EPSG 31467.
        self.site = (7.0, 53.0, 100.)
        self.proj = georef.epsg_to_osr(31467)
        # Full azimuth sweep, 100 km range at 1 km steps, 2.5 deg elevation.
        self.az = np.arange(0., 360., 1.)
        self.r = np.arange(0, 100000, 1000)
        self.el = 2.5
        self.coords = vpr.volcoords_from_polar(self.site, self.el, self.az, self.r, self.proj)

    def test_out_of_range(self):
        # TODO: not yet implemented
        pass

    def test_blindspots(self):
        # TODO: not yet implemented
        pass

    def test_volcoords_from_polar(self):
        # Smoke test only: should run without raising.
        coords = vpr.volcoords_from_polar(self.site, self.el, self.az, self.r, self.proj)
        pass

    def test_volcoords_from_polar_irregular(self):
        # Smoke test with two elevation angles.
        coords = vpr.volcoords_from_polar_irregular(self.site, [self.el, 5.0], self.az, self.r, self.proj)
        pass

    def test_synthetic_polar_volume(self):
        # Smoke test only: should run without raising.
        vol = vpr.synthetic_polar_volume(self.coords)
        pass

    def test_vpr_interpolator(self):
        # TODO: not yet implemented
        pass

    def test_correct_vpr(self):
        # TODO: not yet implemented
        pass

    def test_mean_norm_from_vpr(self):
        # TODO: not yet implemented
        pass

    def test_norm_vpr_stats(self):
        # TODO: not yet implemented
        pass

    def test_make_3D_grid(self):
        # Smoke test: build a Cartesian grid 200 km wide and 5 km high.
        maxrange = 200000.
        maxalt = 5000.
        horiz_res = 2000.
        vert_res = 250.
        vpr.make_3D_grid(self.site, self.proj, maxrange, maxalt, horiz_res, vert_res)
        pass
class CartesianVolumeTest(unittest.TestCase):
    """Stubs for the CAPPI-producing classes in wradlib.vpr."""

    def test_CartesianVolume(self):
        # TODO: not yet implemented
        pass

    def test_CAPPI(self):
        # TODO: not yet implemented
        pass

    def test_PseudoCAPPI(self):
        # TODO: not yet implemented
        pass
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
9ad3b4e6ff5ec500fe1feeb841c4fe00e9267d19 | add sh_quote.py | TristanCavelier/notesntools,TristanCavelier/notesntools,TristanCavelier/notesntools,TristanCavelier/notesntools,TristanCavelier/notesntools | python/sh_quote.py | python/sh_quote.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Tristan Cavelier <[email protected]>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def sh_quote(*params):
    """Quote each argument for a POSIX shell and join them with spaces.

    Embedded single quotes are escaped with the standard '\\'' idiom.
    """
    quoted = ["'%s'" % p.replace("'", "'\\''") for p in params]
    return " ".join(quoted)
### in bash you can do :
# eval -- "$(python sh_quote.py)"
### in python3 you can do :
# import os, sys
# out = os.popen(sh_quote(*['ls', '-1', "my'file;"]))
# out._proc.wait()
# sys.stdout.write(out.read())
######################################################################
# Tests
# prints if failure
def test(a, b):
if a != b:
print(a + " != " + b)
test(sh_quote(*['ls', '-1', "my'file;"]), "'ls' '-1' 'my'\\''file;'")
| mit | Python |
|
8a836c53d85f63831e51e7aac9a2f77fdf25ef9f | Add more indexing tests. | sigma-random/numpy,sonnyhu/numpy,sigma-random/numpy,ekalosak/numpy,mingwpy/numpy,ahaldane/numpy,Eric89GXL/numpy,grlee77/numpy,has2k1/numpy,grlee77/numpy,Yusa95/numpy,mathdd/numpy,cjermain/numpy,BMJHayward/numpy,nguyentu1602/numpy,drasmuss/numpy,trankmichael/numpy,rmcgibbo/numpy,skymanaditya1/numpy,BMJHayward/numpy,charris/numpy,empeeu/numpy,anntzer/numpy,yiakwy/numpy,githubmlai/numpy,pizzathief/numpy,embray/numpy,maniteja123/numpy,behzadnouri/numpy,musically-ut/numpy,andsor/numpy,sigma-random/numpy,cowlicks/numpy,leifdenby/numpy,ogrisel/numpy,bmorris3/numpy,brandon-rhodes/numpy,mathdd/numpy,jakirkham/numpy,mattip/numpy,MaPePeR/numpy,WarrenWeckesser/numpy,shoyer/numpy,jorisvandenbossche/numpy,GrimDerp/numpy,cowlicks/numpy,BabeNovelty/numpy,hainm/numpy,sinhrks/numpy,ContinuumIO/numpy,ogrisel/numpy,has2k1/numpy,ahaldane/numpy,numpy/numpy,mingwpy/numpy,grlee77/numpy,simongibbons/numpy,MSeifert04/numpy,dwillmer/numpy,gmcastil/numpy,madphysicist/numpy,cjermain/numpy,pbrod/numpy,AustereCuriosity/numpy,pyparallel/numpy,mwiebe/numpy,has2k1/numpy,gmcastil/numpy,MaPePeR/numpy,numpy/numpy,MSeifert04/numpy,ESSS/numpy,SiccarPoint/numpy,mathdd/numpy,WarrenWeckesser/numpy,pdebuyl/numpy,tacaswell/numpy,ViralLeadership/numpy,solarjoe/numpy,yiakwy/numpy,NextThought/pypy-numpy,solarjoe/numpy,ChristopherHogan/numpy,tdsmith/numpy,behzadnouri/numpy,simongibbons/numpy,ogrisel/numpy,KaelChen/numpy,CMartelLML/numpy,anntzer/numpy,gfyoung/numpy,seberg/numpy,Yusa95/numpy,jakirkham/numpy,mattip/numpy,moreati/numpy,nbeaver/numpy,ChanderG/numpy,yiakwy/numpy,pbrod/numpy,madphysicist/numpy,mindw/numpy,tynn/numpy,sinhrks/numpy,WillieMaddox/numpy,nbeaver/numpy,dato-code/numpy,bringingheavendown/numpy,ChristopherHogan/numpy,naritta/numpy,nguyentu1602/numpy,endolith/numpy,skymanaditya1/numpy,SunghanKim/numpy,Srisai85/numpy,jschueller/numpy,bertrand-l/numpy,mortada/numpy,pbrod/numpy,tdsmith/numpy,jakirkham/numpy,stuarteberg/numpy,manit
eja123/numpy,Anwesh43/numpy,Srisai85/numpy,MichaelAquilina/numpy,dch312/numpy,ViralLeadership/numpy,jankoslavic/numpy,jorisvandenbossche/numpy,ekalosak/numpy,GrimDerp/numpy,larsmans/numpy,KaelChen/numpy,sinhrks/numpy,felipebetancur/numpy,mingwpy/numpy,seberg/numpy,pyparallel/numpy,charris/numpy,njase/numpy,mattip/numpy,pyparallel/numpy,nguyentu1602/numpy,madphysicist/numpy,ContinuumIO/numpy,leifdenby/numpy,bmorris3/numpy,endolith/numpy,dimasad/numpy,ChanderG/numpy,larsmans/numpy,jonathanunderwood/numpy,Yusa95/numpy,rudimeier/numpy,ddasilva/numpy,ESSS/numpy,mhvk/numpy,ekalosak/numpy,has2k1/numpy,pizzathief/numpy,rudimeier/numpy,NextThought/pypy-numpy,b-carter/numpy,SiccarPoint/numpy,WillieMaddox/numpy,stuarteberg/numpy,mhvk/numpy,WarrenWeckesser/numpy,naritta/numpy,tacaswell/numpy,behzadnouri/numpy,CMartelLML/numpy,skwbc/numpy,gfyoung/numpy,bertrand-l/numpy,skymanaditya1/numpy,andsor/numpy,madphysicist/numpy,trankmichael/numpy,abalkin/numpy,ajdawson/numpy,brandon-rhodes/numpy,pdebuyl/numpy,simongibbons/numpy,hainm/numpy,brandon-rhodes/numpy,rgommers/numpy,dwillmer/numpy,shoyer/numpy,NextThought/pypy-numpy,BMJHayward/numpy,musically-ut/numpy,MaPePeR/numpy,jankoslavic/numpy,pizzathief/numpy,mwiebe/numpy,ESSS/numpy,mortada/numpy,utke1/numpy,rhythmsosad/numpy,sigma-random/numpy,argriffing/numpy,solarjoe/numpy,WarrenWeckesser/numpy,musically-ut/numpy,githubmlai/numpy,chiffa/numpy,stuarteberg/numpy,KaelChen/numpy,ChristopherHogan/numpy,mindw/numpy,dch312/numpy,dch312/numpy,cowlicks/numpy,MSeifert04/numpy,Linkid/numpy,jankoslavic/numpy,githubmlai/numpy,MSeifert04/numpy,rajathkumarmp/numpy,Yusa95/numpy,rmcgibbo/numpy,jschueller/numpy,jschueller/numpy,GaZ3ll3/numpy,rmcgibbo/numpy,andsor/numpy,jakirkham/numpy,mhvk/numpy,embray/numpy,bmorris3/numpy,ogrisel/numpy,kirillzhuravlev/numpy,Dapid/numpy,trankmichael/numpy,argriffing/numpy,immerrr/numpy,naritta/numpy,Linkid/numpy,abalkin/numpy,BabeNovelty/numpy,trankmichael/numpy,tynn/numpy,njase/numpy,mattip/numpy,rmcgibbo/numpy,jakirk
ham/numpy,pbrod/numpy,immerrr/numpy,mhvk/numpy,shoyer/numpy,moreati/numpy,ChristopherHogan/numpy,felipebetancur/numpy,drasmuss/numpy,dwillmer/numpy,stuarteberg/numpy,cjermain/numpy,AustereCuriosity/numpy,pbrod/numpy,simongibbons/numpy,ahaldane/numpy,ChanderG/numpy,Anwesh43/numpy,skymanaditya1/numpy,SunghanKim/numpy,jonathanunderwood/numpy,chiffa/numpy,rherault-insa/numpy,empeeu/numpy,rajathkumarmp/numpy,rajathkumarmp/numpy,rgommers/numpy,Eric89GXL/numpy,charris/numpy,joferkington/numpy,ChanderG/numpy,chatcannon/numpy,embray/numpy,dwillmer/numpy,b-carter/numpy,groutr/numpy,cowlicks/numpy,ajdawson/numpy,BabeNovelty/numpy,naritta/numpy,ekalosak/numpy,rudimeier/numpy,githubmlai/numpy,joferkington/numpy,shoyer/numpy,WillieMaddox/numpy,ssanderson/numpy,dato-code/numpy,shoyer/numpy,tynn/numpy,joferkington/numpy,chatcannon/numpy,dato-code/numpy,rherault-insa/numpy,bmorris3/numpy,mindw/numpy,rherault-insa/numpy,groutr/numpy,SiccarPoint/numpy,felipebetancur/numpy,kiwifb/numpy,njase/numpy,jschueller/numpy,ssanderson/numpy,rgommers/numpy,MaPePeR/numpy,endolith/numpy,pdebuyl/numpy,dch312/numpy,brandon-rhodes/numpy,simongibbons/numpy,drasmuss/numpy,Linkid/numpy,anntzer/numpy,dato-code/numpy,dimasad/numpy,empeeu/numpy,tacaswell/numpy,moreati/numpy,MSeifert04/numpy,ahaldane/numpy,jankoslavic/numpy,charris/numpy,mortada/numpy,ContinuumIO/numpy,sinhrks/numpy,embray/numpy,empeeu/numpy,anntzer/numpy,pizzathief/numpy,mingwpy/numpy,groutr/numpy,numpy/numpy,hainm/numpy,bringingheavendown/numpy,Anwesh43/numpy,Srisai85/numpy,andsor/numpy,mwiebe/numpy,GrimDerp/numpy,CMartelLML/numpy,felipebetancur/numpy,sonnyhu/numpy,kiwifb/numpy,yiakwy/numpy,embray/numpy,SunghanKim/numpy,CMartelLML/numpy,kirillzhuravlev/numpy,chiffa/numpy,seberg/numpy,SunghanKim/numpy,kirillzhuravlev/numpy,MichaelAquilina/numpy,madphysicist/numpy,kirillzhuravlev/numpy,maniteja123/numpy,rajathkumarmp/numpy,leifdenby/numpy,seberg/numpy,larsmans/numpy,numpy/numpy,GaZ3ll3/numpy,bringingheavendown/numpy,utke1/numpy,grlee77/numpy
,ddasilva/numpy,AustereCuriosity/numpy,MichaelAquilina/numpy,ssanderson/numpy,bertrand-l/numpy,pizzathief/numpy,mortada/numpy,ahaldane/numpy,tdsmith/numpy,utke1/numpy,ajdawson/numpy,argriffing/numpy,mindw/numpy,MichaelAquilina/numpy,rudimeier/numpy,musically-ut/numpy,BabeNovelty/numpy,sonnyhu/numpy,jonathanunderwood/numpy,rgommers/numpy,KaelChen/numpy,hainm/numpy,mathdd/numpy,Dapid/numpy,immerrr/numpy,endolith/numpy,NextThought/pypy-numpy,dimasad/numpy,abalkin/numpy,rhythmsosad/numpy,mhvk/numpy,grlee77/numpy,joferkington/numpy,rhythmsosad/numpy,larsmans/numpy,Anwesh43/numpy,gfyoung/numpy,rhythmsosad/numpy,GaZ3ll3/numpy,dimasad/numpy,immerrr/numpy,pdebuyl/numpy,Eric89GXL/numpy,Srisai85/numpy,ajdawson/numpy,WarrenWeckesser/numpy,jorisvandenbossche/numpy,SiccarPoint/numpy,ogrisel/numpy,nguyentu1602/numpy,BMJHayward/numpy,sonnyhu/numpy,Linkid/numpy,kiwifb/numpy,Eric89GXL/numpy,GaZ3ll3/numpy,chatcannon/numpy,tdsmith/numpy,gmcastil/numpy,nbeaver/numpy,GrimDerp/numpy,jorisvandenbossche/numpy,skwbc/numpy,cjermain/numpy,Dapid/numpy,b-carter/numpy,skwbc/numpy,ViralLeadership/numpy,jorisvandenbossche/numpy,ddasilva/numpy | numpy/core/tests/test_indexing.py | numpy/core/tests/test_indexing.py | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import asbytes
from numpy.testing import *
import sys, warnings
# The C implementation of fancy indexing is relatively complicated,
# and has many seeming inconsistencies. It also appears to lack any
# kind of test suite, making any changes to the underlying code difficult
# because of its fragility.
# This file is to remedy the test suite part a little bit,
# but hopefully NumPy indexing can be changed to be more systematic
# at some point in the future.
class TestIndexing(TestCase):
    """Systematic checks of NumPy's basic indexing behaviours.

    Methods prefixed with '_' are deliberately disabled because they
    document behaviour that is known to be buggy or undecided.
    """

    def test_none_index(self):
        # `None` index adds newaxis
        a = np.array([1, 2, 3])
        assert_equal(a[None], a[np.newaxis])
        assert_equal(a[None].ndim, a.ndim + 1)

    def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)

    def _test_empty_list_index(self):
        # Empty list index (is buggy!)
        a = np.array([1, 2, 3])
        assert_equal(a[[]], a)

    def test_empty_array_index(self):
        # Empty array index is illegal
        a = np.array([1, 2, 3])
        b = np.array([])
        assert_raises(IndexError, a.__getitem__, b)

    def test_ellipsis_index(self):
        # Ellipsis index does not create a view
        a = np.array([[1, 2, 3],
                      [4 ,5, 6],
                      [7, 8, 9]])
        assert_equal(a[...], a)
        assert_(a[...] is a)
        # Slicing with ellipsis can skip an
        # arbitrary number of dimensions
        assert_equal(a[0, ...], a[0])
        assert_equal(a[0, ...], a[0, :])
        assert_equal(a[..., 0], a[:, 0])
        # Slicing with ellipsis always results
        # in an array, not a scalar
        assert_equal(a[0, ..., 1], np.array(2))

    def test_single_int_index(self):
        # Single integer index selects one row
        a = np.array([[1, 2, 3],
                      [4 ,5, 6],
                      [7, 8, 9]])
        assert_equal(a[0], [1, 2, 3])
        assert_equal(a[-1], [7, 8, 9])
        # Index out of bounds produces IndexError
        assert_raises(IndexError, a.__getitem__, 1<<30)
        # Index overflow produces ValueError
        assert_raises(ValueError, a.__getitem__, 1<<64)

    def _test_single_bool_index(self):
        # Single boolean index (is buggy?)
        a = np.array([[1, 2, 3],
                      [4 ,5, 6],
                      [7, 8, 9]])
        # Python boolean converts to integer (invalid?)
        assert_equal(a[True], a[1])
        # NumPy zero-dimensional boolean array (*crashes*)
        assert_equal(a[np.array(True)], a) # what should be the behaviour?
        assert_equal(a[np.array(False)], []) # what should be the behaviour?

    def test_boolean_indexing(self):
        # Indexing a 2-dimensional array with a length-1 array of 'True'
        a = np.array([[ 0.,  0.,  0.]])
        b = np.array([ True], dtype=bool)
        assert_equal(a[b], a)
        # Boolean assignment fills the selected rows.
        a[b] = 1.
        assert_equal(a, [[1., 1., 1.]])
if __name__ == "__main__":
run_module_suite()
| from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import asbytes
from numpy.testing import *
import sys, warnings
# The C implementation of fancy indexing is relatively complicated,
# and has many seeming inconsistencies. It also appears to lack any
# kind of test suite, making any changes to the underlying code difficult
# because of its fragility.
# This file is to remedy the test suite part a little bit,
# but hopefully NumPy indexing can be changed to be more systematic
# at some point in the future.
def test_boolean_indexing():
    """A length-1 boolean mask selects and assigns whole rows of a 2-D array."""
    data = np.zeros((1, 3))
    mask = np.ones(1, dtype=bool)
    assert_equal(data[mask], data)
    data[mask] = 1.
    assert_equal(data, [[1., 1., 1.]])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | Python |
2273dfcdb2f311f39e1bffe4f40cdc6e3b753155 | add buildOffsetMap.py | darkf/darkfo,darkf/darkfo,darkf/darkfo,darkf/darkfo,darkf/darkfo | buildOffsetMap.py | buildOffsetMap.py | import sys, os, json
import frm
DATA_PATH = "data"
def main():
    """Read a list of FRM image names and dump per-frame offset data as JSON."""
    if len(sys.argv) != 2:
        print "USAGE: %s IMAGES_LIST" % sys.argv[0]
        sys.exit(1)

    images = list(open(sys.argv[1]))
    imageInfo = {}
    for image in images:
        image = image.rstrip()
        frmPath = os.path.join(DATA_PATH, image + ".FRM")
        # NOTE(review): the file handle passed to readFRMInfo is never closed.
        frmInfo = frm.readFRMInfo(open(frmPath, "rb"))
        sx = 0 # running total width offset
        for direction in frmInfo['frameOffsets']:
            ox = 0 # running total offsets
            oy = 0
            for frame in direction:
                # Accumulate per-frame deltas into absolute offsets.
                ox += frame['x']
                oy += frame['y']
                frame['sx'] = sx
                frame['ox'] = ox
                frame['oy'] = oy
                sx += frame['w']
        imageInfo[image] = frmInfo
    # Emit the combined mapping on stdout.
    print json.dumps(imageInfo)
if __name__ == '__main__':
main() | apache-2.0 | Python |
|
a62b4f70816b831a16973e861449b0c76761cf52 | Create Odd_Even_Linked_List.py | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | data_structures/Linked_list/Python/Odd_Even_Linked_List.py | data_structures/Linked_list/Python/Odd_Even_Linked_List.py | '''
Given the head of a singly linked list, group all the nodes with odd indices together followed by the nodes with even indices, and return the reordered list.
'''
class Solution(object):
    def oddEvenList(self, head):
        """Rearrange a singly linked list so all odd-indexed nodes (1-based)
        come first, followed by the even-indexed nodes, preserving relative
        order within each group. Returns the (possibly unchanged) head."""
        if head is None or head.next is None:
            return head
        odd_tail = head
        even_head = head.next
        even_tail = even_head
        # Detach alternating nodes onto the odd chain; even_tail tracks
        # the tail of the even chain as it shrinks.
        while even_tail is not None and even_tail.next is not None:
            odd_tail.next = even_tail.next
            odd_tail = odd_tail.next
            even_tail.next = odd_tail.next
            even_tail = even_tail.next
        # Append the even chain after the last odd node.
        odd_tail.next = even_head
        return head
'''
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
----------------------
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
'''
| cc0-1.0 | Python |
|
dd0e335574afd936b5849186202aedc8500f2c5b | add build-front | Ircam-Web/mezzanine-organization,Ircam-Web/mezzanine-organization | organization/core/management/commands/build-front.py | organization/core/management/commands/build-front.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, time
import subprocess
from django.apps import apps
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import connections
class Command(BaseCommand):
    help = "Build the front with bower and gulp"

    def handle(self, *args, **options):
        """Locate the active theme app and run its front-end build tools."""
        theme = ""
        theme_path = ""
        for ht in settings.HOST_THEMES:
            # search for theme name in INSTALLED_APPS
            # to get the one that is used
            if ht[1] in settings.INSTALLED_APPS:
                theme = ht[1]
        if theme:
            theme_path = apps.get_app_config(theme.split('.')[1]).path
            os.chdir(theme_path)
            # BUGFIX: '&&' inside an argument list is passed verbatim to
            # bower as an argument (no shell is involved), so gulp never
            # ran. Run the two tools as separate processes instead.
            subprocess.run(["bower", "--allow-root", "install"])
            subprocess.run(["gulp", "build"])
| agpl-3.0 | Python |
|
cc2343a564572e6c0bd94279bf3907e9e85da79b | Create version.py | CarlosA-Lopez/Proyecto_Embebidos_Grupo2 | plotly-1.2.9/plotly/version.py | plotly-1.2.9/plotly/version.py | __version__ = '1.2.9'
| unlicense | Python |
|
65239ce01df89ceaaed989b28f4623ac521ce2c3 | Add download_stats script | ContinuumIO/pypi-conda-builds | download_stats.py | download_stats.py | import argparse
from xmlrpclib import ServerProxy
import pickle

# CLI: -n limits how many packages to query; --package-list reads names
# from a file (one per line) instead of asking PyPI for every package.
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int)
parser.add_argument("--package-list",
                    action="store")
args = parser.parse_args()

url = 'https://pypi.python.org/pypi'
client = ServerProxy(url)

if not args.package_list:
    args.package_list = client.list_packages()
else:
    # 'with' ensures the package list file is closed after reading.
    with open(args.package_list, 'r') as package_file:
        args.package_list = [package.strip() for package in package_file]

if args.n:
    args.package_list = args.package_list[:args.n]

downloads_dict = dict()

for package in args.package_list:
    versions = client.package_releases(package)
    try:
        latest_version = versions[0]
        # Keep the largest of the reported download counts.
        downloads = max(client.release_data(package,
                                            latest_version)['downloads'].values())
        downloads_dict[package] = downloads
    except Exception:
        # BUGFIX: was a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit. Packages with no releases or
        # missing metadata count as 0 downloads.
        downloads_dict[package] = 0

with open('downloads_dict.pkl', 'w') as pkl_file:
    pickle.dump(downloads_dict, pkl_file)
| bsd-3-clause | Python |
|
2b79fd91b43248169e408093454a32367ecb6d61 | Add a solution that creates a separate LP for each frame transition. | karulont/combopt | project7/project7-lp-single.py | project7/project7-lp-single.py | from gurobipy import *
from sys import argv
import json
import math
import drawful
def read_lst(fn):
    """Load (n, frames) from the JSON file *fn* and return them as a tuple."""
    with open(fn, 'r') as handle:
        n, tp = json.load(handle)
    return n, tp
def write_lst(fn, lst):
    """Serialize *lst* as JSON into the file *fn* (overwriting it)."""
    with open(fn, 'w') as handle:
        handle.write(json.dumps(lst))
def distance(v1, v2):
    """Euclidean distance between two 3-D points given as indexables."""
    dx = v2[0] - v1[0]
    dy = v2[1] - v1[1]
    dz = v2[2] - v1[2]
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def distance_squared(v1, v2):
    """Squared Euclidean distance between two 3-D points (no sqrt)."""
    dx = v2[0] - v1[0]
    dy = v2[1] - v1[1]
    dz = v2[2] - v1[2]
    return dx ** 2 + dy ** 2 + dz ** 2
def get_permutation(edges, last_perm, last_frame, frame, n):
    """Translate selected matching edges into a permutation of point indices.

    Entry j of the result is the index (into *frame*) that the point tracked
    at position j of *last_perm* moves to.
    """
    result = [0 for _ in range(n)]
    for src, dst in edges:
        src_idx = last_frame.index(list(src))
        dst_idx = frame.index(list(dst))
        result[last_perm.index(src_idx)] = dst_idx
    return result
def main():
    """Solve the frame-matching problem with one independent LP per
    consecutive pair of frames, write the solution file and draw it."""
    def optimize_single(f):
        # Build and solve a bipartite assignment LP between frame f and f+1.
        m = Model('project7')
        print("Adding variables...")
        edge_vars = {}
        point_edges = {}
        t1, f1 = frames[f]
        t2, f2 = frames[f + 1]
        for i in range(n):
            v1 = tuple(f1[i])
            point_edges[v1] = []
            for j in range(n):
                v2 = tuple(f2[j])
                # Edge cost is the squared distance between the two points.
                cost = distance_squared(v1, v2)
                #if (v1, v2) in edge_vars[f]:
                #    print("Duplicate vertex!")
                #    return
                edge_vars[v1, v2] = m.addVar(obj=cost, vtype=GRB.BINARY)
                point_edges[v1].append(edge_vars[v1, v2])
        m.update()
        print("Adding constraints...")
        '''
        # There must be n edges from one frame to the next
        for frame in edge_vars:
            m.addConstr(quicksum(frame.values()) == n)
        '''
        # There must be one incoming edge per point in the last n-1 frames
        for v2 in frames[f+1][1]:
            v2 = tuple(v2)
            v2_edges = []
            for v1 in frames[f][1]:
                v1 = tuple(v1)
                v2_edges.append(edge_vars[v1,v2])
            m.addConstr(quicksum(v2_edges) == 1)
        # There must be one outgoing edge per point in the first n-1 frames
        for edges in point_edges:
            m.addConstr(quicksum(point_edges[edges]) == 1)
        m.optimize()
        # Collect the edges the solver switched on.
        edges = m.getAttr('x', edge_vars).items()
        selected = []
        for edge, value in edges:
            if value:
                selected.append(edge)
        # Calculate cost
        cost = 0
        for v1, v2 in selected:
            cost += distance(v1, v2)
        print("cost", f, ":", cost)
        return get_permutation(selected, last_perm, frames[f][1], frames[f + 1][1], n)
    # fn = 'data-n2-t3.json'
    # fn = 'example-points.lst'
    fn = 'points-00100-0.lst'
    if len(argv) == 2:
        fn = argv[1]
    n, frames = read_lst(fn)
    orig_frames = [[tuple(u) for u in ss[1]] for ss in frames]
    nf = len(frames) - 1
    print("n:", n)
    print("frames: t0-t" + str(nf))
    # The solution file format is [n, perm_1, perm_2, ...].
    solution = [n]
    last_perm = [i for i in range(n)]
    for f in range(nf):
        last_perm = optimize_single(f)
        solution.append(last_perm)
    # print(solution)
    write_lst(fn + '.sol', solution)
    drawful.drawWithIndices(orig_frames, solution[1], solution[2])
if __name__ == '__main__':
import time
start = time.clock()
main()
end = time.clock()
print("time: {0:.3f} s".format(end - start))
| mit | Python |
|
95a26454173b59c8609ddb81027ed71005e9e86c | add module to handle exceptions | cellnopt/cellnopt,cellnopt/cellnopt | cno/misc/tools.py | cno/misc/tools.py | __all__ = ["CNOError"]
class CNOError(Exception):
    """Generic exception for the cellnopt package, carrying a value/message."""

    def __init__(self, value):
        # keep the value so callers (and __str__) can inspect it
        self.value = value

    def __str__(self):
        # Same rendering as before: the repr of the stored value.
        return "{0!r}".format(self.value)
| bsd-2-clause | Python |
|
bb58564dc400e91c132e3a26532595ec9de73958 | Create managers.py | jleeothon/trufflehog | managers.py | managers.py | from django.db import models
class VisibilityManagerMixin(object):
    """
    This manager should be used with a model that implements the Hideable
    mixin (i.e. a model exposing a nullable ``hidden`` field).
    """
    def __init__(self, *args, **kwargs):
        # visible=True (default) -> only rows where hidden IS NULL;
        # visible=False -> only rows where hidden IS NOT NULL.
        self.visible = kwargs.pop('visible', True)
        super().__init__(*args, **kwargs)

    def get_queryset(self):
        # BUGFIX: the ".filter(...)" previously sat on its own line after
        # the return statement, which is a SyntaxError; it must chain
        # directly onto get_queryset().
        return super().get_queryset().filter(hidden__isnull=self.visible)
class VisibilityManager(VisibilityManagerMixin, models.Manager):
    # Concrete Django manager: visibility filtering from the mixin applied
    # on top of the default Manager behaviour.
    pass
| mit | Python |
|
fbaaf3ba027ee9d18df7b1f48533c8847f084381 | Add harmonic mean estimator. | exord/bayev | harmonicmean.py | harmonicmean.py | import numpy.random
import numpy as np
import lib
from math import log
def compute_harmonicmean(lnlike_post, posterior_sample=None, lnlikefunc=None,
                         lnlikeargs=(), **kwargs):
    """
    Computes the harmonic mean estimate of the marginal likelihood.

    The estimation is based on n posterior samples
    (indexed by s, with s = 0, ..., n-1), but can be done directly if the
    log(likelihood) in this sample is passed.

    :param array lnlike_post:
        log(likelihood) computed over a posterior sample. 1-D array of length n.
        If an empty array is given, then compute from posterior sample.

    :param array posterior_sample:
        A sample from the parameter posterior distribution.
        Dimensions are (n x k), where k is the number of parameters. If None
        the computation is done using the log(likelihood) obtained from the
        posterior sample.

    :param callable lnlikefunc:
        Function to compute ln(likelihood) on the marginal samples.

    :param tuple lnlikeargs:
        Extra arguments passed to the likelihood function.

    Other parameters
    ----------------
    :param int size:
        Size of sample to use for computation. If none is given, use size of
        given array or posterior sample.

    :raises ValueError:
        If *lnlike_post* is empty and no *posterior_sample* is given.

    References
    ----------
    Kass & Raftery (1995), JASA vol. 90, N. 430, pp. 773-795
    """
    if len(lnlike_post) == 0 and posterior_sample is not None:
        samplesize = kwargs.pop('size', len(posterior_sample))

        # Subsample only when strictly fewer elements are requested.
        if samplesize < len(posterior_sample):
            posterior_subsample = numpy.random.choice(posterior_sample,
                                                      size=samplesize,
                                                      replace=False)
        else:
            posterior_subsample = posterior_sample.copy()

        # Compute log likelihood in posterior sample.
        log_likelihood = lnlikefunc(posterior_subsample, *lnlikeargs)

    elif len(lnlike_post) > 0:
        samplesize = kwargs.pop('size', len(lnlike_post))
        log_likelihood = numpy.random.choice(lnlike_post, size=samplesize,
                                             replace=False)
    else:
        # BUGFIX: previously fell through and raised a confusing NameError
        # on 'log_likelihood'; fail explicitly instead.
        raise ValueError('Either lnlike_post or posterior_sample must be '
                         'provided.')

    # Use identity for summation
    # http://en.wikipedia.org/wiki/List_of_logarithmic_identities#Summation.2Fsubtraction
    # ln(sum(x)) = ln(x[0]) + ln(1 + sum( exp( ln(x[1:]) - ln(x[0]) ) ) )
    hme = -lib.log_sum(-log_likelihood) + log(len(log_likelihood))

    return hme
def run_hme_mc(log_likelihood, nmc, samplesize):
    """Repeat the harmonic-mean estimate *nmc* times on random subsamples."""
    estimates = np.zeros(nmc)
    for trial in range(nmc):
        estimates[trial] = compute_harmonicmean(log_likelihood,
                                                size=samplesize)
    return estimates
__author__ = 'Rodrigo F. Diaz' | mit | Python |
|
156a31c7aef3dfc07f5e3b0998b0957308abdd16 | Create getPrice.py | Healdb/CoinGoldBot | src/getPrice.py | src/getPrice.py | import requests
import requests.auth
import time
import json
def getPrices():
print "Grabbing price..."
dogeprice = parsePrices("doge")
btcprice = parsePrices("btc")
ltcprice = parsePrices("ltc")
rddprice = parsePrices("rdd")
obj3 = open('price.txt', 'w')
obj3.write(str(dogeprice) + "\n" + str(btcprice) + '\n' + str(ltcprice) + '\n' + str(rddprice))
obj3.close()
print 'Done'
def parsePrices(currency):
    """Return how many units of *currency* correspond to 1.3 * 4 USD,
    rounded to 7 decimal places, using the coinmarketcap mirror API."""
    response = requests.get('http://coinmarketcap.northpole.ro/api/' + currency + '.json')
    # Cleanup: the old code round-tripped the parsed JSON through
    # json.dumps/json.loads for no effect; use the parsed dict directly.
    decoded = response.json()
    price = float(decoded['price'])
    price = 1.3 * 4 / price
    price = round(price, 7)
    return price
# Refresh prices forever, counting down 2700 seconds (45 min) between runs.
while True:
    getPrices()
    for x in range(2700,-1,-1):
        print x
        # NOTE(review): this increment has no effect; 'x' is rebound by the
        # for loop on the next iteration.
        x+=1
        time.sleep(1)
| mit | Python |
|
7c0bf9e7930773a35da7303284842f74cc7b7744 | Add util script to dump flickr photo stats | JulienLeonard/socialstats | myflickr.py | myflickr.py | #
# use flickrapi to dump stats about pictures in a flickr account
# use time sleep to prevent automatic flickr rejection
#
import sys
import flickrapi
import time
import sys
from basics import *
import xml.etree.ElementTree
#
# method to dump social stats about the flickr user account
# args:
# - api_secret : your flickr api secret
# - api_key : your flickr api key
# - user_id : your flickr user id
# - filepath : the path of the xml file where the data will be dumped into
#
def flickr_dump(api_secret, api_key, user_id, filepath):
    """Dump favourite statistics for every photo of *user_id* into an XML file.

    Sleeps between API calls to prevent automatic flickr rejection.

    :param api_secret: your flickr api secret
    :param api_key: your flickr api key
    :param user_id: the flickr user id whose photos are scanned
    :param filepath: path of the xml file the data is dumped into
    """
    #
    # connect to flickr with flickr api
    #
    flickr = flickrapi.FlickrAPI(api_key, api_secret)
    flickr.web_login_url("read")
    (token, frob) = flickr.get_token_part_one(perms='read')
    if not token: time.sleep(20)
    flickr.get_token_part_two((token, frob))
    #
    # get the photo data: one (title, timestamp, favcount, favedates)
    # tuple per photo
    #
    myphotos = []
    perpage = 10
    pageindex = 1
    rsp = flickr.people_getPhotos(api_key=api_key, user_id=user_id, per_page=perpage, page=pageindex)
    photoss = list(rsp.iter("photos"))[0]
    while int(photoss.attrib['page']) < int(photoss.attrib['pages']):
        puts("page index", pageindex)
        time.sleep(10)
        for photo in list(photoss.iter("photo")):
            time.sleep(1)
            # get the title
            phototitle = photo.attrib['title']
            # get timestamp of the photo
            photoinfo = flickr.photos_getInfo(api_key=api_key, photo_id=photo.attrib['id'])
            photoxml = list(photoinfo.iter("photo"))[0]
            dates = list(photoxml.iter("dates"))[0]
            phototimestamp = dates.attrib['posted']
            # get the list of favorites for the photo
            time.sleep(1)
            favs = flickr.photos_getFavorites(api_key=api_key, photo_id=photo.attrib['id'])
            favxml = list(favs.iter("photo"))[0]
            favcount = favxml.attrib['total']
            favedates = [person.attrib['favedate'] for person in list(favxml.iter("person"))]
            # add data to cache structure
            myphotos.append((phototitle, phototimestamp, favcount, favedates))
            # stdout info
            puts("photo", photo.attrib['id'], phototitle, "favedates", favedates)
        # iter to the next page.
        # BUGFIX: use the user_id argument; a user id was hard-coded here,
        # so pages 2+ were always fetched for the wrong account.
        pageindex += 1
        rsp = flickr.people_getPhotos(api_key=api_key, user_id=user_id, per_page=perpage, page=pageindex)
        photoss = list(rsp.iter("photos"))[0]
    #
    # format the data into xml
    #
    result = "<flickr timestamp=\"" + str(time.time()) + "\">\n"
    for (title, timestamp, total, favdates) in myphotos:
        result += "  <photo title=\"" + title + "\" \t timestamp=\"" + timestamp + "\" \t count=\"" + total + "\" >\n"
        # BUGFIX: iterate this photo's own dates ('favdates'); the old code
        # reused the stale loop variable 'favedates' left over from the
        # collection loop, so every photo got the last photo's dates.
        for favdate in favdates:
            result += "    <favedate timestamp=\"" + favdate + "\"/>\n"
        result += "  </photo>\n"
    result += "</flickr>\n"
    # dump the xml result in a file; 'with' closes it even on error
    with open(filepath, 'w+') as output:
        output.write(result.encode('utf8'))
# flickr_dump("123456789abcdef0","123456789abcdef0123456789abcdef0","12345678@N01","C:/stats/flickr_stats.xml")
| mit | Python |
|
082f11f5a24efd21f05b1d7cc7f1b1f1ab91fb0c | Add exercise 13: couplage | AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas | prologin-2014/13_couplage.py | prologin-2014/13_couplage.py | # http://www.prologin.org/training/challenge/demi2014/couplage
from sys import stdin
nbBowlsFirst = int(stdin.readline())
nbBowlsSecond = int(stdin.readline())
bowlsFirst = [int(x) for x in stdin.readline().split()]
bowlsSecond = [int(x) for x in stdin.readline().split()]
def maxInTwoLists(first, second):
    """Return the largest value present in both lists.

    WARNING: destructively removes, from either input list, any maximum
    larger than the other list's maximum until the two maxima agree.
    Callers (optimize) rely on this mutation.
    """
    while True:
        max_a = max(first)
        max_b = max(second)
        if max_a == max_b:
            return max_a
        if max_a > max_b:
            first.remove(max_a)
        else:
            second.remove(max_b)
def optimize(acc, first, second):
    """Recursively search for the best achievable score.

    *acc* is the score accumulated so far; *first*/*second* are the
    remaining bowl lists. NOTE: maxInTwoLists mutates first/second by
    removing maxima larger than the shared maximum before this function
    indexes into them.
    """
    # If a list is empty, stop here
    if len(first) == 0 or len(second) == 0:
        return acc
    # Try to reach the max value in these lists
    maxValue = maxInTwoLists(first, second)
    # If we have matching bowls before the maxValue, count them
    for i in range(min(first.index(maxValue), second.index(maxValue))):
        if (first[i] == second[i]):
            return optimize(acc + first[i], first[i+1:], second[i+1:])
    # Determine the index of the maxValue in both lists
    firstIndex = first.index(maxValue)
    secondIndex = second.index(maxValue)
    # Maybe it would be better to not reach this maxValue.
    # Delete it from the first list and try that
    firstWithoutMax = list(first)
    firstWithoutMax.remove(maxValue)
    return max(
        # Go straight to the maxValue in both lists and continue with tails
        optimize(acc + maxValue, first[firstIndex+1:], second[secondIndex+1:]),
        # Maybe it would be better to not reach this maximum
        optimize(acc, firstWithoutMax, second)
    )
print optimize(0, bowlsFirst, bowlsSecond) | mit | Python |
|
f8a0aa92c8e19bc11f8a609733644afe0efed5c8 | Update test script to do match testing. | levilucio/SyVOLT,levilucio/SyVOLT | decompose_test.py | decompose_test.py | from util.decompose_graph import decompose_graph
from core.himesis_utils import expand_graph, set_do_pickle, set_compression

# Configure himesis serialization before loading anything.
set_do_pickle(True)
set_compression(6)

# Load one pre-built graph by its stored id.
file_name = "226482067288742734644994685633991185819"
graph = expand_graph(file_name)
print(graph.name)

from core.himesis_utils import load_directory

# Load the contract definitions; only one atomic contract is exercised here.
contracts = load_directory("mbeddr2C_MM/Contracts/")
atomic_contracts = [
    'AssignmentInstance'
]
if_then_contracts = []
prop_if_then_contracts = []

from core.himesis_utils import graph_to_dot, load_directory
from util.test_script_utils import select_rules, get_sub_and_super_classes,\
    load_transformation, changePropertyProverMetamodel, set_supertypes, load_contracts
from util.slicer import Slicer
from util.parser import load_parser

# Derive the class hierarchies from the input/output metamodels.
inputMM = "./mbeddr2C_MM/ecore_metamodels/Module.ecore"
outputMM = "./mbeddr2C_MM/ecore_metamodels/C.ecore"
subclasses_dict, superclasses_dict = get_sub_and_super_classes(inputMM, outputMM)
atomic_contracts, if_then_contracts = load_contracts(contracts, superclasses_dict,
                                                     atomic_contracts, if_then_contracts,
                                                     prop_if_then_contracts,
                                                     False)
# Each loaded contract is a (name, contract) pair; check the first one.
contract =atomic_contracts[0][1]
print(contract)
print(contract.has_pivots())
#graph_to_dot(graph.name, graph, force_trace_links = True)

import time

# Time the actual contract check against the loaded graph.
print("Starting to check")
start_time = time.time()
result = contract.check(graph)
print(result)
print("Finished in " + str(time.time() - start_time) + " seconds")
#decompose_graph(graph) | mit | Python |
|
577891c76140ce50f6be450594a23d78366c5719 | Create __init__.py | PyThaiNLP/pythainlp | pythainlp/number/__init__.py | pythainlp/number/__init__.py | # ระบบแปลงเลขใน 1- 10 ภาษาไทย
p = [['ภาษาไทย', 'ตัวเลข','เลขไทย'],
['หนึ่ง', '1', '๑'],
['สอง', '2', '๒'],
['สาม', '3', '๓'],
['สี่', '4', '๔'],
['ห้า', '5', '๕'],
['หก', '6', '๖'],
['หก', '7', '๗'],
['แปด', '8', '๘'],
['เก้า', '9', '๙']]
thaitonum = dict((x[2], x[1]) for x in p[1:])
p1 = dict((x[0], x[1]) for x in p[1:])
d1 = 0
def nttn(text):
#เลขไทยสู่เลข
thaitonum = dict((x[2], x[1]) for x in p[1:])
return thaitonum[text]
def nttt(text):
#เลขไทยสู่ข้อความ
thaitonum = dict((x[2], x[0]) for x in p[1:])
return thaitonum[text]
def ntnt(text):
#เลขสู่เลขไทย
thaitonum = dict((x[1], x[2]) for x in p[1:])
return thaitonum[text]
def ntt(text):
#เลขสู่ข้อความ
thaitonum = dict((x[1], x[0]) for x in p[1:])
return thaitonum[text]
def ttn(text):
#ข้อความสู่เลข
thaitonum = dict((x[0], x[1]) for x in p[1:])
return thaitonum[text]
def ttnt(text):
#ข้อความสู่เลขไทย
thaitonum = dict((x[0], x[2]) for x in p[1:])
return thaitonum[text]
if __name__ == "__main__":
print(ntt('4'))
| apache-2.0 | Python |
|
8c98e313caeb82ee710d56399d5de7cf1eb1f7df | Add DNA Coding | kakaba2009/MachineLearning,kakaba2009/MachineLearning,kakaba2009/MachineLearning,kakaba2009/MachineLearning | python/src/dna/dna_coding.py | python/src/dna/dna_coding.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import src.mylib.mfile as mfile
from matplotlib import style
stop =int('101010', 2) #101010 I Ching 63 After Completion
befo =int('010101', 2) #101010 I Ching 64 Before Completion
guai =int('111110', 2) #101010 I Ching 43
qian =int('111111', 2) #101010 I Ching 01
kun =int('000000', 2) #101010 I Ching 02
df = mfile.loadOneSymbol("JPY=X", "../db/forex.db")
df = df.reset_index(drop=True)
df = df['Close']
df = df[-1000:]
df = df.diff()
df = df.dropna()
fn = lambda x: (1.0 if x > 0 else 0.0)
xx = df.apply(fn)
xx = xx.values
ln = len(xx)
sz = (ln // 6) * 6
xx = xx[:sz]
print(xx)
#L0 = xx[:-2]
#L1 = xx[1:-1]
#L2 = xx[2:]
#yy = L0 * 4 + L1 * 2 + L2
def my_func(arr, num):
sum = 0
for i in range(num):
sum += arr[i] * (2**(num-i-1))
return sum
xx = np.reshape(xx, (-1, 6))
yy = np.apply_along_axis(my_func, 1, xx, 6)
i, = np.where(yy == stop)
zz = np.copy(yy)
zz[zz != stop] = np.nan
ss = yy
sp = range(0, len(ss))
style.use('ggplot')
plt.plot(ss)
plt.plot(zz, 'bo')
print(ss)
plt.show()
| apache-2.0 | Python |
|
1be041fd9bfc856fd59fba52501823d80d3ff037 | Create setup.py | bestupefy/openstack-plugin | neutron/setup.py | neutron/setup.py | apache-2.0 | Python |
||
08122e57235e836dbfd4230e9e3ad3f7c54072ff | add simple debug callback test case | Lispython/pycurl,Lispython/pycurl,Lispython/pycurl | pycurl/tests/test_debug.py | pycurl/tests/test_debug.py | # $Id$
import pycurl
def test(**args):
print args
c = pycurl.init()
c.setopt(pycurl.URL, 'http://curl.haxx.se/')
c.setopt(pycurl.VERBOSE, 1)
c.setopt(pycurl.DEBUGFUNCTION, test)
c.perform()
c.cleanup()
| lgpl-2.1 | Python |
|
6e42855d527976dd8b1cdb272502ce3aa76f8c6e | Add dbee abstract base class. | vmware/dbeekeeper | dbeekeeper/dbee/base.py | dbeekeeper/dbee/base.py | # Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class Base(Exception):
"""Abstract base class for dbeekeeper local storage, or 'dbee'.
A dbee instance must be accessed from a single thread.
Dbee transactions must be idempotent. Much like ZooKeeper snapshots, dbee
snapshots are 'fuzzy', meaning that transactions that were executed during
snapshotting may or may not be included in the snapshot. During recovery,
dbeekeeper executes all the transactions since the beginning of the
snapshot it's recoverying from in the same order they were applied
originally.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def execute(self, transaction):
"""Execute a transaction.
This method is *not* responsible for persisting transaction to disk.
The caller must maintain a transaction log until it takes a snapshot.
Args:
transaction: transaction to execute in string.
Returns:
None
Raises:
dbeekeeper.DbeeError:
DbeeError is considered fatal since it might affet consistency
of dbee. When dbee throws a DbeeError, dbeekeeper goes into
recovery mode.
dbeekeeper.ClientError:
ClientError is *not* considered fatal since it does not affect
consistency of dbee. Dbeekeeper simply pass ClientErrors back
to the client.
"""
@abc.abstractmethod
def snapshot(self, filename, callback):
"""Take a snapshot of this dbee asynchronously.
This method must not block. It should initiate snapshotting in a
separate thread/process and return without waiting for the snapshotting
to finish. Dbee must reject any other incoming snapshot/restore
requests during the snapshot by raising a ClientError.
The resulting snapshot must contain all the transactions this dbee
successfully executed before the snapshot() was called. For incoming
execute requests during the snapshot, dbee must either:
a. Block them until the snapshotting finishes.
b. Accept the transactions. These transactions may or may not be in the
resulting snapshot. It is the caller's responsibility to maintain
a log for these transactions until the next snapshot() call finishes
successfully.
Args:
filename: filename to use for the snapshot.
callback: function to call when the snapshotting completes. This
function must take 2 arguemnts, error and filename. If
snapshotting succeeded, the first argument is set to None
and the second argument is a string that contains
filename of the resulting snapshot. If snapshotting
failed, the first argument is an Exception and the second
argument is set to None.
Returns:
None
Raises:
This method must not raise any dbeekeeper error. All the dbeekeeper
errors must be passed in the callback
"""
@abc.abstractmethod
def restore(self, filename):
"""Restore dbee from a snapshot.
This method must block until the restore operation completes.
Args:
filename: Snapshot file to restore from.
Returns:
None
Raises:
dbeekeeper.DbeeError:
"""
| apache-2.0 | Python |
|
7e96013f21bbb5003b30da1e04833dcf58650602 | Implement a ThriftHandler for tornado | fmoo/sparts,bboozzoo/sparts,facebook/sparts,pshuff/sparts,fmoo/sparts,djipko/sparts,bboozzoo/sparts,djipko/sparts,facebook/sparts,pshuff/sparts | freenoted/tasks/tornado_thrift.py | freenoted/tasks/tornado_thrift.py | from __future__ import absolute_import
import tornado.web
from thrift.transport.TTransport import TMemoryBuffer
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
class TornadoThriftHandler(tornado.web.RequestHandler):
def initialize(self, processor):
self.processor = processor
def post(self):
iprot = TBinaryProtocol(TMemoryBuffer(self.request.body))
oprot = TBinaryProtocol(TMemoryBuffer())
self.processor.process(iprot, oprot)
self.set_header('Content-Type', 'application/x-thrift')
self.write(oprot.trans.getvalue())
| bsd-3-clause | Python |
|
c66e64556747736c1ee7461aa6ee8780a330481b | add sparse_to_dense_test | xzturn/caffe2,Yangqing/caffe2,Yangqing/caffe2,xzturn/caffe2,Yangqing/caffe2,xzturn/caffe2,Yangqing/caffe2,caffe2/caffe2,xzturn/caffe2,Yangqing/caffe2,xzturn/caffe2 | caffe2/python/sparse_to_dense_test.py | caffe2/python/sparse_to_dense_test.py | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestSparseToDense(TestCase):
def test_sparse_to_dense(self):
op = core.CreateOperator(
'SparseToDense',
['indices', 'values'],
['output'])
workspace.FeedBlob(
'indices',
np.array([2, 4, 999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array([1, 2, 6, 7], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
print(output)
expected = np.zeros(1000, dtype=np.int32)
expected[2] = 1 + 7
expected[4] = 2
expected[999] = 6
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_invalid_inputs(self):
op = core.CreateOperator(
'SparseToDense',
['indices', 'values'],
['output'])
workspace.FeedBlob(
'indices',
np.array([2, 4, 999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array([1, 2, 6], dtype=np.int32))
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
def test_sparse_to_dense_with_data_to_infer_dim(self):
op = core.CreateOperator(
'SparseToDense',
['indices', 'values', 'data_to_infer_dim'],
['output'])
workspace.FeedBlob(
'indices',
np.array([2, 4, 999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array([1, 2, 6, 7], dtype=np.int32))
workspace.FeedBlob(
'data_to_infer_dim',
np.array(np.zeros(1500, ), dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
print(output)
expected = np.zeros(1500, dtype=np.int32)
expected[2] = 1 + 7
expected[4] = 2
expected[999] = 6
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
| apache-2.0 | Python |
|
29e8dce6fc2956dc9f942eca41fdb632c382fe8e | Create pylsy.py | gnithin/Pylsy,bcho/Pylsy,muteness/Pylsy,huiyi1990/Pylsy,muteness/Pylsy,gnithin/Pylsy,bcho/Pylsy,huiyi1990/Pylsy | pylsy/tests/pylsy.py | pylsy/tests/pylsy.py | # -*- coding: utf-8 -*-
from __future__ import print_function
class PylsyTable(object):
def __init__(self, attributes):
self.StrTable = ""
self.Attributes = attributes
self.Table = []
self.AttributesLength = []
self.Cols_num = len(self.Attributes)
self.Lines_num = 0
for attribute in self.Attributes:
col = dict()
col[attribute] = ""
self.Table.append(col)
def print_divide(self):
for space in self.AttributesLength:
self.StrTable += "+ "
for sign in range(space):
self.StrTable += "- "
self.StrTable += "+"+"\n"
def add_data(self, attribute, values):
for col in self.Table:
if attribute in col:
dict_values = [str(value) for value in values]
col[attribute] = dict_values
def create_table(self):
for col in self.Table:
values = list(col.values())[0]
if self.Lines_num < len(values):
self.Lines_num = len(values)
# find the length of longest word in current column
key_length = len(list(col.keys())[0])
for value in values:
length = len(value)
if length > key_length:
key_length = length
self.AttributesLength.append(key_length)
self.print_head()
self.print_value()
def print_head(self):
self.print_divide()
self.StrTable += "| "
for spaces, attr in zip(self.AttributesLength, self.Attributes):
space_num = spaces * 2 - 1
start = (space_num - len(attr)) // 2
for space in range(start):
self.StrTable += " "
self.StrTable += attr+' '
end = space_num - start - len(attr)
for space in range(end):
self.StrTable += " "
self.StrTable += "| "
self.StrTable += ""+'\n'
self.print_divide()
def print_value(self):
for line in range(self.Lines_num):
for col, length in zip(self.Table, self.AttributesLength):
self.StrTable += "| "
value_length = length * 2 - 1
value = list(col.values())[0]
if len(value) != 0:
start = (value_length - len(value[line])) // 2
for space in range(start):
self.StrTable += " "
self.StrTable += value[line]+' '
end = value_length - start - len(value[line])
for space in range(end):
self.StrTable += " "
else:
start = 0
end = value_length - start + 1
for space in range(end):
self.StrTable += " "
self.StrTable += "|"+'\n'
self.print_divide()
def __str__(self):
self.create_table()
return self.StrTable
| mit | Python |
|
d60c1f9a6e56472611a96779462b42e8505e7905 | Convert a PDF document to JPEG/PNG image via /pdftoimg endpoint | symisc/pixlab,symisc/pixlab,symisc/pixlab | python/pdf_to_img.py | python/pdf_to_img.py | import requests
import json
# Convert a PDF document to JPEG/PNG image via /pdftoimg endpoint - https://pixlab.io/cmd?id=pdftoimg
req = requests.get('https://api.pixlab.io/pdftoimg',params={
'src':'https://www.getharvest.com/downloads/Invoice_Template.pdf',
'export': 'jpeg',
'key':'My_PixLab_Key'
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the image output (Converted PDF page): "+ reply['link'])
| bsd-2-clause | Python |
|
198b54c9ff796cc98cccfdc530f0111739901b0d | Create base-7.py | yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,jaredkoontz/leetcode,jaredkoontz/leetcode,jaredkoontz/leetcode,jaredkoontz/leetcode | Python/base-7.py | Python/base-7.py | # Time: O(1)
# Space: O(1)
# Given an integer, return its base 7 string representation.
#
# Example 1:
# Input: 100
# Output: "202"
# Example 2:
# Input: -7
# Output: "-10"
# Note: The input will be in range of [-1e7, 1e7].
class Solution(object):
def convertToBase7(self, num):
if num < 0: return '-' + self.convertToBase7(-num)
result = ''
while num:
result = str(num % 7) + result
num //= 7
return result if result else '0'
class Solution2(object):
def convertToBase7(self, num):
"""
:type num: int
:rtype: str
"""
if num < 0: return '-' + self.convertToBase7(-num)
if num < 7: return str(num)
return self.convertToBase7(num // 7) + str(num % 7)
| mit | Python |
|
50dc018891511ce34b4177a43cfcd678456444cf | test of quasiisothermaldf's meanvR | followthesheep/galpy,jobovy/galpy,jobovy/galpy,jobovy/galpy,followthesheep/galpy,followthesheep/galpy,followthesheep/galpy,jobovy/galpy | nose/test_qdf.py | nose/test_qdf.py | # Tests of the quasiisothermaldf module
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
def test_meanvR_adiabatic_gl():
qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,
pot=MWPotential,aA=aAA,cutcounter=True)
#In the mid-plane
assert numpy.fabs(qdf.meanvR(0.9,0.,gl=True)) < 0.01, "qdf's meanvr is not equal to zero"
#higher up
assert numpy.fabs(qdf.meanvR(0.9,0.2,gl=True)) < 0.01, "qdf's meanvr is not equal to zero"
assert numpy.fabs(qdf.meanvR(0.9,-0.25,gl=True)) < 0.01, "qdf's meanvr is not equal to zero"
return None
| bsd-3-clause | Python |
|
660e53fa4505782a2d1484cc0b6e598edc851df0 | Initialize P05_stylingExcel | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py | books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py | # This program uses the OpenPyXL module to manipulate Excel documents
import openpyxl
from openpyxl.styles import Font, Style
wb = openpyxl.Workbook()
sheet = wb.get_sheet_by_name("Sheet")
italic24Font = Font(size=24, italic=True)
styleObj = Style(font=italic24Font)
sheet["A1"].style = styleObj
sheet["A1"] = "Hello world!"
wb.save("styled.xlsx")
| mit | Python |
|
6e79ab6ca68252669055df12c333320bd0bda959 | Create obtainNytimes.py | Derek-Guan/readNewspaper,derekguan/readNewspaper | obtainNytimes.py | obtainNytimes.py | import urllib.request, sys, re
from http.cookiejar import CookieJar
from docx import Document
def writeDoc(title,content):
docName = title+'.docx'
document = Document()
document.add_heading(title, 0)
document.add_paragraph(content)
document.save(docName)
def ObtainContent(pageContent):
#obtain title
for ln in pageContent:
#print(ln)
mat = re.search(b'<h1 itemprop="headline" id="story-heading" class="story-heading">.*</h1>', ln)
if mat:
headline = mat.group(0).decode('utf-8')
title = ''
length = len(headline)
i = 0
while i < length:
if headline[i] == '<':
#find >
z = i + 1
while z < length and headline[z] != '>':
z = z + 1
i = z + 1
while i < length and headline[i] != '<':
title = title + headline[i]
i = i + 1
else:
i = i + 1
break
#obtain content
#step 1: get all content with label p
paraList = []
for ln in pageContent:
mat = re.findall(b'<p class="story-body-text story-content".*?</p>', ln)
for m in mat:
paraList.append(m)
#step 2: fetch content between <p> </p>
para = ''
for e in paraList:
extract = e.decode('utf-8')
length = len(extract)
i = 0
while i < length:
if extract[i] == '<':
#find >
z = i + 1
while z < length and extract[z] != '>':
z = z + 1
i = z + 1
while i < length and extract[i] != '<':
para = para + extract[i]
i = i + 1
else:
i = i + 1
para = para + '\n'
return (title,para)
def fetchWebPages(website):
cj = CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
page = opener.open(website)
return page
def ObtainNyTimes():
page = fetchWebPages(sys.argv[1])
(title, paras) = ObtainContent(page)
writeDoc(title,paras)
print("Fetch Your Newspaper Successfully..........")
if __name__ == "__main__":
ObtainNyTimes()
| apache-2.0 | Python |
|
4e778d86670d4673cd591217d514a1f64dbc8424 | Add an OOP demo | letuananh/pydemo,dakside/pydemo,dakside/pydemo,letuananh/pydemo,dakside/pydemo | oop/basic_oop.py | oop/basic_oop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script demonstrates how to use OOP in Python
Latest version can be found at https://github.com/letuananh/pydemo
References:
Classes in Python:
https://docs.python.org/2/tutorial/classes.html
@author: Le Tuan Anh <[email protected]>
'''
# Copyright (c) 2015, Le Tuan Anh <[email protected]>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
__author__ = "Le Tuan Anh <[email protected]>"
__copyright__ = "Copyright 2015, pydemo"
__credits__ = [ "Le Tuan Anh" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Le Tuan Anh"
__email__ = "<[email protected]>"
__status__ = "Prototype"
########################################################################
import logging
########################################################################
class Classroom:
''' This class represents a classroom model. Each class has its own code and a group of students.
'''
def __init__(self, class_code):
self.students = []
self.class_code = class_code
def add(self, student):
''' This method will add an existing student into this classroom
'''
self.students.append(student)
def __repr__(self):
''' This method will print details of a classroom object
'''
return "Classroom{code='%s',Students=%s}" % (self.class_code, self.students)
def __str__(self):
''' A shorter & more human friendly way of printing an object
'''
return "Classroom %s" % (self.class_code)
class Student:
''' Each student object has a name and age.
'''
def __init__(self, name, age):
self.name = name
self.age = age
def __repr__(self):
return "Student{name='%s',age=%s}" % (self.name, self.age)
def __str__(self):
return "Student %s" % (self.name)
#----------------------------------------------------------------------------
# Define the main method
#------------------------------------------------------------------------------
def main():
'''The main entry of the application (i.e. The tasks should start from here)
'''
# Create a classroom
c = Classroom("Philosophy 101")
print("%s is created." % c)
# ... now we create students
descartes = Student("Rene Descartes", 419)
nietzsche = Student("Friedrich Nietzsche", 171)
print("%s is created." % descartes)
print("%s is created." % nietzsche)
# ... add the students to the classroom
c.add(descartes)
c.add(nietzsche)
# Bonus: You can use repr to get deeper information, this can be useful for debugging
print("-" * 20)
print(repr(c))
logging.info(repr(c))
pass
#------------------------------------------------------------------------------
# Check if this file is run as an application
#------------------------------------------------------------------------------
if __name__ == "__main__":
# If the condition is true, execute the main method
main()
| mit | Python |
|
fb5ed0ea066c9bdb801a95e50d78529addffbed8 | add twitter url to video URL email | yoe/veyepar,CarlFK/veyepar,CarlFK/veyepar,xfxf/veyepar,CarlFK/veyepar,yoe/veyepar,CarlFK/veyepar,xfxf/veyepar,xfxf/veyepar,yoe/veyepar,xfxf/veyepar,CarlFK/veyepar,xfxf/veyepar,yoe/veyepar,yoe/veyepar | dj/scripts/email_url.py | dj/scripts/email_url.py | #!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video of your talk is posted:
{{url}}
{% if ep.state == 7 %}
Look at it, make sure the title is spelled right and the audio sounds reasonable.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
http://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
{% endif %}
{% if ep.twitter_url %}
It has been tweeted: {{ ep.twitter_url }}
Re-tweet it, blog it, whatever it. No point in making videos if no one watches them.
{% endif %}
"""
py_name = "email_url.py"
def more_context(self, ep):
# If there is a Richard (pyvideo) url, use that;
# else use the youtube url.
url = ep.public_url or ep.host_url
return {'url':url}
if __name__ == '__main__':
p=email_url()
p.main()
| #!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video of your talk is posted:
{{url}}
Look at it, make sure the title is spelled right, let me know if it is OK.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
http://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
"""
py_name = "email_url.py"
def more_context(self, ep):
# If there is a Richard (pyvideo) url, use that;
# else use the youtube url.
url = ep.public_url or ep.host_url
return {'url':url}
if __name__ == '__main__':
p=email_url()
p.main()
| mit | Python |
b47369d43a0a85ac2bc32bfa77c6a4d9074ce700 | Add basic test case for retrieve_dns module | apel/apel,stfc/apel,tofu-rocketry/apel,apel/apel,tofu-rocketry/apel,stfc/apel | test/test_retrieve_dns.py | test/test_retrieve_dns.py | import logging
import os
import tempfile
import unittest
import mock
import bin.retrieve_dns
logging.basicConfig(level=logging.INFO)
class RetrieveDnsTestCase(unittest.TestCase):
def setUp(self):
# Mock out logging
mock.patch('bin.retrieve_dns.set_up_logging', autospec=True).start()
# Mock out config
mock_config = mock.patch('bin.retrieve_dns.get_config', autospec=True).start()
# Mock out retrieving xml
self.mock_xml = mock.patch('bin.retrieve_dns.get_xml', autospec=True).start()
# Set up temp files
self.files = {}
for item in ('dn', 'extra', 'ban'):
self.files[item] = dict(zip(('handle', 'path'), tempfile.mkstemp()))
os.write(self.files[item]['handle'], '/wobble')
for item in self.files.values():
os.close(item['handle'])
# Set up config using temp files
c = bin.retrieve_dns.Configuration()
c.dn_file = self.files['dn']['path']
c.extra_dns = self.files['extra']['path']
c.banned_dns = self.files['ban']['path']
mock_config.return_value = c
def test_basics(self):
self.mock_xml.return_value = "<HOSTDN>/wibble</HOSTDN>"
bin.retrieve_dns.runprocess("fakefile", "fakefile")
dns = open(self.files['dn']['path'])
self.assertEqual(dns.read(), '/wibble\n')
dns.close()
def tearDown(self):
# Delete temp files
for item in self.files.values():
os.remove(item['path'])
mock.patch.stopall()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
b97a9571478dc8c919e072734816b412dadc0da9 | Add maths plugin | thomasleese/smartbot-old,Muzer/smartbot,Cyanogenoid/smartbot,tomleese/smartbot | plugins/maths.py | plugins/maths.py | import io
import unittest
from sympy.parsing import sympy_parser
class Plugin:
def on_command(self, bot, msg, stdin, stdout, reply):
expr = " ".join(msg["args"][1:])
expr = sympy_parser.parse_expr(expr)
print(expr.evalf(), file=stdout)
def on_help(self):
return "Perform maths expressions."
class Test(unittest.TestCase):
def setUp(self):
self.plugin = Plugin()
def test_command(self):
for a in range(1, 1000, 50):
for b in range(1, 1000, 50):
stdout = io.StringIO()
self.plugin.on_command(None, {"args": [None, str(a) + "*" + str(b)]}, None, stdout, None)
self.assertEqual(int(float(stdout.getvalue().strip())), a * b)
def test_help(self):
self.assertTrue(self.plugin.on_help())
| mit | Python |
|
932fc681e1e1b79a28d03a480c19869fc0a6956c | Add state module to manage InfluxDB users | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/states/influxdb_user.py | salt/states/influxdb_user.py | # -*- coding: utf-8 -*-
'''
Management of InfluxDB users
============================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
'''
Only load if the influxdb module is available
'''
if 'influxdb.db_exists' in __salt__:
return 'influxdb_user'
return False
def _changes(name, admin):
'''
Get necessary changes to given user account
'''
existing_user = __salt__['influxdb.user_info'](name)
changes = {}
if existing_user['admin'] != admin:
changes['admin'] = admin
return changes
def present(name,
password,
admin=False,
**client_args):
'''
Ensure that given user is present.
name
Name of the user to manage
password
Password of the user
admin : False
Whether the user should have cluster administration
privileges or not.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0} is present and up to date'.format(name)}
if not __salt__['influxdb.user_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} will be created'.format(name)
return ret
else:
if __salt__['influxdb.create_user'](
name, password, admin=admin, **client_args):
ret['comment'] = 'Created user {0}'.format(name)
ret['changes'][name] = 'created'
return ret
else:
ret['comment'] = 'Failed to create user {0}'.format(name)
ret['result'] = False
return ret
else:
changes = _changes(name, admin)
if changes:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('The following user attributes are set to '
'be changed:\n')
for k, v in changes.items():
ret['comment'] += u'{0}: {1}\n'.format(k, v)
return ret
else:
pre = __salt__['influxdb.user_info'](name)
for k, v in changes.items():
if k == 'admin':
if v:
__salt__['influxdb.grant_admin_privileges'](name)
continue
else:
__salt__['influxdb.revoke_admin_privileges'](name)
continue
post = __salt__['influxdb.user_info'](name)
for k in post:
if post[k] != pre[k]:
ret['changes'][k] = post[k]
if ret['changes']:
ret['comment'] = 'Updated user {0}'.format(name)
return ret
def absent(name, **client_args):
'''
Ensure that given user is absent.
name
The name of the user to manage
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0} is not present'.format(name)}
if __salt__['influxdb.user_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} will be removed'.format(name)
return ret
else:
if __salt__['influxdb.remove_user'](name, **client_args):
ret['comment'] = 'Removed user {0}'.format(name)
ret['changes'][name] = 'removed'
return ret
else:
ret['comment'] = 'Failed to remove user {0}'.format(name)
ret['result'] = False
return ret
return ret
| apache-2.0 | Python |
|
8ab44294c0dd7b95102bfa1d9e8437067813cd0f | Add basic document parser | adambrenecki/vc2xlsx | vc2xlsx/doc_parser.py | vc2xlsx/doc_parser.py | import parsley
class Goto (object):
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "Goto({}, {})".format(repr(self.x), repr(self.y))
class Entry (object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "Entry({})".format(repr(self.value))
class Menu (object):
def __init__(self, command):
self.command = command
def __repr__(self):
return "Menu({})".format(repr(self.command))
_grammar = parsley.makeGrammar(r"""
document = command*:c -> tuple(x for x in c if x)
command = goto_command | menu_command | entry_command | nl
goto_command = '>' <letter+>:x <digit+>:y (':' | nl) -> Goto(x, y)
entry_command = <(letter | digit | '"' | '\'' | '+' | '-' | '(' | '#' | '@') not_nl*>:value -> Entry(value)
menu_command = '/' <(letter | '-') (letter | digit | '$' | '*')*>:command -> Menu(command)
nl = ('\r'? '\n' | '\r') -> None
not_nl = anything:x ?(x not in '\r\n') -> x
""", globals())
def parse(value):
return _grammar(value.rstrip('\0\r\n\t ')).document()
if __name__ == "__main__":
import sys
with open(sys.argv[1]) as f:
result = parse(f.read())
print(repr(result))
| agpl-3.0 | Python |
|
01bcda4326dc0204798f268bb1c60f06526aaba3 | add freebsd shadow module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/freebsd_shadow.py | salt/modules/freebsd_shadow.py | '''
Manage the password database on FreeBSD systems
'''
# Import python libs
import os
try:
import pwd
except ImportError:
pass
# Import salt libs
import salt.utils
def __virtual__():
return 'shadow' if __grains__.get('os', '') == 'FreeBSD' else False
def info(name):
    '''
    Return information for the specified user

    CLI Example::

        salt '*' shadow.info root
    '''
    try:
        data = pwd.getpwnam(name)
        ret = {
            'name': data.pw_name,
            # A literal '*' in the passwd field means no password is set;
            # report it as an empty string instead.
            'passwd': data.pw_passwd if data.pw_passwd != '*' else '',
            'change': '',
            'expire': ''}
    except KeyError:
        # Unknown user: return an empty record rather than raising.
        return {
            'name': '',
            'passwd': '',
            'change': '',
            'expire': ''}

    # Get password aging info
    # Fields 6 and 7 of `pw user show` output are the change/expire times.
    cmd = 'pw user show {0} | cut -f6,7 -d:'.format(name)
    try:
        change, expire = __salt__['cmd.run_all'](cmd)['stdout'].split(':')
    except ValueError:
        # Output didn't split into exactly two fields; leave aging info empty.
        pass
    else:
        ret['change'] = change
        ret['expire'] = expire

    return ret
def set_password(name, password):
    '''
    Set the password for a named user. The password must be a properly defined
    hash. The password hash can be generated with this command:

    ``python -c "import crypt; print crypt.crypt('password',
    '$6$SALTsalt')"``

    ``SALTsalt`` is the 8-character cryptographic salt. Valid characters in the
    salt are ``.``, ``/``, and any alphanumeric character.

    Keep in mind that the $6 represents a sha512 hash, if your OS is using a
    different hashing algorithm this needs to be changed accordingly

    CLI Example::

        salt '*' shadow.set_password root '$1$UYCIxa628.9qXjpQCjM4a..'
    '''
    # `-H 0` makes pw(8) read the password hash from file descriptor 0 (stdin).
    __salt__['cmd.run']('pw user mod {0} -H 0'.format(name), stdin=password)
    # Verify the change by re-reading the user's record.
    uinfo = info(name)
    return uinfo['passwd'] == password
| apache-2.0 | Python |
|
9dab373023fa6b7767cd7555a533161752205eda | Test a weighted affine solver. | UASLab/ImageAnalysis | scripts/0-weighted-affine.py | scripts/0-weighted-affine.py | #!/usr/bin/python
import sys
sys.path.append('../lib')

import transformations

# Two sets of four corresponding 2-D points as [xs, ys] row lists:
# v0 = source points, v1 = where they map to in the second image.
v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]

# Per-point weights for the weighted solver (uniform weights kept for reference).
#weights = [1.0, 1.0, 1.0, 1.0]
weights = [0.1, 0.01, 0.1, 0.2]

# Compare the plain affine fit against the weighted variant (Python 2 script).
print "original"
print transformations.affine_matrix_from_points(v0, v1, shear=False)
print "weighted"
print transformations.affine_matrix_from_points_weighted(v0, v1, weights, shear=False)
| mit | Python |
|
260e0ef2bc37750dccea47d30110221c272e757a | Add script for automating analysis for all corpora | MontrealCorpusTools/SPADE,MontrealCorpusTools/SPADE | run_all_corpora.py | run_all_corpora.py | import os
import argparse
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument("corpusdir", help = "Path to the directory containing corpus directories")
parser.add_argument("script", help = "name of the script to be run")
args = parser.parse_args()

## lists of corpora to skip
## and failed to run
skipped = []
failed = []

## first check that the script exists
## (the original `assert(cond, msg)` asserted a non-empty tuple, which is
## always true, so the check never fired)
if not os.path.isfile(args.script):
    raise SystemExit("{} should be a script that exists".format(args.script))

## loop through entries in the corpus directory
for corpus in os.listdir(args.corpusdir):
    ## os.listdir returns bare names, so join with corpusdir before testing;
    ## the original tested the bare name against the current working directory
    corpus_path = os.path.join(args.corpusdir, corpus)
    ## check that the entry is actually a directory since that is the expected
    ## format for the analysis scripts
    if os.path.isdir(corpus_path):
        if corpus in skipped:
            continue
        try:
            print("Processing {}".format(corpus))
            ## first reset the corpus
            subprocess.call(['python', 'reset_database.py', corpus])
            ## run the script on the corpus
            subprocess.call(['python', args.script, corpus, "-s"])
        except Exception:
            ## record the failure and move on to the next corpus
            failed.append(corpus)
            continue

print("Complete!")
## the original line was a SyntaxError: `print("... {}" failed)` never called format()
print("Following corpora were not run: {}".format(failed))
| mit | Python |
|
593941ec42918a389a348a5d35e8c5033bb34e73 | Add 8ball plugin | itsmartin/nimbus,Plastix/nimbus,bcbwilla/nimbus,Brottweiler/nimbus | plugins/ball8.py | plugins/ball8.py | import random
from plugin import CommandPlugin, PluginException
class Ball8(CommandPlugin):
    """
    8ball command (by javipepe :))

    Answers a yes/no question with one of the 20 standard Magic 8-Ball phrases.
    """

    def __init__(self, bot):
        CommandPlugin.__init__(self, bot)
        self.triggers = ['8ball']
        self.short_help = 'Ask me a question'
        self.help = 'Ask me a question, I\'ll decide what the answer should be. Based on https://en.wikipedia.org/wiki/Magic_8-Ball'
        self.help_example = ['!8ball Is linux better than windows?']
        # ^ obviously yes.

    def on_command(self, event, response):
        args = event['text']
        # Require a non-empty question that ends with '?'
        # (endswith replaces the original `args[-1:].__contains__('?')`).
        if not args or not args.endswith('?'):
            raise PluginException('Invalid argument! Ask me a question!')
        # The 20 canonical answers; fixed the "Do't count on it" typo.
        possible_answers = [
            'It is certain', 'It is decidedly so', 'Without a doubt',
            'Yes, definitely', 'You may rely on it', 'As I see it, yes',
            'Most likely', 'Outlook good', 'Yes', 'Signs point to yes',
            'Reply hazy try again', 'Ask again later', 'Better not tell you now',
            'Cannot predict now', 'Concentrate and ask again',
            'Don\'t count on it', 'My reply is no', 'My sources say no',
            'Outlook not so good', 'Very doubtful']
        response['text'] = ':8ball: says *_%s_*!' % random.choice(possible_answers)
        self.bot.sc.api_call('chat.postMessage', **response)
| mit | Python |
|
d22ca6dbf7e8aa98b0f580b7972e157894925365 | Fix test output for combining filename and extension | wintersandroid/tvrenamr,ghickman/tvrenamr | tests/test_auto_moving.py | tests/test_auto_moving.py | import os
import shutil
from nose.tools import assert_equal
from .base import BaseTest
class TestAutoMoving(BaseTest):
    """Tests for build_path() when auto-organising files into show/season folders."""
    organise = True

    def teardown(self):
        # Wipe and recreate the organised directory so each test starts clean.
        super(TestAutoMoving, self).teardown()
        shutil.rmtree(self.organised)
        os.mkdir(self.organised)

    def test_using_organise_uses_the_specified_organise_folder(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Strip the last three segments (show/season/filename) to get the root.
        organise_dir = os.path.join('/', *path.split('/')[:-3])
        assert_equal(self.organised, organise_dir)

    def test_using_organise_uses_the_correct_show_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Third segment from the end is the show folder.
        season_dir = path.split('/')[-3:][0]
        assert_equal(season_dir, self._file.show_name)

    def test_using_organise_uses_the_correct_season_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Second segment from the end is the season folder.
        season_dir = path.split('/')[-2:][0]
        assert_equal(season_dir, 'Season {0}'.format(self._file.season))

    def test_using_organise_uses_the_correct_filename(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # The title portion of the filename follows the ' - ' separator.
        filename = path.split('/')[-1:][0].split(' - ')[-1:][0]
        # Extension is appended directly — presumably it includes its own dot.
        assert_equal(filename, ''.join([self._file.episodes[0].title, self._file.extension]))

    def test_moving_the_leading_the_to_the_end_of_a_show_name_causes_the_show_folder_name_to_follow_suit_when_using_organise(self):
        show_name = 'Big Bang Theory, The'
        self._file.show_name = show_name
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        show_dir = path.split('/')[-3:][0]
        assert_equal(show_dir, show_name)
| import os
import shutil
from nose.tools import assert_equal
from .base import BaseTest
class TestAutoMoving(BaseTest):
    """Tests for build_path() auto-organising (older copy of this suite)."""
    organise = True

    def teardown(self):
        # Wipe and recreate the organised directory so each test starts clean.
        super(TestAutoMoving, self).teardown()
        shutil.rmtree(self.organised)
        os.mkdir(self.organised)

    def test_using_organise_uses_the_specified_organise_folder(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Strip the last three segments (show/season/filename) to get the root.
        organise_dir = os.path.join('/', *path.split('/')[:-3])
        assert_equal(self.organised, organise_dir)

    def test_using_organise_uses_the_correct_show_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        season_dir = path.split('/')[-3:][0]
        assert_equal(season_dir, self._file.show_name)

    def test_using_organise_uses_the_correct_season_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        season_dir = path.split('/')[-2:][0]
        assert_equal(season_dir, 'Season {0}'.format(self._file.season))

    def test_using_organise_uses_the_correct_filename(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        filename = path.split('/')[-1:][0].split(' - ')[-1:][0]
        # NOTE(review): title and extension joined with an explicit '.' here,
        # which implies the extension carries no leading dot — confirm.
        assert_equal(filename, '.'.join([self._file.episodes[0].title, self._file.extension]))

    def test_moving_the_leading_the_to_the_end_of_a_show_name_causes_the_show_folder_name_to_follow_suit_when_using_organise(self):
        show_name = 'Big Bang Theory, The'
        self._file.show_name = show_name
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        show_dir = path.split('/')[-3:][0]
assert_equal(show_dir, show_name)
| mit | Python |
98c658822cf6782ca0907ab7a68691922e701aa6 | Add unittest for pytesseract | Kaggle/docker-python,Kaggle/docker-python | tests/test_pytesseract.py | tests/test_pytesseract.py | import unittest
import io

import numpy as np
import pytesseract
from PIL import Image as PILImage
from wand.image import Image as wandimage
class TestPytesseract(unittest.TestCase):
    """Smoke test: rasterize a PDF with Wand and OCR it with pytesseract."""

    def test_tesseract(self):
        # Open pdf with Wand and render it to PNG bytes.
        with wandimage(filename='/input/tests/data/test.pdf') as wand_image:
            img_buffer = np.asarray(bytearray(wand_image.make_blob(format='png')), dtype='uint8')
            bytesio = io.BytesIO(img_buffer)
            # PILImage was referenced but never imported in the original file;
            # the import is now provided at the top of the module.
            test_string = pytesseract.image_to_string(PILImage.open(bytesio))
        # isinstance is the idiomatic type check (was: type(x) == str).
        self.assertIsInstance(test_string, str)
| apache-2.0 | Python |
|
f8d8580dfffee35236478ec75116b291499c085c | Create maximum-average-subarray-i.py | kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode | Python/maximum-average-subarray-i.py | Python/maximum-average-subarray-i.py | # Time: O(n)
# Space: O(1)
# Given an array consisting of n integers,
# find the contiguous subarray of given length k that has the maximum average value.
# And you need to output the maximum average value.
#
# Example 1:
# Input: [1,12,-5,-6,50,3], k = 4
# Output: 12.75
# Explanation: Maximum average is (12-5-6+50)/4 = 51/4 = 12.75
# Note:
# 1 <= k <= n <= 30,000.
# Elements of the given array will be in the range [-10,000, 10,000].
class Solution(object):
    def findMaxAverage(self, nums, k):
        """
        Return the maximum average of any contiguous subarray of length k.

        Sliding-window: seed with the sum of the first k elements, then slide
        one position at a time, adding the entering element and removing the
        leaving one — O(n) time, O(1) space.

        :type nums: List[int]
        :type k: int
        :rtype: float
        """
        # sum() replaces the original manual accumulation loop; range (not the
        # Python-2-only xrange) keeps the code runnable on both 2 and 3.
        total = sum(nums[:k])
        result = total
        for i in range(k, len(nums)):
            total += nums[i] - nums[i-k]
            result = max(result, total)
        return float(result) / k
| mit | Python |
|
164ccb9206885b216e724b3618ebae5601ab0ac0 | Add parallel execution utility module. | eddiejessup/ciabatta | parallel.py | parallel.py | import multiprocessing as mp
def run_func(func, args, parallel=False):
    """Apply `func` to each element of `args`, optionally via a process pool.

    In parallel mode the pool is now closed and joined (the original leaked
    it), and at least one worker is used even on single-core machines.
    """
    if parallel:
        # Leave one core free, but never request a zero-sized pool.
        workers = max(1, mp.cpu_count() - 1)
        pool = mp.Pool(workers)
        try:
            pool.map(func, args)
        finally:
            pool.close()
            pool.join()
    else:
        for arg in args:
            func(arg)
| bsd-3-clause | Python |
|
7c3ed589ace907a71931b79902382b168a2ae80d | add direct_link_downloader | jtara1/turbo_palm_tree | downloaders/direct_link_downloader.py | downloaders/direct_link_downloader.py | import os
from urllib.request import urlopen
def direct_link_download(url, file_path):
    """
    Download a direct-link image and save it to disk.

    :param url: direct link to an image
    :param file_path: file path (including filename) to save image to
    :raises ValueError: if file_path points to a directory
    :raises FileExistsError: if the target file already exists
    """
    # make sure the file_path param doesn't point to a directory
    if os.path.isdir(file_path):
        raise ValueError(':param file_path: shouldn\'t point to a directory')

    # make sure the file doesn't already exist
    if os.path.isfile(file_path):
        raise FileExistsError('%s already exists' % file_path)

    # create path(s) for file_path if necessary; the base_dir guard avoids
    # calling makedirs on the CWD for bare filenames (dirname == ''), which
    # raised in the original
    base_dir = os.path.dirname(file_path)
    if base_dir and not os.path.isdir(base_dir):
        os.makedirs(os.path.abspath(base_dir))

    # urlopen().read() returns bytes, so the file must be opened in binary
    # mode ('w' raised TypeError on Python 3 in the original)
    req = urlopen(url).read()
    with open(file_path, 'wb') as f:
        f.write(req)
if __name__ == "__main__":
    # tests
    # Ad-hoc manual check: downloads a sample image into the current directory.
    dir1 = os.path.join(os.getcwd(), 'img1.jpg')
    print(dir1)
    url = 'http://i.imgur.com/2MlAOkC.jpg'
    url2 = 'http://img05.deviantart.net/41ee/i/2013/299/9/f/_stock__mystic_woods_by_dominikaaniola-d2ehxq4.jpg'
    direct_link_download(url, 'img1.jpg')
    # direct_link_download(url2, './tmp/tmp2/img2.jpg')
| apache-2.0 | Python |
|
e3e62c964b864c057e98763169ddc0dd922e6fa9 | Add a separate module for common parsing functions. | makrutenko/dunovo,makrutenko/dunovo,makrutenko/dunovo | dunovo_parsers.py | dunovo_parsers.py | import collections
# A pair of `StrandFamily`s with the same barcode.
BarFamily = collections.namedtuple('BarFamily', ('bar', 'ab', 'ba'))
# A pair of `ReadFamily`s with the same order and barcode.
StrandFamily = collections.namedtuple('StrandFamily', ('order', 'mate1', 'mate2'))
# A set of `Read`s with the same mate, order, and barcode.
ReadFamily = collections.namedtuple('ReadFamily', ('mate', 'reads'))
# A single read: its name plus sequence and quality strings.
Read = collections.namedtuple('Read', ('name', 'seq', 'quals'))
class DunovoFormatError(ValueError):
    """Raised when an input line does not match the expected 8-column format
    or carries an order value other than 'ab'/'ba'."""
    pass
def parse_make_families(lines, prepended=False):
    """Group consecutive tab-delimited lines by barcode and order, yielding one
    BarFamily per barcode. Input is assumed to be sorted so that lines sharing
    a barcode (and, within it, an order) are adjacent."""
    strand_families = []
    strand_family_lines = []
    last_barcode = last_order = None
    for line_num, line in enumerate(lines, 1):
        fields = line.rstrip('\r\n').split('\t')
        if len(fields) != 8:
            raise DunovoFormatError(f'Line {line_num} has an invalid number of columns: {len(fields)}')
        # If it's the output of correct.py with --prepend, there's an extra column.
        # We want the corrected barcode (column 1), not the original one (column 2).
        if prepended:
            del fields[1]
        barcode, order = fields[:2]
        # A change of barcode or order closes out the current strand family.
        if barcode != last_barcode or order != last_order:
            if last_order is not None:
                strand_families.append(create_strand_family(strand_family_lines))
                strand_family_lines = []
            # A change of barcode additionally closes out the barcode family.
            if barcode != last_barcode:
                if last_barcode is not None:
                    yield create_bar_family(strand_families, last_barcode)
                    strand_families = []
        strand_family_lines.append(fields)
        last_barcode = barcode
        last_order = order
    # Flush whatever is pending once the input is exhausted.
    if last_order is not None:
        strand_families.append(create_strand_family(strand_family_lines))
    if last_barcode is not None:
        yield create_bar_family(strand_families, last_barcode)
def create_strand_family(strand_family_lines):
    """Build a StrandFamily (paired mate-1/mate-2 ReadFamily tuples) from a
    list of 8-field rows that all share one barcode and order."""
    mate1_reads = []
    mate2_reads = []
    order = None
    for fields in strand_family_lines:
        barcode, this_order, name1, seq1, quals1, name2, seq2, quals2 = fields
        if this_order not in ('ab', 'ba'):
            raise DunovoFormatError('Invalid order: {!r}'.format(this_order))
        # All rows handed to this function must agree on the order.
        assert order is None or this_order == order, (this_order, order)
        mate1_reads.append(Read(name1, seq1, quals1))
        mate2_reads.append(Read(name2, seq2, quals2))
        order = this_order
    return StrandFamily(order,
                        ReadFamily(1, tuple(mate1_reads)),
                        ReadFamily(2, tuple(mate2_reads)))
def create_bar_family(strand_families_raw, barcode):
    """Pair up the 'ab' and 'ba' strand families for one barcode into a
    BarFamily, substituting an empty StrandFamily for any missing strand."""
    assert 1 <= len(strand_families_raw) <= 2, len(strand_families_raw)
    by_order = {family.order: family for family in strand_families_raw}
    ordered = []
    for order in ('ab', 'ba'):
        family = by_order.get(order)
        if family is None:
            # No reads were seen for this strand: use an empty placeholder.
            family = StrandFamily(order, ReadFamily(1, ()), ReadFamily(2, ()))
        ordered.append(family)
    return BarFamily(barcode, *ordered)
| isc | Python |
|
1455f6c563edd07a61dd826bde03137fff2d3f57 | add data for recipe 1.8 | ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/08-calculating_with_dictionaries/main.py | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/08-calculating_with_dictionaries/main.py | def example_1():
    # Map ticker symbols to share prices.
    prices = {
        'ACME': 45.23,
        'AAPL': 612.78,
        'IBM': 205.55,
        'HPQ': 37.20,
        'FB': 10.75
    }
    # Zipping (value, key) pairs makes min/max compare by price first.
    min_price = min(zip(prices.values(), prices.keys()))
    max_price = max(zip(prices.values(), prices.keys()))
    print(min_price, max_price)
    sorted_prices = sorted(zip(prices.values(), prices.keys()))
    print(sorted_prices)
    # zip() returns a one-shot iterator: min() consumes it, so the
    # subsequent max() sees an empty sequence and raises ValueError.
    prices_and_names = zip(prices.values(), prices.keys())
    print(min(prices_and_names))
    try:
        print(max(prices_and_names))
    except ValueError:
        print('here is ValueError')
def example_2():
    # Same price table as example_1.
    prices = {
        'ACME': 45.23,
        'AAPL': 612.78,
        'IBM': 205.55,
        'HPQ': 37.20,
        'FB': 10.75
    }
    # min/max on a dict iterate over its keys (alphabetical extremes).
    print(min(prices))
    print(max(prices))
    # Operate on the values directly.
    print(min(prices.values()))
    print(max(prices.values()))
    # key= compares by price but still returns the ticker symbol...
    print(min(prices, key = lambda k: prices[k]))
    print(max(prices, key = lambda k: prices[k]))
    # ...so index back into the dict to get the price itself.
    print(prices[min(prices, key = lambda k: prices[k])])
    print(prices[max(prices, key = lambda k: prices[k])])
def example_3():
    """Show tie-breaking: when prices are equal, min/max fall back to the keys."""
    prices = { 'AAA': 45.23, 'ZZZ': 45.23 }
    pairs = list(zip(prices.values(), prices.keys()))
    print(min(pairs))
    print(max(pairs))
if __name__ == '__main__':
    # Run all three demonstrations in order.
    example_1()
    example_2()
    example_3()
| mit | Python |
|
4879fc188c685e4676414a2f186d8d52998bc28d | Create task_3_3.py | Mariaanisimova/pythonintask | PINp/2015/KAKURKIN_I_V/task_3_3.py | PINp/2015/KAKURKIN_I_V/task_3_3.py | # Задача 3. Вариант 3.
# Write a program that prints the name "Чарльз Лютвидж Доджсон"
# (Charles Lutwidge Dodgson) and asks for his pen name.
# The program must concatenate the two strings and print the result,
# separating the name and the pen name with a dash.
NAME = "Чарльз Лютвидж Доджсон"
print(NAME)
PNAME = input("Его псевдоним?\n")
print(NAME + " - " + PNAME)
# Wait for Enter so the console window stays open.
inp = input()
# KAKURKIN I.V.
# 29.02.2016
| apache-2.0 | Python |
|
8630d60a2ecdd2fac4153623ac64ba188e05d8b7 | Add source code. | John-Lin/pigrelay | pigrelay.py | pigrelay.py | import os
import sys
import time
import socket
import logging
# Module-wide logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SOCKFILE = "/tmp/snort_alert"  # Unix domain socket path the alerts arrive on
BUFSIZE = 65863                # receive buffer size in bytes
IP = '127.0.0.1'               # TCP destination address (presumably the Ryu controller)
PORT = 51234                   # TCP destination port
# TODO: TLS/SSL wrapper for socket
class SnortListener():
    """Bridge that receives Snort alerts on a Unix domain socket and relays
    them over a TCP connection.

    NOTE: this module uses Python 2 syntax (`except Exception, e`) and will
    not parse under Python 3.
    """

    def __init__(self):
        self.unsock = None  # Unix domain socket (server side, receives alerts)
        self.nwsock = None  # network socket (client side, forwards alerts)

    def start_send(self):
        '''Open a client on Network Socket'''
        self.nwsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.nwsock.connect((IP, PORT))
        except Exception, e:
            # Without the TCP peer there is nothing to relay to; give up.
            logger.info("Network socket connection error: %s" % e)
            sys.exit(1)

    def start_recv(self):
        '''Open a server on Unix Domain Socket'''
        # Remove a stale socket file left over from a previous run.
        if os.path.exists(SOCKFILE):
            os.unlink(SOCKFILE)
        self.unsock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        self.unsock.bind(SOCKFILE)
        logger.info("Unix Domain Socket listening...")
        self.recv_loop()

    def recv_loop(self):
        '''Receive Snort alert on Unix Domain Socket and
        send to Network Socket Server forever'''
        logger.info("Start the network socket client....")
        self.start_send()
        while True:
            data = self.unsock.recv(BUFSIZE)
            time.sleep(0.5)  # throttle the relay loop
            if data:
                logger.debug("Send {0} bytes of data.".format
                             (sys.getsizeof(data)))
                # data == 65900 byte
                self.tcp_send(data)
            else:
                pass

    def tcp_send(self, data):
        # Forward one alert message over the established TCP connection.
        self.nwsock.sendall(data)
        logger.info("Send the alert messages to Ryu.")
if __name__ == '__main__':
    # Start the listener; start_recv() blocks forever relaying alerts.
    server = SnortListener()
    server.start_recv()
| apache-2.0 | Python |
|
e15de99ae79e97becdc3d4a4a2bcf60e70e5d2d4 | Create escolas_ceara.senso_2013.py | santiagosilas/propython | raspagem/random/escolas_ceara.senso_2013.py | raspagem/random/escolas_ceara.senso_2013.py | import urllib.request
import json

# (Kept for reference) endpoint that lists all cities of Ceará state.
#url = 'http://educacao.dadosabertosbr.com/api/cidades/ce'
#cidades = urllib.request.urlopen(url).read()
#cidades = json.loads(cidades.decode('utf-8'))
#print('Lista de Cidades')
#[print(cidade.split(':')[1]) for cidade in cidades]

print('Lista de Escolas Municipais de Aracati com Lab de Informática')
# Advanced search: city 2301109, with a computer lab, in operation
# (situacaoFuncionamento=1), municipal schools (dependenciaAdministrativa=3).
url = 'http://educacao.dadosabertosbr.com/api/escolas/buscaavancada?cidade=2301109&laboratorioInformatica=on&situacaoFuncionamento=1&dependenciaAdministrativa=3'
escolas = urllib.request.urlopen(url).read() # raw response in bytes
# Decode the JSON payload: the API returns [count, list-of-schools].
escolas = json.loads(escolas.decode('utf-8'))
qtde, escolas = escolas
for escola in escolas:
    print(escola['nome'])
print('total:{0}\n'.format(len(escolas)))
| mit | Python |
|
ef38b112a2cf46fe1bbb52a9633fd42fad03ddb0 | Update method name | yskmt/rtv,yskmt/rtv,michael-lazar/rtv,shaggytwodope/rtv,michael-lazar/rtv,5225225/rtv,5225225/rtv,TheoPib/rtv,TheoPib/rtv,shaggytwodope/rtv,michael-lazar/rtv | rtv/subscriptions.py | rtv/subscriptions.py | import curses
import sys
import time
import logging
from .content import SubscriptionContent
from .page import BasePage, Navigator, BaseController
from .curses_helpers import (Color, LoadScreen, add_line)
__all__ = ['SubscriptionController', 'SubscriptionPage']
_logger = logging.getLogger(__name__)
class SubscriptionController(BaseController):
    """Key-press dispatcher for SubscriptionPage; populated via register()."""
    character_map = {}
class SubscriptionPage(BasePage):
    """Curses page listing the user's subreddit subscriptions; lets one be
    selected and handed back to the caller."""

    def __init__(self, stdscr, reddit):
        self.controller = SubscriptionController(self)
        self.loader = LoadScreen(stdscr)
        # Set when the user picks a subreddit; read by the caller after loop().
        self.selected_subreddit_data = None

        content = SubscriptionContent.from_user(reddit, self.loader)
        super(SubscriptionPage, self).__init__(stdscr, reddit, content)

    def loop(self):
        "Main control loop"

        self.active = True
        while self.active:
            self.draw()
            cmd = self.stdscr.getch()
            self.controller.trigger(cmd)

    @SubscriptionController.register(curses.KEY_F5, 'r')
    def refresh_content(self):
        "Re-download all subscriptions and reset the page index"

        self.content = SubscriptionContent.from_user(self.reddit, self.loader)
        self.nav = Navigator(self.content.get)

    @SubscriptionController.register(curses.KEY_ENTER, 10, curses.KEY_RIGHT)
    def store_selected_subreddit(self):
        "Store the selected subreddit and return to the subreddit page"

        self.selected_subreddit_data = self.content.get(self.nav.absolute_index)
        self.active = False

    @SubscriptionController.register(curses.KEY_LEFT, 'h', 's')
    def close_subscriptions(self):
        "Close subscriptions and return to the subreddit page"

        self.active = False

    @staticmethod
    def draw_item(win, data, inverted=False):
        n_rows, n_cols = win.getmaxyx()
        n_cols -= 1  # Leave space for the cursor in the first column

        # Handle the case where the window is not large enough to fit the data.
        valid_rows = range(0, n_rows)
        # When drawing inverted (anchored to the bottom), shift rows upward.
        offset = 0 if not inverted else -(data['n_rows'] - n_rows)

        # First row: the subreddit name, highlighted.
        row = offset
        if row in valid_rows:
            attr = curses.A_BOLD | Color.YELLOW
            add_line(win, u'{name}'.format(**data), row, 1, attr)

        # Remaining rows: the wrapped title lines.
        row = offset + 1
        for row, text in enumerate(data['split_title'], start=row):
            if row in valid_rows:
                add_line(win, text, row, 1)
| import curses
import sys
import time
import logging
from .content import SubscriptionContent
from .page import BasePage, Navigator, BaseController
from .curses_helpers import (Color, LoadScreen, add_line)
__all__ = ['SubscriptionController', 'SubscriptionPage']
_logger = logging.getLogger(__name__)
class SubscriptionController(BaseController):
    """Key-press dispatcher for SubscriptionPage; populated via register()."""
    character_map = {}
class SubscriptionPage(BasePage):
    """Curses page listing the user's subreddit subscriptions (older copy).

    NOTE(review): refresh_content() calls SubscriptionContent.get_list while
    __init__ uses from_user — confirm which constructor is current.
    """

    def __init__(self, stdscr, reddit):
        self.controller = SubscriptionController(self)
        self.loader = LoadScreen(stdscr)
        # Set when the user picks a subreddit; read by the caller after loop().
        self.selected_subreddit_data = None

        content = SubscriptionContent.from_user(reddit, self.loader)
        super(SubscriptionPage, self).__init__(stdscr, reddit, content)

    def loop(self):
        "Main control loop"

        self.active = True
        while self.active:
            self.draw()
            cmd = self.stdscr.getch()
            self.controller.trigger(cmd)

    @SubscriptionController.register(curses.KEY_F5, 'r')
    def refresh_content(self):
        "Re-download all subscriptions and reset the page index"

        self.content = SubscriptionContent.get_list(self.reddit, self.loader)
        self.nav = Navigator(self.content.get)

    @SubscriptionController.register(curses.KEY_ENTER, 10, curses.KEY_RIGHT)
    def store_selected_subreddit(self):
        "Store the selected subreddit and return to the subreddit page"

        self.selected_subreddit_data = self.content.get(self.nav.absolute_index)
        self.active = False

    @SubscriptionController.register(curses.KEY_LEFT, 'h', 's')
    def close_subscriptions(self):
        "Close subscriptions and return to the subreddit page"

        self.active = False

    @staticmethod
    def draw_item(win, data, inverted=False):
        n_rows, n_cols = win.getmaxyx()
        n_cols -= 1  # Leave space for the cursor in the first column

        # Handle the case where the window is not large enough to fit the data.
        valid_rows = range(0, n_rows)
        # When drawing inverted (anchored to the bottom), shift rows upward.
        offset = 0 if not inverted else -(data['n_rows'] - n_rows)

        # First row: the subreddit name, highlighted.
        row = offset
        if row in valid_rows:
            attr = curses.A_BOLD | Color.YELLOW
            add_line(win, u'{name}'.format(**data), row, 1, attr)

        # Remaining rows: the wrapped title lines.
        row = offset + 1
        for row, text in enumerate(data['split_title'], start=row):
            if row in valid_rows:
add_line(win, text, row, 1) | mit | Python |
43e48de2210873fd5ab6c9181f0aab7884c529be | Create app.py | Fillll/reddit2telegram,Fillll/reddit2telegram | reddit2telegram/channels/r_communism/app.py | reddit2telegram/channels/r_communism/app.py | #encoding:utf-8
subreddit = 'communism'     # subreddit to pull posts from
t_channel = '@r_communism'  # Telegram channel to post into
def send_post(submission, r2t):
    """Relay one reddit submission to the channel with no filtering applied."""
    result = r2t.send_simple(submission)
    return result
| mit | Python |
|
d17c14df00c31af49080ff2f9fea8597a8861461 | Add recipe usage command for quick diagnostics. | google/starthinker,google/starthinker,google/starthinker | starthinker_ui/recipe/management/commands/recipe_usage.py | starthinker_ui/recipe/management/commands/recipe_usage.py | ###########################################################################
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from starthinker_ui.account.models import Account
class Command(BaseCommand):
  help = 'Prints recipe count and age by account.'

  def handle(self, *args, **kwargs):
    # Collect each account's email and its recipes' creation dates.
    usage = []
    for account in Account.objects.all():
      usage.append({
        'email':account.email,
        'recipes':list(account.recipe_set.all().values_list('birthday', flat=True))
      })

    # Accounts with the most recipes first.
    usage.sort(key=lambda u: len(u['recipes']), reverse=True)

    # CSV-ish output: email, recipe count, most recent birthday (blank if none).
    for u in usage:
      print ('{}, {}, {}'.format(u['email'], len(u['recipes']), max(u['recipes']) if u['recipes'] else ''))
| apache-2.0 | Python |
|
2b797879f3a4bf148575df1309369ed4532e4ab6 | bump version to 14.1.0 | yyt030/pyzmq,swn1/pyzmq,ArvinPan/pyzmq,ArvinPan/pyzmq,dash-dash/pyzmq,swn1/pyzmq,dash-dash/pyzmq,caidongyun/pyzmq,yyt030/pyzmq,caidongyun/pyzmq,ArvinPan/pyzmq,caidongyun/pyzmq,swn1/pyzmq,Mustard-Systems-Ltd/pyzmq,Mustard-Systems-Ltd/pyzmq,dash-dash/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq | zmq/sugar/version.py | zmq/sugar/version.py | """PyZMQ and 0MQ version functions."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.backend import zmq_version_info
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
VERSION_MAJOR = 14
VERSION_MINOR = 1
VERSION_PATCH = 0
VERSION_EXTRA = ''  # e.g. 'dev' or an rc tag; empty for a final release

__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

if VERSION_EXTRA:
    __version__ = "%s-%s" % (__version__, VERSION_EXTRA)
    # inf makes any tagged version sort after the plain numeric triple.
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

__revision__ = ''  # git revision, filled in by packaging when available
def pyzmq_version():
    """return the version of pyzmq as a string"""
    if not __revision__:
        return __version__
    # Append the short (6-char) revision when one is known.
    return '@'.join([__version__, __revision__[:6]])
def pyzmq_version_info():
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the third integer.
    """
    # version_info is computed once at import time from the VERSION_* constants.
    return version_info
def zmq_version():
    """return the version of libzmq as a string"""
    major, minor, patch = zmq_version_info()
    return "%i.%i.%i" % (major, minor, patch)
__all__ = ['zmq_version', 'zmq_version_info',
'pyzmq_version','pyzmq_version_info',
'__version__', '__revision__'
]
| """PyZMQ and 0MQ version functions."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.backend import zmq_version_info
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
VERSION_MAJOR = 14
VERSION_MINOR = 1
VERSION_PATCH = 0
VERSION_EXTRA = 'dev'  # development tag; cleared for the final release

__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

if VERSION_EXTRA:
    __version__ = "%s-%s" % (__version__, VERSION_EXTRA)
    # inf makes any tagged version sort after the plain numeric triple.
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
    version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)

__revision__ = ''  # git revision, filled in by packaging when available
def pyzmq_version():
    """return the version of pyzmq as a string"""
    # Append the short (6-char) revision when one is known.
    if __revision__:
        return '@'.join([__version__,__revision__[:6]])
    else:
        return __version__
def pyzmq_version_info():
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the third integer.
    """
    # version_info is computed once at import time from the VERSION_* constants.
    return version_info
def zmq_version():
    """return the version of libzmq as a string"""
    # zmq_version_info() comes from zmq.backend and returns a 3-tuple.
    return "%i.%i.%i" % zmq_version_info()
__all__ = ['zmq_version', 'zmq_version_info',
'pyzmq_version','pyzmq_version_info',
'__version__', '__revision__'
]
| bsd-3-clause | Python |
5b2c328b94244fd6baf6403349919f3bc4f2d013 | add missing migration | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/apps/cms/migrations/0032_update_body.py | meinberlin/apps/cms/migrations/0032_update_body.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-23 16:07
from __future__ import unicode_literals
from django.db import migrations
import meinberlin.apps.cms.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('meinberlin_cms', '0031_mapteaser_block'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('paragraph', wagtail.core.blocks.RichTextBlock(template='meinberlin_cms/blocks/richtext_block.html')), ('call_to_action', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock()), ('link', wagtail.core.blocks.CharBlock()), ('link_text', wagtail.core.blocks.CharBlock(label='Link Text', max_length=50))])), ('image_call_to_action', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(max_length=80)), ('body', wagtail.core.blocks.RichTextBlock()), ('link', wagtail.core.blocks.CharBlock()), ('link_text', wagtail.core.blocks.CharBlock(label='Link Text', max_length=50))])), ('columns_text', wagtail.core.blocks.StructBlock([('columns_count', wagtail.core.blocks.ChoiceBlock(choices=[(2, 'Two columns'), (3, 'Three columns'), (4, 'Four columns')])), ('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.RichTextBlock(label='Column body')))])), ('projects', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(max_length=80)), ('projects', wagtail.core.blocks.ListBlock(meinberlin.apps.cms.blocks.ProjectSelectionBlock(label='Project')))])), ('activities', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(label='Heading')), ('count', wagtail.core.blocks.IntegerBlock(default=5, label='Count'))])), ('accordion', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.RichTextBlock(required=False))])), ('infographic', wagtail.core.blocks.StructBlock([('text_left', wagtail.core.blocks.CharBlock(max_length=50)), ('text_center', wagtail.core.blocks.CharBlock(max_length=50)), ('text_right', wagtail.core.blocks.CharBlock(max_length=50))])), ('map_teaser', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('icon', wagtail.core.blocks.RichTextBlock()), ('body', wagtail.core.blocks.RichTextBlock())]))]),
),
]
| agpl-3.0 | Python |
|
e53a951ed98f460b603f43f6364d5d0a0f17a1ba | Add basic class structure, map functionality, and a set of consumer functions. | caffeine-potent/Streamer-Datastructure | src/streamer.py | src/streamer.py | class pStream:
    ###PRIVATE FUNCTIONS
    def _builder(self, expression):
        # Replace the wrapped iterable in place and return self so the
        # transform methods (e.g. map) can be chained fluently.
        self.STR = expression
        return self
    ###OVERRIDES
    def next(self):
        # Python 2 iterator-protocol hook; delegates to the wrapped iterator.
        # NOTE(review): Python 3 looks for __next__, so on Python 3 this only
        # works when called explicitly as a method.
        return next(self.STR)
    def __init__(self, iterable_thing):
        # Wrap any iterable (list, generator, file, ...) as the stream source.
        self.STR = iterable_thing
    def __iter__(self):
        # Expose the wrapped iterable's iterator so for-loops work directly.
        return iter(self.STR)
    ### TRANSFORMS
    def map(self,function):
        # Wrap the stream in map(function, ...) (lazy under Python 3) and
        # return self so further transforms can be chained.
        return self._builder(map(function, self.STR))
    ### CONSUMERS
    def print_stream(self):
        # Materialize the remaining items and print them.
        # NOTE(review): this exhausts the underlying iterator as a side effect.
        print(list(self.STR))
    def consume(self, function):
        # Hand the raw underlying iterable to `function` for its side
        # effects; whatever `function` returns is discarded.
        function(self.STR)
    def drain(self):
        # Exhaust the stream, discarding every item (forces evaluation of
        # lazy transforms purely for their side effects).
        for x in self.STR:
            pass
| mit | Python |
|
82e871441010999e4a369b101019a34e7b03eca4 | add common csp-report URL | issackelly/django-security,issackelly/django-security,MartinPetkov/django-security,MartinPetkov/django-security,barseghyanartur/django-security,barseghyanartur/django-security | security/urls.py | security/urls.py | from django.conf.urls import patterns, include, url
# Route CSP violation reports to security.views.csp_report.
urlpatterns = patterns('security.views',
    # The view is named as a string relative to the 'security.views' prefix
    # above; the original passed `security.views.csp_report` directly, but
    # `security` is never imported, so the URLconf raised NameError on import.
    # Django also matches URL regexes against the path with the leading '/'
    # already stripped, so the pattern must not start with '/'.
    url(r'^csp-report/$', 'csp_report'),
)
| bsd-3-clause | Python |
|
86c3f149726b58951e85f9bd6e324b032430b5ae | Deploy a schema template (#51379) | thaim/ansible,thaim/ansible | lib/ansible/modules/network/aci/mso_schema_template_deploy.py | lib/ansible/modules/network/aci/mso_schema_template_deploy.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_deploy
short_description: Deploy schema templates to sites
description:
- Deploy schema templates to sites.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
aliases: [ name ]
state:
description:
- Use C(deploy) to deploy schema template.
- Use C(status) to get deployment status.
type: str
choices: [ deploy, status ]
default: deploy
seealso:
- module: mso_schema_site
- module: mso_schema_template
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Deploy a schema template
  mso_schema_template_deploy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: deploy
delegate_to: localhost
- name: Get deployment status
  mso_schema_template_deploy:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: status
delegate_to: localhost
register: status_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec
def main():
    """Ansible entry point: deploy a schema template to sites or query the
    deployment status, then exit with the API response as module facts."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        template=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='deploy', choices=['deploy', 'status']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    schema = module.params['schema']
    template = module.params['template']
    state = module.params['state']
    mso = MSOModule(module)
    # Resolve the schema name to its MSO schema id.
    schema_id = mso.lookup_schema(schema)
    payload = dict(
        schemaId=schema_id,
        templateName=template,
    )
    # `choices` above guarantees state is one of these two values, so
    # `path` is always bound after this chain.
    if state == 'deploy':
        path = 'execute/schema/{0}/template/{1}'.format(schema_id, template)
    elif state == 'status':
        path = 'status/schema/{0}/template/{1}'.format(schema_id, template)
    # Default to an empty result so check mode exits cleanly; the original
    # referenced `status` before assignment (NameError) under --check.
    status = {}
    if not module.check_mode:
        status = mso.request(path, method='GET', data=payload)
    mso.exit_json(**status)
if __name__ == "__main__":
main()
| mit | Python |
|
0b6709670179c0721b4f113d13bf34d9ac7715dd | Add a python plotter that compares the results of with Stirling numbers | Anaphory/parameterclone,Anaphory/parameterclone | test/indices.py | test/indices.py | import matplotlib.pyplot as plt
import numpy
from math import factorial
def binom(a, b):
    """Return the binomial coefficient C(a, b) = a! / (b! * (a-b)!).

    Uses integer (floor) division: the quotient is always mathematically
    exact, and this avoids the float precision loss / overflow that true
    division hits once the factorials get large.
    """
    return factorial(a) // (factorial(b) * factorial(a - b))
def stirling(n,k):
    """Stirling number of the second kind S(n, k): the number of ways to
    partition an n-element set into k non-empty blocks, via the explicit
    inclusion-exclusion formula."""
    # Operator precedence makes this: (n <= 0) or ((n != 0) and (n == k)),
    # i.e. return 1 for n <= 0, or for n == k with n nonzero.
    if n<=0 or n!=0 and n==k:
        return 1
    elif k<=0 or n<k:
        return 0
    # NOTE(review): unreachable -- n == 0 is already caught by the first
    # branch (n <= 0), so this never fires.
    elif n==0 and k==0:
        return -1
    else:
        # Inclusion-exclusion: S(n,k) = (1/k!) * sum_j (-1)^(k-j) C(k,j) j^n.
        s = sum((-1)**(k-j)*binom(k,j)*j**n for j in range(k+1))
        # True division: the value is mathematically an integer but comes
        # back as a float here.
        return s / factorial(k)
# For each logged sample, count how many distinct indices appear among the
# six recorded slots, then plot the histogram against the matching
# Stirling-number distribution S(6, k).
log = []
with open("indices.log") as indices:
    next(indices)  # discard the first (header) line
    for line in indices:
        # NOTE(review): this rebinds `indices` (the file handle) to a list.
        # The loop keeps working because it already holds the file's
        # iterator, but the shadowing is fragile -- a different name would
        # be safer.
        indices = line.split()[1:7]
        size = len(set(indices))
        log.append(size)
# S(6, k) for k = 1..6, scaled below to the number of samples.
stirlings = numpy.array([stirling(6, k) for k in range(1,7)])
plt.hist(log, [0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5])
plt.plot(range(1,7), stirlings * len(log)/stirlings.sum())
plt.show()
| lgpl-2.1 | Python |
|
7b179e4a420a3cd7a27f0f438a6eac462048bb93 | Add py solution for 554. Brick Wall | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/brick-wall.py | py/brick-wall.py | import heapq
class Solution(object):
    def leastBricks(self, wall):
        """
        Return the minimum number of bricks a vertical line through the
        wall must cross.

        A line drawn exactly along a brick edge does not cross that brick,
        so the best line passes through the x-position shared by the most
        internal edges.  For every row, accumulate the prefix widths
        (excluding the final brick, whose right edge is the wall border)
        into a counter; the answer is rows_total minus the highest edge
        count.  This runs in O(total bricks), replacing the original heap
        simulation, which was O(total bricks * log rows) with far more
        bookkeeping.

        :type wall: List[List[int]]
        :rtype: int
        """
        from collections import Counter
        edge_counts = Counter()
        for row in wall:
            width = 0
            # Skip the last brick: its right edge is the outer border,
            # where a line is not allowed.
            for brick in row[:-1]:
                width += brick
                edge_counts[width] += 1
        best = max(edge_counts.values()) if edge_counts else 0
        return len(wall) - best
| apache-2.0 | Python |
|
846ad2780ad2ccc9afbd6a224d567389bf09611f | add integer-to-english-words | EdisonAlgorithms/LeetCode,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code,EdisonAlgorithms/LeetCode,zeyuanxy/leet-code | vol6/integer-to-english-words/integer-to-english-words.py | vol6/integer-to-english-words/integer-to-english-words.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-06 21:35:56
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-06 21:36:07
class Solution(object):
    def numberToWords(self, num):
        """
        Spell out a non-negative integer in English words
        (e.g. 12345 -> 'Twelve Thousand Three Hundred Forty Five').

        :type num: int
        :rtype: str
        """
        if num == 0:
            return 'Zero'
        less20 = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten',
                  'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
        tens = ['Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
        def words(num):
            # Recursively build the word list for num; returns [] for 0 so
            # joins never pick up empty pieces.
            # Floor division (//) keeps the indices/operands integral: the
            # original used true division, which is Python-2-only here and
            # produced floats (and broken indexing) on Python 3.
            if num == 0:
                return []
            elif num < 20:
                return [less20[num - 1]]
            elif num < 100:
                return [tens[num // 10 - 2]] + words(num % 10)
            elif num < 1000:
                return [less20[num // 100 - 1], 'Hundred'] + words(num % 100)
            elif num < 10 ** 6:
                return words(num // (10 ** 3)) + ['Thousand'] + words(num % (10 ** 3))
            elif num < 10 ** 9:
                return words(num // (10 ** 6)) + ['Million'] + words(num % (10 ** 6))
            else:
                return words(num // (10 ** 9)) + ['Billion'] + words(num % (10 ** 9))
        return ' '.join(words(num))
|
f02f8f5a68bd26d1ece32c50482729b7774b6e2a | Add a simple script for looking at connections. | reddit/reddit-plugin-meatspace,reddit/reddit-plugin-meatspace,reddit/reddit-plugin-meatspace | scripts/simple-analysis.py | scripts/simple-analysis.py | #!/usr/bin/python
from __future__ import print_function, division
import networkx
from reddit_meatspace.models import MeetupConnections
connections = MeetupConnections._byID("2013")
digraph = networkx.DiGraph()
for connection, timestamp in connections._values().iteritems():
left, right = connection.split(":")
digraph.add_edge(left, right)
lenient = digraph.to_undirected(reciprocal=False)
strict = digraph.to_undirected(reciprocal=True)
meetups = networkx.connected_component_subgraphs(lenient)
print("{0} people @ {1} meetups (avg. {2:.2} per meetup)".format(
len(lenient), len(meetups), len(lenient) / len(meetups)))
print("{0} connections of {1} distinct meetings ({2:.2%})".format(strict.size(), lenient.size(), strict.size() / lenient.size()))
| bsd-3-clause | Python |
|
e3a36aaab3abe7c645e3b8491cd163dea8ff0fea | add python solution to "project euler - problem 4" | mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler,mo/project-euler | problem4.py | problem4.py |
def is_palindrome(number):
    """Return True if the decimal digits of `number` read the same
    forwards and backwards."""
    digits = str(number)
    # Comparing against the reversed string checks every mirrored pair at
    # once, replacing the manual index loop (which relied on Python-2-only
    # xrange and integer division) and working on both Python 2 and 3.
    return digits == digits[::-1]
def test_it(n):
    # Ad-hoc smoke check: print the number next to its palindrome verdict
    # for manual inspection (Python 2 print statement).
    print n, is_palindrome(n)
# Eyeball the verdicts for 0..1230.
for x in xrange(0,1231):
    test_it(x);
greatest_palindrome = 0
for x in xrange(100,999):
for y in xrange(100,999):
product = x * y
if is_palindrome(product) and product > greatest_palindrome:
greatest_palindrome = product
print greatest_palindrome
| mit | Python |
|
bf4b3d79f34e189a30b8168796fa1595bf49f1d7 | Fix field name | occrp/id-backend | core/migrations/0002_auto_20150907_1413.py | core/migrations/0002_auto_20150907_1413.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Applied on top of the initial core schema.
    dependencies = [
        ('core', '0001_initial'),
    ]
    # Rename Notification.name -> Notification.channel; this is a column
    # rename only, existing row data is preserved.
    operations = [
        migrations.RenameField(
            model_name='notification',
            old_name='name',
            new_name='channel',
        ),
    ]
| mit | Python |
|
404051ebc9d68c571be77e177b0455631f0c14ad | create basic model `Food` for run all tests | avelino/django-tags | tests/models.py | tests/models.py | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from tags.fields import TagField
@python_2_unicode_compatible
class Food(models.Model):
    # Minimal model used only by the test suite to exercise TagField.
    name = models.CharField(max_length=50)
    tags = TagField('Tags')
    def __str__(self):
        return self.name
| mit | Python |
|
50ca8ffb078b37b472c1ccbbb2a4f21e8d7eade4 | fix gbformatting | kaiweifan/horizon,ntt-pf-lab/openstack-dashboard,Daniex/horizon,occ-data/tukey-portal,flochaz/horizon,developerworks/horizon,LabAdvComp/tukey_portal,developerworks/horizon,anthonydillon/horizon,doug-fish/horizon,coreycb/horizon,henaras/horizon,pnavarro/openstack-dashboard,mdavid/horizon,redhat-cip/horizon,VaneCloud/horizon,yeming233/horizon,blueboxgroup/horizon,nvoron23/avos,orbitfp7/horizon,CiscoSystems/horizon,vladryk/horizon,redhat-openstack/horizon,BiznetGIO/horizon,r-icarus/openstack_microserver,openstack-ja/horizon,ging/horizon,orbitfp7/horizon,yeming233/horizon,VaneCloud/horizon,Mirantis/mos-horizon,openstack/horizon,agileblaze/OpenStackTwoFactorAuthentication,mandeepdhami/horizon,sandvine/horizon,takeshineshiro/horizon,openstack-ja/horizon,tellesnobrega/horizon,tqtran7/horizon,redhat-cip/horizon,sandvine/horizon,ikargis/horizon_fod,agileblaze/OpenStackTwoFactorAuthentication,redhat-cip/horizon,tqtran7/horizon,orbitfp7/horizon,FNST-OpenStack/horizon,mandeepdhami/horizon,dan1/horizon-proto,Solinea/horizon,blueboxgroup/horizon,RudoCris/horizon,mrunge/horizon,Daniex/horizon,ging/horizon,openstack/horizon,newrocknj/horizon,citrix-openstack-build/horizon,pranavtendolkr/horizon,rdo-management/tuskar-ui,xme1226/horizon,spring-week-topos/horizon-week,zen/openstack-dashboard,LabAdvComp/tukey_portal,CiscoSystems/openstack-dashboard,damien-dg/horizon,99cloud/keystone_register,endorphinl/horizon,Solinea/horizon,Dark-Hacker/horizon,griddynamics/osc-robot-openstack-dashboard,maestro-hybrid-cloud/horizon,tuskar/tuskar-ui,promptworks/horizon,nvoron23/avos,RudoCris/horizon,netscaler/horizon,openstack-ja/horizon,gerrive/horizon,ohnoimdead/horizon,idjaw/horizon,flochaz/horizon,vladryk/horizon,mrunge/openstack_horizon,henaras/horizon,aaronorosen/horizon-congress,CiscoSystems/avos,gerrive/horizon,watonyweng/horizon,django-leonardo/horizon,Tesora/tesora-horizon,VaneCloud/horizon,tanglei528/horizon,coreycb/horizon,yjxtog
o/horizon,tsufiev/horizon,yeming233/horizon,xinwu/horizon,tanglei528/horizon,NeCTAR-RC/horizon,ntt-pf-lab/horizon,takeshineshiro/horizon,tanglei528/horizon,savi-dev/horizon,icloudrnd/automation_tools,FNST-OpenStack/horizon,Solinea/horizon,watonyweng/horizon,bigswitch/horizon,ntt-pf-lab/openstack-dashboard,Mirantis/mos-horizon,spring-week-topos/horizon-week,saydulk/horizon,blueboxgroup/horizon,idjaw/horizon,JioCloud/horizon,citrix-openstack/horizon,FNST-OpenStack/horizon,NCI-Cloud/horizon,netscaler/horizon,rdo-management/tuskar-ui,ntt-pf-lab/horizon,Dark-Hacker/horizon,spring-week-topos/horizon-week,rickerc/horizon_audit,anthonydillon/horizon,citrix-openstack-build/horizon,Tesora/tesora-horizon,luhanhan/horizon,citrix-openstack-build/horizon,davidcusatis/horizon,luhanhan/horizon,developerworks/horizon,pranavtendolkr/horizon,promptworks/horizon,redhat-openstack/horizon,mrunge/openstack_horizon,vladryk/horizon,philoniare/horizon,henaras/horizon,j4/horizon,xinwu/horizon,JioCloud/horizon,promptworks/horizon,blueboxgroup/horizon,Metaswitch/horizon,vladryk/horizon,anthonydillon/horizon,wolverineav/horizon,yjxtogo/horizon,philoniare/horizon,karthik-suresh/horizon,icloudrnd/automation_tools,j4/horizon,NeCTAR-RC/horizon,luhanhan/horizon,zen/openstack-dashboard,mdavid/horizon,saydulk/horizon,griddynamics/osc-robot-openstack-dashboard,Tesora/tesora-horizon,ntt-pf-lab/openstack-dashboard,watonyweng/horizon,dan1/horizon-proto,CiscoSystems/avos,Daniex/horizon,sandvine/horizon,maestro-hybrid-cloud/horizon,tuskar/tuskar-ui,usc-isi/horizon-old,izadorozhna/dashboard_integration_tests,gochist/horizon,tqtran7/horizon,99cloud/keystone_register,maestro-hybrid-cloud/horizon,Metaswitch/horizon,mrunge/horizon,yanheven/console,vbannai/disk-qos-horizon,wangxiangyu/horizon,mandeepdhami/horizon,izadorozhna/dashboard_integration_tests,savi-dev/horizon,karthik-suresh/horizon,yanheven/console,1ukash/horizon,xme1226/horizon,idjaw/horizon,redhat-cip/horizon,CiscoSystems/horizon,Dark-Hacker/horizon,da
vidcusatis/horizon,1ukash/horizon,mdavid/horizon,newrocknj/horizon,Mirantis/mos-horizon,ohnoimdead/horizon,CiscoSystems/avos,philoniare/horizon,Hodorable/0602,opencloudconsortium/tukey-portal,luhanhan/horizon,cloud-smokers/openstack-dashboard,coreycb/horizon,asomya/test,gerrive/horizon,mandeepdhami/horizon,yeming233/horizon,CiscoSystems/avos,doug-fish/horizon,redhat-openstack/horizon,NeCTAR-RC/horizon,openstack/horizon,kaiweifan/horizon,dan1/horizon-x509,xme1226/horizon,zouyapeng/horizon,damien-dg/horizon,froyobin/horizon,endorphinl/horizon,tuskar/tuskar-ui,usc-isi/horizon-old,noironetworks/horizon,occ-data/tukey-portal,netscaler/horizon,gerrive/horizon,takeshineshiro/horizon,noironetworks/horizon,pnavarro/openstack-dashboard,Hodorable/0602,CiscoSystems/openstack-dashboard,ttrifonov/horizon,davidcusatis/horizon,karthik-suresh/horizon,karthik-suresh/horizon,mdavid/horizon,ikargis/horizon_fod,philoniare/horizon,noironetworks/horizon,eayunstack/horizon,eayunstack/horizon,newrocknj/horizon,ttrifonov/horizon,henaras/horizon,j4/horizon,vbannai/disk-qos-horizon,pranavtendolkr/horizon,flochaz/horizon,nvoron23/avos,tsufiev/horizon,mrunge/horizon_lib,tqtran7/horizon,kaiweifan/horizon,liyitest/rr,griddynamics/osc-robot-openstack-dashboard,icloudrnd/automation_tools,CiscoSystems/horizon,Frostman/eho-horizon,noironetworks/horizon,Hodorable/0602,newrocknj/horizon,liyitest/rr,coreycb/horizon,tsufiev/horizon,django-leonardo/horizon,takeshineshiro/horizon,wangxiangyu/horizon,endorphinl/horizon-fork,CiscoSystems/horizon,gochist/horizon,eayunstack/horizon,pnavarro/openstack-dashboard,bac/horizon,orbitfp7/horizon,NCI-Cloud/horizon,zouyapeng/horizon,rdo-management/tuskar-ui,ChameleonCloud/horizon,promptworks/horizon,dan1/horizon-x509,agileblaze/OpenStackTwoFactorAuthentication,r-icarus/openstack_microserver,wolverineav/horizon,Dark-Hacker/horizon,tellesnobrega/horizon,Frostman/eho-horizon,ging/horizon,aaronorosen/horizon-congress,BiznetGIO/horizon,bac/horizon,flochaz/horizon,xinwu/horiz
on,asomya/test,anthonydillon/horizon,JioCloud/horizon,mrunge/horizon,kfox1111/horizon,endorphinl/horizon-fork,doug-fish/horizon,dan1/horizon-proto,tellesnobrega/horizon,NCI-Cloud/horizon,zouyapeng/horizon,ohnoimdead/horizon,ikargis/horizon_fod,cloud-smokers/openstack-dashboard,dan1/horizon-x509,BiznetGIO/horizon,bigswitch/horizon,wangxiangyu/horizon,Solinea/horizon,mrunge/horizon_lib,endorphinl/horizon-fork,yanheven/console,bac/horizon,VaneCloud/horizon,ChameleonCloud/horizon,cloud-smokers/openstack-dashboard,LabAdvComp/tukey_portal,CiscoSystems/openstack-dashboard,Hodorable/0602,savi-dev/horizon,liyitest/rr,endorphinl/horizon,pranavtendolkr/horizon,idjaw/horizon,FNST-OpenStack/horizon,kfox1111/horizon,ChameleonCloud/horizon,bac/horizon,occ-data/tukey-portal,tellesnobrega/horizon,endorphinl/horizon-fork,wangxiangyu/horizon,asomya/test,ging/horizon,django-leonardo/horizon,dan1/horizon-proto,agileblaze/OpenStackTwoFactorAuthentication,wolverineav/horizon,bigswitch/horizon,liyitest/rr,redhat-openstack/horizon,damien-dg/horizon,j4/horizon,NCI-Cloud/horizon,rickerc/horizon_audit,watonyweng/horizon,dan1/horizon-x509,Metaswitch/horizon,yjxtogo/horizon,yjxtogo/horizon,Daniex/horizon,mrunge/horizon_lib,opencloudconsortium/tukey-portal,maestro-hybrid-cloud/horizon,1ukash/horizon,rickerc/horizon_audit,aaronorosen/horizon-congress,99cloud/keystone_register,Tesora/tesora-horizon,RudoCris/horizon,bigswitch/horizon,froyobin/horizon,openstack/horizon,zen/openstack-dashboard,rdo-management/tuskar-ui,nvoron23/avos,citrix-openstack/horizon,tsufiev/horizon,davidcusatis/horizon,saydulk/horizon,BiznetGIO/horizon,kfox1111/horizon,gochist/horizon,NeCTAR-RC/horizon,wolverineav/horizon,doug-fish/horizon,endorphinl/horizon,vbannai/disk-qos-horizon,usc-isi/horizon-old,ChameleonCloud/horizon,xinwu/horizon,LabAdvComp/tukey_portal,RudoCris/horizon,Metaswitch/horizon,citrix-openstack/horizon,kfox1111/horizon,opencloudconsortium/tukey-portal,ttrifonov/horizon,froyobin/horizon,sandvine/horizon,djang
o-leonardo/horizon,saydulk/horizon,zouyapeng/horizon,icloudrnd/automation_tools,ntt-pf-lab/horizon,Mirantis/mos-horizon,mrunge/openstack_horizon,r-icarus/openstack_microserver,damien-dg/horizon,Frostman/eho-horizon | django-openstack/django_openstack/templatetags/templatetags/sizeformat.py | django-openstack/django_openstack/templatetags/templatetags/sizeformat.py | """
Template tags for displaying sizes
"""
import datetime
from django import template
from django.utils import translation
from django.utils import formats
register = template.Library()
def int_format(value):
    # Number renderer: truncate to a whole number (used for MB figures).
    return int(value)
def float_format(value):
    # Number renderer: round to one decimal place and format it with the
    # active locale's number formatting.
    return formats.number_format(round(value, 1), 0)
def filesizeformat(bytes, filesize_number_format):
    # Convert a raw byte count into a human readable string (KB/MB/GB/TB/PB),
    # scaling by powers of 1024.  `filesize_number_format` is a callback
    # (int_format or float_format) that controls how the number is rendered.
    try:
        bytes = float(bytes)
    except (TypeError,ValueError,UnicodeDecodeError):
        # Unparseable input renders as a translated, pluralized "0 bytes".
        return translation.ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
    # Below 1 KB the bare number is returned with no unit suffix.
    if bytes < 1024:
        return translation.ungettext("%(size)d", "%(size)d", bytes) % {'size': bytes}
    if bytes < 1024 * 1024:
        return translation.ugettext("%s KB") % filesize_number_format(bytes / 1024)
    if bytes < 1024 * 1024 * 1024:
        return translation.ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
    if bytes < 1024 * 1024 * 1024 * 1024:
        return translation.ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
    if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
        return translation.ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
    return translation.ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
@register.filter(name='mbformat')
def mbformat(mb):
    # Template filter: a value given in MB -> compact string, e.g. 2048 -> "2GB".
    return filesizeformat(mb * 1024 * 1024, int_format).replace(' ', '')
@register.filter(name='diskgbformat')
def diskgbformat(gb):
    # Template filter: a value given in GB -> compact string with one decimal.
    return filesizeformat(gb * 1024 * 1024 * 1024, float_format).replace(' ', '')
| """
Template tags for displaying sizes
"""
import datetime
from django import template
from django.utils import translation
from django.utils import formats
register = template.Library()
def int_format(value):
return int(value)
def float_format(value):
return formats.number_format(round(value, 1), 0)
def filesizeformat(bytes, filesize_number_format):
try:
bytes = float(bytes)
except (TypeError,ValueError,UnicodeDecodeError):
return translation.ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
if bytes < 1024:
return translation.ungettext("%(size)d", "%(size)d", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return translation.ugettext("%s KB") % filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return translation.ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return translation.ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return translation.ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return translation.ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
@register.filter(name='mbformat')
def mbformat(mb):
return filesizeformat(mb * 1024 * 1024, int_format).replace(' ', '')
@register.filter(name='diskgbformat')
def diskgbformat(gb):
    # Template filter: a value given in GB -> compact string with one decimal.
    # Scale by 1024**3 bytes: the previous 1000 * 1000 factor treated the
    # input as (decimal) megabytes, producing figures off by three orders
    # of magnitude.
    return filesizeformat(gb * 1024 * 1024 * 1024, float_format).replace(' ', '')
| apache-2.0 | Python |
106bce6081ed6c1c8442a1bb82aa39ef177bed8a | Solve problem 6 | mazayus/ProjectEuler | problem006.py | problem006.py | #!/usr/bin/env python3
def sumsq(maxnumber):
    """Sum of the squares 1^2 + 2^2 + ... + maxnumber^2."""
    total = 0
    for k in range(1, maxnumber + 1):
        total += k * k
    return total
def sqsum(maxnumber):
    """Square of the sum (1 + 2 + ... + maxnumber)^2."""
    running = 0
    for k in range(1, maxnumber + 1):
        running += k
    return running * running
print(sqsum(100) - sumsq(100))
| mit | Python |
|
fab193fa2c31b2e1cf58255d37f5e8dd63fef206 | Add high level for key signing | saltstack/libnacl,mindw/libnacl,cachedout/libnacl,johnttan/libnacl,RaetProtocol/libnacl,coinkite/libnacl | libnacl/sign.py | libnacl/sign.py | '''
High level routines to maintain signing keys and to sign and verify messages
'''
# Import libancl libs
import libnacl
import libnacl.utils
import libnacl.encode
class Signer(libnacl.utils.BaseKey):
    '''
    Holds a signing keypair and produces signed messages
    '''
    def __init__(self, seed=None):
        '''
        Build the signing keypair.  With no (or empty) seed a fresh random
        keypair is generated; otherwise the seed must be exactly
        crypto_sign_SEEDBYTES long and the keypair is derived from it
        '''
        if not seed:
            self.sk, self.vk = libnacl.crypto_sign_keypair()
            return
        if len(seed) != libnacl.crypto_sign_SEEDBYTES:
            raise ValueError('Invalid seed bytes')
        self.sk, self.vk = libnacl.crypto_sign_seed_keypair(seed)
    def sign(self, msg):
        '''
        Return msg signed with this key's secret signing key
        '''
        signed = libnacl.crypto_sign(msg, self.sk)
        return signed
class Verifier(libnacl.utils.BaseKey):
    '''
    Checks signatures produced by a Signer
    '''
    def __init__(self, vk_hex):
        '''
        Construct the verify key from its hex encoded representation
        '''
        self.vk = libnacl.encode.hex_decode(vk_hex)
    def verify(self, msg):
        '''
        Open the signed message with this verify key and return the
        original payload; a bad signature raises from libnacl
        '''
        opened = libnacl.crypto_sign_open(msg, self.vk)
        return opened
| apache-2.0 | Python |
|
6332ca6fd715e730faf7e377843e2d2f8bfa2b84 | Create blink.py | pumanzor/iot-redlibre,pumanzor/iot-redlibre | linkit/blink.py | linkit/blink.py | #!/usr/bin/python
# Blink an LED wired to GPIO D5, toggling once per second forever
# (Python 2 script using libmraa).
import mraa # For accessing the GPIO
import time # For sleeping between blinks
LED_GPIO = 5 # we are using D5 pin
blinkLed = mraa.Gpio(LED_GPIO) # Get the LED pin object
blinkLed.dir(mraa.DIR_OUT) # Set the direction as output
ledState = False # LED is off to begin with
blinkLed.write(0)
# One infinite loop coming up
while True:
    # Toggle the pin and mirror the new state in ledState.
    if ledState == False:
        # LED is off, turn it on
        blinkLed.write(1)
        ledState = True # LED is on
    else:
        blinkLed.write(0)
        ledState = False
    print "LED is on? \nAns: %s" %(ledState)
    # Wait for some time
    time.sleep(1)
| mit | Python |
|
56b5b0d9f1fd420e2ea7cdb0654d5c2f9d637189 | Add light theme | timmygee/promptastic,nimiq/promptastic,egoddard/promptastic | themes/light.py | themes/light.py | from utils import colors
# Segments colors.
# Convention: <SEGMENT>_BG is a prompt segment's background colour,
# <SEGMENT>_FG the text colour drawn on top of it.
USERATHOST_BG = colors.SMERALD
USERATHOST_FG = colors.WHITE
SSH_BG = colors.LIGHT_ORANGE
SSH_FG = colors.WHITE
CURRENTDIR_BG = colors.MID_GREY
CURRENTDIR_FG = colors.LIGHT_GREY
READONLY_BG = colors.LIGHT_GREY
READONLY_FG = colors.RED
EXITCODE_BG = colors.RED
EXITCODE_FG = colors.WHITE
PADDING_BG = colors.WHITE
# Git status segments: red for dirty states, orange for staged, green for clean.
GIT_UNTRACKED_FILES_BG = colors.PINKISH_RED
GIT_UNTRACKED_FILES_FG = colors.NEARLY_WHITE_GREY
GIT_CHANGES_NOT_STAGED_BG = colors.PINKISH_RED
GIT_CHANGES_NOT_STAGED_FG = colors.NEARLY_WHITE_GREY
GIT_ALL_CHANGES_STAGED_BG = colors.LIGHT_ORANGE
GIT_ALL_CHANGES_STAGED_FG = colors.DARKER_GREY
GIT_CLEAN_BG = colors.PISTACHIO
GIT_CLEAN_FG = colors.DARKER_GREY
# Virtualenv, background jobs and clock segments.
VENV_BG = colors.SMERALD
VENV_FG = colors.EXTRA_LIGHT_GREY
JOBS_BG = colors.DARK_PURPLE
JOBS_FG = colors.WHITE
TIME_BG = colors.LIGHT_GREY
TIME_FG = colors.MID_GREY | apache-2.0 | Python |
|
878d0b793c8efa1dafc246326d519685032ee9a7 | Add new package: influxdb (#17909) | LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/influxdb/package.py | var/spack/repos/builtin/packages/influxdb/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
_versions = {
'1.8.1': {
'Linux_amd64': ('64e60e438ac8a8fdacc6623f238c40bffae31c795642146d70eb316533d3d70f', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.1-static_linux_amd64.tar.gz'),
'Linux_aarch64': ('fd5d7c962827ab1ccae27f6504595fdcd30c20d505b8e07d8573e274824e1366', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.1_linux_arm64.tar.gz')},
'1.8.0': {
'Linux_amd64': ('aedc5083ae2e61ef374dbde5044ec2a5b27300e73eb92ccd135e6ff9844617e2', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.0-static_linux_amd64.tar.gz'),
'Linux_aarch64': ('e76c36c10e46c2fd17820156b290dd776a465da0298496af5d490e555504b079', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.0_linux_arm64.tar.gz')},
}
class Influxdb(Package):
    """InfluxDB is an open source time series platform."""
    homepage = "https://influxdata.com/"
    url = "https://dl.influxdata.com/influxdb/releases/influxdb-1.8.1-static_linux_amd64.tar.gz"
    # Register only the pre-built tarballs matching the host OS/arch; this
    # class body runs at definition time, so unsupported platforms simply
    # end up with no versions.
    for ver, packages in _versions.items():
        key = "{0}_{1}".format(platform.system(), platform.machine())
        pkg = packages.get(key)
        if pkg:
            version(ver, sha256=pkg[0], url=pkg[1])
    def setup_run_environment(self, env):
        # The tarball unpacks its binaries under usr/bin rather than bin.
        env.prepend_path('PATH', self.prefix.usr.bin)
    def install(self, spec, prefix):
        # Binary distribution: just copy the unpacked trees into the prefix.
        install_tree('usr', prefix)
        install_tree('etc', prefix.etc)
        install_tree('var', prefix.var)
| lgpl-2.1 | Python |
|
0c94e01a6d70b9ab228adc6ba3b094a488ee6020 | Create json_yaml.py | app-git-hub/yamSettings | json_yaml.py | json_yaml.py | class FileHandler():
""" lazy read 'num_lines' lines from filePath
to avoid overwhelming the RAM, most importantly
it runs only when asked for """
    # Class-level defaults: path of the file to read and lines per batch.
    filePath, num_lines = str(), int()
    def setVarValues(self):
        # Prompt for the file path and chunk size, then prime the reader.
        # NOTE(review): these assign function-local names, not the class
        # attributes filePath/num_lines -- the values are discarded on
        # return, and num_lines stays a string (input() is never int()-ed).
        filePath = input()
        num_lines= input()
        moreLines = nLineReader()
        return
    def nLineReader():
        # Generator intended to lazily yield batches of up to num_lines
        # lines from filePath.
        # NOTE(review): `mode"rt"` is missing an '=' (SyntaxError as
        # written), and filePath/num_lines are bare names not in scope here.
        with open(filePath, mode"rt", encoding="UTF-8") as fh:
            yield fh.readlines(num_lines)
    def giveMeMore(self):
        # Pull the next batch from the generator; None signals exhaustion.
        # NOTE(review): `moreLines` is not defined in any reachable scope --
        # setVarValues only ever bound it as a local.
        try:
            x = next(moreLines)
        except StopIteration:
            return None
        return x
class YAML_to_JSON():
jsonStr = "{"
ds = ['o'] # data structure tracker list, either oBJECT or lIST
TAB = " "
LIST= " - "
onStrictMode = False
    def run(self):
        # Drive the conversion: configure the reader, then pull chunks of
        # lines until the input file is exhausted.
        # NOTE(review): setVarValues/giveMeMore are instance methods invoked
        # on the FileHandler class with no instance, and processThese is
        # referenced without self -- this method cannot run as written.
        FileHandler.setVarValues()
        while True:
            x = FileHandler.giveMeMore()
            if x is None:
                break
            else:
                processThese(x)
        return
    def dsu(*addWhat):
        # Data-structure-update helper: with no argument, close the current
        # container (pop the tracker, emit '}' or ']'); with one argument,
        # open a new one ('o' -> object '{', anything else -> list '[').
        # NOTE(review): `is` is used for int/str comparison (only works by
        # CPython interning accident), and `ds`/`jsonStr` are class
        # attributes referenced through bare names, so these lines raise
        # NameError/UnboundLocalError as written.
        if len(addWhat) is 0: # or addWhat is None
            temp = ds.pop()
            jsonStr+= '}' if temp is 'o' else ']'
        elif len(addWhat) is 1:
            ds.append(addWhat[0])
            jsonStr+= '{' if addWhat[0] is 'o' else '['
        else:
            pass
        return
def processThese(lines):
_.rstrip(" ") for _ in lines # needed?
for i, thisLine in enumerate(lines): # is not len(lines) bound to be 100 by default
if i <= len(lines)-3: # 2-3 being the seekAhead offset
# wooow, what to do??? FileHandler.giveMeMore()? but where to store it?
isList = True if thisLine.lstrip(TAB).startswith(LIST) else False
realDepth = thisLine.count(TAB) + int(isList)
if realDepth < len(ds): # if depth decreases ELSE depth is same or increases
while realDepth-len(ds) not 0:
dsu()
elif realDepth is len(ds):
if ds[-1] not '[' and ':' in thisLine:
pre, sep, post = thisLine.partition(":")
if post is None:
promise_broken = True
else:
# simple key value pair
jsonStr+= '"' + pre + '":' + (lambda: post if type(post) is int or type(post) is float or post is "true" else '"'+post+'"')
else:
ERROR
else: # realDepth > len(ds)
if promise_broken: # value for last line,,, what if nested object???
# implies this line must be either a list or a child obj
else:
?
if LIST in thisLine[realDepth:
type(idealDepth) is int:
| apache-2.0 | Python |
|
60e64bac0a3cadccd0f35b5c8e8770b4edf0afff | add new package (#21793) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/virtuoso/package.py | var/spack/repos/builtin/packages/virtuoso/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Virtuoso(AutotoolsPackage):
"""Virtuoso is a high-performance and scalable Multi-Model RDBMS,
Data Integration Middleware, Linked Data Deployment, and HTTP
Application Server Platform"""
homepage = "https://github.com/openlink/virtuoso-opensource"
git = "https://github.com/openlink/virtuoso-opensource.git"
version('7.2.5.1-dev', commit='3ff1d4b3de3977337baf909c264968b9f70b7d2c')
variant('dbpedia-vad', default=False, description='DBpedia vad package')
variant('demo-vad', default=False, description='Demo vad package')
variant('fct-vad', default=True, description='Facet Browser vad package')
variant('ods-vad', default=True, description='ODS vad package')
variant('sparqldemo-vad', default=False, description='Sparql Demo vad package')
variant('tutorial-vad', default=False, description='Tutorial vad package')
variant('isparql-vad', default=True, description='iSPARQL vad package')
variant('rdfmappers-vad', default=True, description='RDF Mappers vad package')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('bison', type='build')
depends_on('flex', type='build')
depends_on('gperf')
depends_on('readline')
depends_on('[email protected]:1.1.99')
    def autoreconf(self, spec, prefix):
        # The git checkout ships no configure script; run the project's
        # autogen.sh through bash to generate it.
        bash = which('bash')
        bash('./autogen.sh')
    def configure_args(self):
        # Point configure at Spack's readline library directory instead of
        # the system one.
        readlinep = self.spec['readline'].prefix.lib
        args = ['--with-layout=opt',
                # Renames the installed isql binary to isql-v (presumably to
                # avoid clashing with unixODBC's isql -- confirm).
                '--program-transform-name=s/isql/isql-v/',
                '--with-readline={0}'.format(readlinep)
                ]
        # Each vad-package variant maps to an --enable-*/--disable-* flag.
        args.extend(self.enable_or_disable('dbpedia-vad'))
        args.extend(self.enable_or_disable('demo-vad'))
        args.extend(self.enable_or_disable('fct-vad'))
        args.extend(self.enable_or_disable('ods-vad'))
        args.extend(self.enable_or_disable('sparqldemo-vad'))
        args.extend(self.enable_or_disable('tutorial-vad'))
        args.extend(self.enable_or_disable('isparql-vad'))
        args.extend(self.enable_or_disable('rdfmappers-vad'))
        return args
| lgpl-2.1 | Python |
|
39b63523634801fe8ef2cca03e11b3875d84cdbd | Tweak syntax for f.close() concision, add typehints | mir-group/flare,mir-group/flare | flare/flare_io.py | flare/flare_io.py | from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
    """
    Serialize an MD trajectory (a list of structures) to a json file.
    :param filename: path of the json file to create
    :param structures: structures to write, in trajectory order
    """
    with open(filename, 'w') as handle:
        frames = [frame.as_dict() for frame in structures]
        dump(frames, handle, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
    """
    Load an MD trajectory written by md_trajectory_to_file.
    :param filename: path of the json file to read
    """
    with open(filename, 'r') as handle:
        raw_frames = load(handle)
    structures = [Structure.from_dict(frame) for frame in raw_frames]
    return structures
| from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
    """
    Take a list of structures and write them to a json file.

    Uses a context manager so the file is closed even if serialization
    raises (the original leaked the handle on error).
    :param filename: path of the json file to create
    :param structures: structures to write, in trajectory order
    """
    with open(filename, 'w') as f:
        dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
    """
    Read a list of structures from a json file, formatted as in
    md_trajectory_to_file.

    :param filename: path of the json file to read
    """
    # Context manager replaces the bare open()/implicit close: the handle is
    # released even if load() raises.
    with open(filename, 'r') as f:
        structure_list = load(f)
    return [Structure.from_dict(dictionary) for dictionary in structure_list]
| mit | Python |
cb0d6124ea31e8fb9ff8957072a2b881b882127e | Add Timelapse script for sunrise timelapses | KonradIT/gopro-py-api,KonradIT/gopro-py-api | examples/hero9_timelapse_webcam.py | examples/hero9_timelapse_webcam.py | import sys
import time
from goprocam import GoProCamera, constants
import threading
import logging
"""
I use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.
pm2 start timelapse.py --cron "30 7 * * *" --log timelapse.log --no-autorestart
This script will overrride some settings for reliability:
Voice control: OFF
AutoPower off: NEVER
Beeps: OFF (Do not want the camera beeping at 6AM)
NightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.
"""
def start_timelapse(interface):
    """Connect to the GoPro on *interface*, apply reliability settings,
    and start a NightLapse capture."""
    ip_address = GoProCamera.GoPro.getWebcamIP(interface)
    camera = GoProCamera.GoPro(ip_address=ip_address,
                               camera=constants.gpcontrol,
                               webcam_device=interface)
    logging.info(
        "Started goprocam instance with interface {}".format(interface))
    # Keep the camera quiet and awake for the whole capture.
    camera.gpControlSet(constants.Setup.VOICE_CONTROL,
                        constants.Setup.VoiceControl.OFF)
    camera.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)
    logging.info("All config set")
    camera.mode(constants.Mode.MultiShotMode,
                constants.Mode.SubMode.MultiShot.NightLapse)
    camera.shutter(constants.start)
    logging.info("Started timelapse")
# Camera interfaces arrive as one comma-separated CLI argument,
# e.g. "usb0,usb1".
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
    # One thread per camera so every timelapse starts concurrently.
    thr = threading.Thread(target=start_timelapse, args=(interface,))
    thr.start()
| mit | Python |
|
39d9dfc1fa8e57b126a2da4978a62702ea206521 | add test for DeactivateMobileWorkerTrigger | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/users/tests/test_deactivate_mobile_worker_trigger.py | corehq/apps/users/tests/test_deactivate_mobile_worker_trigger.py | import datetime
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import (
CommCareUser,
DeactivateMobileWorkerTrigger,
)
class TestDeactivateMobileWorkerTrigger(TestCase):
    """Verify that DeactivateMobileWorkerTrigger.deactivate_mobile_workers
    deactivates exactly the workers whose trigger date has arrived or passed,
    and deletes the consumed triggers."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.date_deactivation = datetime.date(2022, 2, 22)
        cls.domain = create_domain('test-auto-deactivate-001')

        # Four active workers: one with no trigger, and one each with a
        # trigger on / before / after the reference date.
        user_normal = CommCareUser.create(
            domain=cls.domain.name,
            username='user_normal',
            password='secret',
            created_by=None,
            created_via=None,
            is_active=True,
        )
        user_deactivate = CommCareUser.create(
            domain=cls.domain.name,
            username='user_deactivate',
            password='secret',
            created_by=None,
            created_via=None,
            is_active=True,
        )
        user_past_deactivate = CommCareUser.create(
            domain=cls.domain.name,
            username='user_past_deactivate',
            password='secret',
            created_by=None,
            created_via=None,
            is_active=True,
        )
        user_future_deactivate = CommCareUser.create(
            domain=cls.domain.name,
            username='user_future_deactivate',
            password='secret',
            created_by=None,
            created_via=None,
            is_active=True,
        )
        cls.users = [
            user_normal,
            user_deactivate,
            user_past_deactivate,
            user_future_deactivate,
        ]
        # Trigger exactly on the reference date: should fire.
        DeactivateMobileWorkerTrigger.objects.create(
            domain=cls.domain.name,
            user_id=user_deactivate.user_id,
            deactivate_after=cls.date_deactivation
        )
        # Trigger one day in the future: should NOT fire.
        DeactivateMobileWorkerTrigger.objects.create(
            domain=cls.domain.name,
            user_id=user_future_deactivate.user_id,
            deactivate_after=cls.date_deactivation + datetime.timedelta(days=1)
        )
        # Trigger one day in the past: should fire.
        DeactivateMobileWorkerTrigger.objects.create(
            domain=cls.domain.name,
            user_id=user_past_deactivate.user_id,
            deactivate_after=cls.date_deactivation - datetime.timedelta(days=1)
        )

    @classmethod
    def tearDownClass(cls):
        # Clean up triggers, users, then the domain, in dependency order.
        DeactivateMobileWorkerTrigger.objects.all().delete()
        for user in cls.users:
            user.delete(user.domain, None)
        cls.domain.delete()
        super().tearDownClass()

    def test_users_deactivated(self):
        # Precondition: everyone starts active and three triggers exist.
        active_statuses = [(u.username, u.is_active) for u in self.users]
        self.assertListEqual(
            active_statuses,
            [
                ('user_normal', True),
                ('user_deactivate', True),
                ('user_past_deactivate', True),
                ('user_future_deactivate', True),
            ]
        )
        self.assertEqual(
            DeactivateMobileWorkerTrigger.objects.count(), 3
        )
        DeactivateMobileWorkerTrigger.deactivate_mobile_workers(
            self.domain, self.date_deactivation
        )
        # Re-fetch users: only on-date and past-date triggers deactivate.
        refreshed_users = [CommCareUser.get_by_user_id(u.get_id) for u in self.users]
        new_active_statuses = [(u.username, u.is_active) for u in refreshed_users]
        self.assertListEqual(
            new_active_statuses,
            [
                ('user_normal', True),
                ('user_deactivate', False),
                ('user_past_deactivate', False),
                ('user_future_deactivate', True),
            ]
        )
        # Consumed triggers are deleted; only the future one remains.
        self.assertEqual(
            DeactivateMobileWorkerTrigger.objects.count(), 1
        )
| bsd-3-clause | Python |
|
9d63571c5add6ff2ff064f41c9bc97a6943a69e9 | add missing migration | mcallistersean/b2-issue-tracker,mcallistersean/b2-issue-tracker,mcallistersean/b2-issue-tracker | toucan/invitations/migrations/0002_auto_20161009_2158.py | toucan/invitations/migrations/0002_auto_20161009_2158.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-09 21:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema tweaks for ToucanInvitation: newest-first default ordering
    and a nullable invitation_sent timestamp (unsent invitations)."""

    dependencies = [
        ('invitations', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='toucaninvitation',
            options={'ordering': ['-pk']},
        ),
        migrations.AlterField(
            model_name='toucaninvitation',
            name='invitation_sent',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| mit | Python |
|
4651e178ddbeac9211f8170e2e20f8a35ff0e3ab | Add a simple CLI script for writing out results of scripts | vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium | ocradmin/plugins/test_nodetree.py | ocradmin/plugins/test_nodetree.py | #!/usr/bin/python
import os
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocradmin.settings'
sys.path.insert(0, "lib")
from nodetree import script
from nodetree.manager import ModuleManager
def run(nodelist, outpath):
    """Evaluate a nodetree script and write its first terminal's result to
    outpath through an Ocropus FileOut sink node."""
    manager = ModuleManager()
    # Register every plugin module so the script's node types can resolve.
    manager.register_module("ocradmin.plugins.ocropus_nodes")
    manager.register_module("ocradmin.plugins.tesseract_nodes")
    manager.register_module("ocradmin.plugins.cuneiform_nodes")
    manager.register_module("ocradmin.plugins.numpy_nodes")
    manager.register_module("ocradmin.plugins.pil_nodes")
    s = script.Script(nodelist, manager=manager)
    # NOTE(review): only the first terminal is rendered — confirm scripts
    # used with this tool always have exactly one terminal node.
    term = s.get_terminals()[0]
    print "Rendering to %s" % outpath
    out = manager.get_new_node("Ocropus::FileOut", label="Output",
            params=[("path", os.path.abspath(outpath))])
    out.set_input(0, term)
    # Evaluating the sink pulls data through the whole node graph.
    out.eval()
if __name__ == "__main__":
    # CLI: <script.json> <output-path>
    if len(sys.argv) < 3:
        print "Usage: %s <script> <output>" % sys.argv[0]
        sys.exit(1)
    nodes = None
    with open(sys.argv[1], "r") as f:
        nodes = json.load(f)
    if nodes is None:
        print "No nodes found in script"
        sys.exit(1)
    run(nodes, sys.argv[2])
| apache-2.0 | Python |
|
74fec13ce5ca0f011c8970e7664727b422597d9a | Add tools.genetics module (#41, #36)) | a5kin/hecate,a5kin/hecate | xentica/tools/genetics.py | xentica/tools/genetics.py | """A collection of functions allowing genetics manipulations."""
def genome_crossover(*genomes):
    """
    Stochastically cross over the given genomes.

    :param genomes: genomes (integers) to combine
    :returns: single integer, the resulting genome
    """
    # Placeholder: the crossover algorithm has not been implemented yet.
    raise NotImplementedError
| mit | Python |
|
5f0ebdb043a313a784c723c36b87d316e276629f | Add recurring events experiment | joel-wright/DDRPi,fraz3alpha/DDRPi,fraz3alpha/led-disco-dancefloor | experiments/python/pygame_recurring_events.py | experiments/python/pygame_recurring_events.py | import pygame
import pygame.joystick
from pygame.locals import *
def main():
    """Poll the pygame event queue and print every event, demonstrating a
    recurring timer event."""
    pygame.init()
    clock = pygame.time.Clock()
    # NOTE(review): 'ue' is constructed but never posted or used — was it
    # meant to be the event fired by set_timer below? Confirm intent.
    ue = pygame.event.Event(USEREVENT, {'code':'drop'})
    # Post an event of type 127 every 500 ms.
    pygame.time.set_timer(127, 500)
    while(True):
        events = pygame.event.get()
        for e in events:
            print(e)
        # Cap the polling loop at 2 iterations per second.
        clock.tick(2)


if __name__ == "__main__":
    main()
| mit | Python |
|
4d34906eba347f56a13b193efa3cedb3f2ab2a24 | Add tests for ironic-dbsync. | rdo-management/tuskar,rdo-management/tuskar,rdo-management/tuskar,tuskar/tuskar | ironic/tests/test_dbsync.py | ironic/tests/test_dbsync.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.db import migration
from ironic.tests.db import base
class DbSyncTestCase(base.DbTestCase):
    """Exercise ironic-dbsync: apply all migrations, then read the version."""

    def setUp(self):
        super(DbSyncTestCase, self).setUp()

    def test_sync_and_version(self):
        # Upgrade the schema to the latest migration ...
        migration.db_sync()
        v = migration.db_version()
        # ... and the reported version must be past the initial baseline.
        self.assertTrue(v > migration.INIT_VERSION)
| apache-2.0 | Python |
|
71849714d809e882838e109d6906086b47fe68c5 | Create hubspot-amulet.py | Pouf/CodingCompetition,Pouf/CodingCompetition | CiO/hubspot-amulet.py | CiO/hubspot-amulet.py | import itertools
def checkio(m):
    """Find dial angles [a, b, c], each in -180..180, that open the amulet.

    The dials must simultaneously satisfy (all modulo 360 degrees):

        a           + b*m[1][0] + c*m[2][0] ==   0
        a*m[0][1] + b           + c*m[2][1] == 225
        a*m[0][2] + b*m[1][2] + c           == 315

    Returns the lexicographically first solution [a, b, c] (the same one the
    original full brute-force scan found), or None if no solution exists.
    """
    for a, b in itertools.product(range(-180, 181), repeat=2):
        # The original scanned all 361**3 triples; instead, solve the third
        # congruence for c:  c == 315 - a*m[0][2] - b*m[1][2]  (mod 360),
        # reducing the search to O(361**2).
        base = (315 - a * m[0][2] - b * m[1][2]) % 360
        # The residue has at most two representatives inside [-180, 180];
        # test them in ascending order to preserve the original scan order.
        for c in (base - 360, base):
            if not -180 <= c <= 180:
                continue
            if (a + b * m[1][0] + c * m[2][0]) % 360:
                continue
            if (a * m[0][1] + b + c * m[2][1]) % 360 == 225:
                return [a, b, c]
| mit | Python |
|
5156c590b43f5a2bf577da224829c20bc3b97230 | add first example | credp/lisa,arnoldlu/lisa,credp/lisa,mdigiorgio/lisa,arnoldlu/lisa,credp/lisa,mdigiorgio/lisa,joelagnel/lisa,bjackman/lisa,ARM-software/lisa,ARM-software/lisa,credp/lisa,bjackman/lisa,JaviMerino/lisa,ARM-software/lisa,ARM-software/lisa,joelagnel/lisa | libs/utils/filters.py | libs/utils/filters.py |
# import glob
# import matplotlib.gridspec as gridspec
# import matplotlib.pyplot as plt
# import numpy as np
# import os
# import pandas as pd
# import pylab as pl
# import re
# import sys
# import trappy
# Configure logging
import logging
class Filters(object):
    """Helpers for filtering tasks out of a parsed trace."""

    def __init__(self, trace, tasks=None):
        # trace: parsed trace object exposing .df() and .platform
        # tasks: optional task subset (currently unused by topBigTasks)
        self.trace = trace
        self.tasks = tasks

    def topBigTasks(self, max_tasks=10, min_samples=100, min_utilization=None):
        """
        Tasks which had a 'utilization' bigger than the specified threshold
        """
        # Default threshold: max capacity of a LITTLE cpu from the energy model.
        if min_utilization is None:
            min_utilization = self.trace.platform['nrg_model']['little']['cpu']['cap_max']
        df = self.trace.df('tload')
        big_tasks_events = df[df.utilization > min_utilization]
        big_tasks = big_tasks_events.pid.unique()
        big_tasks_count = big_tasks.size
        print 'Total {} tasks with at least {} "utilization" samples > {}'\
            .format(big_tasks_count, min_samples, min_utilization)
        # Per-PID stats over the over-threshold samples; 'count' below is the
        # number of such samples for each task.
        big_tasks_stats = big_tasks_events.groupby('pid')\
                            .describe(include=['object']);
        # NOTE(review): DataFrame.sort(columns=...) is the legacy (pre-0.17)
        # pandas API — confirm the pinned pandas version still provides it.
        big_tasks_pids = big_tasks_stats.unstack()['comm']\
                        .sort(columns=['count'], ascending=False)
        # Keep only tasks with enough samples, then take the top N.
        big_tasks_pids = big_tasks_pids[big_tasks_pids['count'] > min_samples]
        big_topmost = big_tasks_pids.head(max_tasks)
        print 'Top {} "big" tasks:'.format(max_tasks)
        print big_topmost
        return list(big_topmost.index)
| apache-2.0 | Python |
|
4137fd528367cfc5caf8c89665e1d2ee0ceb8385 | Clean function cleans data correctly | ShipJ/Code | Projects/Tracking/clean.py | Projects/Tracking/clean.py | import pandas as pd
from Code.config import get_path
def clean_ble(df):
    """Clean a raw BLE tracking dataframe: drop unused columns, attach
    sensor/stand location data, enumerate ids and sensors, and parse
    timestamps. Returns the cleaned dataframe."""
    PATH = get_path()
    # Remove unwanted columns
    df = df.drop(['clientmac', 'proximity', 'type', 'probetime_gmt', 'probetime'], axis=1)
    # Rename column headers
    df.columns=['id', 'datetime', 'sensor', 'power', 'rssi', 'accuracy']
    # Merge with stand locations
    # (stand and sensor location files live under the sibling Location/ dir)
    sensor_stand_loc = pd.merge(
        pd.DataFrame(pd.read_csv(PATH.replace('BLE/Data', 'Location/', 1) + '/stand_locations.txt',
                                 sep='\t')),
        pd.DataFrame(pd.read_csv(PATH.replace('BLE/Data', 'Location/', 1) + '/sensor_locations.txt',
                                 sep='\t')),
        left_on='id',
        right_on='id_location').drop('id', axis=1)
    # Merge with location data
    df = pd.DataFrame(pd.merge(df,
                               sensor_stand_loc,
                               left_on='sensor',
                               right_on='name',
                               how='outer').drop(['name', 'type'], axis=1))
    # Map IDs to enumerated
    # NOTE(review): 'id' shadows the builtin inside the comprehension.
    map_id = {id: i for i, id in enumerate(set(df['id']))}
    df['id'] = df['id'].map(map_id)
    # Map Sensors to enumerated
    map_sensors = {sensor: i for i, sensor in enumerate(set(df['sensor']))}
    df['sensor'] = df['sensor'].map(map_sensors)
    # Map datetime strings to datetime
    df['datetime'] = pd.to_datetime(df['datetime'])
    return df
| mit | Python |
|
c0cf6739d3b10868eaae246c36eb691bff42a5aa | Add webdriver base test suite. | kdart/pycopia,kdart/pycopia,kdart/pycopia,kdart/pycopia,kdart/pycopia | QA/pycopia/QA/webdriver.py | QA/pycopia/QA/webdriver.py | #!/usr/bin/python2.5
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# Copyright (C) 2010 Keith Dart <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
from __future__ import absolute_import
"""
Base class for webdriver test suites. Pre-instantiates a webdriver
instance and fetches the DUT service target.
"""
# TODO other target browser support
from selenium.firefox.webdriver import WebDriver
from pycopia.QA import core
class WebdriverSuite(core.TestSuite):
    """Webdriver test suite.

    Add webdriver test cases to this suite. The "webdriver" object will be
    at the root of the configuration.
    """

    def initialize(self):
        conf = self.config
        dut = conf.environment.DUT
        target_url = dut.get_url(conf.get("serviceprotocol"), conf.get("servicepath"))
        self.info("Target URL is: %s" % (target_url,))
        # Expose a live browser session to the contained test cases.
        conf.webdriver = WebDriver()
        conf.webdriver.get(target_url)

    def finalize(self):
        # Detach the driver from the configuration before shutting it down.
        driver = self.config.webdriver
        del self.config.webdriver
        driver.quit()
| apache-2.0 | Python |
|
b1f22b33a60da2b27ac17090c0fa759e510dd051 | Create rlmradio.py | Grimnir9/cloudbot-stuff | rlmradio.py | rlmradio.py | #Grabs Current Info for RLM Radio Stream
import requests
import re
from cloudbot import hook
import urllib
import urllib.request
url = "http://38.135.36.125:7359/7.html"
@hook.command("rlmradio", autohelp=False)
def rlmradio(text):
    """Return the track currently playing on the RLM radio stream.

    :param text: unused; required by the cloudbot command-hook signature.
    """
    # Fix: the original re-assigned a local `url` identical to the module
    # constant, shadowing it; use the module-level URL directly.
    html = urllib.request.urlopen(url).read()
    # The Shoutcast 7.html status page wraps the track title in fixed-size
    # boilerplate; strip it off.  TODO(review): the 28/-15 offsets assume
    # that page layout never changes — confirm.
    title = html[28:-15].decode("utf-8")
    # NOTE(review): this replace is currently a no-op — it looks like it was
    # meant to unescape an HTML entity (e.g. &apos;) into "'"; confirm.
    title = title.replace("'", "'")
    return "Now on the RLM Radio Stream: " + title
| mit | Python |
|
862370ec4cc438b41ab2717ac4dafa16cab94df2 | Add scraper for Housing and Urban Development | lukerosiak/inspectors-general,divergentdave/inspectors-general | inspectors/hud.py | inspectors/hud.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
#
# options:
# standard since/year options for a year range to fetch from.
#
# pages - number of pages to fetch. defaults to all of them (using a very high number)
#
# Reports available since March 2004
BASE_URL = 'http://www.hudoig.gov/reports-publications/results'
BASE_REPORT_PAGE_URL = "http://www.hudoig.gov/"
ALL_PAGES = 1000
def run(options):
    """Walk the paginated HUD OIG results index, saving every public report
    in the requested year range."""
    # Default to a very high page count, i.e. "all pages".
    pages = options.get('pages', ALL_PAGES)
    for page in range(1, (int(pages) + 1)):
        logging.debug("## Downloading page %i" % page)
        url = url_for(options, page=page)
        index_body = utils.download(url)
        index = BeautifulSoup(index_body)
        rows = index.select('div.views-row')
        # If no more reports found, quit
        if not rows:
            break
        for row in rows:
            report = report_from(row)
            # report_from returns None for non-public reports; skip those.
            if report:
                inspector.save_report(report)
def report_from(report_row):
    """Fetch and parse one search-result row's report page.

    Returns the report dict expected by inspector.save_report, or None when
    the report document is not available to the general public.
    """
    published_date_text = report_row.select('span.date-display-single')[0].text
    published_on = datetime.datetime.strptime(published_date_text, "%B %d, %Y")

    report_page_link_relative = report_row.select('a')[0]['href']
    report_page_link = urljoin(BASE_REPORT_PAGE_URL, report_page_link_relative)
    logging.debug("### Processing report %s" % report_page_link)

    report_page_body = utils.download(report_page_link)
    report_page = BeautifulSoup(report_page_body)
    article = report_page.select('article')[0]

    try:
        report_url = article.select('div.field-name-field-pub-document a')[0]['href']
    except (IndexError, KeyError):
        # Narrowed from a bare `except:` so unrelated failures (including
        # KeyboardInterrupt/SystemExit) still propagate. A missing document
        # link means the report is not available to the general public;
        # just skipping for now.
        # http://www.hudoig.gov/reports-publications/audit-reports/final-civil-action-%E2%80%93-fraudulent-expenses-paid-community
        logging.warning("[%s] Skipping report, not public." % report_page_link)
        return None

    title = report_page.select('h1.title a')[0].text
    report_type = article.select('div.field-name-field-pub-type div.field-item')[0].text
    try:
        report_id = article.select('div.field-name-field-pub-report-number div.field-item')[0].text
    except IndexError:
        # Sometimes the report_id is not listed on the page, so we fallback to
        # pulling it from the filename.
        report_filename = article.select('div.field-name-field-pub-document a')[0].text
        report_id = os.path.splitext(report_filename)[0]  # Strip off the extension

    def get_optional_selector(selector):
        # Optional metadata fields: absent entries become the empty string.
        try:
            return article.select(selector)[0].text
        except IndexError:
            return ""

    program_area = get_optional_selector('div.field-name-field-pub-program-area div.field-item')
    state = get_optional_selector('div.field-name-field-pub-state div.field-item')
    funding = get_optional_selector('div.field-name-field-related-to-arra div.field-item')
    summary = get_optional_selector('div.field-type-text-with-summary')
    return {
        'inspector': 'hud',
        'inspector_url': 'http://www.hudoig.gov/',
        'agency': 'hud',
        'agency_name': 'Housing and Urban Development',
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
        'type': report_type,
        'program_area': program_area,
        'state': state,
        'funding': funding,
        'summary': summary,
    }
def url_for(options, page=1):
    """Build the HUD OIG results URL for the requested year range and page."""
    year_range = inspector.year_range(options)
    return '%s?keys=&date_filter[min][year]=%s&date_filter[max][year]=%s&page=%i' % (
        BASE_URL, year_range[0], year_range[-1], page)
# Entry point: run the scraper when executed directly (IG scraper convention).
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.