commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
802926f151d9a1337c2a07adbc485b6193e91733
|
Add template string calling to the state module
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
salt/modules/state.py
|
salt/modules/state.py
|
'''
Control the state system on the minion
'''
# Import Python modules
import os
# Import salt modules
import salt.state
def low(data):
'''
Execute a single low data call
'''
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
return err
return st_.call(data)
def high(data):
'''
Execute the compound calls stored in a single set of high data
'''
st_ = salt.state.State(__opts__)
return st_.call_high(data)
def template(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template(tem)
def template_str(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template_str(tem)
|
'''
Control the state system on the minion
'''
# Import Python modules
import os
# Import salt modules
import salt.state
def low(data):
'''
Execute a single low data call
'''
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
return err
return st_.call(data)
def high(data):
'''
Execute the compound calls stored in a single set of high data
'''
st_ = salt.state.State(__opts__)
return st_.call_high(data)
def template(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template(tem)
|
apache-2.0
|
Python
|
ac8d29c5855ea05bd42766cd142808704aded867
|
Add space to trigger travis
|
masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api
|
web/impact/impact/permissions/graphql_permissions.py
|
web/impact/impact/permissions/graphql_permissions.py
|
from accelerator.models import (
UserRole,
)
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import ACTIVE_PROGRAM_STATUS
BASIC_ALLOWED_USER_ROLES = [
UserRole.FINALIST,
UserRole.AIR,
UserRole.MENTOR,
UserRole.PARTNER,
UserRole.ALUM
]
BASIC_VISIBLE_USER_ROLES = [UserRole.FINALIST, UserRole.STAFF, UserRole.ALUM]
def check_for_no_user_role(logged_in_user_roles):
count = len(logged_in_user_roles) == 1
return not logged_in_user_roles or count and not logged_in_user_roles[0]
def check_for_basic_user_roles(logged_in_user_roles):
return any(
[role in BASIC_ALLOWED_USER_ROLES for role in logged_in_user_roles]
)
def visible_roles(current_user):
current_logged_in_user_roles = list(
current_user.programrolegrant_set.filter(
program_role__program__program_status=ACTIVE_PROGRAM_STATUS
).values_list('program_role__user_role__name', flat=True).distinct())
if check_for_no_user_role(current_logged_in_user_roles):
return [UserRole.STAFF]
if check_for_basic_user_roles(current_logged_in_user_roles):
return BASIC_VISIBLE_USER_ROLES + [UserRole.MENTOR]
if UserRole.JUDGE in current_logged_in_user_roles:
return BASIC_VISIBLE_USER_ROLES
def can_view_profile(profile_user, roles):
return profile_user.programrolegrant_set.filter(
program_role__user_role__name__in=roles
).exists()
def can_view_entrepreneur_profile(current_user, profile_user):
if not is_employee(current_user):
roles = visible_roles(current_user)
return can_view_profile(profile_user, roles)
return True
|
from accelerator.models import (
UserRole,
)
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import ACTIVE_PROGRAM_STATUS
BASIC_ALLOWED_USER_ROLES = [
UserRole.FINALIST,
UserRole.AIR,
UserRole.MENTOR,
UserRole.PARTNER,
UserRole.ALUM
]
BASIC_VISIBLE_USER_ROLES = [UserRole.FINALIST, UserRole.STAFF, UserRole.ALUM]
def check_for_no_user_role(logged_in_user_roles):
count = len(logged_in_user_roles) == 1
return not logged_in_user_roles or count and not logged_in_user_roles[0]
def check_for_basic_user_roles(logged_in_user_roles):
return any(
[role in BASIC_ALLOWED_USER_ROLES for role in logged_in_user_roles]
)
def visible_roles(current_user):
current_logged_in_user_roles = list(
current_user.programrolegrant_set.filter(
program_role__program__program_status=ACTIVE_PROGRAM_STATUS
).values_list('program_role__user_role__name', flat=True).distinct())
if check_for_no_user_role(current_logged_in_user_roles):
return [UserRole.STAFF]
if check_for_basic_user_roles(current_logged_in_user_roles):
return BASIC_VISIBLE_USER_ROLES + [UserRole.MENTOR]
if UserRole.JUDGE in current_logged_in_user_roles:
return BASIC_VISIBLE_USER_ROLES
def can_view_profile(profile_user, roles):
return profile_user.programrolegrant_set.filter(
program_role__user_role__name__in=roles
).exists()
def can_view_entrepreneur_profile(current_user, profile_user):
if not is_employee(current_user):
roles = visible_roles(current_user)
return can_view_profile(profile_user, roles)
return True
|
mit
|
Python
|
7ad6da17a72010967ccd82d3393a86762cf2a786
|
Mark import-std-module/empty-module as libc++ test
|
llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
packages/Python/lldbsuite/test/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py
|
packages/Python/lldbsuite/test/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py
|
"""
Test that LLDB doesn't crash if the std module we load is empty.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
mydir = TestBase.compute_mydir(__file__)
# We only emulate a fake libc++ in this test and don't use the real libc++,
# but we still add the libc++ category so that this test is only run in
# test configurations where libc++ is actually supposed to be tested.
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
sysroot = os.path.join(os.getcwd(), "root")
# Set the sysroot.
self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET)
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
self.runCmd("log enable lldb expr")
# Use the typedef that is only defined in our 'empty' module. If this fails, then LLDB
# somehow figured out the correct define for the header and compiled the right
# standard module that actually contains the std::vector template.
self.expect("expr MissingContent var = 3; var", substrs=['$0 = 3'])
# Try to access our mock std::vector. This should fail but not crash LLDB as the
# std::vector template should be missing from the std module.
self.expect("expr (size_t)v.size()", substrs=["Couldn't lookup symbols"], error=True)
|
"""
Test that LLDB doesn't crash if the std module we load is empty.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
sysroot = os.path.join(os.getcwd(), "root")
# Set the sysroot.
self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET)
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
self.runCmd("log enable lldb expr")
# Use the typedef that is only defined in our 'empty' module. If this fails, then LLDB
# somehow figured out the correct define for the header and compiled the right
# standard module that actually contains the std::vector template.
self.expect("expr MissingContent var = 3; var", substrs=['$0 = 3'])
# Try to access our mock std::vector. This should fail but not crash LLDB as the
# std::vector template should be missing from the std module.
self.expect("expr (size_t)v.size()", substrs=["Couldn't lookup symbols"], error=True)
|
apache-2.0
|
Python
|
973a7754623c330f0352979bf9e0f2a6020acf62
|
reformat >80 char import line
|
Tendrl/commons
|
tendrl/commons/tests/objects/cluster/atoms/check_cluster_available/test_check_cluster_available_init.py
|
tendrl/commons/tests/objects/cluster/atoms/check_cluster_available/test_check_cluster_available_init.py
|
import etcd
import maps
import pytest
from tendrl.commons.objects.cluster.atoms.check_cluster_available import \
CheckClusterAvailable
from tendrl.commons.objects import AtomExecutionFailedError
class MockCluster(object):
def __init__(self, integration_id = 0):
self.is_managed = True
def load(self):
return self
def exists(self):
return self
def test_check_cluster_available():
NS.publisher_id = 0
NS._int = maps.NamedDict()
NS.tendrl = maps.NamedDict()
NS.tendrl.objects = maps.NamedDict()
NS.tendrl.objects.Cluster = MockCluster
test = CheckClusterAvailable()
test.parameters = maps.NamedDict()
test.parameters['TendrlContext.integration_id'] = \
"7a3f2238-ef79-4943-9edf-762a80cf22a0"
test.parameters['job_id'] = 0
test.parameters['flow_id'] = 0
NS.tendrl_context = maps.NamedDict(integration_id="")
NS._int.client = etcd.Client()
with pytest.raises(AtomExecutionFailedError):
test.run()
|
import etcd
import maps
import pytest
from tendrl.commons.objects.cluster.atoms.check_cluster_available import CheckClusterAvailable # noqa
from tendrl.commons.objects import AtomExecutionFailedError
class MockCluster(object):
def __init__(self, integration_id = 0):
self.is_managed = True
def load(self):
return self
def exists(self):
return self
def test_check_cluster_available():
NS.publisher_id = 0
NS._int = maps.NamedDict()
NS.tendrl = maps.NamedDict()
NS.tendrl.objects = maps.NamedDict()
NS.tendrl.objects.Cluster = MockCluster
test = CheckClusterAvailable()
test.parameters = maps.NamedDict()
test.parameters['TendrlContext.integration_id'] = \
"7a3f2238-ef79-4943-9edf-762a80cf22a0"
test.parameters['job_id'] = 0
test.parameters['flow_id'] = 0
NS.tendrl_context = maps.NamedDict(integration_id="")
NS._int.client = etcd.Client()
with pytest.raises(AtomExecutionFailedError):
test.run()
|
lgpl-2.1
|
Python
|
eea0c4fd610882ee748410063a62c30ce95da0ee
|
Fix the snapshot creation script for the new command line syntax. Review URL: http://codereview.chromium.org//8414015
|
dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk
|
runtime/tools/create_snapshot_file.py
|
runtime/tools/create_snapshot_file.py
|
#!/usr/bin/env python
#
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script to create snapshot files.
import getopt
import optparse
import string
import subprocess
import sys
import utils
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--executable",
action="store", type="string",
help="path to executable")
result.add_option("--output_bin",
action="store", type="string",
help="binary snapshot output file name")
result.add_option("--input_cc",
action="store", type="string",
help="input template file name")
result.add_option("--output",
action="store", type="string",
help="generated snapshot output file name")
result.add_option("--scripts",
action="store", type="string",
help="list of scripts to include in snapshot")
result.add_option("-v", "--verbose",
help='Verbose output.',
default=False, action="store_true")
return result
def ProcessOptions(options):
if not options.executable:
sys.stderr.write('--executable not specified\n')
return False
if not options.output_bin:
sys.stderr.write('--output_bin not specified\n')
return False
if not options.input_cc:
sys.stderr.write('--input_cc not specified\n')
return False
if not options.output:
sys.stderr.write('--output not specified\n')
return False
return True
def makeString(input_file):
result = ' '
fileHandle = open(input_file, 'rb')
lineCounter = 0
for byte in fileHandle.read():
result += ' %d,' % ord(byte)
lineCounter += 1
if lineCounter == 10:
result += '\n '
lineCounter = 0
if lineCounter != 0:
result += '\n '
return result
def makeFile(output_file, input_cc_file, input_file):
snapshot_cc_text = open(input_cc_file).read()
snapshot_cc_text = snapshot_cc_text % makeString(input_file)
open(output_file, 'w').write(snapshot_cc_text)
return True
def Main():
# Parse the options.
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
# Construct the path to the dart binary.
snapshot_argument = ''.join([ "--snapshot=", options.output_bin ])
if not options.scripts:
command = [ options.executable, snapshot_argument ]
else:
scripts = string.split(options.scripts)
command = [ options.executable, snapshot_argument ] + scripts
if options.verbose:
print ' '.join(command)
subprocess.call(command)
if not makeFile(options.output, options.input_cc, options.output_bin):
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
|
#!/usr/bin/env python
#
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script to create snapshot files.
import getopt
import optparse
import string
import subprocess
import sys
import utils
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--executable",
action="store", type="string",
help="path to executable")
result.add_option("--output_bin",
action="store", type="string",
help="binary snapshot output file name")
result.add_option("--input_cc",
action="store", type="string",
help="input template file name")
result.add_option("--output",
action="store", type="string",
help="generated snapshot output file name")
result.add_option("--scripts",
action="store", type="string",
help="list of scripts to include in snapshot")
result.add_option("-v", "--verbose",
help='Verbose output.',
default=False, action="store_true")
return result
def ProcessOptions(options):
if not options.executable:
sys.stderr.write('--executable not specified\n')
return False
if not options.output_bin:
sys.stderr.write('--output_bin not specified\n')
return False
if not options.input_cc:
sys.stderr.write('--input_cc not specified\n')
return False
if not options.output:
sys.stderr.write('--output not specified\n')
return False
return True
def makeString(input_file):
result = ' '
fileHandle = open(input_file, 'rb')
lineCounter = 0
for byte in fileHandle.read():
result += ' %d,' % ord(byte)
lineCounter += 1
if lineCounter == 10:
result += '\n '
lineCounter = 0
if lineCounter != 0:
result += '\n '
return result
def makeFile(output_file, input_cc_file, input_file):
snapshot_cc_text = open(input_cc_file).read()
snapshot_cc_text = snapshot_cc_text % makeString(input_file)
open(output_file, 'w').write(snapshot_cc_text)
return True
def Main():
# Parse the options.
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
# Construct the path to the dart binary.
snapshot_argument = ''.join([ "--snapshot=", options.output_bin ])
if not options.scripts:
command = [ options.executable, snapshot_argument ]
else:
scripts = string.split(options.scripts)
command = [ options.executable, snapshot_argument, "--" ] + scripts + [ "--" ]
if options.verbose:
print ' '.join(command)
subprocess.call(command)
if not makeFile(options.output, options.input_cc, options.output_bin):
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
|
bsd-3-clause
|
Python
|
788073cdf2a5e2ee142cbcf1263accad7baac153
|
Move chmod
|
hatchery/genepool,hatchery/Genepool2
|
genes/gnu_coreutils/commands.py
|
genes/gnu_coreutils/commands.py
|
#!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
run(['chgrp', group, path])
@only_posix()
def chown(path, user):
run(['chown', user, path])
@only_posix()
def chmod(*args):
# FIXME: this is ugly, name the args
run(['chmod'] + list(args))
@only_posix()
def groupadd(*args):
run(['groupadd'] + list(args))
@only_posix()
def ln(*args):
run(['ln'] + list(args))
@only_posix()
def mkdir(path, mode=None):
if mode:
run(['mkdir', '-m', mode, path])
else:
run(['mkdir', path])
@only_posix()
def useradd(*args):
# FIXME: this is a bad way to do things
# FIXME: sigh. this is going to be a pain to make it idempotent
run(['useradd'] + list(args))
@only_posix()
def usermod(*args):
# FIXME: this is a bad way to do things
run(['usermod'] + list(args))
|
#!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
run(['chgrp', group, path])
@only_posix()
def chown(path, user):
run(['chown', user, path])
@only_posix()
def groupadd(*args):
run(['groupadd'] + list(args))
@only_posix()
def ln(*args):
run(['ln'] + list(args))
@only_posix()
def mkdir(path, mode=None):
if mode:
run(['mkdir', '-m', mode, path])
else:
run(['mkdir', path])
@only_posix()
def useradd(*args):
# FIXME: this is a bad way to do things
# FIXME: sigh. this is going to be a pain to make it idempotent
run(['useradd'] + list(args))
@only_posix()
def usermod(*args):
# FIXME: this is a bad way to do things
run(['usermod'] + list(args))
|
mit
|
Python
|
394954fc80230e01112166db4fe133c107febead
|
Allow more than one GitHub repo from the same user
|
evoja/docker-Github-Gitlab-Auto-Deploy,evoja/docker-Github-Gitlab-Auto-Deploy
|
gitautodeploy/parsers/common.py
|
gitautodeploy/parsers/common.py
|
class WebhookRequestParser(object):
"""Abstract parent class for git service parsers. Contains helper
methods."""
def __init__(self, config):
self._config = config
def get_matching_repo_configs(self, urls):
"""Iterates over the various repo URLs provided as argument (git://,
ssh:// and https:// for the repo) and compare them to any repo URL
specified in the config"""
configs = []
for url in urls:
for repo_config in self._config['repositories']:
if repo_config in configs:
continue
if repo_config.get('repo', repo_config.get('url')) == url:
configs.append(repo_config)
elif 'url_without_usernme' in repo_config and repo_config['url_without_usernme'] == url:
configs.append(repo_config)
return configs
|
class WebhookRequestParser(object):
"""Abstract parent class for git service parsers. Contains helper
methods."""
def __init__(self, config):
self._config = config
def get_matching_repo_configs(self, urls):
"""Iterates over the various repo URLs provided as argument (git://,
ssh:// and https:// for the repo) and compare them to any repo URL
specified in the config"""
configs = []
for url in urls:
for repo_config in self._config['repositories']:
if repo_config in configs:
continue
if repo_config['url'] == url:
configs.append(repo_config)
elif 'url_without_usernme' in repo_config and repo_config['url_without_usernme'] == url:
configs.append(repo_config)
return configs
|
mit
|
Python
|
d4fed426153105a9f8cab595848d5303003449b8
|
revert last commit, import properly
|
Naught0/qtbot
|
cogs/games.py
|
cogs/games.py
|
import discord
import json
from discord.ext import commands
from datetime import datetime
from utils import aiohttp_wrap as aw
class Game:
""" Cog which allows fetching of video game information """
IG_URL = 'https://api-2445582011268.apicast.io/{}/'
with open('data/apikeys.json') as f:
KEY = json.load(f)['pgdb']
def __init__(self, bot):
self.bot = bot
self.session = bot.aio_session
@commands.comand(aliases=['games'])
async def game(self, ctx, *, query: str):
""" Search for some information about a game """
url = self.IG_URL.format('games')
headers = {'user-key': self.KEY}
params = {'search': query,
'fields': 'name,summary,first_release_date,aggregated_rating,cover'}
resp = await aw.session_get(self.session, url, headers=headers, params=params).json()
await ctx.send(f'{resp}'[:500])
def setup(bot):
bot.add_cog(Game(bot))
|
import discord
from discord.ext import commands
from datetime import datetime
from utils import aiohttp_wrap as aw
class Game:
""" Cog which allows fetching of video game information """
IG_URL = 'https://api-2445582011268.apicast.io/{}/'
with open('data/apikeys.json') as f:
KEY = json.load(f)['pgdb']
def __init__(self, bot):
self.bot = bot
self.session = bot.aio_session
@commands.comand(aliases=['games'])
async def game(self, ctx, *, query: str):
""" Search for some information about a game """
url = self.IG_URL.format('games')
headers = {'user-key': self.KEY}
params = {'search': query,
'fields': 'name,summary,first_release_date,aggregated_rating,cover'}
resp = await aw.aio_get_json(self.session, url, headers=headers, params=params)
await ctx.send(f'{resp}'[:500])
def setup(bot):
bot.add_cog(Game(bot))
|
mit
|
Python
|
7adeb5e668a132ab540fa45c8e6c62cb8481930d
|
fix infinite recursion
|
qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
fluff/sync_couchdb.py
|
fluff/sync_couchdb.py
|
from django.db.models import signals
import os
from couchdbkit.ext.django.loading import get_db
from pillowtop.utils import import_pillows
from dimagi.utils.couch import sync_docs
FLUFF = 'fluff'
def sync_design_docs(temp=None):
dir = os.path.abspath(os.path.dirname(__file__))
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
sync_docs.sync_design_docs(db, os.path.join(dir, "_design"), FLUFF, temp=temp)
def catch_signal(app, **kwargs):
"""Function used by syncdb signal"""
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label == FLUFF:
sync_design_docs()
def copy_designs(temp='tmp', delete=True):
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
sync_docs.copy_designs(db, FLUFF)
signals.post_syncdb.connect(catch_signal)
|
from django.db.models import signals
import os
from couchdbkit.ext.django.loading import get_db
from pillowtop.utils import import_pillows
from dimagi.utils.couch.sync_docs import sync_design_docs as sync_docs
FLUFF = 'fluff'
def sync_design_docs(temp=None):
dir = os.path.abspath(os.path.dirname(__file__))
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
print 'fluff sync: %s' % app_label
db = get_db(app_label)
sync_docs(db, os.path.join(dir, "_design"), FLUFF, temp=temp)
def catch_signal(app, **kwargs):
"""Function used by syncdb signal"""
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label == FLUFF:
sync_design_docs()
def copy_designs(temp='tmp', delete=True):
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
copy_designs(db, FLUFF)
signals.post_syncdb.connect(catch_signal)
|
bsd-3-clause
|
Python
|
9cc7218f2eef7135e5402a47c2783def31add9f3
|
save screenshot in 800x480 too
|
michaelcontento/monkey-shovel
|
screenshots.py
|
screenshots.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from PIL import Image, ImageFile
from shovel import task
from meta.utils import path_meta, path_generated, depends
ImageFile.MAXBLOCK = 2**20
def save(image, filename):
image.save(filename, "JPEG", quality=98, optimize=True, progressive=True)
@task
def retina_resize():
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
if image.size != (2048, 1580):
continue
resized = image.resize((1024, 790), Image.ANTIALIAS)
resized.save(filename, filename.ext[1:].upper())
@task
def export():
depends("meta.pxm.export")
depends("meta.screenshots.retina_resize")
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
# crop
width, height = image.size
box = (0, height - 768, width, height)
cropped = image.crop(box)
# overlay
name = "".join(filename.namebase.split("-")[1:])
overlayfile = path_meta() / "overlay-" + name + ".png"
if overlayfile.exists():
overlay = Image.open(overlayfile)
cropped.paste(overlay, None, overlay)
# save
for x, y in ((1024, 768), (800, 480), (960, 640), (1136, 640), (1280, 720)):
resized = cropped.resize((x, y), Image.ANTIALIAS)
savename = "screen-" + name + "-" + str(x) + "x" + str(y) + ".jpg"
save(resized, path_generated() / savename)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from PIL import Image, ImageFile
from shovel import task
from meta.utils import path_meta, path_generated, depends
ImageFile.MAXBLOCK = 2**20
def save(image, filename):
image.save(filename, "JPEG", quality=98, optimize=True, progressive=True)
@task
def retina_resize():
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
if image.size != (2048, 1580):
continue
resized = image.resize((1024, 790), Image.ANTIALIAS)
resized.save(filename, filename.ext[1:].upper())
@task
def export():
depends("meta.pxm.export")
depends("meta.screenshots.retina_resize")
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
# crop
width, height = image.size
box = (0, height - 768, width, height)
cropped = image.crop(box)
# overlay
name = "".join(filename.namebase.split("-")[1:])
overlayfile = path_meta() / "overlay-" + name + ".png"
if overlayfile.exists():
overlay = Image.open(overlayfile)
cropped.paste(overlay, None, overlay)
# save
for x, y in ((1024, 768), (960, 640), (1136, 640), (1280, 720)):
resized = cropped.resize((x, y), Image.ANTIALIAS)
savename = "screen-" + name + "-" + str(x) + "x" + str(y) + ".jpg"
save(resized, path_generated() / savename)
|
apache-2.0
|
Python
|
bcb1c8d48532159f76708bdfd0e6868dbda92343
|
make sure command processes run in test database when needed
|
frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe
|
freppledb/__init__.py
|
freppledb/__init__.py
|
r'''
A Django project implementing a web-based user interface for frePPLe.
'''
VERSION = '4.5.0'
def runCommand(taskname, *args, **kwargs):
'''
Auxilary method to run a django command. It is intended to be used
as a target for the multiprocessing module.
The code is put here, such that a child process loads only
a minimum of other python modules.
'''
# Initialize django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freppledb.settings")
import django
django.setup()
# Be sure to use the correct database
from django.db import DEFAULT_DB_ALIAS, connections
from freppledb.common.middleware import _thread_locals
database = kwargs.get("database", DEFAULT_DB_ALIAS)
setattr(_thread_locals, 'database', database)
if 'FREPPLE_TEST' in os.environ:
from django.conf import settings
connections[database].close()
settings.DATABASES[database]['NAME'] = settings.DATABASES[database]['TEST']['NAME']
# Run the command
try:
from django.core import management
management.call_command(taskname, *args, **kwargs)
except Exception as e:
taskid = kwargs.get("task", None)
if taskid:
from datetime import datetime
from freppledb.execute.models import Task
task = Task.objects.all().using(database).get(pk=taskid)
task.status = 'Failed'
now = datetime.now()
if not task.started:
task.started = now
task.finished = now
task.message = str(e)
task.processid = None
task.save(using=database)
|
r'''
A Django project implementing a web-based user interface for frePPLe.
'''
VERSION = '4.5.0'
def runCommand(taskname, *args, **kwargs):
'''
Auxilary method to run a django command. It is intended to be used
as a target for the multiprocessing module.
The code is put here, such that a child process loads only
a minimum of other python modules.
'''
# Initialize django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freppledb.settings")
import django
django.setup()
# Be sure to use the correct default database
from django.db import DEFAULT_DB_ALIAS
from freppledb.common.middleware import _thread_locals
database = kwargs.get("database", DEFAULT_DB_ALIAS)
setattr(_thread_locals, 'database', database)
# Run the command
try:
from django.core import management
management.call_command(taskname, *args, **kwargs)
except Exception as e:
taskid = kwargs.get("task", None)
if taskid:
from datetime import datetime
from freppledb.execute.models import Task
task = Task.objects.all().using(database).get(pk=taskid)
task.status = 'Failed'
now = datetime.now()
if not task.started:
task.started = now
task.finished = now
task.message = str(e)
task.processid = None
task.save(using=database)
|
agpl-3.0
|
Python
|
720c841d0930f73d1efe90518b0a2d9dcbd6425d
|
Document context
|
gchrupala/funktional,kadarakos/funktional
|
funktional/context.py
|
funktional/context.py
|
import sys
from contextlib import contextmanager
# Are we training (or testing)
training = False
@contextmanager
def context(**kwargs):
"""Temporarily change the values of context variables passed.
Enables the `with` syntax:
>>> with context(training=True):
...
"""
current = dict((k, getattr(sys.modules[__name__], k)) for k in kwargs)
for k,v in kwargs.items():
setattr(sys.modules[__name__], k, v)
yield
for k,v in current.items():
setattr(sys.modules[__name__], k, v)
|
import sys
from contextlib import contextmanager
training = False
@contextmanager
def context(**kwargs):
current = dict((k, getattr(sys.modules[__name__], k)) for k in kwargs)
for k,v in kwargs.items():
setattr(sys.modules[__name__], k, v)
yield
for k,v in current.items():
setattr(sys.modules[__name__], k, v)
|
mit
|
Python
|
55987e48997f7f5a94adc3c53fcb8ae58e672c3c
|
increase version number
|
NCI-GDC/gdc-client,NCI-GDC/gdc-client
|
gdc_client/version.py
|
gdc_client/version.py
|
__version__ = 'v1.4.0'
|
__version__ = 'v1.3.0'
|
apache-2.0
|
Python
|
ed7f0e555b438b611f4a9b0fdf6de1fca6ec2914
|
fix incorrect use of str replace
|
UC3Music/genSongbook,UC3Music-e/genSongbook,UC3Music/songbook-tools
|
genSongbook.py
|
genSongbook.py
|
#!/usr/bin/python
import sys, os
def query(question, default):
sys.stdout.write(question + " [" + default + "] ? ")
choice = raw_input()
if choice == '':
return default
return choice
if __name__ == '__main__':
print("----------------------")
print("Welcome to genSongbook")
print("----------------------")
# Query song directory path string
songDirectory = query("Please specify the path of the input song directory","/opt/Dropbox/lyrics/english")
print("Will use song directory: " + songDirectory)
# Query template file path string
templateFile = query("Please specify the path of the template file","template/english.tex")
print("Will use template file: " + templateFile)
print("----------------------")
templateFileFd = open(templateFile, 'r')
s = templateFileFd.read()
#sys.stdout.write(s) #-- Screen output for debugging.
rep = ""
for dirname, dirnames, filenames in os.walk( songDirectory ):
for filename in sorted(filenames):
rep += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
name, extension = os.path.splitext(filename)
rep += "\\chapter{" + name + "}\n" #-- Note that we use \\ instead of \.
rep += "\\begin{verbatim}\n"
song = open( os.path.join(dirname, filename) )
rep += song.read()
rep += "\\end{verbatim}\n"
rep += "\n"
#sys.stdout.write(rep) #-- Screen output for debugging.
s = s.replace("genSongbook",rep)
outFd = open("out.tex", 'w')
outFd.write(s)
|
#!/usr/bin/python
import sys, os
def query(question, default):
sys.stdout.write(question + " [" + default + "] ? ")
choice = raw_input()
if choice == '':
return default
return choice
if __name__ == '__main__':
print("----------------------")
print("Welcome to genSongbook")
print("----------------------")
# Query song directory path string
songDirectory = query("Please specify the path of the input song directory","/opt/Dropbox/lyrics/english")
print("Will use song directory: " + songDirectory)
# Query template file path string
templateFile = query("Please specify the path of the template file","template/english.tex")
print("Will use template file: " + templateFile)
print("----------------------")
templateFileFd = open(templateFile, 'r')
s = templateFileFd.read()
#sys.stdout.write(s) #-- Screen output for debugging.
rep = ""
for dirname, dirnames, filenames in os.walk( songDirectory ):
for filename in sorted(filenames):
rep += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
name, extension = os.path.splitext(filename)
rep += "\\chapter{" + name + "}\n" #-- Note that we use \\ instead of \.
rep += "\\begin{verbatim}\n"
song = open( os.path.join(dirname, filename) )
rep += song.read()
rep += "\\end{verbatim}\n"
rep += "\n"
s.replace("genSongbook",rep)
outFd = open("out.tex", 'w')
outFd.write(s)
|
unlicense
|
Python
|
c7067fce8723f810ed48de6513c6f756d499d807
|
add whitelist tags.
|
why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado,why2pac/dp-tornado
|
dp_tornado/helper/html.py
|
dp_tornado/helper/html.py
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
try:
# py 2.x
import HTMLParser
html_parser = HTMLParser.HTMLParser()
except:
# py 3.4-
try:
import html.parser
html_parser = html.parser.HTMLParser()
except:
# py 3.4+
import html as html_parser
try:
import htmltag
except:
htmltag = None
import re
class HtmlHelper(dpHelper):
def strip_xss(self, html, whitelist=None, replacement='entities'):
if not htmltag:
raise Exception('htmltag library required.')
if whitelist is None:
whitelist = (
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr', 'b', 'br', 'site', 'font')
return htmltag.strip_xss(html, whitelist, replacement)
def strip_tags(self, text):
return re.sub('<[^<]+?>', '', text)
def entity_decode(self, text):
return html_parser.unescape(text)
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
try:
# py 2.x
import HTMLParser
html_parser = HTMLParser.HTMLParser()
except:
# py 3.4-
try:
import html.parser
html_parser = html.parser.HTMLParser()
except:
# py 3.4+
import html as html_parser
try:
import htmltag
except:
htmltag = None
import re
class HtmlHelper(dpHelper):
def strip_xss(self, html, whitelist=None, replacement='entities'):
if not htmltag:
raise Exception('htmltag library required.')
if whitelist is None:
whitelist = (
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr', 'b')
return htmltag.strip_xss(html, whitelist, replacement)
def strip_tags(self, text):
return re.sub('<[^<]+?>', '', text)
def entity_decode(self, text):
return html_parser.unescape(text)
|
mit
|
Python
|
ab6d09c93a9d43ffbf442880633170f5fc678edd
|
add verbose mode to print processing module
|
thaim/get_module
|
get_modules.py
|
get_modules.py
|
#!/usr/bin/env python3
import os
import sys
import requests
import yaml
import git
import svn.remote
import zipfile
import argparse
def get_modules(yml_file, dest, verbose):
f = open(yml_file)
for data in yaml.load(f):
if (not dest.endswith('/')):
dest = dest + '/'
if not 'version' in data:
version = None
else:
version = data['version']
download_module(data['url'], dest, data['name'], data['type'], version, verbose)
f.close()
def download_module(src, dest, name, type, version, verbose):
if os.path.exists(dest + name):
if verbose: print(name + ' already exist')
return
if verbose and version is not None:
print('download ' + name + ':' + version + ' (' + type + ')')
elif verbose:
print('download ' + name + ' (' + type + ')')
if type == 'git':
download_git(src, dest + name, version)
elif type == 'svn':
download_svn(src, dest + name, version)
elif type == 'zip':
download_zip(src, dest, name)
def download_git(src, dest, version):
if version is None:
git.Repo.clone_from(src, dest)
else:
git.Repo.clone_from(src, dest, branch=version)
def download_svn(src, dest, version):
r = svn.remote.RemoteClient(src)
r.checkout(dest)
def download_zip(src, dest, name):
filename = download_file(src, dest)
zfile = zipfile.ZipFile(filename, "r")
zfile.extractall(dest)
os.rename(dest+zfile.namelist()[0].split("/")[0], dest+name)
os.remove(filename)
def download_file(url, destdir):
filename = destdir + url.split('/')[-1]
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return filename
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('modules',
help='list of modules to download')
parser.add_argument('dest_dir',
help='dest directory to save modules')
parser.add_argument('-V', '--verbose',
action='store_true',
help='show verbose message')
return parser
if __name__ == '__main__':
args = create_argparser().parse_args()
get_modules(args.modules, args.dest_dir, args.verbose)
|
#!/usr/bin/env python3
import os
import sys
import requests
import yaml
import git
import svn.remote
import zipfile
import argparse
def get_modules(yml_file, dest):
f = open(yml_file)
for data in yaml.load(f):
if (not dest.endswith('/')):
dest = dest + '/'
if not 'version' in data:
version = None
else:
version = data['version']
download_module(data['url'], dest, data['name'], data['type'], version)
f.close()
def download_module(src, dest, name, type, version):
if os.path.exists(dest + name):
return
if type == 'git':
download_git(src, dest + name, version)
elif type == 'svn':
download_svn(src, dest + name, version)
elif type == 'zip':
download_zip(src, dest, name)
def download_git(src, dest, version):
if version is None:
git.Repo.clone_from(src, dest)
else:
git.Repo.clone_from(src, dest, branch=version)
def download_svn(src, dest, version):
r = svn.remote.RemoteClient(src)
r.checkout(dest)
def download_zip(src, dest, name):
filename = download_file(src, dest)
zfile = zipfile.ZipFile(filename, "r")
zfile.extractall(dest)
os.rename(dest+zfile.namelist()[0].split("/")[0], dest+name)
os.remove(filename)
def download_file(url, destdir):
filename = destdir + url.split('/')[-1]
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return filename
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('modules',
help='list of modules to download')
parser.add_argument('dest_dir',
help='dest directory to save modules')
return parser
if __name__ == '__main__':
args = create_argparser().parse_args()
get_modules(args.modules, args.dest_dir)
|
mit
|
Python
|
740cf4e1a25533b4d3279a17e23b1ff9f6c13006
|
Update Watchers.py
|
possoumous/Watchers,possoumous/Watchers,possoumous/Watchers,possoumous/Watchers
|
examples/Watchers.py
|
examples/Watchers.py
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open('stockstwits.com') # Navigate to the web page
self.assert_element('sentiment-tab') # Assert element on page
self.click('sentiment-tab') # Click element on page
|
Import openpyxl
from seleniumbase import BaseCase
los = []
url = 'https://stocktwits.com/symbol/'
workbook = openpyxl.load_workbook('Test.xlsx')
worksheet = workbook.get_sheet_by_name(name = 'Sheet1')
for col in worksheet['A']:
los.append(col.value)
los2 = []
print(los)
class MyTestClass(BaseCase):
#for i in los:
# stocksite = url +i + '?q=' +i
#driver.get(stocksite)
#driver.find_element_by_id('sentiment-tab').click()
#Bullish = driver.find_elements_by_css_selector('span.bullish:nth-child(1)')
#Sentiment = [x.text for x in Bullish]
#los2.append(Sentiment[0])
|
mit
|
Python
|
3462a4755eac0ea74b9c90f867e769c47504c5bd
|
add license to top of __init__ in examples
|
cloudkick/cloudkick-py
|
examples/__init__.py
|
examples/__init__.py
|
# Licensed to the Cloudkick, Inc under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
apache-2.0
|
Python
|
|
87da5bcf5b11762605c60f57b3cb2019d458fcd3
|
Set version to v2.1.0a3
|
explosion/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy
|
spacy/about.py
|
spacy/about.py
|
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.1.0a3'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = '[email protected]'
__license__ = 'MIT'
__release__ = False
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
|
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.1.0a3.dev0'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = '[email protected]'
__license__ = 'MIT'
__release__ = False
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
|
mit
|
Python
|
939f7a9e91022c8dab5da13e9e3f738f6c25c524
|
Update perception_obstacle_sender.py
|
msbeta/apollo,msbeta/apollo,msbeta/apollo,msbeta/apollo,msbeta/apollo,msbeta/apollo
|
modules/tools/record_analyzer/tools/perception_obstacle_sender.py
|
modules/tools/record_analyzer/tools/perception_obstacle_sender.py
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import time
import argparse
import google.protobuf.text_format as text_format
from cyber_py import cyber
from modules.perception.proto import perception_obstacle_pb2
def update(perception_obstacles):
"""update perception obstacles timestamp"""
now = time.time()
perception_obstacles.header.timestamp_sec = now
perception_obstacles.header.lidar_timestamp = \
(long(now) - long(0.5)) * long(1e9)
for perception_obstacle in perception_obstacles.perception_obstacle:
perception_obstacle.timestamp = now - 0.5
for measure in perception_obstacle.measurements:
measure.timestamp = now - 0.5
return perception_obstacles
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the message file for sending.")
args = parser.parse_args()
cyber.init()
node = cyber.Node("perception_obstacle_sender")
perception_pub = node.create_writer(
"/apollo/perception/obstacles",
perception_obstacle_pb2.PerceptionObstacles)
perception_obstacles = perception_obstacle_pb2.PerceptionObstacles()
with open(args.file, 'r') as f:
text_format.Merge(f.read(), perception_obstacles)
while not cyber.is_shutdown():
now = time.time()
perception_obstacles = update(perception_obstacles)
perception_pub.write(perception_obstacles)
sleep_time = 0.1 - (time.time() - now)
if sleep_time > 0:
time.sleep(sleep_time)
cyber.shutdown()
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import time
import argparse
import google.protobuf.text_format as text_format
from cyber_py import cyber
from modules.perception.proto import perception_obstacle_pb2
def update(perception_obstacles):
"""update perception obstacles timestamp"""
now = time.time()
perception_obstacles.header.timestamp_sec = now
perception_obstacles.header.lidar_timestamp = \
(long(now) - long(0.5)) * long(1e9)
for perception_obstacle in perception_obstacles.perception_obstacle:
perception_obstacle.timestamp = now - 0.5
for measure in perception_obstacle.measurements:
measure.timestamp = now - 0.5
return perception_obstacles
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the message file for sending.")
args = parser.parse_args()
record_file = args.file
cyber.init()
node = cyber.Node("perception_obstacle_sender")
perception_pub = node.create_writer(
"/apollo/perception/obstacles",
perception_obstacle_pb2.PerceptionObstacles)
perception_obstacles = perception_obstacle_pb2.PerceptionObstacles()
with open(args.file, 'r') as f:
text_format.Merge(f.read(), perception_obstacles)
while not cyber.is_shutdown():
now = time.time()
perception_obstacles = update(perception_obstacles)
perception_pub.write(perception_obstacles)
sleep_time = 0.1 - (time.time() - now)
if sleep_time > 0:
time.sleep(sleep_time)
cyber.shutdown()
|
apache-2.0
|
Python
|
203cba83527ed39cc478c4f0530e513c71f2a6ad
|
format date in title
|
matplotlib/basemap,guziy/basemap,guziy/basemap,matplotlib/basemap
|
examples/daynight.py
|
examples/daynight.py
|
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nightime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
# map shows through. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date.strftime("%d %b %Y %H:%M:%S"))
plt.show()
|
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nightime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
# map shows through. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date)
plt.show()
|
mit
|
Python
|
ddb0a5d3b684c96b8fe8c4678cdb5e018f1b3d7b
|
Revert last change. Just in case...
|
notapresent/rbm2m,notapresent/rbm2m
|
rbm2m/action/downloader.py
|
rbm2m/action/downloader.py
|
# -*- coding: utf-8 -*-
import urllib
import sys
import requests
from .debug import dump_exception
HOST = 'http://www.recordsbymail.com/'
GENRE_LIST_URL = '{host}browse.php'.format(host=HOST)
SEARCH_URL = '{host}search.php?genre={genre_slug}&format=LP&instock=1'
IMAGE_LIST_URL = '{host}php/getImageArray.php?item={rec_id}'
TIMEOUTS = (3.05, 30) # Connect, read
def fetch(url):
"""
Download content from url and return response object.
Raises `DownloadError` if operation fails
"""
resp = None
try:
resp = requests.get(url, timeout=TIMEOUTS)
resp.raise_for_status()
except requests.RequestException as e:
exc_type, exc_val, tb = sys.exc_info()
notes = resp.text if resp else ''
dump_exception('download', exc_type, exc_val, tb, notes)
raise DownloadError(e)
else:
assert resp is not None
return resp
def fetch_text(url):
"""
Download text content from url and return it.
Raises `DownloadError` if operation fails
"""
return fetch(url).text
def genre_list():
"""
Download page with the list of genres
"""
return fetch_text(GENRE_LIST_URL)
def get_results_page(genre_title, page):
"""
Download search result page
"""
url = SEARCH_URL.format(host=HOST,
genre_slug=urllib.quote_plus(genre_title))
if page:
url = url + '&page={}'.format(page)
return fetch_text(url)
def get_image_list(rec_id):
"""
Download list of images for a record
"""
url = IMAGE_LIST_URL.format(host=HOST, rec_id=rec_id)
return fetch_text(url)
def get_content(url):
"""
Downloads content from url
"""
return fetch(url).content
class DownloadError(requests.RequestException):
"""
Raised for all download errors (timeouts, http errors etc)
"""
pass
|
# -*- coding: utf-8 -*-
import urllib
import sys
import requests
from .debug import dump_exception
HOST = 'http://www.recordsbymail.com/'
GENRE_LIST_URL = '{host}browse.php'.format(host=HOST)
SEARCH_URL = '{host}search.php?genre={genre_slug}&instock=1'
IMAGE_LIST_URL = '{host}php/getImageArray.php?item={rec_id}'
TIMEOUTS = (3.05, 30) # Connect, read
def fetch(url):
"""
Download content from url and return response object.
Raises `DownloadError` if operation fails
"""
resp = None
try:
resp = requests.get(url, timeout=TIMEOUTS)
resp.raise_for_status()
except requests.RequestException as e:
exc_type, exc_val, tb = sys.exc_info()
notes = resp.text if resp else ''
dump_exception('download', exc_type, exc_val, tb, notes)
raise DownloadError(e)
else:
assert resp is not None
return resp
def fetch_text(url):
"""
Download text content from url and return it.
Raises `DownloadError` if operation fails
"""
return fetch(url).text
def genre_list():
"""
Download page with the list of genres
"""
return fetch_text(GENRE_LIST_URL)
def get_results_page(genre_title, page):
"""
Download search result page
"""
url = SEARCH_URL.format(host=HOST,
genre_slug=urllib.quote_plus(genre_title))
if page:
url = url + '&page={}'.format(page)
return fetch_text(url)
def get_image_list(rec_id):
"""
Download list of images for a record
"""
url = IMAGE_LIST_URL.format(host=HOST, rec_id=rec_id)
return fetch_text(url)
def get_content(url):
"""
Downloads content from url
"""
return fetch(url).content
class DownloadError(requests.RequestException):
"""
Raised for all download errors (timeouts, http errors etc)
"""
pass
|
apache-2.0
|
Python
|
480e55794c5f06129b8b2fb7ed02a787f70275e2
|
add --silent option to update-toplist
|
gpodder/mygpo,gpodder/mygpo,gpodder/mygpo,gpodder/mygpo
|
mygpo/directory/management/commands/update-toplist.py
|
mygpo/directory/management/commands/update-toplist.py
|
from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--silent', action='store_true', dest='silent',
default=False, help="Don't show any output"),
)
def handle(self, *args, **options):
silent = options.get('silent')
# couchdbkit doesn't preserve microseconds
started = datetime.utcnow().replace(microsecond=0)
podcasts = Podcast.all_podcasts()
total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
for n, podcast in enumerate(podcasts):
subscriber_count = self.get_subscriber_count(podcast.get_id())
self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
if not silent:
progress(n, total)
@repeat_on_conflict(['podcast'])
def update(self, podcast, started, subscriber_count):
# We've already updated this podcast
if started in [e.timestamp for e in podcast.subscribers]:
return
data = SubscriberData(
timestamp = started,
subscriber_count = max(0, subscriber_count),
)
podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
podcast.save()
@staticmethod
def get_subscriber_count(podcast_id):
db = PodcastUserState.get_db()
x = db.view('users/subscriptions_by_podcast',
startkey = [podcast_id, None],
endkey = [podcast_id, {}],
reduce = True,
group = True,
group_level = 2,
)
return x.count()
|
from datetime import datetime
from django.core.management.base import BaseCommand
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
def handle(self, *args, **options):
# couchdbkit doesn't preserve microseconds
started = datetime.utcnow().replace(microsecond=0)
podcasts = Podcast.all_podcasts()
total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
for n, podcast in enumerate(podcasts):
subscriber_count = self.get_subscriber_count(podcast.get_id())
self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
progress(n, total)
@repeat_on_conflict(['podcast'])
def update(self, podcast, started, subscriber_count):
# We've already updated this podcast
if started in [e.timestamp for e in podcast.subscribers]:
return
data = SubscriberData(
timestamp = started,
subscriber_count = max(0, subscriber_count),
)
podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
podcast.save()
@staticmethod
def get_subscriber_count(podcast_id):
db = PodcastUserState.get_db()
x = db.view('users/subscriptions_by_podcast',
startkey = [podcast_id, None],
endkey = [podcast_id, {}],
reduce = True,
group = True,
group_level = 2,
)
return x.count()
|
agpl-3.0
|
Python
|
006e6b67af6cfb2cca214666ac48dc9fd2cc0339
|
Update test values
|
jkitchin/scopus,scopus-api/scopus
|
scopus/tests/test_CitationOverview.py
|
scopus/tests/test_CitationOverview.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `CitationOverview` module."""
from collections import namedtuple
from nose.tools import assert_equal, assert_true
import scopus
co = scopus.CitationOverview("2-s2.0-84930616647", refresh=True,
start=2015, end=2018)
def test_authors():
Author = namedtuple('Author', 'name surname initials id url')
url = 'https://api.elsevier.com/content/author/author_id/7004212771'
expected = [Author(name='Kitchin J.R.', surname='Kitchin',
initials='J.R.', id='7004212771',
url=url)]
assert_equal(co.authors, expected)
def test_cc():
assert_equal(co.cc, [(2015, '0'), (2016, '4'), (2017, '2'), (2018, '1')])
def test_citationType_long():
assert_equal(co.citationType_long, 'Review')
def test_citationType_short():
assert_equal(co.citationType_short, 're')
def test_doi():
assert_equal(co.doi, '10.1021/acscatal.5b00538')
def test_endingPage():
assert_equal(co.endingPage, '3899')
def test_h_index():
assert_equal(co.h_index, '1')
def test_issn():
assert_equal(co.issn, '2155-5435')
def test_issueIdentifier():
assert_equal(co.issueIdentifier, '6')
def test_lcc():
assert_equal(co.lcc, '0')
def test_pcc():
assert_equal(co.pcc, '0')
def test_pii():
assert_equal(co.pii, None)
def test_publicationName():
assert_equal(co.publicationName, 'ACS Catalysis')
def test_rangeCount():
assert_equal(co.rangeCount, '7')
def test_rowTotal():
assert_equal(co.rowTotal, '7')
def test_scopus_id():
assert_equal(co.scopus_id, '84930616647')
def test_startingPage():
assert_equal(co.startingPage, '3894')
def test_title():
expected = 'Examples of effective data sharing in scientific publishing'
assert_equal(co.title, expected)
def test_url():
expected = 'https://api.elsevier.com/content/abstract/scopus_id/84930616647'
assert_equal(co.url, expected)
def test_volume():
assert_equal(co.volume, '5')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `CitationOverview` module."""
from collections import namedtuple
from nose.tools import assert_equal, assert_true
import scopus
co = scopus.CitationOverview("2-s2.0-84930616647", refresh=True,
start=2015, end=2017)
def test_authors():
Author = namedtuple('Author', 'name surname initials id url')
url = 'https://api.elsevier.com/content/author/author_id/7004212771'
expected = [Author(name='Kitchin J.R.', surname='Kitchin',
initials='J.R.', id='7004212771',
url=url)]
assert_equal(co.authors, expected)
def test_cc():
assert_equal(co.cc, [(2015, '0'), (2016, '4'), (2017, '2')])
def test_citationType_long():
assert_equal(co.citationType_long, 'Review')
def test_citationType_short():
assert_equal(co.citationType_short, 're')
def test_doi():
assert_equal(co.doi, '10.1021/acscatal.5b00538')
def test_endingPage():
assert_equal(co.endingPage, '3899')
def test_h_index():
assert_equal(co.h_index, '1')
def test_issn():
assert_equal(co.issn, '2155-5435')
def test_issueIdentifier():
assert_equal(co.issueIdentifier, '6')
def test_lcc():
assert_equal(co.lcc, '0')
def test_pcc():
assert_equal(co.pcc, '0')
def test_pii():
assert_equal(co.pii, None)
def test_publicationName():
assert_equal(co.publicationName, 'ACS Catalysis')
def test_rangeCount():
assert_equal(co.rangeCount, '6')
def test_rowTotal():
assert_equal(co.rowTotal, '6')
def test_scopus_id():
assert_equal(co.scopus_id, '84930616647')
def test_startingPage():
assert_equal(co.startingPage, '3894')
def test_title():
expected = 'Examples of effective data sharing in scientific publishing'
assert_equal(co.title, expected)
def test_url():
expected = 'https://api.elsevier.com/content/abstract/scopus_id/84930616647'
assert_equal(co.url, expected)
def test_volume():
assert_equal(co.volume, '5')
|
mit
|
Python
|
4464b72eac2cc995a3276341f066bee30497d621
|
Bump version to 1.1.0 for release
|
sirosen/globus-sdk-python,globus/globus-sdk-python,aaschaer/globus-sdk-python,globus/globus-sdk-python,globusonline/globus-sdk-python
|
globus_sdk/version.py
|
globus_sdk/version.py
|
# single source of truth for package version,
# see https://packaging.python.org/en/latest/single_source_version/
__version__ = "1.1.0"
|
# single source of truth for package version,
# see https://packaging.python.org/en/latest/single_source_version/
__version__ = "1.0.0"
|
apache-2.0
|
Python
|
bb9d1255548b46dc2ba7a85e26606b7dd4c926f3
|
Update original "Hello, World!" parser to latest coding, plus runTests
|
pyparsing/pyparsing,pyparsing/pyparsing
|
examples/greeting.py
|
examples/greeting.py
|
# greeting.py
#
# Demonstration of the pyparsing module, on the prototypical "Hello, World!"
# example
#
# Copyright 2003, 2019 by Paul McGuire
#
import pyparsing as pp
# define grammar
greet = pp.Word(pp.alphas) + "," + pp.Word(pp.alphas) + pp.oneOf("! ? .")
# input string
hello = "Hello, World!"
# parse input string
print(hello, "->", greet.parseString( hello ))
# parse a bunch of input strings
greet.runTests("""\
Hello, World!
Ahoy, Matey!
Howdy, Pardner!
Morning, Neighbor!
""")
|
# greeting.py
#
# Demonstration of the pyparsing module, on the prototypical "Hello, World!"
# example
#
# Copyright 2003, by Paul McGuire
#
from pyparsing import Word, alphas
# define grammar
greet = Word( alphas ) + "," + Word( alphas ) + "!"
# input string
hello = "Hello, World!"
# parse input string
print(hello, "->", greet.parseString( hello ))
|
mit
|
Python
|
04d0bb1bf71ee3a17efbb4bb15bb808cc832f04b
|
Update examples.py
|
Slater-Victoroff/PyFuzz
|
examples/examples.py
|
examples/examples.py
|
from py_fuzz.generator import *
print random_language(language="russian")
print random_ascii(
seed="this is a test", randomization="byte_jitter",
mutation_rate=0.25
)
print random_regex(
length=20, regex="[a-zA-Z]"
)
print random_utf8(
min_length=10,
max_length=50
)
print random_bytes()
print random_utf8()
print random_regex(regex="[a-zA-Z]")
with open("test.png", "wb") as dump:
dump.write(random_image())
with open("fake.png", 'wb') as dump:
dump.write(random_image(randomization="byte_jitter", height=300, width=500, mutation_rate=0))
with open("randomLenna.png", "wb") as dump:
dump.write("")
random_valid_image(seed="Lenna.png", mutation_rate=0.1)
|
from py_fuzz import *
print random_language(language="russian")
print random_ascii(
seed="this is a test", randomization="byte_jitter",
mutation_rate=0.25
)
print random_regex(
length=20, regex="[a-zA-Z]"
)
print random_utf8(
min_length=10,
max_length=50
)
print random_bytes()
print random_utf8()
print random_regex(regex="[a-zA-Z]")
with open("test.png", "wb") as dump:
dump.write(random_image())
with open("fake.png", 'wb') as dump:
dump.write(random_image(randomization="byte_jitter", height=300, width=500, mutation_rate=0))
with open("randomLenna.png", "wb") as dump:
dump.write("")
random_valid_image(seed="Lenna.png", mutation_rate=0.1)
|
mit
|
Python
|
bc6c3834cd8383f7e1f9e109f0413bb6015a92bf
|
Remove unneeded datetime from view
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
go/scheduler/views.py
|
go/scheduler/views.py
|
from django.views.generic import ListView
from go.scheduler.models import Task
class SchedulerListView(ListView):
paginate_by = 12
context_object_name = 'tasks'
template = 'scheduler/task_list.html'
def get_queryset(self):
return Task.objects.filter(
account_id=self.request.user_api.user_account_key
).order_by('-scheduled_for')
|
import datetime
from django.views.generic import ListView
from go.scheduler.models import Task
class SchedulerListView(ListView):
paginate_by = 12
context_object_name = 'tasks'
template = 'scheduler/task_list.html'
def get_queryset(self):
now = datetime.datetime.utcnow()
return Task.objects.filter(
account_id=self.request.user_api.user_account_key
).order_by('-scheduled_for')
|
bsd-3-clause
|
Python
|
654034d3a0c6ec4e023af6118d6e628336bc39dd
|
Upgrade to Python 3
|
16bytes/rpt2csv.py
|
rpt2csv.py
|
rpt2csv.py
|
import sys
import csv
import codecs
def convert(inputFile,outputFile):
"""
Convert a RPT file to a properly escaped CSV file
RPT files are usually sourced from old versions of Microsoft SQL Server Management Studio
RPT files are fixed width with column names on the first line, a second line with dashes and spaces,
and then on one row per record.
The column widths are calculated from the longest field in a column, so the format varies
depending on the results. Thankfully, we can reliably infer column widths by looking at the indexes
of spaces on the second line.
Here we chop each record at the index of the space on the second line and strip the result.
Note, if the source data has significant whitespace, the striping will remove this, but likely significant
whitespace was destroyed by the RPT field padding anyway.
"""
writer = csv.writer(outputFile)
fieldIndexes = []
headers = ""
for idx, val in enumerate(inputFile):
if(idx == 0):
headers = val
elif(idx == 1):
fieldIndexes = list(getFieldIndexes(val," "))
row = list(getFields(headers,fieldIndexes))
writer.writerow(row)
else:
row = list(getFields(val,fieldIndexes))
writer.writerow(row)
def getFieldIndexes(input, sep):
lastIndex = 0
for idx, c in enumerate(input):
if(c == sep):
yield (lastIndex,idx)
lastIndex = idx+1
yield lastIndex, len(input)
def getFields(input, indexes):
for index in indexes:
yield input[index[0]:index[1]].strip()
if __name__ == '__main__':
if(len(sys.argv) == 3):
with open(sys.argv[1],encoding='utf-8-sig') as inputFile:
with open(sys.argv[2],'w',newline='') as outputFile:
convert(inputFile,outputFile)
else:
print("Usage: rpt2csv.py inputFile outputFile")
|
import sys
import csv
def convert(inputFile,outputFile):
"""
Convert a RPT file to a properly escaped CSV file
RPT files are usually sourced from old versions of Microsoft SQL Server Management Studio
RPT files are fixed width with column names on the first line, a second line with dashes and spaces,
and then on one row per record.
The column widths are calculated from the longest field in a column, so the format varies
depending on the results. Thankfully, we can reliably infer column widths by looking at the indexes
of spaces on the second line.
Here we chop each record at the index of the space on the second line and strip the result.
Note, if the source data has significant whitespace, the striping will remove this, but likely significant
whitespace was destroyed by the RPT field padding anyway.
"""
writer = csv.writer(outputFile)
fieldIndexes = []
headers = ""
for idx, val in enumerate(inputFile):
if(idx == 0):
headers = val.decode('utf-8-sig')
elif(idx == 1):
fieldIndexes = list(getFieldIndexes(val," "))
row = list(getFields(headers,fieldIndexes))
writer.writerow(row)
else:
row = list(getFields(val,fieldIndexes))
writer.writerow(row)
def getFieldIndexes(input, sep):
lastIndex = 0
for idx, c in enumerate(input):
if(c == sep):
yield (lastIndex,idx)
lastIndex = idx+1
yield lastIndex, len(input)
def getFields(input, indexes):
for index in indexes:
yield input[index[0]:index[1]].strip()
if __name__ == '__main__':
if(len(sys.argv) == 3):
with open(sys.argv[1]) as inputFile:
with open(sys.argv[2],'wb') as outputFile:
convert(inputFile,outputFile)
else:
print("Usage: rpt2csv.py inputFile outputFile")
|
mit
|
Python
|
4d5edd17d7382108b90d3f60f2f11317da228603
|
Add kafka start/stop script
|
wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark
|
script/kafkaServer.py
|
script/kafkaServer.py
|
#!/bin/python
from __future__ import print_function
import subprocess
import sys
import json
from util import appendline, get_ip_address
if __name__ == "__main__":
# start server one by one
if len(sys.argv) < 2 or sys.argv[1] not in ['start', 'stop']:
sys.stderr.write("Usage: python %s start or stop\n" % (sys.argv[0]))
sys.exit(1)
else:
config = json.load(open('cluster-config.json'))
if sys.argv[1] == 'start':
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-start.sh'])
else:
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-stop.sh'])
|
#!/bin/python
from __future__ import print_function
import subprocess
import sys
import json
from util import appendline, get_ip_address
if __name__ == "__main__":
# start server one by one
if len(sys.argv) < 2 or sys.argv[1] not in ['start', 'stop']:
sys.stderr.write("Usage: python %s start or stop\n" % (sys.argv[0]))
sys.exit(1)
else:
config = json.load(open('cluster-config.json'))
if sys.argv[1] == 'start':
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-start.sh ')
else:
for node in config['nodes']:
subprocess.call(['ssh', 'cloud-user@'+node['ip'], 'bash /usr/local/kafka/bin/kafka-server-stop.sh ')
|
apache-2.0
|
Python
|
ebfaf30fca157e83ea9e4bf33173221fc9525caf
|
Fix emplorrs demo salary db error
|
viewflow/django-material,viewflow/django-material,viewflow/django-material
|
demo/examples/employees/forms.py
|
demo/examples/employees/forms.py
|
from django import forms
from .models import Employee, DeptManager, Title, Salary
class ChangeManagerForm(forms.Form):
manager = forms.ModelChoiceField(queryset=Employee.objects.all()[:100])
def __init__(self, *args, **kwargs):
self.department = kwargs.pop('department')
super(ChangeManagerForm, self).__init__(*args, **kwargs)
def save(self):
new_manager = self.cleaned_data['manager']
DeptManager.objects.filter(
department=self.department
).set(
department=self.department,
employee=new_manager
)
class ChangeTitleForm(forms.Form):
position = forms.CharField()
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeTitleForm, self).__init__(*args, **kwargs)
def save(self):
new_title = self.cleaned_data['position']
Title.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
title=new_title
)
class ChangeSalaryForm(forms.Form):
salary = forms.IntegerField(max_value=1000000)
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeSalaryForm, self).__init__(*args, **kwargs)
def save(self):
new_salary = self.cleaned_data['salary']
Salary.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
salary=new_salary,
)
|
from datetime import date
from django import forms
from django.utils import timezone
from .models import Employee, DeptManager, Title, Salary
class ChangeManagerForm(forms.Form):
manager = forms.ModelChoiceField(queryset=Employee.objects.all()[:100])
def __init__(self, *args, **kwargs):
self.department = kwargs.pop('department')
super(ChangeManagerForm, self).__init__(*args, **kwargs)
def save(self):
new_manager = self.cleaned_data['manager']
DeptManager.objects.filter(
department=self.department
).set(
department=self.department,
employee=new_manager
)
class ChangeTitleForm(forms.Form):
position = forms.CharField()
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeTitleForm, self).__init__(*args, **kwargs)
def save(self):
new_title = self.cleaned_data['position']
Title.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
title=new_title
)
class ChangeSalaryForm(forms.Form):
salary = forms.IntegerField()
def __init__(self, *args, **kwargs):
self.employee = kwargs.pop('employee')
super(ChangeSalaryForm, self).__init__(*args, **kwargs)
def save(self):
new_salary = self.cleaned_data['salary']
Salary.objects.filter(
employee=self.employee,
).set(
employee=self.employee,
salary=new_salary,
)
|
bsd-3-clause
|
Python
|
06f78c21e6b7e3327244e89e90365169f4c32ea1
|
Fix style issues raised by pep8.
|
myersjustinc/django-calaccess-campaign-browser,dwillis/django-calaccess-campaign-browser,myersjustinc/django-calaccess-campaign-browser,california-civic-data-coalition/django-calaccess-campaign-browser,dwillis/django-calaccess-campaign-browser,california-civic-data-coalition/django-calaccess-campaign-browser
|
calaccess_campaign_browser/api.py
|
calaccess_campaign_browser/api.py
|
from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filer_id_raw': ALL}
excludes = ['id']
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filing_id_raw': ALL}
excludes = ['id']
|
from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = { 'filer_id_raw': ALL }
excludes = [ 'id' ]
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = { 'filing_id_raw': ALL }
excludes = [ 'id' ]
|
mit
|
Python
|
a473b2cb9af95c1296ecae4d2138142f2be397ee
|
Add variant extension in example script
|
cihai/cihai,cihai/cihai-python,cihai/cihai
|
examples/variants.py
|
examples/variants.py
|
#!/usr/bin/env python
# -*- coding: utf8 - *-
from __future__ import print_function, unicode_literals
from cihai.bootstrap import bootstrap_unihan
from cihai.core import Cihai
def variant_list(unihan, field):
for char in unihan.with_fields(field):
print("Character: {}".format(char.char))
for var in char.untagged_vars(field):
print(var)
def script(unihan_options={}):
"""Wrapped so we can test in tests/test_examples.py"""
print("This example prints variant character data.")
c = Cihai()
c.add_dataset('cihai.unihan.Unihan', namespace='unihan')
if not c.sql.is_bootstrapped: # download and install Unihan to db
bootstrap_unihan(c.sql.metadata, options=unihan_options)
c.sql.reflect_db() # automap new table created during bootstrap
c.unihan.add_extension('cihai.unihan.UnihanVariants', namespace='variants')
print("## ZVariants")
variant_list(c.unihan, "kZVariant")
print("## kSemanticVariant")
variant_list(c.unihan, "kSemanticVariant")
print("## kSpecializedSemanticVariant")
variant_list(c.unihan, "kSpecializedSemanticVariant")
if __name__ == '__main__':
script()
|
#!/usr/bin/env python
# -*- coding: utf8 - *-
from __future__ import print_function, unicode_literals
from cihai.bootstrap import bootstrap_unihan
from cihai.core import Cihai
def variant_list(unihan, field):
for char in unihan.with_fields(field):
print("Character: {}".format(char.char))
for var in char.untagged_vars(field):
print(var)
def script(unihan_options={}):
"""Wrapped so we can test in tests/test_examples.py"""
print("This example prints variant character data.")
c = Cihai()
c.add_dataset('cihai.unihan.Unihan', namespace='unihan')
if not c.sql.is_bootstrapped: # download and install Unihan to db
bootstrap_unihan(c.sql.metadata, options=unihan_options)
c.sql.reflect_db() # automap new table created during bootstrap
print("## ZVariants")
variant_list(c.unihan, "kZVariant")
print("## kSemanticVariant")
variant_list(c.unihan, "kSemanticVariant")
print("## kSpecializedSemanticVariant")
variant_list(c.unihan, "kSpecializedSemanticVariant")
if __name__ == '__main__':
script()
|
mit
|
Python
|
eeebe264c4d873369f3d24b2e7b676e004eb6671
|
Fix path bug in update_source.
|
yarikoptic/NiPy-OLD,yarikoptic/NiPy-OLD
|
neuroimaging/externals/pynifti/utils/update_source.py
|
neuroimaging/externals/pynifti/utils/update_source.py
|
#!/usr/bin/env python
"""Copy source files from pynifti git directory into nipy source directory.
We only want to copy the files necessary to build pynifti and the nifticlibs,
and use them within nipy. We will not copy docs, tests, etc...
Pynifti should be build before this script is run so swig generates the
wrapper for nifticlib. We do not want swig as a dependency for nipy.
"""
from os import mkdir
from os.path import join, exists, expanduser
from shutil import copy2 as copy
"""
The pynifti source should be in a directory level with nipy-trunk
Ex:
/Users/cburns/src/nipy
/Users/cburns/src/pynifti
"""
src_dir = expanduser('~/src/pynifti')
# Destination directory is the top-level externals/pynifti directory
dst_dir = '..'
assert exists(src_dir)
copy(join(src_dir, 'AUTHOR'), join(dst_dir, 'AUTHOR'))
copy(join(src_dir, 'COPYING'), join(dst_dir, 'COPYING'))
# pynifti source and swig wrappers
nifti_list = ['niftiformat.py', 'niftiimage.py', 'utils.py',
'nifticlib.py', 'nifticlib_wrap.c']
nifti_src = join(src_dir, 'nifti')
nifti_dst = join(dst_dir, 'nifti')
if not exists(nifti_dst):
mkdir(nifti_dst)
def copynifti(filename):
copy(join(nifti_src, filename), join(nifti_dst, filename))
for nf in nifti_list:
copynifti(nf)
# nifticlib sources
nifticlib_list = ['LICENSE', 'README', 'nifti1.h', 'nifti1_io.c',
'nifti1_io.h', 'znzlib.c', 'znzlib.h']
nifticlib_src = join(src_dir, '3rd', 'nifticlibs')
nifticlib_dst = join(nifti_dst, 'nifticlibs')
if not exists(nifticlib_dst):
mkdir(nifticlib_dst)
def copynifticlib(filename):
copy(join(nifticlib_src, filename), join(nifticlib_dst, filename))
for nf in nifticlib_list:
copynifticlib(nf)
|
#!/usr/bin/env python
"""Copy source files from pynifti git directory into nipy source directory.
We only want to copy the files necessary to build pynifti and the nifticlibs,
and use them within nipy. We will not copy docs, tests, etc...
Pynifti should be build before this script is run so swig generates the
wrapper for nifticlib. We do not want swig as a dependency for nipy.
"""
from os import mkdir
from os.path import join, exists
from shutil import copy2 as copy
"""
The pynifti source should be in a directory level with nipy-trunk
Ex:
/Users/cburns/src/nipy
/Users/cburns/src/pynifti
"""
src_dir = '../../../../../pynifti'
# Destination directory is the top-level externals/pynifti directory
dst_dir = '..'
assert exists(src_dir)
copy(join(src_dir, 'AUTHOR'), join(dst_dir, 'AUTHOR'))
copy(join(src_dir, 'COPYING'), join(dst_dir, 'COPYING'))
# pynifti source and swig wrappers
nifti_list = ['niftiformat.py', 'niftiimage.py', 'utils.py',
'nifticlib.py', 'nifticlib_wrap.c']
nifti_src = join(src_dir, 'nifti')
nifti_dst = join(dst_dir, 'nifti')
if not exists(nifti_dst):
mkdir(nifti_dst)
def copynifti(filename):
copy(join(nifti_src, filename), join(nifti_dst, filename))
for nf in nifti_list:
copynifti(nf)
# nifticlib sources
nifticlib_list = ['LICENSE', 'README', 'nifti1.h', 'nifti1_io.c',
'nifti1_io.h', 'znzlib.c', 'znzlib.h']
nifticlib_src = join(src_dir, '3rd', 'nifticlibs')
nifticlib_dst = join(nifti_dst, 'nifticlibs')
if not exists(nifticlib_dst):
mkdir(nifticlib_dst)
def copynifticlib(filename):
copy(join(nifticlib_src, filename), join(nifticlib_dst, filename))
for nf in nifticlib_list:
copynifticlib(nf)
|
bsd-3-clause
|
Python
|
ccb6728111a3142830bd4b3fccb8a956002013f0
|
Update example to remove upload, not relevant for plotly!
|
liquidinstruments/pymoku,benizl/pymoku
|
examples/plotly_datalogger.py
|
examples/plotly_datalogger.py
|
from pymoku import Moku, MokuException
from pymoku.instruments import *
import pymoku.plotly_support as pmp
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.INFO)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
linespec = {
'shape' : 'spline',
'width' : '2'
}
try:
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
if i.datalogger_busy():
i.datalogger_stop()
pmp.stream_init(m, 'benizl.anu', 'na8qic5nqw', 'kdi5h54dhl', 'v7qd9o6bcq', line=linespec)
i.datalogger_start(start=10, duration=600, filetype='plot')
print "Plotly URL is: %s" % pmp.stream_url(m)
while True:
time.sleep(1)
trems, treme = i.datalogger_remaining()
samples = i.datalogger_samples()
print "Captured (%d samples); %d seconds from start, %d from end" % (samples, trems, treme)
# TODO: Symbolic constants
if i.datalogger_completed():
break
e = i.datalogger_error()
if e:
print "Error occured: %s" % e
except Exception:
traceback.print_exc()
finally:
i.datalogger_stop()
m.close()
|
from pymoku import Moku, MokuException
from pymoku.instruments import *
import pymoku.plotly_support as pmp
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
linespec = {
'shape' : 'spline',
'width' : '2'
}
try:
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
if i.datalogger_busy():
i.datalogger_stop()
pmp.stream_init(m, 'benizl.anu', 'na8qic5nqw', 'kdi5h54dhl', 'v7qd9o6bcq', line=linespec)
i.datalogger_start(start=0, duration=60*10, filetype='plot')
print "Plotly URL is: %s" % pmp.stream_url(m)
while True:
time.sleep(1)
trems, treme = i.datalogger_remaining()
samples = i.datalogger_samples()
print "Captured (%d samples); %d seconds from start, %d from end" % (samples, trems, treme)
# TODO: Symbolic constants
if i.datalogger_completed():
break
e = i.datalogger_error()
if e:
print "Error occured: %s" % e
i.datalogger_stop()
i.datalogger_upload()
except Exception as e:
print e
finally:
i.datalogger_stop()
m.close()
|
mit
|
Python
|
09bd40bc8d29fab157630d6411aa8316148a10d6
|
Fix indentation bug
|
SPIhub/hummingbird,FXIhub/hummingbird,FXIhub/hummingbird
|
src/backend.py
|
src/backend.py
|
import os
import logging
import imp
import translation
#from mpi4py import MPI
class Backend(object):
def __init__(self, config_file):
if(config_file is None):
# Try to load an example configuration file
config_file = os.path.abspath(os.path.dirname(__file__)+
"/../examples/cxitut13/conf.py")
logging.warning("No configuration file given! "
"Loading example configuration from %s" % (config_file))
self.backend_conf = imp.load_source('backend_conf', config_file)
self.translator = translation.init_translator(self.backend_conf.state)
print 'Starting backend...'
def mpi_init(self):
comm = MPI.COMM_WORLD
self.rank = comm.Get_rank()
print "MPI rank %d inited" % rank
def start(self):
self.backend_conf.state['_running'] = True
while(self.backend_conf.state['_running']):
evt = self.translator.nextEvent()
self.backend_conf.onEvent(evt)
|
import os
import logging
import imp
import translation
#from mpi4py import MPI
class Backend(object):
def __init__(self, config_file):
if(config_file is None):
# Try to load an example configuration file
config_file = os.path.abspath(os.path.dirname(__file__)+
"/../examples/cxitut13/conf.py")
logging.warning("No configuration file given! "
"Loading example configuration from %s" % (config_file))
self.backend_conf = imp.load_source('backend_conf', config_file)
self.translator = translation.init_translator(self.backend_conf.state)
print 'Starting backend...'
def mpi_init(self):
comm = MPI.COMM_WORLD
self.rank = comm.Get_rank()
print "MPI rank %d inited" % rank
def start(self):
self.backend_conf.state['_running'] = True
while(self.backend_conf.state['_running']):
evt = self.translator.nextEvent()
self.backend_conf.onEvent(evt)
|
bsd-2-clause
|
Python
|
2e5f5fc689ee55f32556be69dcbf0672ea7fdbed
|
change deprecation warning
|
nikitanovosibirsk/district42
|
district42/json_schema/schema.py
|
district42/json_schema/schema.py
|
import warnings
from copy import deepcopy
from ..errors import DeclarationError
from .types import (Any, AnyOf, Array, ArrayOf, Boolean, Enum, Null, Number,
Object, OneOf, SchemaType, String, Timestamp, Undefined)
class Schema:
def ref(self, schema):
return deepcopy(schema)
def from_native(self, value):
if value is None:
return self.null
datatype = type(value)
if datatype is bool:
return self.boolean(value)
elif datatype is int:
return self.integer(value)
elif datatype is float:
return self.float(value)
elif datatype is str:
return self.string(value)
elif datatype is list:
return self.array([self.from_native(elem) for elem in value])
elif datatype is dict:
return self.object({k: self.from_native(v) for k, v in value.items()})
elif datatype is tuple:
return self.enum(*value)
raise DeclarationError('Unknown type "{}"'.format(datatype))
@property
def null(self):
return Null()
@property
def boolean(self):
return Boolean()
@property
def number(self):
return Number()
@property
def integer(self):
return Number().integer
@property
def float(self):
return Number().float
@property
def string(self):
return String()
@property
def timestamp(self):
return Timestamp()
@property
def array(self):
return Array()
@property
def array_of(self):
message = 'schema.array_of is deprecated, use schema.array.of instead'
warnings.warn(message, DeprecationWarning, stacklevel=2)
return ArrayOf()
@property
def object(self):
return Object()
@property
def any(self):
return Any()
@property
def any_of(self):
return AnyOf()
@property
def one_of(self):
return OneOf()
@property
def enum(self):
return Enum()
@property
def undefined(self):
return Undefined()
|
import warnings
from copy import deepcopy
from ..errors import DeclarationError
from .types import (Any, AnyOf, Array, ArrayOf, Boolean, Enum, Null, Number,
Object, OneOf, SchemaType, String, Timestamp, Undefined)
class Schema:
def ref(self, schema):
return deepcopy(schema)
def from_native(self, value):
if value is None:
return self.null
datatype = type(value)
if datatype is bool:
return self.boolean(value)
elif datatype is int:
return self.integer(value)
elif datatype is float:
return self.float(value)
elif datatype is str:
return self.string(value)
elif datatype is list:
return self.array([self.from_native(elem) for elem in value])
elif datatype is dict:
return self.object({k: self.from_native(v) for k, v in value.items()})
elif datatype is tuple:
return self.enum(*value)
raise DeclarationError('Unknown type "{}"'.format(datatype))
@property
def null(self):
return Null()
@property
def boolean(self):
return Boolean()
@property
def number(self):
return Number()
@property
def integer(self):
return Number().integer
@property
def float(self):
return Number().float
@property
def string(self):
return String()
@property
def timestamp(self):
return Timestamp()
@property
def array(self):
return Array()
@property
def array_of(self):
warnings.warn('deprecated', DeprecationWarning, stacklevel=2)
return ArrayOf()
@property
def object(self):
return Object()
@property
def any(self):
return Any()
@property
def any_of(self):
return AnyOf()
@property
def one_of(self):
return OneOf()
@property
def enum(self):
return Enum()
@property
def undefined(self):
return Undefined()
|
apache-2.0
|
Python
|
a0e1183d9da98dd9f79c496b055cab0bb2638532
|
Update h_RNN
|
carefree0910/MachineLearning
|
h_RNN/Mnist.py
|
h_RNN/Mnist.py
|
import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import time
import numpy as np
import tensorflow as tf
from h_RNN.RNN import RNNWrapper, Generator
from h_RNN.SpRNN import SparseRNN
from Util.Util import DataUtil
class MnistGenerator(Generator):
def __init__(self, im=None, om=None, one_hot=True):
super(MnistGenerator, self).__init__(im, om)
self._x, self._y = DataUtil.get_dataset("mnist", "../_Data/mnist.txt", quantized=True, one_hot=one_hot)
self._x = self._x.reshape(-1, 28, 28)
self._x_train, self._x_test = self._x[:1800], self._x[1800:]
self._y_train, self._y_test = self._y[:1800], self._y[1800:]
def gen(self, batch, test=False, **kwargs):
if batch == 0:
if test:
return self._x_test, self._y_test
return self._x_train, self._y_train
batch = np.random.choice(len(self._x_train), batch)
return self._x_train[batch], self._y_train[batch]
if __name__ == '__main__':
n_history = 3
print("=" * 60, "\n" + "Normal LSTM", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10, squeeze=True)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Sparse LSTM" + "\n" + "-" * 60)
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = SparseRNN()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
|
import time
import tflearn
import numpy as np
import tensorflow as tf
from h_RNN.RNN import RNNWrapper, Generator
from h_RNN.SpRNN import SparseRNN
from Util.Util import DataUtil
class MnistGenerator(Generator):
def __init__(self, im=None, om=None, one_hot=True):
super(MnistGenerator, self).__init__(im, om)
self._x, self._y = DataUtil.get_dataset("mnist", "../_Data/mnist.txt", quantized=True, one_hot=one_hot)
self._x = self._x.reshape(-1, 28, 28)
self._x_train, self._x_test = self._x[:1800], self._x[1800:]
self._y_train, self._y_test = self._y[:1800], self._y[1800:]
def gen(self, batch, test=False, **kwargs):
if batch == 0:
if test:
return self._x_test, self._y_test
return self._x_train, self._y_train
batch = np.random.choice(len(self._x_train), batch)
return self._x_train[batch], self._y_train[batch]
if __name__ == '__main__':
n_history = 3
print("=" * 60, "\n" + "Normal LSTM", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10, squeeze=True)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Sparse LSTM" + "\n" + "-" * 60)
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = SparseRNN()
rnn.fit(28, 10, generator, n_history=n_history, epoch=10)
print("Time Cost: {}".format(time.time() - t))
rnn.draw_err_logs()
print("=" * 60, "\n" + "Tflearn", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
net = tflearn.input_data(shape=[None, 28, 28])
net = tf.concat(tflearn.lstm(net, 128, return_seq=True)[-n_history:], axis=1)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', batch_size=64,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)
print("Time Cost: {}".format(time.time() - t))
|
mit
|
Python
|
b9cc76d410ca034918c615402e3fbe82b226859e
|
Add public address validation test.
|
joeyespo/path-and-address
|
path_and_address/tests/test_validation.py
|
path_and_address/tests/test_validation.py
|
from itertools import product
from ..validation import valid_address, valid_hostname, valid_port
def _join(host_and_port):
return '%s:%s' % host_and_port
def _join_all(hostnames, ports):
return map(_join, product(hostnames, ports))
hostnames = [
'0.0.0.0',
'127.0.0.1',
'localhost',
'example.com',
'example.org',
]
invalid_hostnames = [
'http://example.com',
'http://example.com:8080',
'example.com/',
'example.com:8080/',
'example.com:0',
'0.0.0.0:0',
]
ports = [1, 80, 5000, 8080, 65535]
invalid_ports = [None, -80, -1, 0, 65536, 75000,
float('nan'), '', 'nan', 'hello', 'a string']
addresses = hostnames + ports + _join_all(hostnames, ports)
invalid_addresses = invalid_hostnames \
+ _join_all(hostnames, invalid_ports) \
+ _join_all(invalid_hostnames, ports) \
+ _join_all(invalid_hostnames, invalid_ports)
def test_valid_address():
for address in addresses:
assert valid_address(address), 'Invalid address, expected to be valid: ' + repr(address)
for address in invalid_addresses:
assert not valid_address(address), 'Valid address, expected to be invalid: ' + repr(address)
def test_valid_hostname():
for hostname in hostnames:
assert valid_hostname(hostname), 'Invalid hostname, expected to be valid: ' + repr(hostname)
for hostname in invalid_hostnames:
assert not valid_hostname(hostname), 'Valid hostname, expected to be invalid: ' + repr(hostname)
def test_valid_port():
for port in ports:
assert valid_port(port), 'Invalid port, expected to be valid: ' + repr(port)
for port in invalid_ports:
assert not valid_port(port), 'Valid port, expected to be invalid: ' + repr(port)
|
from itertools import product
from ..validation import valid_address, valid_hostname, valid_port
def _join(host_and_port):
return '%s:%s' % host_and_port
def _join_all(hostnames, ports):
return map(_join, product(hostnames, ports))
hostnames = [
'127.0.0.1',
'localhost',
'example.com',
'example.org',
]
invalid_hostnames = [
'http://example.com',
'http://example.com:8080',
'example.com/',
'example.com:8080/',
'example.com:0',
'localhost:0',
'127.0.0.1:0',
]
ports = [1, 80, 5000, 8080, 65535]
invalid_ports = [None, -80, -1, 0, 65536, 75000,
float('nan'), '', 'nan', 'hello', 'a string']
addresses = hostnames + ports + _join_all(hostnames, ports)
invalid_addresses = invalid_hostnames \
+ _join_all(hostnames, invalid_ports) \
+ _join_all(invalid_hostnames, ports) \
+ _join_all(invalid_hostnames, invalid_ports)
def test_valid_address():
for address in addresses:
assert valid_address(address), 'Invalid address, expected to be valid: ' + repr(address)
for address in invalid_addresses:
assert not valid_address(address), 'Valid address, expected to be invalid: ' + repr(address)
def test_valid_hostname():
for hostname in hostnames:
assert valid_hostname(hostname), 'Invalid hostname, expected to be valid: ' + repr(hostname)
for hostname in invalid_hostnames:
assert not valid_hostname(hostname), 'Valid hostname, expected to be invalid: ' + repr(hostname)
def test_valid_port():
for port in ports:
assert valid_port(port), 'Invalid port, expected to be valid: ' + repr(port)
for port in invalid_ports:
assert not valid_port(port), 'Valid port, expected to be invalid: ' + repr(port)
|
mit
|
Python
|
fd7454610f4cffcfc8c289539b3824f023fe973f
|
change cruise input dim
|
msbeta/apollo,msbeta/apollo,msbeta/apollo,msbeta/apollo,msbeta/apollo,msbeta/apollo
|
modules/tools/prediction/mlp_train/common/configure.py
|
modules/tools/prediction/mlp_train/common/configure.py
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
parameters = {
'mlp': {
'train_data_rate': 0.8,
'size_obstacle_feature': 22,
'size_lane_sequence_feature': 40,
'dim_input': 22 + 40,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 1
},
'cruise_mlp': {
'dim_input': 23 + 8 + 180,
'dim_hidden_1': 50,
'dim_hidden_2': 18,
'dim_output': 2
},
'junction_mlp': {
'dim_input': 3 + 60,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 12
},
'feature': {
'threshold_label_time_delta': 1.0,
'prediction_label_timeframe': 3.0,
'maximum_maneuver_finish_time': 6.0,
# Lane change is defined to be finished if the ratio of deviation
# from center-line to the lane width is within this: (must be < 0.5)
'lane_change_finish_condition': 0.1
}
}
labels = {'go_false': 0, 'go_true': 1, 'cutin_false': -1, 'cutin_true': 2}
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
parameters = {
'mlp': {
'train_data_rate': 0.8,
'size_obstacle_feature': 22,
'size_lane_sequence_feature': 40,
'dim_input': 22 + 40,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 1
},
'cruise_mlp': {
'dim_input': 23 + 180,
'dim_hidden_1': 50,
'dim_hidden_2': 18,
'dim_output': 2
},
'junction_mlp': {
'dim_input': 3 + 60,
'dim_hidden_1': 30,
'dim_hidden_2': 15,
'dim_output': 12
},
'feature': {
'threshold_label_time_delta': 1.0,
'prediction_label_timeframe': 3.0,
'maximum_maneuver_finish_time': 6.0,
# Lane change is defined to be finished if the ratio of deviation
# from center-line to the lane width is within this: (must be < 0.5)
'lane_change_finish_condition': 0.1
}
}
labels = {'go_false': 0, 'go_true': 1, 'cutin_false': -1, 'cutin_true': 2}
|
apache-2.0
|
Python
|
8092efdd0bf5f5ca8d5498cf679b019920c00bfd
|
format with black
|
yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti
|
plugins/feeds/public/virustotal_apiv3.py
|
plugins/feeds/public/virustotal_apiv3.py
|
import logging
import re
import json
from datetime import timedelta, datetime
from core import Feed
from core.config.config import yeti_config
from core.observables import Hash, File
# Variable
VTAPI = yeti_config.get("vt", "key")
headers = {"x-apikey": VTAPI}
limit = 10
params = {"limit": limit}
regex = "[A-Fa-f0-9]{64}" # Find SHA256
class VirusTotalPriv(Feed):
default_values = {
"frequency": timedelta(minutes=5),
"name": "VirusTotalHuntingV3",
"source": "https://www.virustotal.com/api/v3/intelligence/hunting_notifications",
"description": "Feed of hunting for VirusTotal API v3",
}
settings = {
"vt_url_hunting_v3": {
"name": "VT Url Hunting v3",
"description": "Hunting feed for VT API v3",
}
}
def update(self):
if VTAPI:
self.source = (
"https://www.virustotal.com/api/v3/intelligence/hunting_notifications"
)
for index, item in self.update_json(
params=params, headers=headers, key="data"
):
self.analyze(item)
else:
logging.error("Your VT API key is not set in the config file!")
def analyze(self, item):
tags = []
context = {"source": self.name}
# Parse value of interest
subject = item["attributes"]["rule_name"]
date = item["attributes"]["date"]
tags2 = item["attributes"]["tags"]
sha2 = re.search(regex, str(tags2)).group()
date_string = datetime.utcfromtimestamp(date).strftime("%d/%m/%Y %H:%M:%S")
tags2.remove(sha2)
# Update to Yeti DB
f_vt3 = File.get_or_create(value="FILE:{}".format(sha2))
sha256 = Hash.get_or_create(value=sha2)
f_vt3.active_link_to(sha256, "sha256", self.name)
tags.append(tags2)
context["date_added"] = date_string
context["snippet"] = item["attributes"]["snippet"]
# context['source_country'] = item["attributes"]['source_country']
context["raw"] = item
f_vt3.tag(str(tags))
f_vt3.add_context(context)
|
import logging
import re
import json
from datetime import timedelta, datetime
from core import Feed
from core.config.config import yeti_config
from core.observables import Hash, File
# Variable
VTAPI = yeti_config.get('vt', 'key')
headers = {"x-apikey": VTAPI}
limit = 10
params = {'limit': limit}
regex = "[A-Fa-f0-9]{64}" # Find SHA256
class VirusTotalPriv(Feed):
default_values = {
"frequency": timedelta(minutes=5),
"name": "VirusTotalHuntingV3",
"source": "https://www.virustotal.com/api/v3/intelligence/hunting_notifications",
"description": "Feed of hunting for VirusTotal API v3",
}
settings = {
'vt_url_hunting_v3': {
'name': 'VT Url Hunting v3',
'description': 'Hunting feed for VT API v3'
}
}
def update(self):
if VTAPI:
self.source = "https://www.virustotal.com/api/v3/intelligence/hunting_notifications"
for index, item in self.update_json(params=params, headers=headers, key="data"):
self.analyze(item)
else:
logging.error("Your VT API key is not set in the config file!")
def analyze(self, item):
tags = []
context = {'source': self.name}
# Parse value of interest
subject = item["attributes"]["rule_name"]
date = item["attributes"]["date"]
tags2 = item["attributes"]["tags"]
sha2 = re.search(regex, str(tags2)).group()
date_string = datetime.utcfromtimestamp(date).strftime('%d/%m/%Y %H:%M:%S')
tags2.remove(sha2)
# Update to Yeti DB
f_vt3 = File.get_or_create(value='FILE:{}'.format(sha2))
sha256 = Hash.get_or_create(value=sha2)
f_vt3.active_link_to(sha256, 'sha256', self.name)
tags.append(tags2)
context['date_added'] = date_string
context['snippet'] = item["attributes"]['snippet']
# context['source_country'] = item["attributes"]['source_country']
context['raw'] = item
f_vt3.tag(str(tags))
f_vt3.add_context(context)
|
apache-2.0
|
Python
|
51a9a02ccf4a133818f14f3ff6e864c1e041ec37
|
Update event_chat.py
|
f4ble/pyarc,f4ble/Arkon
|
ark/events/event_chat.py
|
ark/events/event_chat.py
|
from ark.chat_commands import ChatCommands
from ark.cli import *
from ark.database import Db
from ark.rcon import Rcon
class EventChat(object):
@classmethod
def output_chat_from_server(cls,text,line):
out(line)
@classmethod
def parse_chat_command(cls,steam_name,player_name,text,line):
ChatCommands.parse(steam_name,player_name,text)
@classmethod
def update_player_name(cls,steam_name,player_name,text,line):
steam_id = Rcon.find_online_steam_id(steam_name)
if steam_id:
Db.update_player(steam_id, steam_name=steam_name, name=player_name)
@classmethod
def store_chat(cls,steam_name,player_name,text,line):
player = Db.find_player(steam_name=player_name)
player_id = player.id if player is not None else None
Db.create_chat_entry(player_id,player_name,text)
@classmethod
def output_chat(cls,steam_name,player_name,text,line):
out(line)
@classmethod
def filter_chat(cls,steam_name,player_name,text,line):
words=text.split()
res=None
for word in words:
if res is None:
res=Db.check_word(word)
if res:
player=Db.find_player(steam_name=steam_name)
steamid=player.steam_id if player is not None else None
if steamid is not None:
"""Rcon.kick_player(steamid)"""
"""msg=Lang.get('chat_filter_player_kicked').format(player_name,res)"""
msg=Lang.get('chat_filter_forbidden_word').format(player_name,res)
Rcon.broadcast(msg, rcon.response_callback_response_only)
|
from ark.chat_commands import ChatCommands
from ark.cli import *
from ark.database import Db
from ark.rcon import Rcon
class EventChat(object):
@classmethod
def output_chat_from_server(cls,text,line):
out(line)
@classmethod
def parse_chat_command(cls,steam_name,player_name,text,line):
ChatCommands.parse(steam_name,player_name,text)
@classmethod
def update_player_name(cls,steam_name,player_name,text,line):
steam_id = Rcon.find_online_steam_id(steam_name)
if steam_id:
Db.update_player(steam_id, steam_name=steam_name, name=player_name)
@classmethod
def store_chat(cls,steam_name,player_name,text,line):
player = Db.find_player(steam_name=player_name)
player_id = player.id if player is not None else None
Db.create_chat_entry(player_id,player_name,text)
@classmethod
def output_chat(cls,steam_name,player_name,text,line):
out(line)
|
apache-2.0
|
Python
|
e236b7d34cdf156cc16ba8c95b0526785e717898
|
update scenario
|
pkimber/enquiry,pkimber/enquiry,pkimber/enquiry
|
enquiry/tests/scenario.py
|
enquiry/tests/scenario.py
|
from datetime import datetime
from dateutil.relativedelta import relativedelta
from enquiry.tests.model_maker import make_enquiry
def default_scenario_enquiry():
make_enquiry(
'Rick',
'Can I buy some hay?',
'',
'07840 538 357',
)
make_enquiry(
'Ryan',
(
'Can I see some of the fencing you have done?\n'
"I would like to see some of your standard agricultural "
"fencing on a local dairy farm. "
"I like this fencing: http://en.wikipedia.org/wiki/Fencing"
),
'[email protected]',
'01234 567 890',
email_sent=datetime.now() + relativedelta(days=1),
)
|
from enquiry.tests.model_maker import make_enquiry
def default_scenario_enquiry():
make_enquiry(
'Rick',
'Can I buy some hay?',
'',
'07840 538 357',
)
make_enquiry(
'Ryan',
(
'Can I see some of the fencing you have done?\n'
"I would like to see some of your standard agricultural "
"fencing on a local dairy farm. "
"I like this fencing: http://en.wikipedia.org/wiki/Fencing"
),
'[email protected]',
'01234 567 890',
)
|
apache-2.0
|
Python
|
f113aaae2232d0041e01a6f12ab2ba083df65d44
|
Change submit module to use new interface.
|
appeltel/AutoCMS,appeltel/AutoCMS,appeltel/AutoCMS
|
autocms/submit.py
|
autocms/submit.py
|
"""Functions to submit and register new jobs."""
import os
def submit_and_stamp(counter, testname, scheduler, config):
"""Submit a job to the scheduler and produce a newstamp file.
The full path of the newstamp file is returned."""
result = scheduler.submit_job(counter, testname, config)
stamp_filename = ('stamp.' +
str(result.submit_time) +
str(counter))
stamp_path = os.path.join(config['AUTOCMS_BASEDIR'],
testname,
stamp_filename)
with open(stamp_path, 'w') as stampfile:
stampfile.write(result.stamp())
return stamp_path
def get_job_counter(testname, config):
"""Return an integer for the counter to pass to the next job."""
counter_path = os.path.join(config['AUTOCMS_BASEDIR'],
testname,
'counter')
if os.path.exists(counter_path):
with open(counter_path) as handle:
count = handle.read()
else:
count = 1
return int(count)
def set_job_counter(count, testname, config):
"""Write the job counter to file."""
counter_path = os.path.join(config['AUTOCMS_BASEDIR'],
testname,
'counter')
with open(counter_path, 'w') as handle:
handle.write(str(count))
|
"""Functions to submit and register new jobs."""
import os
import socket
def submit_and_stamp(counter, testname, scheduler, config):
"""Submit a job to the scheduler and produce a newstamp file.
This function should be run from within the test directory.
If the submission fails an output log will be produced with the
standard output of the submitter.
The name of the newstamp file is returned."""
result = scheduler.submit_job(counter, testname, config)
newstamp = str(result.id) + ' ' + str(timestamp) + ' ' + str(returncode)
if returncode != 0:
logfile_name = (testname + '.submission.' + str(counter) +
'.' + str(timestamp) + '.log')
newstamp += ' ' + logfile_name
log = "Job submission failed at {0}\n".format(timestamp)
log += "On node {0}\n".format(socket.gethostname())
log += "Submission command output:\n\n"
for line in output:
log += line + '\n'
with open(logfile_name, 'w') as logfile:
logfile.write(log)
newstamp += "\n"
newstamp_filename = 'newstamp.' + str(timestamp)
with open(newstamp_filename, 'w') as nsfile:
nsfile.write(newstamp)
return newstamp_filename
def get_job_counter():
"""Return an integer for the counter to pass to the next job.
This should be called from within the test directory."""
if os.path.exists('counter'):
with open('counter') as handle:
count = handle.read()
else:
count = 1
return int(count)
def set_job_counter(count):
"""Write the job counter to file.
This should be called from within the test directory."""
with open('counter', 'w') as handle:
handle.write(str(count))
|
mit
|
Python
|
0973acf04fd2fd59db4880d5ba4d994f4c1733db
|
Add length detection for PNG images.
|
Bindernews/TheHound
|
identifiers/image_identifier.py
|
identifiers/image_identifier.py
|
import io
from struct import unpack
import sys
from identifier import Result
#############
# Constants #
#############
PNG_CHUNK_IEND = b'IEND'
PNG_CHUNK_IHDR = b'IHDR'
#######################
# Identifier Patterns #
#######################
JPEG_PATTERNS = [
'FF D8 FF E0',
'FF D8 FF E1',
'FF D8 FF FE',
]
GIF_PATTERNS = [
'47 49 46 38 39 61',
'47 49 46 38 37 61',
]
PNG_PATTERNS = [
'89 50 4E 47 0D 0A 1A 0A'
]
BMP_PATTERNS = [
'42 4D 62 25',
'42 4D F8 A9',
'42 4D 76 02',
]
ICO_PATTERNS = [
'00 00 01 00'
]
def read4UB(stream):
return unpack('>I', stream.read(4))[0]
class PngResolver:
def next_chunk(self, stream):
"""
Assumes there is a chunk at the current position in the stream.
Returns the name of the current chunk and its length.
Also advances the stream to the start of the next chunk.
"""
chunk_len = read4UB(stream)
chunk_name = stream.read(4)
stream.seek(chunk_len + 4, io.SEEK_CUR)
return (chunk_name, chunk_len)
def identify(self, stream):
try:
origin = stream.tell()
# Skip to the beginning of the first PNG chunk
stream.seek(origin + 8)
# Check to make sure the first chunk is the IHDR chunk
chunk_name, chunk_len = self.next_chunk(stream)
if chunk_name != PNG_CHUNK_IHDR or chunk_len != 0x0D:
return
# Loop through till we find the final chunk
while chunk_name != PNG_CHUNK_IEND:
chunk_name, chunk_len = self.next_chunk(stream)
# Now calculate the actual file length
end = stream.tell()
length = end - origin
return Result('PNG', 'PNG image file', length=length)
except BaseException as e:
print(e, file=sys.stderr)
# Ignore all errors
pass
class JpegResolver:
def identify(self, stream):
return Result('JPEG', 'JPEG image file')
class GifResolver:
def identify(self, stream):
return Result('GIF', 'GIF image file')
class BmpResolver:
def identify(self, stream):
return Result('BMP', 'BMP image file')
class IcoResolver:
def identity(self, stream):
return Result('ICO', 'Windows icon file')
def load(hound):
# Register JPEGs
hound.add_matches(JPEG_PATTERNS, JpegResolver())
# Register PNGs
hound.add_matches(PNG_PATTERNS, PngResolver())
# Register GIFs
hound.add_matches(GIF_PATTERNS, GifResolver())
# Register BMPs
hound.add_matches(BMP_PATTERNS, BmpResolver())
# Register ICOs
hound.add_matches(ICO_PATTERNS, IcoResolver())
|
# Identifier for basic image files
from identifier import Result
JPEG_PATTERNS = [
'FF D8 FF E0',
'FF D8 FF E1',
'FF D8 FF FE',
]
GIF_PATTERNS = [
'47 49 46 38 39 61',
'47 49 46 38 37 61',
]
PNG_PATTERNS = [
'89 50 4E 47'
]
BMP_PATTERNS = [
'42 4D 62 25',
'42 4D F8 A9',
'42 4D 76 02',
]
ICO_PATTERNS = [
'00 00 01 00'
]
class PngResolver:
def identify(self, stream):
return Result('PNG', 'PNG image file')
class JpegResolver:
def identify(self, stream):
return Result('JPEG', 'JPEG image file')
class GifResolver:
def identify(self, stream):
return Result('GIF', 'GIF image file')
class BmpResolver:
def identify(self, stream):
return Result('BMP', 'BMP image file')
class IcoResolver:
def identity(self, stream):
return Result('ICO', 'Windows icon file')
def load(hound):
# Register JPEGs
hound.add_matches(JPEG_PATTERNS, JpegResolver())
# Register PNGs
hound.add_matches(PNG_PATTERNS, PngResolver())
# Register GIFs
hound.add_matches(GIF_PATTERNS, GifResolver())
# Register BMPs
hound.add_matches(BMP_PATTERNS, BmpResolver())
# Register ICOs
hound.add_matches(ICO_PATTERNS, IcoResolver())
|
mit
|
Python
|
d7c5b8784fd747355884e3371f1c85ede9a9bf6f
|
Disable some packages for now, so that packaging can finish on the buildbots as they are. This should let wrench run the Mono test suite.
|
BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,mono/bockbuild,mono/bockbuild,BansheeMediaPlayer/bockbuild
|
profiles/mono-mac-release-64/packages.py
|
profiles/mono-mac-release-64/packages.py
|
import os
from bockbuild.darwinprofile import DarwinProfile
class MonoReleasePackages:
def __init__(self):
# Toolchain
#package order is very important.
#autoconf and automake don't depend on CC
#ccache uses a different CC since it's not installed yet
#every thing after ccache needs a working ccache
self.packages.extend ([
'autoconf.py',
'automake.py',
'ccache.py',
'libtool.py',
'xz.py',
'tar.py',
'gettext.py',
'pkg-config.py'
])
#needed to autogen gtk+
self.packages.extend ([
'gtk-osx-docbook.py',
'gtk-doc.py',
])
# # Base Libraries
self.packages.extend([
'libpng.py', #needed by cairo
'libjpeg.py',
'libtiff.py',
'libgif.py',
'libxml2.py',
'freetype.py',
'fontconfig.py',
'pixman.py', #needed by cairo
'cairo.py', #needed by Mono graphics functions (System.Drawing)
'libffi.py', #needed by glib
'glib.py',
'pango.py',
'atk.py',
'intltool.py',
'gdk-pixbuf.py',
'gtk+.py',
'libglade.py',
'sqlite.py',
'expat.py',
'ige-mac-integration.py'
])
# # Theme
self.packages.extend([
'libcroco.py',
'librsvg.py',
'hicolor-icon-theme.py',
'gtk-engines.py',
'murrine.py',
'xamarin-gtk-theme.py',
'gtk-quartz-engine.py'
])
# Mono
self.packages.extend([
'mono-llvm.py',
'mono-master.py',
#'libgdiplus.py',
#'xsp.py',
#'gtk-sharp-2.12-release.py',
#'boo.py',
# 'nant.py',
#'ironlangs.py',
#'fsharp-3.1.py',
#'mono-addins.py',
#'mono-basic.py',
])
self.packages = [os.path.join('..', '..', 'packages', p) for p in self.packages]
|
import os
from bockbuild.darwinprofile import DarwinProfile
class MonoReleasePackages:
def __init__(self):
# Toolchain
#package order is very important.
#autoconf and automake don't depend on CC
#ccache uses a different CC since it's not installed yet
#every thing after ccache needs a working ccache
self.packages.extend ([
'autoconf.py',
'automake.py',
'ccache.py',
'libtool.py',
'xz.py',
'tar.py',
'gettext.py',
'pkg-config.py'
])
#needed to autogen gtk+
self.packages.extend ([
'gtk-osx-docbook.py',
'gtk-doc.py',
])
# # Base Libraries
self.packages.extend([
'libpng.py', #needed by cairo
'libjpeg.py',
'libtiff.py',
'libgif.py',
'libxml2.py',
'freetype.py',
'fontconfig.py',
'pixman.py', #needed by cairo
'cairo.py', #needed by Mono graphics functions (System.Drawing)
'libffi.py', #needed by glib
'glib.py',
'pango.py',
'atk.py',
'intltool.py',
'gdk-pixbuf.py',
'gtk+.py',
'libglade.py',
'sqlite.py',
'expat.py',
'ige-mac-integration.py'
])
# # Theme
self.packages.extend([
'libcroco.py',
'librsvg.py',
'hicolor-icon-theme.py',
'gtk-engines.py',
'murrine.py',
'xamarin-gtk-theme.py',
'gtk-quartz-engine.py'
])
# Mono
self.packages.extend([
'mono-llvm.py',
'mono-master.py',
'libgdiplus.py',
'xsp.py',
'gtk-sharp-2.12-release.py',
'boo.py',
# 'nant.py',
'ironlangs.py',
'fsharp-3.1.py',
'mono-addins.py',
'mono-basic.py',
])
self.packages = [os.path.join('..', '..', 'packages', p) for p in self.packages]
|
mit
|
Python
|
fe0d872c69280b5713a4ad6f0a1cd4a5623fdd75
|
Add createnapartcommand contents
|
scholer/cadnano2.5
|
cadnano/part/createnapartcommand.py
|
cadnano/part/createnapartcommand.py
|
from ast import literal_eval
from cadnano.cnproxy import UndoCommand
from cadnano.part.nucleicacidpart import NucleicAcidPart
class CreateNucleicAcidPartCommand(UndoCommand):
def __init__(self, document, grid_type, use_undostack):
# TODO[NF]: Docstring
super(CreateNucleicAcidPartCommand, self).__init__("Create NA Part")
self.document = document
self.grid_type = grid_type
self.use_undostack = use_undostack
def redo(self):
new_part = NucleicAcidPart(document=self.document, grid_type=self.grid_type)
self.document._addPart(new_part, use_undostack=self.use_undostack)
def undo(self):
self.document.deactivateActivePart()
|
mit
|
Python
|
|
899254d3bd064ba8e5653ad9081674b7af1495fa
|
fix capture=True
|
kjtanaka/fabric_hadoop
|
fabfile/openstack.py
|
fabfile/openstack.py
|
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import yaml
from fabric.api import task, local, settings, warn_only
from cuisine import file_exists
@task
def up():
""" Boot instances """
# call class OpenStack
op = OpenStack()
# Check if fingerprint exists on the list
op.check_key()
class OpenStack:
def __init__(self):
cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
cfg_file = '{0}/{1}'.format(cfg_dir, 'openstack.yml')
f = open(cfg_file)
self.cfg = yaml.safe_load(f)
self.cfg['key_file'] = os.path.abspath(os.path.expanduser(self.cfg['key_file']))
f.close()
self.key_fingerprint = \
local('ssh-keygen -l -f {}|awk \'{{print $2}}\''.format(self.cfg['key_file']), capture=True)
def check_key(self):
if not os.path.exists(self.cfg['key_file']):
print "{} doesn't exist".format(self.cfg['key_file'])
exit(1)
with settings(warn_only=True):
output = local('nova keypair-list|grep {}'.format(self.key_fingerprint), capture=True)
if not output.return_code == 0:
print "ERROR: your key is not registered yet."
exit(1)
if not output.split()[1] == self.cfg['key_name']:
print "your key is already registered with a different name."
exit(1)
#def check_image(self):
# with settings(warn_only=True):
|
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import yaml
from fabric.api import task, local, settings, warn_only
from cuisine import file_exists
@task
def up():
""" Boot instances """
# call class OpenStack
op = OpenStack()
# Check if fingerprint exists on the list
op.check_key()
class OpenStack:
def __init__(self):
cfg_dir = os.path.dirname(__file__).replace('fabfile','ymlfile')
cfg_file = '{0}/{1}'.format(cfg_dir, 'openstack.yml')
f = open(cfg_file)
self.cfg = yaml.safe_load(f)
self.cfg['key_file'] = os.path.abspath(os.path.expanduser(self.cfg['key_file']))
f.close()
self.key_fingerprint = \
local('ssh-keygen -l -f {}|awk \'{{print $2}}\''.format(self.cfg['key_file']), capture=True)
def check_key(self):
if not os.path.exists(self.cfg['key_file']):
print "{} doesn't exist".format(self.cfg['key_file'])
exit(1)
with settings(warn_only=True):
output = local('nova keypair-list|grep {}'.format(self.key_fingerprint))
print '#### ', output
if not output.return_code == 0:
print "ERROR: your key is not registered yet."
exit(1)
if not output.split()[1] == self.cfg['key_name']:
print "your key is already registered with a different name."
exit(1)
#def check_image(self):
# with settings(warn_only=True):
|
mit
|
Python
|
0727ad29721a3dad4c36113a299f5c67bda70822
|
Sort everything alphabetically on separate lines.
|
python/importlib_resources
|
importlib_resources/__init__.py
|
importlib_resources/__init__.py
|
"""Read resources contained within a package."""
import sys
__all__ = [
'Package',
'Resource',
'ResourceReader',
'contents',
'is_resource',
'open_binary',
'open_text',
'path',
'read_binary',
'read_text',
]
if sys.version_info >= (3,):
from importlib_resources._py3 import (
Package,
Resource,
contents,
is_resource,
open_binary,
open_text,
path,
read_binary,
read_text,
)
from importlib_resources.abc import ResourceReader
else:
from importlib_resources._py2 import (
contents,
is_resource,
open_binary,
open_text,
path,
read_binary,
read_text,
)
del __all__[:3]
__version__ = read_text('importlib_resources', 'version.txt').strip()
|
"""Read resources contained within a package."""
import sys
__all__ = [
'contents',
'is_resource',
'open_binary',
'open_text',
'path',
'read_binary',
'read_text',
'Package',
'Resource',
'ResourceReader',
]
if sys.version_info >= (3,):
from importlib_resources._py3 import (
Package, Resource, contents, is_resource, open_binary, open_text, path,
read_binary, read_text)
from importlib_resources.abc import ResourceReader
else:
from importlib_resources._py2 import (
contents, is_resource, open_binary, open_text, path, read_binary,
read_text)
del __all__[-3:]
__version__ = read_text('importlib_resources', 'version.txt').strip()
|
apache-2.0
|
Python
|
4d1c465e5c946ac17334e29e0ded7b6134533d12
|
Disable save in Crop multi roi and show the image instead
|
hadim/fiji_tools,hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_scripts
|
plugins/Scripts/Plugins/Crop_Multi_Roi.py
|
plugins/Scripts/Plugins/Crop_Multi_Roi.py
|
from ij import IJ
from ij.plugin.frame import RoiManager
from io.scif.config import SCIFIOConfig
from io.scif.img import ImageRegion
from io.scif.img import ImgOpener
from io.scif.img import ImgSaver
from net.imagej.axis import Axes
from net.imglib2.img.display.imagej import ImageJFunctions
import os
def main():
# Get current image filename
imp = IJ.getImage()
f = imp.getOriginalFileInfo()
if not f:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Iterate over all ROIs from ROI Manager
rois = RoiManager.getInstance()
if not rois:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
fname = os.path.join(f.directory, f.fileName)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i +1
IJ.log("Opening crop %i / %i" % (crop_id, len(rois_array)))
# Get ROI bounds
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
axes = [Axes.X, Axes.Y]
ranges = ["%i-%i" % (x, x+w), "%i-%i" % (y, y+h)]
config = SCIFIOConfig()
config.imgOpenerSetRegion(ImageRegion(axes, ranges))
opener = ImgOpener()
imps = opener.openImgs(fname, config)
imp = imps[0]
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, f.fileName)
crop_fname = os.path.join(f.directory, crop_basename)
imp.setName(crop_basename)
# Save cropped image
#IJ.log("Saving crop to %s" % crop_fname)
#saver = ImgSaver()
#saver.saveImg(crop_fname, imp)
# Show cropped image
ImageJFunctions.show(imp)
IJ.log('Done')
main()
|
from ij import IJ
from ij.plugin.frame import RoiManager
from io.scif.config import SCIFIOConfig
from io.scif.img import ImageRegion
from io.scif.img import ImgOpener
from io.scif.img import ImgSaver
from net.imagej.axis import Axes
import os
def main():
# Get current image filename
imp = IJ.getImage()
f = imp.getOriginalFileInfo()
if not f:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Iterate over all ROIs from ROI Manager
rois = RoiManager.getInstance()
if not rois:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
fname = os.path.join(f.directory, f.fileName)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i +1
IJ.log("Opening crop %i / %i" % (crop_id, len(rois_array)))
# Get ROI bounds
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
axes = [Axes.X, Axes.Y]
ranges = ["%i-%i" % (x, x+w), "%i-%i" % (y, y+h)]
config = SCIFIOConfig()
config.imgOpenerSetRegion(ImageRegion(axes, ranges))
opener = ImgOpener()
imps = opener.openImgs(fname, config)
imp = imps[0]
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, f.fileName)
crop_fname = os.path.join(f.directory, crop_basename)
IJ.log("Saving crop to %s" % crop_fname)
# Save cropped image
saver = ImgSaver()
saver.saveImg(crop_fname, imp)
IJ.log('Done')
main()
|
bsd-3-clause
|
Python
|
9a19c34a104aabd0c5b34734f587573d5766a4bd
|
support multi-file results
|
rtmclay/Themis,rtmclay/Themis,rtmclay/Themis
|
finishTest/Finish.py
|
finishTest/Finish.py
|
from __future__ import print_function
from BaseTask import BaseTask
from Engine import MasterTbl, Error, get_platform
from Dbg import Dbg
import os, json, time, platform
dbg = Dbg()
validA = ("passed", "failed", "diff")
comment_block = """
Test Results:
'notfinished': means that the test has started but not completed.
'failed': means that the test has started but not completed.
'notrun': test has not started running.
'diff' : Test has run but is different from gold copy.
'passed': Test has run and matches gold copy.
"""
class Finish(BaseTask):
def __init__(self,name):
super(Finish, self).__init__(name)
def __parse_input_fn(self, fnA):
result = "passed"
for fn in fnA:
if (not os.path.exists(fn)):
return "failed"
f = open(fn)
lineA = f.readlines()
f.close()
found = False
for line in lineA:
line = line.strip()
if (line[0] == "#" or len(line) < 1):
continue
found = True
idx = line.find(",")
if (idx > 0):
line = line[0:idx]
line = line.lower()
if (line != "passed"):
result = line
break
if (not result in validA or not found):
result = "failed"
break
return result
def execute(self, *args, **kwargs):
masterTbl = MasterTbl()
result_fn = masterTbl['result_fn']
runtime_fn = masterTbl['runtime_fn']
input_fnA = masterTbl['pargs']
result = self.__parse_input_fn(input_fnA)
my_result = { 'testresult' : result, "comment" : comment_block.split('\n') }
f = open(result_fn,"w")
f.write(json.dumps(my_result, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
if (not os.path.exists(runtime_fn)):
Error("Unable to open: ", runtime_fn)
f = open(runtime_fn)
runtime = json.loads(f.read())
f.close()
t1 = time.time()
runtime['T1'] = t1
runtime['TT'] = t1 - runtime['T0']
unameT = get_platform()
for k in unameT:
runtime[k] = unameT[k]
f = open(runtime_fn,"w")
f.write(json.dumps(runtime, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
|
from __future__ import print_function
from BaseTask import BaseTask
from Engine import MasterTbl, Error, get_platform
from Dbg import Dbg
import os, json, time, platform
dbg = Dbg()
validA = ("passed", "failed", "diff")
comment_block = """
Test Results:
'notfinished': means that the test has started but not completed.
'failed': means that the test has started but not completed.
'notrun': test has not started running.
'diff' : Test has run but is different from gold copy.
'passed': Test has run and matches gold copy.
"""
class Finish(BaseTask):
def __init__(self,name):
super(Finish, self).__init__(name)
def __parse_input_fn(self, fn):
if (not os.path.exists(fn)):
return "failed"
f = open(fn)
lineA = f.readlines()
f.close()
found = False
result = "passed"
for line in lineA:
line = line.strip()
if (line[0] == "#" or len(line) < 1):
continue
found = True
idx = line.find(",")
if (idx > 0):
line = line[0:idx]
line = line.lower()
if (line != "passed"):
result = line
break
if (not result in validA or not found):
result = "failed"
return result
def execute(self, *args, **kwargs):
masterTbl = MasterTbl()
result_fn = masterTbl['result_fn']
runtime_fn = masterTbl['runtime_fn']
input_fn = masterTbl['pargs'][0]
result = self.__parse_input_fn(input_fn)
my_result = { 'testresult' : result, "comment" : comment_block.split('\n') }
f = open(result_fn,"w")
f.write(json.dumps(my_result, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
if (not os.path.exists(runtime_fn)):
Error("Unable to open: ", runtime_fn)
f = open(runtime_fn)
runtime = json.loads(f.read())
f.close()
t1 = time.time()
runtime['T1'] = t1
runtime['TT'] = t1 - runtime['T0']
unameT = get_platform()
for k in unameT:
runtime[k] = unameT[k]
f = open(runtime_fn,"w")
f.write(json.dumps(runtime, sort_keys=True, indent=2, separators=(', ', ': ')))
f.close()
|
mit
|
Python
|
38efb136609b645b0076c0aa1481330f9e28ee51
|
Add a rule for matching packages by regex.
|
fedora-infra/fmn.rules,jeremycline/fmn,jeremycline/fmn,jeremycline/fmn
|
fmn/rules/generic.py
|
fmn/rules/generic.py
|
# Generic rules for FMN
import re
import fedmsg
import fmn.rules.utils
def user_filter(config, message, fasnick=None, *args, **kw):
""" All messages for a certain user
Use this rule to include messages that are associated with a
specific user.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
return fasnick in fedmsg.meta.msg2usernames(message, **config)
def not_user_filter(config, message, fasnick=None, *args, **kw):
""" All messages not concerning one or more users
Use this rule to exclude messages that are associated with one or more
users. Specify several users by separating them with a comma ','.
"""
fasnick = kw.get('fasnick', fasnick)
if not fasnick:
return False
fasnick = fasnick or [] and fasnick.split(',')
valid = True
for nick in fasnick:
if nick.strip() in fedmsg.meta.msg2usernames(message, **config):
valid = False
break
return valid
def user_package_filter(config, message, fasnick=None, *args, **kw):
""" All messages concerning user's packages
This rule includes messages that relate to packages where the
specified user has **commit** ACLs.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
user_packages = fmn.rules.utils.get_packages_of_user(config, fasnick)
msg_packages = fedmsg.meta.msg2packages(message, **config)
return user_packages.intersection(msg_packages)
def package_filter(config, message, package=None, *args, **kw):
""" All messages pertaining to a certain package
Use this rule to include messages that relate to a certain package
(*i.e., nethack*).
"""
package = kw.get('package', package)
if package:
return package in fedmsg.meta.msg2packages(message, **config)
def package_regex_filter(config, message, pattern=None, *args, **kw):
""" All messages pertaining to packages matching a given regex
Use this rule to include messages that relate to packages that match
particular regular expressions
(*i.e., (maven|javapackages-tools|maven-surefire)*).
"""
pattern = kw.get('pattern', pattern)
if pattern:
packages = fedmsg.meta.msg2packages(message, **config)
regex = re.compile(pattern)
return any([regex.match(package) for package in packages])
def trac_hosted_filter(config, message, project=None, *args, **kw):
""" Filter the messages for one or more fedorahosted projects
Adding this rule allows you to get notifications for one or more
`fedorahosted <https://fedorahosted.org>`_ project. Specify multiple
projects by separating them with a comma ','.
"""
project = kw.get('project', project)
link = fedmsg.meta.msg2link(message, **config)
if not link:
return False
project = project or [] and project.split(',')
valid = False
for proj in project:
if '://fedorahosted.org/%s/' % proj.strip() in link:
valid = True
return valid
|
# Generic rules for FMN
import fedmsg
import fmn.rules.utils
def user_filter(config, message, fasnick=None, *args, **kw):
""" All messages for a certain user
Use this rule to include messages that are associated with a
specific user.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
return fasnick in fedmsg.meta.msg2usernames(message, **config)
def not_user_filter(config, message, fasnick=None, *args, **kw):
""" All messages not concerning one or more users
Use this rule to exclude messages that are associated with one or more
users. Specify several users by separating them with a comma ','.
"""
fasnick = kw.get('fasnick', fasnick)
if not fasnick:
return False
fasnick = fasnick or [] and fasnick.split(',')
valid = True
for nick in fasnick:
if nick.strip() in fedmsg.meta.msg2usernames(message, **config):
valid = False
break
return valid
def user_package_filter(config, message, fasnick=None, *args, **kw):
""" All messages concerning user's packages
This rule includes messages that relate to packages where the
specified user has **commit** ACLs.
"""
fasnick = kw.get('fasnick', fasnick)
if fasnick:
user_packages = fmn.rules.utils.get_packages_of_user(config, fasnick)
msg_packages = fedmsg.meta.msg2packages(message, **config)
return user_packages.intersection(msg_packages)
def package_filter(config, message, package=None, *args, **kw):
""" All messages pertaining to a certain package
Use this rule to include messages that relate to a certain package
(*i.e., nethack*).
"""
package = kw.get('package', package)
if package:
return package in fedmsg.meta.msg2packages(message, **config)
def trac_hosted_filter(config, message, project=None, *args, **kw):
""" Filter the messages for one or more fedorahosted projects
Adding this rule allows you to get notifications for one or more
`fedorahosted <https://fedorahosted.org>`_ project. Specify multiple
projects by separating them with a comma ','.
"""
project = kw.get('project', project)
link = fedmsg.meta.msg2link(message, **config)
if not link:
return False
project = project or [] and project.split(',')
valid = False
for proj in project:
if '://fedorahosted.org/%s/' % proj.strip() in link:
valid = True
return valid
|
lgpl-2.1
|
Python
|
7f974b87c278ef009535271461b5e49686057a9a
|
Fix for django >= 1.10
|
grantmcconnaughey/django-avatar,jezdez/django-avatar,grantmcconnaughey/django-avatar,ad-m/django-avatar,ad-m/django-avatar,jezdez/django-avatar
|
avatar/management/commands/rebuild_avatars.py
|
avatar/management/commands/rebuild_avatars.py
|
from django.core.management.base import BaseCommand
from avatar.conf import settings
from avatar.models import Avatar
class Command(BaseCommand):
help = ("Regenerates avatar thumbnails for the sizes specified in "
"settings.AVATAR_AUTO_GENERATE_SIZES.")
def handle(self, *args, **options):
for avatar in Avatar.objects.all():
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
if options['verbosity'] != 0:
print("Rebuilding Avatar id=%s at size %s." % (avatar.id, size))
avatar.create_thumbnail(size)
|
from django.core.management.base import NoArgsCommand
from avatar.conf import settings
from avatar.models import Avatar
class Command(NoArgsCommand):
help = ("Regenerates avatar thumbnails for the sizes specified in "
"settings.AVATAR_AUTO_GENERATE_SIZES.")
def handle_noargs(self, **options):
for avatar in Avatar.objects.all():
for size in settings.AVATAR_AUTO_GENERATE_SIZES:
if options['verbosity'] != 0:
print("Rebuilding Avatar id=%s at size %s." % (avatar.id, size))
avatar.create_thumbnail(size)
|
bsd-3-clause
|
Python
|
0da189464703837e212bff06c24cc6eb5b62eeea
|
Fix name of room
|
apiaryio/black-belt
|
blackbelt/slack.py
|
blackbelt/slack.py
|
from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#engine-room'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
|
from slacker import Slacker
from blackbelt.config import config
class Slack(object):
def __init__(self, token=None):
if not token:
token = config['slack']['access_token']
slack = Slacker(token)
self.slack = slack
if not token:
raise ValueError("Can't do things with Slack without access token. Run bb init.")
self.token = token
def get_user_id(self):
return self.slack.auth.test().body['user_id']
def post_message(self, message, room):
return self.slack.chat.post_message(room, message, username = "Black Belt", icon_emoji = ":blackbelt:")
def post_message(message, room='#sre'):
client = Slack()
msg = "<@%s> %s" % (client.get_user_id(), message)
client.post_message(msg, room)
|
mit
|
Python
|
eb3a332cf5aeb6b213c333cbfba78b26b776db49
|
fix facebook api
|
scailer/django-social-publisher,scailer/django-social-publisher
|
social_publisher/backends/facebook.py
|
social_publisher/backends/facebook.py
|
# -*- coding: utf-8 -*-
from social_publisher import facebook
from social_publisher.backends import base
class FacebookBackend(base.BaseBackend):
name = 'facebook'
auth_provider = 'facebook'
def get_api(self, social_user):
return facebook.GraphAPI(social_user.extra_data.get('access_token'))
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
image = kwargs.get('image')
if image:
res = self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params={'image': image})
kwargs['object_attachment'] = res['id']
return self.get_api(social_user).post(
'{}/feed'.format(owner_id),
params=kwargs
)
return _post
class FacebookPostImageBackend(FacebookBackend):
name = 'facebook_post_image'
auth_provider = 'facebook'
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file>
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
return self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params=kwargs
)
return _post
|
# -*- coding: utf-8 -*-
from social_publisher import facebook
from social_publisher.backends import base
class FacebookBackend(base.BaseBackend):
name = 'facebook'
auth_provider = 'facebook'
def get_api(self, social_user):
return facebook.GraphAPI(social_user.extra_data.get('access_token'))
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
image = kwargs.get('image')
if image:
res = self.get_api(social_user).post(
'{}/photos'.format(owner_id), image=image)
kwargs['object_attachment'] = res['id']
return self.get_api(social_user).post(
'{}/feed'.format(owner_id),
params=kwargs
)
return _post
class FacebookPostImageBackend(FacebookBackend):
name = 'facebook_post_image'
auth_provider = 'facebook'
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file>
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
owner_id = owner_id or 'me'
return self.get_api(social_user).post(
'{}/photos'.format(owner_id),
params=kwargs
)
return _post
|
mit
|
Python
|
07c8888a3623ea40c4f2047e11445726e61e2438
|
Fix lint.
|
StackStorm/st2contrib,pearsontechnology/st2contrib,armab/st2contrib,tonybaloney/st2contrib,psychopenguin/st2contrib,pearsontechnology/st2contrib,pidah/st2contrib,StackStorm/st2contrib,armab/st2contrib,pidah/st2contrib,pearsontechnology/st2contrib,StackStorm/st2contrib,psychopenguin/st2contrib,armab/st2contrib,tonybaloney/st2contrib,pearsontechnology/st2contrib,pidah/st2contrib,tonybaloney/st2contrib
|
packs/csv/tests/test_action_parse.py
|
packs/csv/tests/test_action_parse.py
|
import unittest2
from parse_csv import ParseCSVAction
__all__ = [
'ParseCSVActionTestCase'
]
MOCK_DATA = """
first,last,year
name1,surename1,1990
""".strip()
class ParseCSVActionTestCase(unittest2.TestCase):
def test_run(self):
result = ParseCSVAction().run(data=MOCK_DATA, delimiter=',')
expected = [
['first', 'last', 'year'],
['name1', 'surename1', '1990']
]
self.assertEqual(result, expected)
|
import unittest2
from parse_csv import ParseCSVAction
__all__ = [
'ParseCSVActionTestCase'
]
MOCK_DATA = """
first,last,year
name1,surename1,1990
""".strip()
class ParseCSVActionTestCase(unittest2.TestCase):
def test_run(self):
result = ParseCSVAction().run(data=MOCK_DATA, delimiter=',')
expected = [
['first', 'last', 'year'],
['name1', 'surename1', '1990']
]
self.assertEqual(result, expected)
|
apache-2.0
|
Python
|
9c898d7e547b13bb289c0d1cada0bbd4078803dc
|
Allow passing of size_cutoff to preassembler methods.
|
pvtodorov/indra,johnbachman/belpy,sorgerlab/belpy,sorgerlab/indra,sorgerlab/indra,johnbachman/indra,johnbachman/indra,johnbachman/indra,johnbachman/belpy,bgyori/indra,sorgerlab/indra,pvtodorov/indra,bgyori/indra,johnbachman/belpy,pvtodorov/indra,sorgerlab/belpy,sorgerlab/belpy,bgyori/indra,pvtodorov/indra
|
indra/db/pre_assemble_script.py
|
indra/db/pre_assemble_script.py
|
import indra.tools.assemble_corpus as ac
from indra.db.util import get_statements, insert_pa_stmts
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
def make_unique_statement_set(preassembler, stmts):
stmt_groups = preassembler.get_stmt_matching_groups(stmts)
unique_stmts = []
for _, duplicates in stmt_groups:
# Get the first statement and add the evidence of all subsequent
# Statements to it
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
first_stmt = stmt.get_new_copy()
first_stmt.evidence.append(stmt.uuid)
# This should never be None or anything else
assert isinstance(first_stmt, type(stmt))
unique_stmts.append(first_stmt)
return unique_stmts
def get_match_key_maps(preassembler, unique_stmts, **generate_id_map_kwargs):
id_maps = preassembler.generate_id_maps(unique_stmts,
**generate_id_map_kwargs)
return {tuple([unique_stmts[idx].matches_key() for idx in idx_pair])
for idx_pair in id_maps}
def process_statements(stmts, **generate_id_map_kwargs):
stmts = ac.map_grounding(stmts)
stmts = ac.map_sequence(stmts)
pa = Preassembler(hierarchies)
unique_stmts = make_unique_statement_set(pa, stmts)
match_key_maps = get_match_key_maps(pa, unique_stmts,
**generate_id_map_kwargs)
return unique_stmts, match_key_maps
def preassemble_db_stmts(db, num_proc, *clauses):
"""Run pre-assembly on a set of statements in the database."""
stmts = get_statements(clauses, db=db, do_stmt_count=False)
unique_stmts, match_key_maps = process_statements(stmts, poolsize=num_proc)
insert_pa_stmts(db, unique_stmts)
return unique_stmts, match_key_maps
|
import indra.tools.assemble_corpus as ac
from indra.db.util import get_statements, insert_pa_stmts
from indra.preassembler import Preassembler
from indra.preassembler.hierarchy_manager import hierarchies
def make_unique_statement_set(preassembler, stmts):
stmt_groups = preassembler.get_stmt_matching_groups(stmts)
unique_stmts = []
for _, duplicates in stmt_groups:
# Get the first statement and add the evidence of all subsequent
# Statements to it
for stmt_ix, stmt in enumerate(duplicates):
if stmt_ix == 0:
first_stmt = stmt.get_new_copy()
first_stmt.evidence.append(stmt.uuid)
# This should never be None or anything else
assert isinstance(first_stmt, type(stmt))
unique_stmts.append(first_stmt)
return unique_stmts
def get_match_key_maps(preassembler, unique_stmts, num_procs=1):
id_maps = preassembler.generate_id_maps(unique_stmts, num_procs)
return [[unique_stmts[idx].matches_key() for idx in idx_pair]
for idx_pair in id_maps]
def process_statements(stmts, num_procs=1):
stmts = ac.map_grounding(stmts)
stmts = ac.map_sequence(stmts)
pa = Preassembler(hierarchies)
unique_stmts = make_unique_statement_set(pa, stmts)
match_key_maps = get_match_key_maps(pa, unique_stmts, num_procs)
return unique_stmts, match_key_maps
def preassemble_db_stmts(db, num_procs, *clauses):
"""Run pre-assembly on a set of statements in the database."""
stmts = get_statements(clauses, db=db, do_stmt_count=False)
pa_stmts = process_statements(stmts, num_procs)
insert_pa_stmts(db, pa_stmts)
return pa_stmts
|
bsd-2-clause
|
Python
|
37c65efa1b78abcc75d506554e6fb877678ec2f2
|
Fix a typo
|
editorsnotes/editorsnotes,editorsnotes/editorsnotes
|
editorsnotes/api/views/topics.py
|
editorsnotes/api/views/topics.py
|
from editorsnotes.main.models import Topic
from .. import filters as es_filters
from ..serializers.topics import TopicSerializer
from .base import BaseListAPIView, BaseDetailView, DeleteConfirmAPIView
from .mixins import (ElasticSearchListMixin, EmbeddedMarkupReferencesMixin,
HydraProjectPermissionsMixin)
__all__ = ['TopicList', 'TopicDetail', 'TopicConfirmDelete']
class TopicList(ElasticSearchListMixin, HydraProjectPermissionsMixin,
BaseListAPIView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
es_filter_backends = (
es_filters.ProjectFilterBackend,
es_filters.QFilterBackend,
es_filters.UpdaterFilterBackend,
)
hydra_project_perms = ('main.add_topic',)
class TopicDetail(EmbeddedMarkupReferencesMixin, HydraProjectPermissionsMixin,
BaseDetailView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
hydra_project_perms = ('main.change_topic', 'main.delete_topic',)
class TopicConfirmDelete(DeleteConfirmAPIView):
queryset = Topic.objects.all()
permissions = {
'GET': ('main.delete_topic',),
'HEAD': ('main.delete_topic',)
}
|
from editorsnotes.main.models import Topic
from .. import filters as es_filters
from ..serializers.topics import TopicSerializer
from .base import BaseListAPIView, BaseDetailView, DeleteConfirmAPIView
from .mixins import (ElasticSearchListMixin, EmbeddedMarkupReferencesMixin,
HydraProjectPermissionsMixin)
__all__ = ['TopicList', 'TopicDetail', 'TopicConfirmDelete']
class TopicList(ElasticSearchListMixin, HydraProjectPermissionsMixin,
BaseListAPIView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
es_filter_backends = (
es_filters.ProjectFilterBackend,
es_filters.QFilterBackend,
es_filters.UpdaterFilterBackend,
)
hydra_project_perms = ('main.add_note',)
class TopicDetail(EmbeddedMarkupReferencesMixin, HydraProjectPermissionsMixin,
BaseDetailView):
queryset = Topic.objects.all()
serializer_class = TopicSerializer
hydra_project_perms = ('main.change_note', 'main.delete_note',)
class TopicConfirmDelete(DeleteConfirmAPIView):
queryset = Topic.objects.all()
permissions = {
'GET': ('main.delete_topic',),
'HEAD': ('main.delete_topic',)
}
|
agpl-3.0
|
Python
|
0091c41d8dd064b40ccf35d4d24c01ae4438f028
|
Set sender in signal handlers
|
NUKnightLab/cityhallmonitor,NUKnightLab/cityhallmonitor,NUKnightLab/cityhallmonitor,NUKnightLab/cityhallmonitor
|
cityhallmonitor/signals/handlers.py
|
cityhallmonitor/signals/handlers.py
|
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils import timezone
from cityhallmonitor.models import DirtyFieldsModel
@receiver(pre_save, sender=DirtyFieldsModel)
def handle_pre_save(sender, instance, *args, **kwargs):
"""Set updated_at timestamp if model is actually dirty"""
if hasattr(sender, 'is_dirty'):
if instance.is_dirty():
instance.updated_at = timezone.now()
@receiver(post_save, sender=DirtyFieldsModel)
def handle_post_save(sender, instance, **kwargs):
"""Reset dirty state"""
if hasattr(sender, 'reset_state'):
instance.reset_state()
|
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.utils import timezone
@receiver(pre_save)
def handle_pre_save(sender, instance, *args, **kwargs):
"""
Set updated_at timestamp if model is actually dirty
"""
if hasattr(sender, 'is_dirty'):
if instance.is_dirty():
instance.updated_at = timezone.now()
@receiver(post_save)
def handle_post_save(sender, instance, **kwargs):
"""
Reset dirty state
"""
if hasattr(sender, 'reset_state'):
instance.reset_state()
|
mit
|
Python
|
41d6c18aee851c9b2430d74c51ef51b49948b0f4
|
raise version
|
xiezhen/brilws,xiezhen/brilws
|
brilws/_version.py
|
brilws/_version.py
|
__version__ = "3.5.0"
|
__version__ = "3.4.1"
|
mit
|
Python
|
6e2362351d9ccaa46a5a2bc69c4360e4faff166d
|
Add encoding spec to comply Python 2
|
fikr4n/iclib-python
|
iclib/qibla.py
|
iclib/qibla.py
|
# -*- coding: utf-8 -*-
from . import formula
def direction(lat, lng):
return formula.qibla(lat, lng)
def direction_dms(lat, lng):
return _dms(formula.qibla(lat, lng))
def direction_str(lat, lng, prec=0):
d, m, s = direction_dms(lat, lng)
# negative input might returns wrong result
return '{}° {}\' {:.{}f}"'.format(d, m, s, prec)
def _dms(deg):
seconds = deg * 3600
m, s = divmod(seconds, 60)
d, m = divmod(m, 60)
return (int(d), int(m), s)
|
from . import formula
def direction(lat, lng):
return formula.qibla(lat, lng)
def direction_dms(lat, lng):
return _dms(formula.qibla(lat, lng))
def direction_str(lat, lng, prec=0):
d, m, s = direction_dms(lat, lng)
# negative input might returns wrong result
return '{}° {}\' {:.{}f}"'.format(d, m, s, prec)
def _dms(deg):
seconds = deg * 3600
m, s = divmod(seconds, 60)
d, m = divmod(m, 60)
return (int(d), int(m), s)
|
apache-2.0
|
Python
|
9f1913ca658228c2c6551b2c8de1d48ddd73c8aa
|
raise version to 2
|
xiezhen/brilws,xiezhen/brilws
|
brilws/_version.py
|
brilws/_version.py
|
__version__ = "2.0.0"
|
__version__ = "1.0.3"
|
mit
|
Python
|
97831652f0d06236d83d0731813ffcdc44a4e190
|
Update pypi version
|
glasslion/fontdump
|
fontdump/__init__.py
|
fontdump/__init__.py
|
__version__ = '1.1.0'
|
__version__ = '0.1.0'
|
mit
|
Python
|
22461c6ddc1a6bff0ee8637139146b8531b3e0b4
|
improve python error message when tp fails to start
|
google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto
|
python/perfetto/trace_processor/shell.py
|
python/perfetto/trace_processor/shell.py
|
#!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
from urllib import request, error
from perfetto.trace_processor.platform import PlatformDelegate
# Default port that trace_processor_shell runs on
TP_PORT = 9001
def load_shell(bin_path: str, unique_port: bool, verbose: bool,
ingest_ftrace_in_raw: bool, platform_delegate: PlatformDelegate):
addr, port = platform_delegate.get_bind_addr(
port=0 if unique_port else TP_PORT)
url = f'{addr}:{str(port)}'
shell_path = platform_delegate.get_shell_path(bin_path=bin_path)
if os.name == 'nt' and not shell_path.endswith('.exe'):
tp_exec = [sys.executable, shell_path]
else:
tp_exec = [shell_path]
args = ['-D', '--http-port', str(port)]
if not ingest_ftrace_in_raw:
args.append('--no-ftrace-raw')
p = subprocess.Popen(
tp_exec + args,
stdout=subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL)
success = False
for i in range(3):
try:
if p.poll() is None:
_ = request.urlretrieve(f'http://{url}/status')
success = True
break
except error.URLError:
time.sleep(1)
if not success:
raise Exception(
"Trace processor failed to start. Try rerunning with "
"verbose=True in TraceProcessorConfig for more detailed "
"information and file a bug at https://goto.google.com/perfetto-bug "
"or https://github.com/google/perfetto/issues if necessary.")
return url, p
|
#!/usr/bin/env python3
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
from urllib import request, error
from perfetto.trace_processor.platform import PlatformDelegate
# Default port that trace_processor_shell runs on
TP_PORT = 9001
def load_shell(bin_path: str, unique_port: bool, verbose: bool,
ingest_ftrace_in_raw: bool, platform_delegate: PlatformDelegate):
addr, port = platform_delegate.get_bind_addr(
port=0 if unique_port else TP_PORT)
url = f'{addr}:{str(port)}'
shell_path = platform_delegate.get_shell_path(bin_path=bin_path)
if os.name == 'nt' and not shell_path.endswith('.exe'):
tp_exec = [sys.executable, shell_path]
else:
tp_exec = [shell_path]
args = ['-D', '--http-port', str(port)]
if not ingest_ftrace_in_raw:
args.append('--no-ftrace-raw')
p = subprocess.Popen(
tp_exec + args,
stdout=subprocess.DEVNULL,
stderr=None if verbose else subprocess.DEVNULL)
while True:
try:
if p.poll() != None:
if unique_port:
raise Exception(
"Random port allocation failed, please file a bug at https://goto.google.com/perfetto-bug"
)
raise Exception(
"Trace processor failed to start, please file a bug at https://goto.google.com/perfetto-bug"
)
_ = request.urlretrieve(f'http://{url}/status')
time.sleep(1)
break
except error.URLError:
pass
return url, p
|
apache-2.0
|
Python
|
c182e4f3d7df431fe5c542988fcef9f05825913c
|
Update the raw_parameter_script
|
mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase
|
examples/raw_parameter_script.py
|
examples/raw_parameter_script.py
|
""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't, the script can autodetect
how this file was run. With pure Python, it will initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The setUp() and tearDown() methods are also
now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
sb = MyTestClass("test_basic")
sb.browser = "chrome"
sb.headless = False
sb.headed = False
sb.start_page = None
sb.servername = "localhost"
sb.port = 4444
sb.data = None
sb.environment = "test"
sb.user_agent = None
sb.extension_zip = None
sb.extension_dir = None
sb.database_env = "test"
sb.log_path = "latest_logs/"
sb.archive_logs = False
sb.disable_csp = False
sb.enable_sync = False
sb.visual_baseline = False
sb.maximize_option = False
sb.save_screenshot_after_test = False
sb.timeout_multiplier = None
sb.pytest_html_report = None
sb.report_on = False
sb.with_db_reporting = False
sb.with_s3_logging = False
sb.js_checking_on = False
sb.is_pytest = False
sb.demo_mode = False
sb.demo_sleep = 1
sb.message_duration = 2
sb.settings_file = None
sb.user_data_dir = None
sb.proxy_string = None
sb.ad_block_on = False
sb.highlights = None
sb.check_js = False
sb.cap_file = None
sb.setUp()
try:
sb.test_basic()
finally:
sb.tearDown()
del sb
|
""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't, the script can autodetect
how this file was run. With pure Python, it will initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The setUp() and tearDown() methods are also
now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.headed = False
b.start_page = None
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.user_agent = None
b.extension_zip = None
b.extension_dir = None
b.database_env = "test"
b.log_path = "latest_logs/"
b.archive_logs = False
b.disable_csp = False
b.enable_sync = False
b.visual_baseline = False
b.maximize_option = False
b.save_screenshot_after_test = False
b.timeout_multiplier = None
b.pytest_html_report = None
b.report_on = False
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.settings_file = None
b.user_data_dir = None
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
|
mit
|
Python
|
a713bbb1226863b4417362019431de0266faa2d9
|
Update automateprojectscript.py
|
TomHulme/306-Swarm-Robotics-Project,TomHulme/306-Swarm-Robotics-Project,TomHulme/306-Swarm-Robotics-Project
|
automateprojectscript.py
|
automateprojectscript.py
|
#!/usr/bin/python
"""
This python file just runs all of the terminal commands needed to run the project. It just saves time not having to manually type in these commands every time you want to run the project.
At the moment it only works for the example project, as the project later develops this script might be updated if the other people in the team decide to use this.
This is a first version, next I might work on getting a seperate terminal open to run each robot in order for it to be easy to see the positions of each robot. At the moment, since only 1 terminal is used, all of the output is put in it (which of course makes it messy)
To run the script, simply open up a terminal and type: python automateprojectscript.py
Author: ttho618
"""
import os
from subprocess import Popen, PIPE, signal
from os.path import join
findRoscorePro = Popen("pgrep roscore", stdout=PIPE, shell=True)
killroscorePro = Popen("kill "+findRoscorePro.communicate()[0], shell=True)
# The world file to look for
lookfor = "myworld.world"
# I assume that the project on your computer is located within the home directory
for root, dirs, files in os.walk('/home', topdown=True):
#print "searching", root
if '.local' in dirs:
dirs.remove('.local')
if 'catkin_ws' in dirs: # If the project is within this directory, then you need to change this to rosbuild_ws
dirs.remove('catkin_ws')
if lookfor in files:
print "found: %s" % join(root, lookfor)
worldfile = join(root, lookfor)
print worldfile
# This would need to be changed if your project is named something different
rosmakePro= Popen('rosmake se306Project',shell=True)
rosmakePro.communicate() # Waits until rosmake has finished
core = Popen('roscore',shell=True)
stagePro = Popen('rosrun stage stageros %s' %worldfile,shell=True)
# These below lines would need to be changed to fit what you are wanting to run.
runNode= Popen('rosrun se306Project R0',shell=True)
runNode= Popen('rosrun se306Project R1',shell=True)
|
#!/usr/bin/python
"""
This python file just runs all of the terminal commands needed to run the project. It just saves time not having to manually type in these commands every time you want to run the project.
At the moment it only works for the example project, as the project later develops this script might be updated if the other people in the team decide to use this.
This is a first version, next I might work on getting a seperate terminal open to run each robot in order for it to be easy to see the positions of each robot. At the moment, since only 1 terminal is used, all of the output is put in it (which of course makes it messy)
To run the script, simply open up a terminal and type: python automateprojectscript.py
Author: ttho618
"""
import os
from subprocess import Popen, PIPE, signal
from os.path import join
findRoscorePro = Popen("pgrep roscore", stdout=PIPE, shell=True)
killroscorePro = Popen("kill "+findRoscorePro.communicate()[0], shell=True)
# The world file to look for
lookfor = "myworld.world"
# I assume that the project on your computer is located within the home directory
for root, dirs, files in os.walk('/home', topdown=True):
#print "searching", root
if '.local' in dirs:
dirs.remove('.local')
if 'catkin_ws' in dirs: # If the project is within this directory, then you need to change this to rosbuild_ws
dirs.remove('catkin_ws')
if lookfor in files:
print "found: %s" % join(root, lookfor)
worldfile = join(root, lookfor)
print worldfile
core = Popen('roscore',shell=True)
stagePro = Popen('rosrun stage stageros %s' %worldfile,shell=True)
# These below lines would need to be changed to fit what you are wanting to run.
runNode= Popen('rosrun se306Project R0',shell=True)
runNode= Popen('rosrun se306Project R1',shell=True)
|
apache-2.0
|
Python
|
3f1d30c2aeff73bb4863f2d0fd0660a264715739
|
Tidy up
|
petr-tik/chess_app,petr-tik/chess_app,petr-tik/chess_app
|
src/planner.py
|
src/planner.py
|
from collections import deque
class GamePlan(object):
"""
initialise the tournament object with an overall list of players' IDs
input:
a list of players
output:
a list (len = number of rounds) of lists of tuples
with players' names (maybe change to IDs from db) in white, black order
GamePlans with odd number of players have each person sitting out
Created as a tuple with ('_BYE', 'real player')
Template needs to check for '_BYE' in each tuple and
"""
def __init__(self, players):
self.players = list(players)
def berger_robin(self, players):
"""
Input:
array of player names/ids
Returns:
tournament - an array of hashmaps,
each containing matches and bye for the round
taken from
https://en.wikipedia.org/wiki/Round-robin_tournament#Scheduling_algorithm
"""
number_of_players = len(players)
shift = number_of_players / 2
last = players.pop()
pl_deque = deque(players)
tournament = []
for stage in xrange(number_of_players - 1):
round_dict = {'matches': [], 'bye': "__NONE"}
if last == '_BYE':
round_dict['bye'] = pl_deque[0]
else:
if stage % 2 == 0:
round_dict['matches'].append((last, pl_deque[0]))
else:
round_dict['matches'].append((pl_deque[0], last))
other_games = [(pl_deque[idx], pl_deque[idx + 1])
for idx in xrange(1, (len(pl_deque) - 1), 2)]
round_dict['matches'] += other_games
tournament.append(round_dict)
pl_deque.rotate(shift) # for the next for-loop iteration
return tournament
def generate(self):
players = self.players
if len(players) % 2 == 1:
players.append('_BYE')
return self.berger_robin(players)
|
from collections import deque
class GamePlan(object):
"""
initialise the tournament object with an overall list of players' IDs
input:
a list of players
output:
a list (len = number of rounds) of lists of tuples
with players' names (maybe change to IDs from db) in white, black order
GamePlans with odd number of players have each person sitting out
Created as a tuple with ('_BYE', 'real player')
Template needs to check for '_BYE' in each tuple and
"""
def __init__(self, players):
self.players = list(players)
def berger_robin(self, players):
"""
Input:
array of player names/ids
Returns:
tournament - an array of hashmaps,
each containing matches and bye for the round
taken from
https://en.wikipedia.org/wiki/Round-robin_tournament#Scheduling_algorithm
"""
number_of_players = len(players)
shift = number_of_players / 2
last = players.pop()
pl_deque = deque(players)
tournament = []
for x in xrange(number_of_players - 1):
round_dict = {'matches': [], 'bye': "__NONE"}
if last == '_BYE':
round_dict['bye'] = pl_deque[0]
else:
if x % 2 == 0:
round_dict['matches'].append((last, pl_deque[0]))
else:
round_dict['matches'].append((pl_deque[0], last))
other_games = [(pl_deque[idx], pl_deque[idx + 1])
for idx in xrange(1, (len(pl_deque) - 1), 2)]
round_dict['matches'] += other_games
tournament.append(round_dict)
pl_deque.rotate(shift) # for the next for-loop iteration
return tournament
def generate(self):
if len(self.players) % 2 == 0:
players = self.players
else:
players = self.players
players.append('_BYE')
return self.berger_robin(players)
|
mit
|
Python
|
65783ec0baac5886232a5334905a748750b3c0c2
|
fix NameError
|
onelab-eu/sfa,yippeecw/sfa,yippeecw/sfa,onelab-eu/sfa,yippeecw/sfa,onelab-eu/sfa
|
sfa/methods/Update.py
|
sfa/methods/Update.py
|
### $Id: update.py 16477 2010-01-05 16:31:37Z thierry $
### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/update.py $
import time
from sfa.util.faults import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.credential import Credential
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def call(self, record_dict, creds):
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions
api.auth.verify_object_permission(record_dict.get('hrn', ''))
# log
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, None, self.name))
manager = self.api.get_interface_manager()
return manager.update(self.api, record_dict)
|
### $Id: update.py 16477 2010-01-05 16:31:37Z thierry $
### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/update.py $
import time
from sfa.util.faults import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.credential import Credential
class Update(Method):
"""
Update an object in the registry. Currently, this only updates the
PLC information associated with the record. The SFA fields (name, type,
GID) are fixed.
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
@return 1 if successful, faults otherwise
"""
interfaces = ['registry']
accepts = [
Parameter(dict, "Record dictionary to be updated"),
Parameter(str, "Credential string"),
]
returns = Parameter(int, "1 if successful")
def call(self, record_dict, creds):
# validate the cred
valid_creds = self.api.auth.checkCredentials(creds, "update")
# verify permissions
api.auth.verify_object_permission(record.get('hrn', ''))
# log
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, None, self.name))
manager = self.api.get_interface_manager()
return manager.update(self.api, record_dict)
|
mit
|
Python
|
d307b65f8bf5f9ae8eaaefa071fd2055304a6725
|
Remove custom form from admin.
|
tiagovaz/saskatoon,tiagovaz/saskatoon,tiagovaz/saskatoon,tiagovaz/saskatoon
|
saskatoon/harvest/admin.py
|
saskatoon/harvest/admin.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from forms import RFPForm, PropertyForm, HarvestForm, HarvestYieldForm, EquipmentForm
from member.models import *
from harvest.models import *
from harvest.forms import *
class PropertyInline(admin.TabularInline):
model = Property
extra = 0
class PersonInline(admin.TabularInline):
model = RequestForParticipation
verbose_name = "Cueilleurs pour cette récolte"
verbose_name_plural = "Cueilleurs pour cette récolte"
form = RFPForm
exclude = ['creation_date', 'confirmation_date']
extra = 3
class OrganizationAdmin(admin.ModelAdmin):
inlines = [
PropertyInline,
]
search_fields = ['name', 'description']
class HarvestYieldInline(admin.TabularInline):
model = HarvestYield
form = HarvestYieldForm
class HarvestAdmin(admin.ModelAdmin):
#form = HarvestForm
inlines = (PersonInline, HarvestYieldInline)
class RequestForParticipationAdmin(admin.ModelAdmin):
form = RFPForm
class EquipmentAdmin(admin.ModelAdmin):
form = EquipmentForm
class PropertyImageInline(admin.TabularInline):
model = PropertyImage
extra = 3
class PropertyAdmin(admin.ModelAdmin):
model = Property
inlines = [ PropertyImageInline, ]
form = PropertyForm
admin.site.register(Property, PropertyAdmin)
admin.site.register(Harvest, HarvestAdmin)
admin.site.register(RequestForParticipation, RequestForParticipationAdmin)
admin.site.register(TreeType)
admin.site.register(Equipment, EquipmentAdmin)
admin.site.register(EquipmentType)
admin.site.register(HarvestYield)
admin.site.register(Comment)
admin.site.register(Actor)
admin.site.register(Language)
admin.site.register(Person)
admin.site.register(Organization)
admin.site.register(Neighborhood)
admin.site.register(City)
admin.site.register(State)
admin.site.register(Country)
admin.site.register(PropertyImage)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from forms import RFPForm, PropertyForm, HarvestForm, HarvestYieldForm, EquipmentForm
from member.models import *
from harvest.models import *
from harvest.forms import *
class PropertyInline(admin.TabularInline):
model = Property
extra = 0
class PersonInline(admin.TabularInline):
model = RequestForParticipation
verbose_name = "Cueilleurs pour cette récolte"
verbose_name_plural = "Cueilleurs pour cette récolte"
form = RFPForm
exclude = ['creation_date', 'confirmation_date']
extra = 3
class OrganizationAdmin(admin.ModelAdmin):
inlines = [
PropertyInline,
]
search_fields = ['name', 'description']
class HarvestYieldInline(admin.TabularInline):
model = HarvestYield
form = HarvestYieldForm
class HarvestAdmin(admin.ModelAdmin):
form = HarvestForm
inlines = (PersonInline, HarvestYieldInline)
class RequestForParticipationAdmin(admin.ModelAdmin):
form = RFPForm
class EquipmentAdmin(admin.ModelAdmin):
form = EquipmentForm
class PropertyImageInline(admin.TabularInline):
model = PropertyImage
extra = 3
class PropertyAdmin(admin.ModelAdmin):
model = Property
inlines = [ PropertyImageInline, ]
form = PropertyForm
admin.site.register(Property, PropertyAdmin)
admin.site.register(Harvest, HarvestAdmin)
admin.site.register(RequestForParticipation, RequestForParticipationAdmin)
admin.site.register(TreeType)
admin.site.register(Equipment, EquipmentAdmin)
admin.site.register(EquipmentType)
admin.site.register(HarvestYield)
admin.site.register(Comment)
admin.site.register(Actor)
admin.site.register(Language)
admin.site.register(Person)
admin.site.register(Organization)
admin.site.register(Neighborhood)
admin.site.register(City)
admin.site.register(State)
admin.site.register(Country)
admin.site.register(PropertyImage)
|
agpl-3.0
|
Python
|
5bb92bea9d910c788efa3ea5b7ca41499d92be26
|
update cuba.py with the autogenerated one
|
simphony/simphony-common
|
simphony/core/cuba.py
|
simphony/core/cuba.py
|
# code auto-generated by the cuba-generate.py script.
from enum import IntEnum, unique
@unique
class CUBA(IntEnum):
NAME = 1
DIRECTION = 3
STATUS = 4
LABEL = 5
MATERIAL_ID = 6
CHEMICAL_SPECIE = 7
MATERIAL_TYPE = 8
SHAPE_CENTER = 9
SHAPE_LENGTH_UC = 10
SHAPE_LENGTH = 11
SHAPE_RADIUS = 12
SHAPE_SIDE = 13
CRYSTAL_STORAGE = 14
NAME_UC = 15
LATTICE_VECTORS = 16
SYMMETRY_LATTICE_VECTORS = 17
OCCUPANCY = 18
BOND_LABEL = 19
BOND_TYPE = 20
VELOCITY = 21
ACCELERATION = 22
NUMBER_OF_POINTS = 23
RADIUS = 24
SIZE = 25
MASS = 26
VOLUME = 27
ANGULAR_VELOCITY = 28
ANGULAR_ACCELERATION = 29
SIMULATION_DOMAIN_DIMENSIONS = 30
SIMULATION_DOMAIN_ORIGIN = 31
DYNAMIC_VISCOSITY = 32
KINEMATIC_VISCOSITY = 33
DIFFUSION_COEFFICIENT = 34
PROBABILITY_COEFFICIENT = 35
FRICTION_COEFFICIENT = 36
SCALING_COEFFICIENT = 37
EQUATION_OF_STATE_COEFFICIENT = 38
CONTANCT_ANGLE = 39
AMPHIPHILICITY = 40
PHASE_INTERACTION_STRENGTH = 41
HAMAKER_CONSTANT = 42
ZETA_POTENTIAL = 43
ION_VALENCE_EFFECT = 44
DEBYE_LENGTH = 45
SMOOTHING_LENGTH = 46
LATTICE_SPACING = 47
TIME_STEP = 48
NUMBER_OF_TIME_STEPS = 49
FORCE = 50
TORQUE = 51
DENSITY = 52
CONCENTRATION = 53
PRESSURE = 54
TEMPERATURE = 55
DISTRIBUTION = 56
ORDER_PARAMETER = 57
ORIGINAL_POSITION = 58
DELTA_DISPLACEMENT = 59
EXTERNAL_APPLIED_FORCE = 60
EULER_ANGLES = 61
SPHERICITY = 62
YOUNG_MODULUS = 63
POISSON_RATIO = 64
LN_OF_RESTITUTION_COEFFICIENT = 65
ROLLING_FRICTION = 66
VOLUME_FRACTION = 67
|
from enum import IntEnum, unique
@unique
class CUBA(IntEnum):
NAME = 0
DIRECTION = 1
STATUS = 2
LABEL = 3
MATERIAL_ID = 4
MATERIAL_TYPE = 5
SHAPE_CENTER = 6
SHAPE_LENGTH_UC = 7
SHAPE_LENGTH = 8
SHAPE_RADIUS = 9
SHAPE_SIDE = 10
CRYSTAL_STORAGE = 11
NAME_UC = 12
LATTICE_VECTORS = 13
SYMMETRY_LATTICE_VECTORS = 14
OCCUPANCY = 15
BOND_LABEL = 16
BOND_TYPE = 17
VELOCITY = 18
ACCELERATION = 19
NUMBER_OF_POINTS = 20
RADIUS = 21
SIZE = 22
MASS = 23
VOLUME = 24
ANGULAR_VELOCITY = 25
ANGULAR_ACCELERATION = 26
SIMULATION_DOMAIN_DIMENSIONS = 27
SIMULATION_DOMAIN_ORIGIN = 28
DYNAMIC_VISCOSITY = 29
KINEMATIC_VISCOSITY = 30
DIFFUSION_COEFFICIENT = 31
PROBABILITY_COEFFICIENT = 32
FRICTION_COEFFICIENT = 33
SCALING_COEFFICIENT = 34
EQUATION_OF_STATE_COEFFICIENT = 35
CONTANCT_ANGLE = 36
AMPHIPHILICITY = 37
PHASE_INTERACTION_STRENGTH = 38
HAMAKER_CONSTANT = 39
ZETA_POTENTIAL = 40
ION_VALENCE_EFFECT = 41
DEBYE_LENGTH = 42
SMOOTHING_LENGTH = 43
LATTICE_SPACING = 44
TIME_STEP = 45
NUMBEROF_TIME_STEPS = 46
FORCE = 47
TORQUE = 48
DENSITY = 49
CONCENTRATION = 50
PRESSURE = 51
TEMPERATURE = 52
DISTRIBUTION = 53
ORDER_PARAMETER = 54
ORIGINAL_POSITION = 55
DELTA_DISPLACEMENT = 56
EXTERNAL_APPLIED_FORCE = 57
EULE_RANGLES = 58
SPHERICITY = 59
YOUNG_MODULUS = 60
POISSON_RATIO = 61
LN_OF_RESTITUTION_COEFFICIENT = 62
ROLLING_FRICTION = 63
VOLUME_FRACTION = 64
|
bsd-2-clause
|
Python
|
e28a41e5996651aefdf7966ead73310a5a761040
|
fix flake8 violation
|
simphony/simphony-common
|
simphony/cuds/bond.py
|
simphony/cuds/bond.py
|
class Bond(object):
"""
Bond entity
"""
def __init__(self, id, particles, data=None):
self.id = id
self.particles = particles
if data is None:
self.data = {}
else:
self.data = data
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.id == other.id and
self.particles == other.particles and
self.data == self.data)
else:
return False
def __ne__(self, other):
return not self == other
|
class Bond(object):
"""
Bond entity
"""
def __init__(self, id, particles, data=None):
self.id = id
self.particles = particles
if data is None:
self.data = {}
else:
self.data = data
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.id == other.id and
self.particles == other.particles and
self.data == self.data)
else:
return False
def __ne__(self, other):
return not self == other
|
bsd-2-clause
|
Python
|
2e2f6d2a6480a4ca43c76e6559cfe6aadc434a8b
|
change to dumps
|
lordmuffin/aws-cfn-lambdawebhook,lordmuffin/aws-cfn-lambdawebhook
|
functions/webhook.py
|
functions/webhook.py
|
#!/usr/bin/python
# Written by: Andrew Jackson
# This is used to send a JSON payload to a webhook.
import json
import logging
import os
import time
import uuid
import boto3
import requests
import decimal
#def default(obj):
# if isinstance(obj, decimal.Decimal):
# return int(obj)
# return o.__dict__
def handler(event, context):
print "event.dump = " + json.dumps(event)
data = json.dumps(event)
url = data['webhookurl']
payload = data['payload']
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload))
#print(r.text)
|
#!/usr/bin/python
# Written by: Andrew Jackson
# This is used to send a JSON payload to a webhook.
import json
import logging
import os
import time
import uuid
import boto3
import requests
import decimal
#def default(obj):
# if isinstance(obj, decimal.Decimal):
# return int(obj)
# return o.__dict__
def handler(event, context):
print "event.dump = " + json.dumps(event)
data = json.loads(event)
url = data['webhookurl']
payload = data['payload']
headers = {'content-type': 'application/json'}
r = requests.post(url, data=json.dumps(payload))
print(r.text)
|
mit
|
Python
|
f83369a263fb606a6f92b62a45d72e8faf0f1770
|
Add RunGM and RunBench steps for Android Review URL: https://codereview.appspot.com/5987049
|
google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot
|
master/skia_master_scripts/android_factory.py
|
master/skia_master_scripts/android_factory.py
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def Build(self, clobber=None):
"""Build and return the complete BuildFactory.
clobber: boolean indicating whether we should clean before building
"""
if clobber is None:
clobber = self._default_clobber
if clobber:
self._skia_cmd_obj.AddClean()
self._skia_cmd_obj.AddRunCommand(
command='../android/bin/android_make all -d nexus_s %s' % (
self._make_flags),
description='BuildAll')
self.PushBinaryToDeviceAndRun(binary_name='tests', description='RunTests')
self.PushBinaryToDeviceAndRun(binary_name='gm',
arguments='--nopdf --noreplay',
description='RunGM')
self.PushBinaryToDeviceAndRun(binary_name='bench', description='RunBench')
return self._factory
def PushBinaryToDeviceAndRun(self, binary_name, arguments='',
description=None, timeout=None):
"""Adds a build step: push a binary file to the USB-connected Android
device and run it.
binary_name: which binary to run on the device
arguments: additional arguments to pass to the binary when running it
description: text description (e.g., 'RunTests')
timeout: timeout in seconds, or None to use the default timeout
The shell command (running on the buildbot slave) will exit with a nonzero
return code if and only if the command running on the Android device
exits with a nonzero return code... so a nonzero return code from the
command running on the Android device will turn the buildbot red.
"""
if not description:
description = 'Run %s' % binary_name
path_to_adb = self.TargetPathJoin('..', 'android', 'bin', 'linux', 'adb')
command_list = [
'%s root' % path_to_adb,
'%s remount' % path_to_adb,
'%s push out/%s/%s /system/bin/skia_%s' % (
path_to_adb, self._configuration, binary_name, binary_name),
'%s logcat -c' % path_to_adb,
'STDOUT=$(%s shell "skia_%s %s && echo ADB_SHELL_SUCCESS")' % (
path_to_adb, binary_name, arguments),
'echo $STDOUT',
'%s logcat -d' % path_to_adb,
'echo $STDOUT | grep ADB_SHELL_SUCCESS',
]
self._skia_cmd_obj.AddRunCommandList(
command_list=command_list, description=description)
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def Build(self, clobber=None):
"""Build and return the complete BuildFactory.
clobber: boolean indicating whether we should clean before building
"""
if clobber is None:
clobber = self._default_clobber
if clobber:
self._skia_cmd_obj.AddClean()
self._skia_cmd_obj.AddRunCommand(
command='../android/bin/android_make all -d nexus_s %s' % (
self._make_flags),
description='BuildAll')
self.PushBinaryToDeviceAndRun(binary_name='tests', description='RunTests')
return self._factory
def PushBinaryToDeviceAndRun(self, binary_name, description, timeout=None):
"""Adds a build step: push a binary file to the USB-connected Android
device and run it.
binary_name: which binary to run on the device
description: text description (e.g., 'RunTests')
timeout: timeout in seconds, or None to use the default timeout
The shell command (running on the buildbot slave) will exit with a nonzero
return code if and only if the command running on the Android device
exits with a nonzero return code... so a nonzero return code from the
command running on the Android device will turn the buildbot red.
"""
path_to_adb = self.TargetPathJoin('..', 'android', 'bin', 'linux', 'adb')
command_list = [
'%s root' % path_to_adb,
'%s remount' % path_to_adb,
'%s push out/%s/%s /system/bin/skia_%s' % (
path_to_adb, self._configuration, binary_name, binary_name),
'%s logcat -c' % path_to_adb,
'STDOUT=$(%s shell "skia_%s && echo ADB_SHELL_SUCCESS")' % (
path_to_adb, binary_name),
'echo $STDOUT',
'%s logcat -d' % path_to_adb,
'echo $STDOUT | grep ADB_SHELL_SUCCESS',
]
self._skia_cmd_obj.AddRunCommandList(
command_list=command_list, description=description)
|
bsd-3-clause
|
Python
|
984422fe3fb0b34a17e42910a9c1b98afa572452
|
Revert r9607 -- it caused a BuildbotSelfTest failure
|
Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot
|
master/skia_master_scripts/android_factory.py
|
master/skia_master_scripts/android_factory.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from buildbot.process.properties import WithProperties
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def __init__(self, device, **kwargs):
""" Instantiates an AndroidFactory with properties and build steps specific
to Android devices.
device: string indicating which Android device type we are targeting
"""
skia_factory.SkiaFactory.__init__(self, bench_pictures_cfg=device,
deps_target_os='android',
flavor='android',
build_targets=['all'],
**kwargs)
self._device = device
self._common_args += ['--device', self._device,
'--serial', WithProperties('%(serial:-None)s'),
'--has_root', WithProperties('%(has_root:-True)s'),
'--android_sdk_root',
WithProperties('%(android_sdk_root)s')]
self._default_clobber = True
def CompareGMs(self):
""" Run the "skdiff" tool to compare the "actual" GM images we just
generated to the baselines in _gm_image_subdir. """
# We have bypass the Android-flavored compile in order to build SkDiff for
# the host.
self.AddSlaveScript(script='compile.py',
description='BuildSkDiff',
is_rebaseline_step=True,
args=['--target', 'tools',
'--gyp_defines',
' '.join('%s=%s' % (k, v)
for k, v in self._gyp_defines.items())])
skia_factory.SkiaFactory.CompareGMs(self)
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility class to build the Skia master BuildFactory's for Android buildbots.
Overrides SkiaFactory with any Android-specific steps."""
from buildbot.process.properties import WithProperties
from skia_master_scripts import factory as skia_factory
class AndroidFactory(skia_factory.SkiaFactory):
"""Overrides for Android builds."""
def __init__(self, device, **kwargs):
""" Instantiates an AndroidFactory with properties and build steps specific
to Android devices.
device: string indicating which Android device type we are targeting
"""
skia_factory.SkiaFactory.__init__(self, bench_pictures_cfg=device,
deps_target_os='android',
flavor='android',
build_targets=['all'],
**kwargs)
self._device = device
self._common_args += ['--device', self._device,
'--serial', WithProperties('%(serial:-None)s'),
'--has_root', WithProperties('%(has_root:-True)s'),
'--android_sdk_root',
WithProperties('%(android_sdk_root)s')]
self._default_clobber = True
def PreRender(self):
""" Before chaining to SkiaFactory.PreRender(), build tools (skdiff,
skimage) that we might need on the buildslave host machine. """
# We bypass the Android-flavored compile in order to build tools for
# the host.
self.AddSlaveScript(script='compile.py',
description='BuildHostTools',
is_rebaseline_step=True,
args=['--target', 'tools',
'--gyp_defines',
' '.join('%s=%s' % (k, v)
for k, v in self._gyp_defines.items())])
skia_factory.SkiaFactory.PreRender(self)
|
bsd-3-clause
|
Python
|
4b740ddb11fb5c4b2b29bc6eef0a5569349272f8
|
make random_metadata compliant
|
planetlabs/datalake,planetlabs/datalake-common,planetlabs/datalake,planetlabs/atl,planetlabs/datalake,planetlabs/datalake
|
datalake_common/tests/conftest.py
|
datalake_common/tests/conftest.py
|
import pytest
import random
import string
from datetime import datetime, timedelta
@pytest.fixture
def basic_metadata():
return {
'version': 0,
'start': 1426809600000,
'end': 1426895999999,
'where': 'nebraska',
'what': 'apache',
'hash': '12345'
}
def random_word(length):
return ''.join(random.choice(string.lowercase) for i in xrange(length))
def random_hex(length):
return ('%0' + str(length) + 'x') % random.randrange(16**length)
def random_interval():
now = datetime.now()
start = now - timedelta(days=random.randint(0, 365*3))
end = start - timedelta(days=random.randint(1, 10))
return start.isoformat(), end.isoformat()
def random_work_id():
if random.randint(0, 1):
return None
return '{}-{}'.format(random_word(5), random.randint(0,2**15))
@pytest.fixture
def random_metadata():
start, end = random_interval()
return {
'version': 0,
'start': start,
'end': end,
'work_id': random_work_id(),
'where': random_word(10),
'what': random_word(10),
'id': random_hex(40),
'hash': random_hex(40),
}
|
import pytest
import random
import string
from datetime import datetime, timedelta
@pytest.fixture
def basic_metadata():
return {
'version': 0,
'start': 1426809600000,
'end': 1426895999999,
'where': 'nebraska',
'what': 'apache',
'hash': '12345'
}
def random_word(length):
return ''.join(random.choice(string.lowercase) for i in xrange(length))
def random_interval():
now = datetime.now()
start = now - timedelta(days=random.randint(0, 365*3))
end = start - timedelta(days=random.randint(1, 10))
return start.isoformat(), end.isoformat()
@pytest.fixture
def random_metadata():
start, end = random_interval()
return {
'version': 0,
'start': start,
'end': end,
'where': random_word(10),
'what': random_word(10),
}
|
apache-2.0
|
Python
|
5d82c2d9f6d2874ae4621edb4dc1e6455652666b
|
Remove Dropout and unnecessary imports
|
kemaswill/keras
|
examples/imdb_fasttext.py
|
examples/imdb_fasttext.py
|
'''This example demonstrates the use of fasttext for text classification
Based on Joulin et al's paper:
Bags of Tricks for Efficient Text Classification
https://arxiv.org/abs/1607.01759
Can achieve accuracy around 88% after 5 epochs in 70s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Embedding
from keras.layers import AveragePooling1D
from keras.datasets import imdb
from keras import backend as K
# set parameters:
max_features = 20000
maxlen = 400
batch_size = 32
embedding_dims = 20
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
# we add a AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_length=model.output_shape[1]))
# We flatten the output of the conv layer
model.add(Flatten())
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
|
'''This example demonstrates the use of fasttext for text classification
Based on Joulin et al's paper:
Bags of Tricks for Efficient Text Classification
https://arxiv.org/abs/1607.01759
Can achieve accuracy around 88% after 5 epochs in 70s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import Embedding
from keras.layers import AveragePooling1D
from keras.datasets import imdb
from keras import backend as K
# set parameters:
max_features = 20000
maxlen = 400
batch_size = 32
embedding_dims = 20
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
# we add a AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_length=model.output_shape[1]))
# We flatten the output of the conv layer,
# so that we can add a dense layer:
model.add(Flatten())
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
|
mit
|
Python
|
20ef3aed661d5b77bedf48df9ed6917e24319c01
|
Fix typo
|
holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS
|
factory/glideFactoryLogParser.py
|
factory/glideFactoryLogParser.py
|
#
# Description:
# This module implements classes to track
# changes in glidein status logs
#
# Author:
# Igor Sfiligoi (Feb 2nd 2007)
#
import os, os.path
import condorLogParser
# for now it is just a constructor wrapper
# Further on it will need to implement glidein exit code checks
class dirSummaryTimings(condorLogParser.dirSummaryTimings):
def __init__(self,dirname,client_name,inactive_files=None):
condorLogParser.dirSummaryTimings.__init__(self,dirname,log_prefix="condor_activity_",log_suffix="_"+client_name+".log",inactive_files=inactive_files)
|
#
# Description:
# This module implements classes to track
# changes in glidein status logs
#
# Author:
# Igor Sfiligoi (Feb 2nd 2007)
#
import os, os.path
import condorLogParser
# for now it is just a constructor wrapper
# Further on it will need to implement glidein exit code checks
class dirSummaryTimings(condorLogParser.dirSummary):
def __init__(self,dirname,client_name,inactive_files=None):
condorLogParser.dirSummaryTimings.__init__(self,dirname,log_prefix="condor_activity_",log_suffix="_"+client_name+".log",inactive_files=inactive_files)
|
bsd-3-clause
|
Python
|
c47a51db4f7ccc514aa687a1859ed592574d1a58
|
Change API Endpoint to BzAPI Compatibility Layer
|
mozilla/bztools,anoopvalluthadam/bztools,mozilla/relman-auto-nag,mozilla/relman-auto-nag,mozilla/relman-auto-nag
|
bugzilla/agents.py
|
bugzilla/agents.py
|
from bugzilla.models import *
from bugzilla.utils import *
class InvalidAPI_ROOT(Exception):
def __str__(self):
return "Invalid API url specified. " + \
"Please set BZ_API_ROOT in your environment " + \
"or pass it to the agent constructor"
class BugzillaAgent(object):
def __init__(self, api_root=None, username=None, password=None):
if not api_root:
api_root = os.environ.get('BZ_API_ROOT')
if not api_root:
raise InvalidAPI_ROOT
self.API_ROOT = api_root
self.username, self.password = username, password
def get_bug(self, bug, include_fields='_default,token,cc,keywords,whiteboard,comments', exclude_fields=None, params={}):
params['include_fields'] = [include_fields]
params['exclude_fields'] = [exclude_fields]
url = urljoin(self.API_ROOT, 'bug/%s?%s' % (bug, self.qs(**params)))
return Bug.get(url)
def get_bug_list(self, params={}):
url = url = urljoin(self.API_ROOT, 'bug/?%s' % (self.qs(**params)))
return BugSearch.get(url).bugs
def qs(self, **params):
if self.username and self.password:
params['username'] = [self.username]
params['password'] = [self.password]
return qs(**params)
class BMOAgent(BugzillaAgent):
def __init__(self, username=None, password=None):
super(BMOAgent, self).__init__('https://bugzilla.mozilla.org/bzapi/', username, password)
|
from bugzilla.models import *
from bugzilla.utils import *
class InvalidAPI_ROOT(Exception):
def __str__(self):
return "Invalid API url specified. " + \
"Please set BZ_API_ROOT in your environment " + \
"or pass it to the agent constructor"
class BugzillaAgent(object):
def __init__(self, api_root=None, username=None, password=None):
if not api_root:
api_root = os.environ.get('BZ_API_ROOT')
if not api_root:
raise InvalidAPI_ROOT
self.API_ROOT = api_root
self.username, self.password = username, password
def get_bug(self, bug, include_fields='_default,token,cc,keywords,whiteboard,comments', exclude_fields=None, params={}):
params['include_fields'] = [include_fields]
params['exclude_fields'] = [exclude_fields]
url = urljoin(self.API_ROOT, 'bug/%s?%s' % (bug, self.qs(**params)))
return Bug.get(url)
def get_bug_list(self, params={}):
url = url = urljoin(self.API_ROOT, 'bug/?%s' % (self.qs(**params)))
return BugSearch.get(url).bugs
def qs(self, **params):
if self.username and self.password:
params['username'] = [self.username]
params['password'] = [self.password]
return qs(**params)
class BMOAgent(BugzillaAgent):
def __init__(self, username=None, password=None):
super(BMOAgent, self).__init__('https://api-dev.bugzilla.mozilla.org/latest/', username, password)
|
bsd-3-clause
|
Python
|
14c31307fd31631ecce0378aedbef95cec8531f2
|
Fix autodiscovery
|
nkovshov/gargoyle,roverdotcom/gargoyle,nkovshov/gargoyle,YPlan/gargoyle,roverdotcom/gargoyle,roverdotcom/gargoyle,YPlan/gargoyle,YPlan/gargoyle,nkovshov/gargoyle
|
gargoyle/__init__.py
|
gargoyle/__init__.py
|
"""
gargoyle
~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.utils.module_loading import autodiscover_modules
from gargoyle.manager import gargoyle
__version__ = '1.2.0'
VERSION = __version__ # old version compat
__all__ = ('gargoyle', 'autodiscover', '__version__', 'VERSION')
default_app_config = 'gargoyle.apps.GargoyleAppConfig'
def autodiscover():
"""
Auto-discover INSTALLED_APPS' gargoyle modules and fail silently when
not present. This forces an import on them to register any gargoyle bits they
may want.
"""
import gargoyle.builtins # noqa
autodiscover_modules('gargoyle')
|
"""
gargoyle
~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.utils.module_loading import autodiscover_modules
from gargoyle.manager import gargoyle
__version__ = '1.2.0'
VERSION = __version__ # old version compat
__all__ = ('gargoyle', 'autodiscover', '__version__', 'VERSION')
default_app_config = 'gargoyle.apps.GargoyleAppConfig'
def autodiscover():
"""
Auto-discover INSTALLED_APPS' gargoyle modules and fail silently when
not present. This forces an import on them to register any gargoyle bits they
may want.
"""
autodiscover_modules('gargoyle')
|
apache-2.0
|
Python
|
f4063d86404adbb5489edefd6c12d855de246dee
|
test that we can decode all doubly-encoded characters (doesn't pass yet)
|
rspeer/python-ftfy
|
ftfy/test_unicode.py
|
ftfy/test_unicode.py
|
# -*- coding: utf-8 -*-
from ftfy.fixes import fix_text_encoding
import unicodedata
import sys
from nose.tools import eq_
if sys.hexversion >= 0x03000000:
unichr = chr
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in range(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn', 'Cs', 'Mc', 'Mn'):
garble = char.encode('utf-8').decode('latin-1')
garble2 = char.encode('utf-8').decode('latin-1').encode('utf-8').decode('latin-1')
eq_(fix_text_encoding(garble), char)
eq_(fix_text_encoding(garble2), char)
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
eq_(fix_text_encoding(text), text)
|
# -*- coding: utf-8 -*-
from ftfy.fixes import fix_text_encoding
import unicodedata
import sys
if sys.hexversion >= 0x03000000:
unichr = chr
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in range(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn', 'Cs', 'Mc', 'Mn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_text_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_text_encoding(text) == text, text
|
mit
|
Python
|
c9e37f9b241c2bef2ffdb4811cec41c951b21ef9
|
Update fluid_cat_slim.py
|
leios/OIST.CSC,leios/OIST.CSC,leios/OIST.CSC
|
cat_boxing/caged_cat/python/fluid_cat_slim.py
|
cat_boxing/caged_cat/python/fluid_cat_slim.py
|
from random import randint
def generate_cat():
cat_size = randint(1,100)
return cat_size
def fill_box():
empty_room = 400
j = 0
while empty_room > 0:
cat = generate_cat()
empty_room = empty_room - cat
j = j + 1
return j
def fill_truck():
truck_size = 40
cat_num = 0
i = 0
while i <= truck_size:
cats_in_box = fill_box()
cat_num = cat_num + cats_in_box
i = i + 1
print("There are ", cat_num, " cats in our truck. Let's move out!")
|
from random import randint
def generate_cat():
cat_size = randint(1,100)
return cat_size
def fill_box():
box_size = 400
empty_room = 400
j = 0
while empty_room > 0:
cat = generate_cat()
empty_room = empty_room - cat
j = j + 1
return j
def fill_truck():
truck_size = 40
cat_num = 0
i = 0
while i <= truck_size:
cats_in_box = fill_box()
cat_num = cat_num + cats_in_box
i = i + 1
print("There are ", cat_num, " cats in our truck. Let's move out!")
|
mit
|
Python
|
d1e66c414aac60cc7770ddeff091dedc5c0047f6
|
Remove debug `print` from feature extraction
|
widoptimization-willett/feature-extraction
|
feature_extraction/extraction.py
|
feature_extraction/extraction.py
|
import numpy as np
import skimage.exposure as exposure
from .util import AttributeDict
def extract_features(image, measurements):
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.hstack([m.compute(image) for m in measurements])
def normalize_features(X):
# recenter features and normalize over the dataset
X -= np.mean(X, axis=0)
X /= np.linalg.norm(X, axis=0)
# normalize for each record
X /= np.vstack(np.linalg.norm(X, axis=1))
return X
def feature_postprocessing(X, options):
_options = AttributeDict({'normalize': True, 'fill_nans': False})
_options.update(options or {}); options = _options
if options.fill_nans:
X = np.nan_to_num(X)
if options.normalize:
X = normalize_features(X)
return X
def image_preprocessing(im, options):
_options = AttributeDict({'normalize': True, 'equalize': None})
_options.update(options or {}); options = _options
if options.normalize:
im = exposure.rescale_intensity(im)
if options.equalize:
if options.equalize['method'] == "histogram":
im = exposure.equalize_hist(im)
elif options.equalize['method'] == "stretch":
pmin, pmax = np.percentile(im,
(options.equalize['saturation'], 100-options.equalize['saturation']))
im = exposure.rescale_intensity(im, in_range=(pmin, pmax))
return im
|
import numpy as np
import skimage.exposure as exposure
from .util import AttributeDict
def extract_features(image, measurements):
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.hstack([m.compute(image) for m in measurements])
def normalize_features(X):
# recenter features and normalize over the dataset
X -= np.mean(X, axis=0)
X /= np.linalg.norm(X, axis=0)
# normalize for each record
X /= np.vstack(np.linalg.norm(X, axis=1))
return X
def feature_postprocessing(X, options):
_options = AttributeDict({'normalize': True, 'fill_nans': False})
_options.update(options or {}); options = _options
if options.fill_nans:
X = np.nan_to_num(X)
if options.normalize:
X = normalize_features(X)
return X
def image_preprocessing(im, options):
_options = AttributeDict({'normalize': True, 'equalize': None})
_options.update(options or {}); options = _options
if options.normalize:
im = exposure.rescale_intensity(im)
print options
if options.equalize:
if options.equalize['method'] == "histogram":
im = exposure.equalize_hist(im)
elif options.equalize['method'] == "stretch":
pmin, pmax = np.percentile(im,
(options.equalize['saturation'], 100-options.equalize['saturation']))
im = exposure.rescale_intensity(im, in_range=(pmin, pmax))
return im
|
apache-2.0
|
Python
|
15652a0b80b0fa0c87ac9ccd33eaada22859bfa2
|
Update the_most_numbers.py
|
JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking
|
checkio/python/elementary/the_most_numbers.py
|
checkio/python/elementary/the_most_numbers.py
|
def distance(*args):
if args:
min = args[0]
max = args[0]
for x in args:
if x < min:
min = x
if x > max:
max = x
else:
min = 0
max = 0
return max - min
|
mit
|
Python
|
|
2e3341c7e32182cc35f6a658d613c77a72b9b377
|
Modify comments
|
Skalman/owl_analytics
|
src/marketdata/access/remote/google.py
|
src/marketdata/access/remote/google.py
|
import urllib2
import urllib
from marketdata.utils.transform.google.rawquote_intraday import TranformIntradayQuote
def _getUrl(url, urlconditions):
url_values = urllib.urlencode(urlconditions)
return url + '?' + url_values
def _pullQuote(url, urlconditions):
req = urllib2.Request(_getUrl(url, urlconditions))
response = urllib2.urlopen(req).readlines()
return response
class IntradayMinutes(object):
'''Extract intraday market data from Google finance.
URL to access market data:
http://www.google.com/finance/getprices?q=IBM&x=NYSE&i=60&p=5d&f=d,c,h,l,o,v
Abbreviations in the URL:
q = quote symbol
x = exchange symbol
i = interval in seconds i.e. 60 = 1 minute
p = number of past trading days (max has been 15d)
f = quote format (date, close, high, low, open, volume)
'''
def __init__(self, symbol, exchange, minutes=1, days=1):
'''Constructor
'''
self.url = 'http://www.google.com/finance/getprices'
quoteformat = 'd,c,h,l,o,v'
self.urlconditions = {}
self.urlconditions['q'] = symbol # 'IBM', 'JPM', 'GE', 'AMD'
self.urlconditions['x'] = exchange # 'NYSE', 'INDEXNASDAQ'
self.urlconditions['i'] = str(minutes * 60) # 60 refers to 1 minute interval
self.urlconditions['p'] = str(days) + 'd' # 1d refers to 1 day (max 15 days)
self.urlconditions['f'] = quoteformat # date, close, high, low, open, volume
self.quote = self.__extractTransform()
def __extractRawQuote(self):
return _pullQuote(self.url, self.urlconditions)
def __transformRawQuote(self, raw_quote):
interval = self.urlconditions['i']
return TranformIntradayQuote(raw_quote, interval)
def __extractTransform(self):
raw_quote = self.__extractRawQuote()
return self.__transformRawQuote(raw_quote)
def json(self):
return self.quote.json_uts_chlov()
def dict_np(self):
return self.quote.dts_chlov()
|
import urllib2
import urllib
from marketdata.utils.transform.google.rawquote_intraday import TranformIntradayQuote
def _getUrl(url, urlconditions):
url_values = urllib.urlencode(urlconditions)
return url + '?' + url_values
def _pullQuote(url, urlconditions):
req = urllib2.Request(_getUrl(url, urlconditions))
response = urllib2.urlopen(req).readlines()
return response
class IntradayMinutes(object):
'''Extract intraday market data from Google finance.
URL to access market data from Google finance:
http://www.google.com/finance/getprices?q=IBM&x=NYSE&i=60&p=5d&f=d,c,h,l,o,v
Description of abbreviations present in the above URL:
q = quote symbol
x = exchange symbol
i = interval in seconds i.e. 60 = 1 minute
p = number of past trading days (max has been 15d)
f = quote format (date, close, high, low, open, volume)
'''
def __init__(self, symbol, exchange, minutes=1, days=1):
'''Constructor
'''
self.url = 'http://www.google.com/finance/getprices'
quoteformat = 'd,c,h,l,o,v'
self.urlconditions = {}
self.urlconditions['q'] = symbol # 'IBM', 'JPM', 'GE', 'AMD'
self.urlconditions['x'] = exchange # 'NYSE', 'INDEXNASDAQ'
self.urlconditions['i'] = str(minutes * 60) # 60 refers to 1 minute interval
self.urlconditions['p'] = str(days) + 'd' # 1d refers to 1 day (max 15 days)
self.urlconditions['f'] = quoteformat # date, close, high, low, open, volume
self.quote = self.__extractTransform()
def __extractRawQuote(self):
return _pullQuote(self.url, self.urlconditions)
def __transformRawQuote(self, raw_quote):
interval = self.urlconditions['i']
return TranformIntradayQuote(raw_quote, interval)
def __extractTransform(self):
raw_quote = self.__extractRawQuote()
return self.__transformRawQuote(raw_quote)
def json(self):
return self.quote.json_uts_chlov()
def dict_np(self):
return self.quote.dts_chlov()
|
mpl-2.0
|
Python
|
3577de6383053e0f8e05d531c8a632be12e89ca6
|
fix for route parser to handle when path=None
|
bretthandrews/marvin,albireox/marvin,albireox/marvin,sdss/marvin,sdss/marvin,albireox/marvin,albireox/marvin,sdss/marvin,sdss/marvin,bretthandrews/marvin,bretthandrews/marvin,bretthandrews/marvin
|
python/marvin/utils/general/decorators.py
|
python/marvin/utils/general/decorators.py
|
from functools import wraps
# General Decorators
def parseRoutePath(f):
''' Decorator to parse generic route path '''
@wraps(f)
def decorated_function(inst, *args, **kwargs):
if 'path' in kwargs and kwargs['path']:
for kw in kwargs['path'].split('/'):
if len(kw) == 0:
continue
var, value = kw.split('=')
kwargs[var] = value
kwargs.pop('path')
return f(inst, *args, **kwargs)
return decorated_function
|
from functools import wraps
# General Decorators
def parseRoutePath(f):
''' Decorator to parse generic route path '''
@wraps(f)
def decorated_function(inst, *args, **kwargs):
for kw in kwargs['path'].split('/'):
if len(kw) == 0:
continue
var, value = kw.split('=')
kwargs[var] = value
kwargs.pop('path')
return f(inst, *args, **kwargs)
return decorated_function
|
bsd-3-clause
|
Python
|
27acea8beae7876159f142add8d3e55b62d61f8f
|
Add read method to modulators
|
watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder
|
feder/questionaries/modulator.py
|
feder/questionaries/modulator.py
|
from django import forms
from django.utils.translation import ugettext as _
class BaseBlobFormModulator(object):
description = None
def __init__(self, blob=None):
self.blob = blob or {}
super(BaseBlobFormModulator, self).__init__()
def create(self, fields):
raise NotImplementedError("Provide method 'create'")
def answer(self, fields):
raise NotImplementedError("Provide method 'answer'")
def read(self, cleaned_data):
raise NotImplementedError("Provide method 'read'")
class BaseSimpleModulator(BaseBlobFormModulator):
output_field_cls = None
def create(self, fields):
fields['name'] = forms.CharField(label=_("Question"))
fields['help_text'] = forms.CharField(label=_("Description of question"))
fields['required'] = forms.BooleanField(label=_("This fields is required?"))
def answer(self, fields):
fields['value'] = self.output_field_cls(label=self.blob['name'],
help_text=self.blob['help_text'], required=self.blob.get('required', True))
def read(self, cleaned_data):
return cleaned_data['value']
class CharModulator(BaseSimpleModulator):
description = "Char modulator"
output_field_cls = forms.CharField
class IntegerModulator(BaseSimpleModulator):
description = "Integer modulator"
output_field_cls = forms.CharField
class EmailModulator(BaseSimpleModulator):
description = "E-mail modulator"
output_field_cls = forms.CharField
modulators = {'char': CharModulator,
'int': IntegerModulator,
'email': EmailModulator}
|
from django import forms
from django.utils.translation import ugettext as _
class BaseBlobFormModulator(object):
description = None
def __init__(self, blob=None):
self.blob = blob or {}
super(BaseBlobFormModulator, self).__init__()
def create(self):
raise NotImplementedError("")
def answer(self):
raise NotImplementedError("")
class BaseSimpleModulator(BaseBlobFormModulator):
output_field_cls = None
def create(self, fields):
fields['name'] = forms.CharField(label=_("Question"))
fields['help_text'] = forms.CharField(label=_("Description of question"))
fields['required'] = forms.BooleanField(label=_("This fields is required?"))
def answer(self, fields):
fields['value'] = self.output_field_cls(label=self.blob['name'],
help_text=self.blob['help_text'], required=self.blob.get('required', True))
class CharModulator(BaseSimpleModulator):
description = "Char modulator"
output_field_cls = forms.CharField
class IntegerModulator(BaseSimpleModulator):
description = "Integer modulator"
output_field_cls = forms.CharField
class EmailModulator(BaseSimpleModulator):
description = "E-mail modulator"
output_field_cls = forms.CharField
modulators = {'char': CharModulator, 'int': IntegerModulator, 'email': EmailModulator}
|
mit
|
Python
|
90e1b254266155abded62bc3155785961acc0ff0
|
Split filepath and count in credential module
|
CIRCL/AIL-framework,CIRCL/AIL-framework,CIRCL/AIL-framework,CIRCL/AIL-framework
|
bin/Credential.py
|
bin/Credential.py
|
#!/usr/bin/env python2
# -*-coding:UTF-8 -*
import time
from packages import Paste
from pubsublogger import publisher
from Helper import Process
import re
if __name__ == "__main__":
publisher.port = 6380
publisher.channel = "Script"
config_section = "Credential"
p = Process(config_section)
publisher.info("Find credentials")
critical = 8
regex_web = "/^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$/"
regex_cred = "[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}:[a-zA-Z0-9\_\-]+"
while True:
message = p.get_from_set()
if message is None:
publisher.debug("Script Credential is Idling 10s")
print('Sleeping')
time.sleep(10)
continue
filepath, count = message.split()
if count < 5:
# Less than 5 matches from the top password list, false positive.
continue
paste = Paste.Paste(filepath)
content = paste.get_p_content()
creds = set(re.findall(regex_cred, content))
if len(creds) == 0:
continue
sites = set(re.findall(regex_web, content))
message = '{} credentials found.'.format(len(creds))
if sites:
message += ' Related websites: {}'.format(', '.join(sites))
to_print = 'Credential;{};{};{};{}'.format(paste.p_source, paste.p_date, paste.p_name, message)
print('\n '.join(creds))
if len(creds) > critical:
print("========> Found more than 10 credentials in this file : {}".format(filepath))
publisher.warning(to_print)
if sites:
print("=======> Probably on : {}".format(', '.join(sites)))
else:
publisher.info(to_print)
|
#!/usr/bin/env python2
# -*-coding:UTF-8 -*
import time
from packages import Paste
from pubsublogger import publisher
from Helper import Process
import re
if __name__ == "__main__":
publisher.port = 6380
publisher.channel = "Script"
config_section = "Credential"
p = Process(config_section)
publisher.info("Find credentials")
critical = 10
regex_web = "/^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$/"
regex_cred = "[a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}:[a-zA-Z0-9\_\-]+"
while True:
filepath = p.get_from_set()
if filepath is None:
publisher.debug("Script Credential is Idling 10s")
print('Sleeping')
time.sleep(10)
continue
paste = Paste.Paste(filepath)
content = paste.get_p_content()
creds = set(re.findall(regex_cred, content))
if len(creds) == 0:
continue
sites = set(re.findall(regex_web, content))
message = '{} credentials found.'.format(len(creds))
if sites:
message += ' Related websites: {}'.format(', '.join(sites))
to_print = 'Credential;{};{};{};{}'.format(paste.p_source, paste.p_date, paste.p_name, message)
print('\n '.join(creds))
if len(creds) > critical:
print("========> Found more than 10 credentials on this file : {}".format(filepath))
publisher.warning(to_print)
if sites:
print("=======> Probably on : {}".format(', '.join(sites)))
else:
publisher.info(to_print)
|
agpl-3.0
|
Python
|
e18047a3cb3c8303bf64dc9ce5fc230e29b25b56
|
Fix fac-gitall.py
|
lnls-fac/scripts,lnls-fac/scripts
|
bin/fac-gitall.py
|
bin/fac-gitall.py
|
#!/usr/bin/env python3
import sys
import os
import lnls
#import git
from termcolor import colored
import subprocess
git_functions = ('pull','push','status','diff','clone')
def run_git_clone():
if not os.path.exists(lnls.folder_code):
print('fac-gitall.py: please create ' + lnls.folder_code + ' folder with correct permissions first!')
return
all_repos = ('collective_effects',
'fieldmaptrack',
'job_manager',
'lnls',
'mathphys',
'MatlabMiddleLayer',
'pyaccel',
'scripts',
'sirius',
'sirius_parameters',
'sirius_wiki',
'tools',
'trackcpp',
'tracy_sirius',
'va',
)
for repo in all_repos:
cmd = 'git clone ssh://[email protected]/lnls-fac/' + repo + '.git'
os.system(cmd)
def run_git(func):
if func == 'clone': return run_git_clone()
fnames = os.listdir(lnls.folder_code)
for fname in fnames:
repo_folder = os.path.join(lnls.folder_code, fname)
if not os.path.exists(os.path.join(repo_folder,'.git')): continue
print('processing ' + func + colored(' <'+fname+'>','yellow')+'...')
cmd = 'cd ' + repo_folder + '; git ' + func
text = subprocess.call([cmd], shell=True, stdout=sys.stdout)
print('...ok')
print()
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] not in git_functions:
print('usage: fac-gitall.py [' + '|'.join(git_functions) + ']')
else:
print()
run_git(sys.argv[1])
|
#!/usr/bin/env python3
import sys
import os
import lnls
#import git
from termcolor import colored
import subprocess
git_functions = ('pull','push','status','diff','clone')
def run_git_clone():
if not os.path.exists(lnls.folder_code):
print('gitall.py: please create ' + lnls.folder_code + ' folder with correct permissions first!')
return
all_repos = ('collective_effects',
'fieldmaptrack',
'job_manager',
'lnls',
'mathphys',
'MatlabMiddleLayer',
'pyaccel',
'scripts',
'sirius',
'sirius_parameters',
'sirius_wiki',
'tools',
'trackcpp',
'tracy_sirius',
'va',
)
for repo in all_repos:
cmd = 'git clone ssh://[email protected]/lnls-fac/' + repo + '.git'
os.system(cmd)
def run_git(func):
if func == 'clone': return run_git_clone()
fnames = os.listdir(lnls.folder_code)
for fname in fnames:
repo_folder = os.path.join(lnls.folder_code, fname)
if not os.path.exists(os.path.join(repo_folder,'.git')): continue
print('processing ' + func + colored(' <'+fname+'>','yellow')+'...')
cmd = 'cd ' + repo_folder + '; git ' + func
text = subprocess.call([cmd], shell=True, stdout=sys.stdout)
print('...ok')
print()
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1] not in git_functions:
print('usage: gitall.py [' + '|'.join(git_functions) + ']')
else:
print()
run_git(sys.argv[1])
|
mit
|
Python
|
1b172c592bb5efc1a0dcf8f18d6ea6a1037ec9ff
|
Clean things up a bit
|
jhaals/filebutler-upload
|
filebutler_upload/filehandler.py
|
filebutler_upload/filehandler.py
|
import requests
class Filemanager:
def __init__(self, url, username, password):
self.headers = {'Accept': 'application/json'}
self.username = username
self.password = password
self.url = url
def list(self):
'''
List all files uploaded by user
'''
data = {
'username': self.username,
'password': self.password
}
response = requests.post(
self.url + 'files',
data=data,
headers=self.headers
)
if response.status_code == 200:
return response.json['message']
else:
return {}
def delete(self, hash):
''' delete specified hash '''
if hash == 'all':
pass
data = {
'username': self.username,
'password': self.password,
}
response = requests.post(
self.url + hash + '/delete',
data=data,
headers=self.headers
)
return response.text
def upload(self, upload_file,
download_password, one_time_download, expire):
files = {'file': upload_file}
data = {
'username': self.username,
'password': self.password,
'download_password': download_password,
'one_time_download': '1' if one_time_download else '0',
'expire': expire
}
response = requests.post(
self.url,
data=data,
files=files,
headers=self.headers
)
return response
|
import requests
#import os
#from ConfigParser import RawConfigParser
#from text_table import TextTable
class Filemanager:
def __init__(self, url, username, password):
self.headers = {'Accept': 'application/json'}
self.username = username
self.password = password
self.url = url
def list(self):
'''
List all files uploaded by user
'''
data = {
'username': self.username,
'password': self.password
}
response = requests.post(
self.url + 'files',
data=data,
headers=self.headers
)
if response.status_code == 200:
return response.json['message']
else:
return {}
def delete(self, hash):
''' delete specified hash '''
if hash == 'all':
pass
data = {
'username': self.username,
'password': self.password,
}
response = requests.post(
self.url + hash + '/delete',
data=data,
headers=self.headers
)
return response.text
def upload(self, upload_file,
download_password, one_time_download, expire):
files = {'file': upload_file}
data = {
'username': self.config.get('settings', 'username'),
'password': self.config.get('settings', 'password'),
'download_password': self.options.password,
'one_time_download': '1' if self.options.onetime else '0',
'expire': self.options.lifetime
}
response = requests.post(
self.url,
data=data,
files=files, headers=self.headers
)
return response
# For testing, remove when finished.
#config = RawConfigParser()
#config.read(os.path.expanduser('~/.filebutler-upload.conf'))
#username = config.get('settings', 'username')
#password = config.get('settings', 'password')
#url = config.get('settings', 'upload_url')
#fm = Filemanager(url, username, password)
#t = TextTable((40, 'Download hash'), (35, 'Filename'))
#for hash, filename in fm.list().iteritems():
# t.row(hash, filename)
#print t.draw()
print fm.delete('a13170f4cdbd96743e18126306ddba484785ba6b')
|
bsd-3-clause
|
Python
|
3fea731e62653dfc847e82b8185feb029d844fd8
|
Revert "minifiying doctype json's"
|
elba7r/frameworking,paurosello/frappe,maxtorete/frappe,mbauskar/frappe,rmehta/frappe,neilLasrado/frappe,aboganas/frappe,bohlian/frappe,paurosello/frappe,adityahase/frappe,frappe/frappe,elba7r/builder,maxtorete/frappe,manassolanki/frappe,vqw/frappe,rmehta/frappe,ESS-LLP/frappe,saurabh6790/frappe,StrellaGroup/frappe,paurosello/frappe,elba7r/builder,vqw/frappe,manassolanki/frappe,tmimori/frappe,rmehta/frappe,indautgrp/frappe,tundebabzy/frappe,mhbu50/frappe,vqw/frappe,indautgrp/frappe,elba7r/frameworking,mbauskar/frappe,ESS-LLP/frappe,mhbu50/frappe,RicardoJohann/frappe,chdecultot/frappe,tmimori/frappe,rmehta/frappe,drukhil/frappe,vjFaLk/frappe,yashodhank/frappe,neilLasrado/frappe,StrellaGroup/frappe,mbauskar/frappe,almeidapaulopt/frappe,RicardoJohann/frappe,saurabh6790/frappe,tundebabzy/frappe,saurabh6790/frappe,almeidapaulopt/frappe,yashodhank/frappe,indautgrp/frappe,mhbu50/frappe,adityahase/frappe,chdecultot/frappe,tmimori/frappe,bcornwellmott/frappe,vjFaLk/frappe,mhbu50/frappe,ESS-LLP/frappe,frappe/frappe,chdecultot/frappe,drukhil/frappe,manassolanki/frappe,aboganas/frappe,elba7r/builder,tmimori/frappe,bohlian/frappe,indautgrp/frappe,adityahase/frappe,almeidapaulopt/frappe,tundebabzy/frappe,bcornwellmott/frappe,vjFaLk/frappe,vqw/frappe,bcornwellmott/frappe,drukhil/frappe,manassolanki/frappe,frappe/frappe,maxtorete/frappe,mbauskar/frappe,elba7r/builder,rohitwaghchaure/frappe,yashodhank/frappe,saurabh6790/frappe,rohitwaghchaure/frappe,bohlian/frappe,neilLasrado/frappe,bohlian/frappe,RicardoJohann/frappe,drukhil/frappe,RicardoJohann/frappe,neilLasrado/frappe,vjFaLk/frappe,tundebabzy/frappe,yashodhank/frappe,elba7r/frameworking,aboganas/frappe,aboganas/frappe,almeidapaulopt/frappe,rohitwaghchaure/frappe,adityahase/frappe,rohitwaghchaure/frappe,chdecultot/frappe,ESS-LLP/frappe,bcornwellmott/frappe,paurosello/frappe,maxtorete/frappe,StrellaGroup/frappe,elba7r/frameworking
|
frappe/modules/export_file.py
|
frappe/modules/export_file.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, json
import frappe.model
from frappe.modules import scrub, get_module_path, lower_case_files_for, scrub_dt_dn
def export_doc(doc):
export_to_files([[doc.doctype, doc.name]])
def export_to_files(record_list=None, record_module=None, verbose=0, create_init=None):
"""
Export record_list to files. record_list is a list of lists ([doctype],[docname] ) ,
"""
if frappe.flags.in_import:
return
if record_list:
for record in record_list:
write_document_file(frappe.get_doc(record[0], record[1]), record_module, create_init=create_init)
def write_document_file(doc, record_module=None, create_init=None):
newdoc = doc.as_dict(no_nulls=True)
# strip out default fields from children
for df in doc.meta.get_table_fields():
for d in newdoc.get(df.fieldname):
for fieldname in frappe.model.default_fields:
if fieldname in d:
del d[fieldname]
module = record_module or get_module_name(doc)
if create_init is None:
create_init = doc.doctype in lower_case_files_for
# create folder
folder = create_folder(module, doc.doctype, doc.name, create_init)
# write the data file
fname = (doc.doctype in lower_case_files_for and scrub(doc.name)) or doc.name
with open(os.path.join(folder, fname +".json"),'w+') as txtfile:
txtfile.write(frappe.as_json(newdoc))
def get_module_name(doc):
if doc.doctype == 'Module Def':
module = doc.name
elif doc.doctype=="Workflow":
module = frappe.db.get_value("DocType", doc.document_type, "module")
elif hasattr(doc, 'module'):
module = doc.module
else:
module = frappe.db.get_value("DocType", doc.doctype, "module")
return module
def create_folder(module, dt, dn, create_init):
module_path = get_module_path(module)
dt, dn = scrub_dt_dn(dt, dn)
# create folder
folder = os.path.join(module_path, dt, dn)
frappe.create_folder(folder)
# create init_py_files
if create_init:
create_init_py(module_path, dt, dn)
return folder
def create_init_py(module_path, dt, dn):
def create_if_not_exists(path):
initpy = os.path.join(path, '__init__.py')
if not os.path.exists(initpy):
open(initpy, 'w').close()
create_if_not_exists(os.path.join(module_path))
create_if_not_exists(os.path.join(module_path, dt))
create_if_not_exists(os.path.join(module_path, dt, dn))
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, json
import frappe.model
from frappe.modules import scrub, get_module_path, lower_case_files_for, scrub_dt_dn
def export_doc(doc):
export_to_files([[doc.doctype, doc.name]])
def export_to_files(record_list=None, record_module=None, verbose=0, create_init=None):
"""
Export record_list to files. record_list is a list of lists ([doctype],[docname] ) ,
"""
if frappe.flags.in_import:
return
if record_list:
for record in record_list:
write_document_file(frappe.get_doc(record[0], record[1]), record_module, create_init=create_init)
def write_document_file(doc, record_module=None, create_init=None):
newdoc = doc.as_dict(no_nulls=True)
# strip out default fields from children
for df in doc.meta.get_table_fields():
for d in newdoc.get(df.fieldname):
for fieldname in frappe.model.default_fields:
if fieldname in d:
del d[fieldname]
for fieldname in d.keys():
if d[fieldname] == 0 or d[fieldname] == "":
del d[fieldname]
module = record_module or get_module_name(doc)
if create_init is None:
create_init = doc.doctype in lower_case_files_for
# create folder
folder = create_folder(module, doc.doctype, doc.name, create_init)
# write the data file
fname = (doc.doctype in lower_case_files_for and scrub(doc.name)) or doc.name
with open(os.path.join(folder, fname +".json"),'w+') as txtfile:
txtfile.write(frappe.as_json(newdoc))
def get_module_name(doc):
if doc.doctype == 'Module Def':
module = doc.name
elif doc.doctype=="Workflow":
module = frappe.db.get_value("DocType", doc.document_type, "module")
elif hasattr(doc, 'module'):
module = doc.module
else:
module = frappe.db.get_value("DocType", doc.doctype, "module")
return module
def create_folder(module, dt, dn, create_init):
module_path = get_module_path(module)
dt, dn = scrub_dt_dn(dt, dn)
# create folder
folder = os.path.join(module_path, dt, dn)
frappe.create_folder(folder)
# create init_py_files
if create_init:
create_init_py(module_path, dt, dn)
return folder
def create_init_py(module_path, dt, dn):
def create_if_not_exists(path):
initpy = os.path.join(path, '__init__.py')
if not os.path.exists(initpy):
open(initpy, 'w').close()
create_if_not_exists(os.path.join(module_path))
create_if_not_exists(os.path.join(module_path, dt))
create_if_not_exists(os.path.join(module_path, dt, dn))
|
mit
|
Python
|
f03ba99cd7c4db064b2ece3d226b30c8e9ca63bf
|
Add a test for scipy.integrate.newton_cotes. A more comprehensive set of tests would be better, but it's a start.
|
lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,scipy/scipy-svn,jasonmccampbell/scipy-refactor,jasonmccampbell/scipy-refactor,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,scipy/scipy-svn,jasonmccampbell/scipy-refactor,scipy/scipy-svn,scipy/scipy-svn
|
scipy/integrate/tests/test_quadrature.py
|
scipy/integrate/tests/test_quadrature.py
|
import numpy
from numpy import cos, sin, pi
from numpy.testing import *
from scipy.integrate import quadrature, romberg, romb, newton_cotes
class TestQuadrature(TestCase):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x,n,z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc,0,pi,(2,1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc,0,pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romb(self):
assert_equal(romb(numpy.arange(17)),128)
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*numpy.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*numpy.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*numpy.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*numpy.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
if __name__ == "__main__":
run_module_suite()
|
import numpy
from numpy import cos, sin, pi
from numpy.testing import *
from scipy.integrate import quadrature, romberg, romb
class TestQuadrature(TestCase):
def quad(self, x, a, b, args):
raise NotImplementedError
def test_quadrature(self):
# Typical function with two extra arguments:
def myfunc(x,n,z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val, err = quadrature(myfunc,0,pi,(2,1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romberg(self):
# Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
val = romberg(myfunc,0,pi, args=(2, 1.8))
table_val = 0.30614353532540296487
assert_almost_equal(val, table_val, decimal=7)
def test_romb(self):
assert_equal(romb(numpy.arange(17)),128)
def test_non_dtype(self):
# Check that we work fine with functions returning float
import math
valmath = romberg(math.sin, 0, 1)
expected_val = 0.45969769413185085
assert_almost_equal(valmath, expected_val, decimal=7)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
Python
|
a9b2b6fe868ab564653f40e611ce6a788f396981
|
Fix wrong variable replacement
|
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
|
backend/globaleaks/tests/jobs/test_pgp_check_sched.py
|
backend/globaleaks/tests/jobs/test_pgp_check_sched.py
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import pgp_check_sched
class TestPGPCheckSchedule(helpers.TestGLWithPopulatedDB):
encryption_scenario = 'ONE_VALID_ONE_EXPIRED'
@inlineCallbacks
def test_pgp_check_schedule(self):
# FIXME: complete this unit test by performing checks
# on the actions performed by the scheduler.
yield pgp_check_sched.PGPCheckSchedule().operation()
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import secure_file_delete_sched
class TestPGPCheckSchedule(helpers.TestGLWithPopulatedDB):
encryption_scenario = 'ONE_VALID_ONE_EXPIRED'
@inlineCallbacks
def test_pgp_check_schedule(self):
# FIXME: complete this unit test by performing checks
# on the actions performed by the scheduler.
yield pgp_check_sched.PGPCheckSchedule().operation()
|
agpl-3.0
|
Python
|
6ef76159ab32e454241f7979a1cdf320c463dd9e
|
add config file option
|
wathsalav/xos,zdw/xos,zdw/xos,opencord/xos,zdw/xos,jermowery/xos,xmaruto/mcord,wathsalav/xos,jermowery/xos,wathsalav/xos,cboling/xos,open-cloud/xos,cboling/xos,jermowery/xos,opencord/xos,wathsalav/xos,cboling/xos,cboling/xos,xmaruto/mcord,cboling/xos,open-cloud/xos,opencord/xos,jermowery/xos,open-cloud/xos,zdw/xos,xmaruto/mcord,xmaruto/mcord
|
planetstack/planetstack-backend.py
|
planetstack/planetstack-backend.py
|
#!/usr/bin/env python
import os
import argparse
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
from observer.backend import Backend
from planetstack.config import Config
config = Config()
# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
def daemon():
"""Daemonize the current process."""
if os.fork() != 0: os._exit(0)
os.setsid()
if os.fork() != 0: os._exit(0)
os.umask(0)
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, 0)
# xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
logdir=os.path.dirname(config.observer_logfile)
# when installed in standalone we might not have httpd installed
if not os.path.isdir(logdir): os.mkdir(logdir)
crashlog = os.open('%s'%config.observer_logfile, os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
def main():
# Generate command line parser
parser = argparse.ArgumentParser(usage='%(prog)s [options]')
parser.add_argument('-d', '--daemon', dest='daemon', action='store_true', default=False,
help='Run as daemon.')
# smbaker: util/config.py parses sys.argv[] directly to get config file name; include the option here to avoid
# throwing unrecognized argument exceptions
parser.add_argument('-C', '--config', dest='config_file', action='store', default="/opt/planetstack/plstackapi_config",
help='Name of config file.')
args = parser.parse_args()
if args.daemon: daemon()
backend = Backend()
backend.run()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import os
import argparse
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
from observer.backend import Backend
from planetstack.config import Config
config = Config()
# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
def daemon():
"""Daemonize the current process."""
if os.fork() != 0: os._exit(0)
os.setsid()
if os.fork() != 0: os._exit(0)
os.umask(0)
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, 0)
# xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
logdir=os.path.dirname(config.observer_logfile)
# when installed in standalone we might not have httpd installed
if not os.path.isdir(logdir): os.mkdir(logdir)
crashlog = os.open('%s'%config.observer_logfile, os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
def main():
# Generate command line parser
parser = argparse.ArgumentParser(usage='%(prog)s [options]')
parser.add_argument('-d', '--daemon', dest='daemon', action='store_true', default=False,
help='Run as daemon.')
args = parser.parse_args()
if args.daemon: daemon()
backend = Backend()
backend.run()
if __name__ == '__main__':
main()
|
apache-2.0
|
Python
|
a5942402fdf8f8013dbe62636ea29582538e33c6
|
fix argument name
|
EBIvariation/eva-cttv-pipeline
|
bin/trait_mapping/create_table_for_manual_curation.py
|
bin/trait_mapping/create_table_for_manual_curation.py
|
#!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
if trait_name not in previous_mappings:
return ''
uri = previous_mappings[trait_name]
label = get_ontology_label_from_ols(uri)
uri_is_current_and_in_efo = is_current_and_in_efo(uri)
uri_in_efo = is_in_efo(uri)
if uri_in_efo:
trait_status = 'EFO_CURRENT' if uri_is_current_and_in_efo else 'EFO_OBSOLETE'
else:
trait_status = 'NOT_CONTAINED'
trait_string = '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', trait_status])
return trait_string
def find_exact_mapping(trait_name, mappings):
for mapping in mappings:
if mapping.lower().split('|')[1] == trait_name:
return mapping
return ''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--traits-for-curation',
help='Table with traits for which the pipeline failed to make a confident prediction')
parser.add_argument(
'-m', '--previous-mappings',
help='Table with all mappings previously issued by EVA')
parser.add_argument(
'-o', '--output',
help='Output TSV to be loaded in Google Sheets for manual curation')
args = parser.parse_args()
outfile = open(args.output, 'w')
# Load all previous mappings
previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
# Process all mappings which require manual curation
for line in open(args.traits_for_curation):
fields = line.rstrip().split('\t')
trait_name, trait_freq = fields[:2]
mappings = fields[2:]
previous_mapping = find_previous_mapping(trait_name, previous_mappings)
exact_mapping = find_exact_mapping(trait_name, mappings)
out_line = '\t'.join(
[trait_name, trait_freq,
# Mapping to use, if ready, comment, mapping URI, mapping label, whether exact, in EFO
'', '', '', '', '', '', '',
previous_mapping, exact_mapping] + mappings
) + '\n'
outfile.write(out_line)
|
#!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
if trait_name not in previous_mappings:
return ''
uri = previous_mappings[trait_name]
label = get_ontology_label_from_ols(uri)
uri_is_current_and_in_efo = is_current_and_in_efo(uri)
uri_in_efo = is_in_efo(uri)
if uri_in_efo:
trait_status = 'EFO_CURRENT' if uri_is_current_and_in_efo else 'EFO_OBSOLETE'
else:
trait_status = 'NOT_CONTAINED'
trait_string = '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', trait_status])
return trait_string
def find_exact_mapping(trait_name, mappings):
for mapping in mappings:
if mapping.lower().split('|')[1] == trait_name:
return mapping
return ''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--traits-for-curation',
help='Table with traits for which the pipeline failed to make a confident prediction')
parser.add_argument(
'-m', '--previous-mappings',
help='Table with all mappings previously issued by EVA')
parser.add_argument(
'-o', '--output',
help='Output TSV to be loaded in Google Sheets for manual curation')
args = parser.parse_args()
outfile = open(args.final_table_for_curation, 'w')
# Load all previous mappings
previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
# Process all mappings which require manual curation
for line in open(args.traits_for_curation):
fields = line.rstrip().split('\t')
trait_name, trait_freq = fields[:2]
mappings = fields[2:]
previous_mapping = find_previous_mapping(trait_name, previous_mappings)
exact_mapping = find_exact_mapping(trait_name, mappings)
out_line = '\t'.join(
[trait_name, trait_freq,
# Mapping to use, if ready, comment, mapping URI, mapping label, whether exact, in EFO
'', '', '', '', '', '', '',
previous_mapping, exact_mapping] + mappings
) + '\n'
outfile.write(out_line)
|
apache-2.0
|
Python
|
662608e6a183810072cb5e9dc7545145c866cf34
|
Add missing import
|
m-ober/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps
|
byceps/services/shop/order/action_registry_service.py
|
byceps/services/shop/order/action_registry_service.py
|
"""
byceps.services.shop.order.action_registry_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ...seating.models.category import CategoryID
from ...user_badge.models.badge import BadgeID
from ..article.models.article import ArticleNumber
from .models.payment import PaymentState
from . import action_service
def register_badge_awarding(article_number: ArticleNumber, badge_id: BadgeID
) -> None:
# Award badge to orderer when order is marked as paid.
params = {
'badge_id': str(badge_id),
}
action_service.create_action(article_number, PaymentState.paid,
'create_tickets', params_create)
def register_tickets_creation(article_number: ArticleNumber,
ticket_category_id: CategoryID) -> None:
# Create tickets for order when it is marked as paid.
params_create = {
'category_id': str(ticket_category_id),
}
action_service.create_action(article_number, PaymentState.paid,
'create_tickets', params_create)
# Revoke tickets that have been created for order when it is
# canceled after being marked as paid.
params_revoke = {}
action_service.create_action(article_number, PaymentState.canceled_after_paid,
'revoke_tickets', params_revoke)
|
"""
byceps.services.shop.order.action_registry_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ...seating.models.category import CategoryID
from ...user_badge.models.badge import BadgeID
from ..article.models.article import ArticleNumber
from .models.payment import PaymentState
def register_badge_awarding(article_number: ArticleNumber, badge_id: BadgeID
) -> None:
# Award badge to orderer when order is marked as paid.
params = {
'badge_id': str(badge_id),
}
action_service.create_action(article_number, PaymentState.paid,
'create_tickets', params_create)
def register_tickets_creation(article_number: ArticleNumber,
ticket_category_id: CategoryID) -> None:
# Create tickets for order when it is marked as paid.
params_create = {
'category_id': str(ticket_category_id),
}
action_service.create_action(article_number, PaymentState.paid,
'create_tickets', params_create)
# Revoke tickets that have been created for order when it is
# canceled after being marked as paid.
params_revoke = {}
action_service.create_action(article_number, PaymentState.canceled_after_paid,
'revoke_tickets', params_revoke)
|
bsd-3-clause
|
Python
|
5cdf89e64ab9dabf277a867a774a88f12e1ece5e
|
Fix broken exception `BadHeader`
|
vuolter/pyload,vuolter/pyload,vuolter/pyload
|
src/pyload/core/network/http/exceptions.py
|
src/pyload/core/network/http/exceptions.py
|
# -*- coding: utf-8 -*-
PROPRIETARY_RESPONSES = {
440: "Login Timeout - The client's session has expired and must log in again.",
449: "Retry With - The server cannot honour the request because the user has not provided the required information",
451: "Redirect - Unsupported Redirect Header",
509: "Bandwidth Limit Exceeded",
520: "Unknown Error",
521: "Web Server Is Down - The origin server has refused the connection from CloudFlare",
522: "Connection Timed Out - CloudFlare could not negotiate a TCP handshake with the origin server",
523: "Origin Is Unreachable - CloudFlare could not reach the origin server",
524: "A Timeout Occurred - CloudFlare did not receive a timely HTTP response",
525: "SSL Handshake Failed - CloudFlare could not negotiate a SSL/TLS handshake with the origin server",
526: "Invalid SSL Certificate - CloudFlare could not validate the SSL/TLS certificate that the origin server presented",
527: "Railgun Error - CloudFlare requests timeout or failed after the WAN connection has been established",
530: "Site Is Frozen - Used by the Pantheon web platform to indicate a site that has been frozen due to inactivity",
}
class BadHeader(Exception):
def __init__(self, code, header=b"", content=b""):
code = int(code)
response = PROPRIETARY_RESPONSES.get(code, "unknown error code")
super().__init__(f"Bad server response: {code} {response}")
self.code = code
self.header = header
self.content = content
|
# -*- coding: utf-8 -*-
PROPRIETARY_RESPONSES = {
440: "Login Timeout - The client's session has expired and must log in again.",
449: "Retry With - The server cannot honour the request because the user has not provided the required information",
451: "Redirect - Unsupported Redirect Header",
509: "Bandwidth Limit Exceeded",
520: "Unknown Error",
521: "Web Server Is Down - The origin server has refused the connection from CloudFlare",
522: "Connection Timed Out - CloudFlare could not negotiate a TCP handshake with the origin server",
523: "Origin Is Unreachable - CloudFlare could not reach the origin server",
524: "A Timeout Occurred - CloudFlare did not receive a timely HTTP response",
525: "SSL Handshake Failed - CloudFlare could not negotiate a SSL/TLS handshake with the origin server",
526: "Invalid SSL Certificate - CloudFlare could not validate the SSL/TLS certificate that the origin server presented",
527: "Railgun Error - CloudFlare requests timeout or failed after the WAN connection has been established",
530: "Site Is Frozen - Used by the Pantheon web platform to indicate a site that has been frozen due to inactivity",
}
class BadHeader(Exception):
def __init__(self, code, header=b"", content=b""):
int_code = int(code)
response = responses.get(
int_code, PROPRIETARY_RESPONSES.get(int_code, "unknown error code")
)
super().__init__(f"Bad server response: {code} {response}")
self.code = int_code
self.header = header
self.content = content
|
agpl-3.0
|
Python
|
a23c6132792bd6aff420791cf4b78a955cc0dfad
|
add headless
|
huaying/ins-crawler
|
inscrawler/browser.py
|
inscrawler/browser.py
|
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from time import sleep
class Browser:
def __init__(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
service_args = ['--ignore-ssl-errors=true']
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(
executable_path='%s/bin/chromedriver' % dir_path,
service_args=service_args,
chrome_options=chrome_options)
self.driver.implicitly_wait(5)
@property
def page_height(self):
return self.driver.execute_script('return document.body.scrollHeight')
def get(self, url):
self.driver.get(url)
def find_one(self, css_selector, elem=None):
obj = elem or self.driver
try:
return obj.find_element(By.CSS_SELECTOR, css_selector)
except NoSuchElementException:
return None
def find(self, css_selector, elem=None):
obj = elem or self.driver
try:
return obj.find_elements(By.CSS_SELECTOR, css_selector)
except NoSuchElementException:
return None
def scroll_down(self, wait=0.5):
self.driver.execute_script(
'window.scrollTo(0, document.body.scrollHeight)')
sleep(wait)
def scroll_up(self, wait=2):
self.driver.execute_script(
'window.scrollTo(0, 0)')
sleep(wait)
def js_click(self, elem):
self.driver.execute_script("arguments[0].click();", elem)
def __del__(self):
try:
self.driver.quit()
except Exception:
pass
|
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from time import sleep
class Browser:
def __init__(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
service_args = ['--ignore-ssl-errors=true']
chrome_options = Options()
# chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(
executable_path='%s/bin/chromedriver' % dir_path,
service_args=service_args,
chrome_options=chrome_options)
self.driver.implicitly_wait(5)
@property
def page_height(self):
return self.driver.execute_script('return document.body.scrollHeight')
def get(self, url):
self.driver.get(url)
def find_one(self, css_selector, elem=None):
obj = elem or self.driver
try:
return obj.find_element(By.CSS_SELECTOR, css_selector)
except NoSuchElementException:
return None
def find(self, css_selector, elem=None):
obj = elem or self.driver
try:
return obj.find_elements(By.CSS_SELECTOR, css_selector)
except NoSuchElementException:
return None
def scroll_down(self, wait=0.5):
self.driver.execute_script(
'window.scrollTo(0, document.body.scrollHeight)')
sleep(wait)
def scroll_up(self, wait=2):
self.driver.execute_script(
'window.scrollTo(0, 0)')
sleep(wait)
def js_click(self, elem):
self.driver.execute_script("arguments[0].click();", elem)
def __del__(self):
try:
self.driver.quit()
except Exception:
pass
|
mit
|
Python
|
b44a9dfbf26e07b9db6a31119044b8347907a5a5
|
disable fid map
|
teuben/pyASC,teuben/pyASC,warnerem/pyASC,teuben/pyASC,teuben/pyASC,warnerem/pyASC,warnerem/pyASC,warnerem/pyASC,warnerem/pyASC,teuben/pyASC,teuben/masc,teuben/masc,teuben/pyASC,warnerem/pyASC,teuben/masc,warnerem/pyASC,teuben/pyASC
|
examples/fitsdiff2.py
|
examples/fitsdiff2.py
|
#! /usr/bin/env python
#
# This routine can diff images from its neighbors. For a series i=1,N
# this can loop over i=2,N to produce N-1 difference images
#
# B_i = A_i - A_i-1
#
from __future__ import print_function
import glob
import sys
import shutil
import os
from astropy.io import fits
import numpy as np
if len(sys.argv) == 3:
f1 = sys.argv[1]
f2 = sys.argv[2]
print("Using %s %s" % (f1,f2))
hdu1 = fits.open(f1)
hdu2 = fits.open(f2)
h2 = hdu2[0].header
d1 = hdu1[0].data.astype(np.float32)
d2 = hdu2[0].data.astype(np.float32)
print(f1,d1.min(),d1.max())
print(f2,d2.min(),d2.max())
diff = d2 - d1
max1 = d1.max()
std1 = diff.std()
fidelity = max1 / std1
print("MEAN/STD/FID:",diff.mean(), std1, fidelity)
fits.writeto('diff.fits',diff,h2,overwrite=True)
#
#fid = np.abs(d2) / np.max(np.abs(diff),std1/1.4)
#fits.writeto('fidelity.fits',fid,h2,overwrite=True)
try:
import matplotlib.pyplot as plt
plt.figure(1)
plt.hist(diff.ravel())
plt.show()
except:
print("Failing to plot")
|
#! /usr/bin/env python
#
# This routine can diff images from its neighbors. For a series i=1,N
# this can loop over i=2,N to produce N-1 difference images
#
# B_i = A_i - A_i-1
#
from __future__ import print_function
import glob
import sys
import shutil
import os
from astropy.io import fits
import numpy as np
if len(sys.argv) == 3:
f1 = sys.argv[1]
f2 = sys.argv[2]
print("Using %s %s" % (f1,f2))
hdu1 = fits.open(f1)
hdu2 = fits.open(f2)
h2 = hdu2[0].header
d1 = hdu1[0].data.astype(np.float32)
d2 = hdu2[0].data.astype(np.float32)
print(f1,d1.min(),d1.max())
print(f2,d2.min(),d2.max())
diff = d2 - d1
max1 = d1.max()
std1 = diff.std()
fidelity = max1 / std1
fid = np.abs(d2) / np.max(np.abs(diff),std1/1.4)
print("MEAN/STD/FID:",diff.mean(), std1, fidelity)
fits.writeto('diff.fits',diff,h2,overwrite=True)
fits.writeto('fidelity.fits',fid,h2,overwrite=True)
try:
import matplotlib.pyplot as plt
plt.figure(1)
plt.hist(diff.ravel())
plt.show()
except:
print("Failing to plot")
|
mit
|
Python
|
304760823382e72efb8f98ab3b5a98147f98c0e8
|
Improve userlist liveness guarentees
|
ekimekim/girc
|
geventirc/channel.py
|
geventirc/channel.py
|
import gevent
from geventirc.message import Join, Part, Privmsg
from geventirc.replycodes import replies
from geventirc.userlist import UserList
class Channel(object):
"""Object representing an IRC channel.
This is the reccomended way to do operations like joins, or tracking user lists.
A channel may be join()ed and part()ed multiple times.
The user list will be the most recent info available, or None before first join.
In particular, the user list can be considered up to date iff users_ready is set.
Can be used in a with statement to join then part.
"""
USERS_READY_TIMEOUT = 10
joined = False
users_ready = gevent.event.Event()
userlist = None
def __init__(self, client, name):
self.client = client
self.name = name
self.client.add_handler(self._recv_part, command=Part, channels=lambda value: self.name in value)
self.client.add_handler(self._recv_end_of_names, command=replies.ENDOFNAMES, params=[None, self.name, None])
def join(self, block=True):
"""Join the channel if not already joined. If block=True, do not return until name list is received."""
if self.joined: return
self.joined = True
self.users_ready.clear()
self.userlist = UserList(self.client, self.name)
self.client.send(Join(self.name))
if not block: return
self.users_ready.wait(self.USERS_READY_TIMEOUT)
def part(self, block=True):
"""Part from the channel if joined. If block=True, do not return until fully parted."""
if not self.joined: return
self.joined = False
@gevent.spawn
def _part():
# we delay unregistering until the part is sent.
self.client.send(Part(self.name), block=True)
self.userlist.unregister()
if block: _part.get()
def msg(self, content, block=False):
self.client.msg(self.name, content, block=block)
def action(self, content, block=False):
self.client.send(Privmsg.action(self.name, content), block=block)
def _recv_end_of_names(self, client, msg):
self.users_ready.set()
def _recv_part(self, client, msg):
# we receive a forced PART from the server
self.joined = False
self.userlist.unregister()
def __enter__(self):
self.join()
def __exit__(self, *exc_info):
# if we're cleaning up after an exception, ignore errors in part()
# as they are most likely a carry-on error or same root cause.
try:
self.part()
except Exception:
if exc_info == (None, None, None):
raise
|
import gevent
from geventirc.message import Join, Part, Privmsg
from geventirc.replycodes import replies
from geventirc.userlist import UserList
class Channel(object):
"""Object representing an IRC channel.
This is the reccomended way to do operations like joins, or tracking user lists.
A channel may be join()ed and part()ed multiple times.
The user list will be the most recent info available, or None before first join.
Can be used in a with statement to join then part.
"""
joined = False
userlist = None
def __init__(self, client, name):
self.client = client
self.name = name
self.client.add_handler(self._recv_part, command=Part, channels=lambda value: self.name in value)
def join(self, block=True):
"""Join the channel if not already joined. If block=True, do not return until name list is received."""
if self.joined: return
self.joined = True
self.userlist = UserList(self.client, self.name)
self.client.send(Join(self.name))
if not block: return
self.client.wait_for(command=replies.ENDOFNAMES, params=[None, self.name, None])
def part(self, block=True):
"""Part from the channel if joined. If block=True, do not return until fully parted."""
if not self.joined: return
self.joined = False
@gevent.spawn
def _part():
# we delay unregistering until the part is sent.
self.client.send(Part(self.name), block=True)
self.userlist.unregister()
if block: _part.get()
def msg(self, content, block=False):
self.client.msg(self.name, content, block=block)
def action(self, content, block=False):
self.client.send(Privmsg.action(self.name, content), block=block)
def _recv_part(self, client, msg):
# we receive a forced PART from the server
self.joined = False
self.userlist.unregister()
def __enter__(self):
self.join()
def __exit__(self, *exc_info):
# if we're cleaning up after an exception, ignore errors in part()
# as they are most likely a carry-on error or same root cause.
try:
self.part()
except Exception:
if exc_info == (None, None, None):
raise
|
mit
|
Python
|
cda111aecdd650d1f08b75e2c92774526bf9e06d
|
Change Misc to Miscellaneous Utilities
|
demis001/scikit-bio,anderspitman/scikit-bio,johnchase/scikit-bio,SamStudio8/scikit-bio,jairideout/scikit-bio,Achuth17/scikit-bio,Kleptobismol/scikit-bio,jairideout/scikit-bio,xguse/scikit-bio,anderspitman/scikit-bio,wdwvt1/scikit-bio,colinbrislawn/scikit-bio,kdmurray91/scikit-bio,jensreeder/scikit-bio,colinbrislawn/scikit-bio,Kleptobismol/scikit-bio,demis001/scikit-bio,kdmurray91/scikit-bio,Jorge-C/bipy,jensreeder/scikit-bio,jdrudolph/scikit-bio,Achuth17/scikit-bio,johnchase/scikit-bio,gregcaporaso/scikit-bio,corburn/scikit-bio,wdwvt1/scikit-bio,gregcaporaso/scikit-bio,Kleptobismol/scikit-bio,xguse/scikit-bio,SamStudio8/scikit-bio,averagehat/scikit-bio,averagehat/scikit-bio,corburn/scikit-bio,jdrudolph/scikit-bio
|
bipy/util/misc.py
|
bipy/util/misc.py
|
#!/usr/bin/env python
r"""
Miscellaneous Utilities (:mod:`bipy.util.misc`)
============================
.. currentmodule:: bipy.util.misc
This module provides miscellaneous useful utility classes and methods that do
not fit in any specific module.
Functions
---------
.. autosummary::
:toctree: generated/
safe_md5
"""
from __future__ import division
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, bipy development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import hashlib
def safe_md5(open_file, block_size=2**20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from StringIO import StringIO
>>> from bipy.util.misc import safe_md5
>>> fd = StringIO("foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
|
#!/usr/bin/env python
r"""
Misc (:mod:`bipy.util.misc`)
============================
.. currentmodule:: bipy.util.misc
This module provides miscellaneous useful utility classes and methods that do
not fit in any specific module.
Functions
---------
.. autosummary::
:toctree: generated/
safe_md5
"""
from __future__ import division
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, bipy development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import hashlib
def safe_md5(open_file, block_size=2**20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from StringIO import StringIO
>>> from bipy.util.misc import safe_md5
>>> fd = StringIO("foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
|
bsd-3-clause
|
Python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.