commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses 13 values) | lang (stringclasses 23 values)
---|---|---|---|---|---|---|---|---
60f13bdfb97e83ac1bf2f72e3eec2e2c2b88cbb3 | add tests for potential density computation | adrn/Biff,adrn/Biff,adrn/Biff | biff/tests/test_bfe.py | biff/tests/test_bfe.py | # coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Third-party
import astropy.units as u
from astropy.constants import G as _G
G = _G.decompose([u.kpc,u.Myr,u.Msun]).value
import numpy as np
# Project
from .._bfe import density
# Check that A000 = 1 corresponds to the Hernquist density
def hernquist_density(xyz, M, r_s):
xyz = np.atleast_2d(xyz)
r = np.sqrt(np.sum(xyz**2, axis=-1))
return M/(2*np.pi) * r_s / (r * (r+r_s)**3)
def hernquist_potential(xyz, M, r_s):
xyz = np.atleast_2d(xyz)
r = np.sqrt(np.sum(xyz**2, axis=-1))
return -G*M / (r + r_s)
def test_hernquist():
nmax = 6
lmax = 2
Anlm = np.zeros((nmax+1,lmax+1,lmax+1))
Anlm[0,0,0] = 1.
M = 1E10
r_s = 3.5
nbins = 128
rr = np.linspace(0.1,10.,nbins)
xyz = np.zeros((nbins,3))
xyz[:,0] = rr
bfe_dens = density(xyz, M, r_s, Anlm, nmax, lmax)
true_dens = hernquist_density(xyz, M, r_s)
np.testing.assert_allclose(bfe_dens, true_dens)
| mit | Python |
|
1e808aa70882cd30cd0ac7a567d12efde99b5e61 | Create runserver.py | DanielKoehler/pyucwa | runserver.py | runserver.py | from ucwa.http import app
app.run(debug=True)
| isc | Python |
|
f737a8be41111f65944b00eb85a76687653fc8c0 | Create sort_fpkm.py | iandriver/RNA-sequence-tools,idbedead/RNA-sequence-tools,idbedead/RNA-sequence-tools,iandriver/RNA-sequence-tools,idbedead/RNA-sequence-tools,iandriver/RNA-sequence-tools | sort_fpkm.py | sort_fpkm.py | import os
import fnmatch
import sys, csv ,operator
for root, dirnames, filenames in os.walk('/Users/idriver/RockLab-files/test'):
for filename in fnmatch.filter(filenames, '*.fpkm_tracking'):
if filename =='isoforms.fpkm_tracking':
data = csv.reader(open(os.path.join(root, filename), 'rU'),delimiter='\t')
header = next(data, None) # returns the headers or `None` if the input is empty
sortedlist = sorted(data, key=operator.itemgetter(0))
            # now write the sorted result into a new CSV file
with open(root+'/'+root.split('/')[-1]+'_isoforms.fpkm_tracking', "wb") as f:
fileWriter = csv.writer(f, delimiter='\t')
fileWriter.writerow(header)
for row in sortedlist:
fileWriter.writerow(row)
| mit | Python |
|
d42aad6a15dfe9cc5a63dbb19efe112534b91a5e | Add autoexec script for reference (already bundled in config) | UMDSpaceSystemsLab/DisplayBoards,UMDSpaceSystemsLab/DisplayBoards | resources/autoexec.py | resources/autoexec.py | # place at ~/.kodi/userdata/autoexec.py
import xbmc
import time
xbmc.executebuiltin("XBMC.ReplaceWindow(1234)")
time.sleep(0.1)
xbmc.executebuiltin('PlayMedia("/storage/videos/SSL","isdir")')
xbmc.executebuiltin('xbmc.PlayerControl(repeatall)')
xbmc.executebuiltin("Action(Fullscreen)")
| mit | Python |
|
159ed7dd9dd5ade6c4310d2aa106b13bf94aa903 | Add empty cloner | mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge | stoneridge_cloner.py | stoneridge_cloner.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO - This will run on the central server, and download releases from ftp.m.o
# to a local directory for serving up to the clients, which will download the
# necessary stuff via stoneridge_downloader.py
| mpl-2.0 | Python |
|
d9710fa2af26ab4ab5fef62adc5be670437bea68 | Create logistics_regression.py | gu-yan/mlAlgorithms | logistics_regression.py | logistics_regression.py | #!/usr/bin/python
# -*-coding:utf-8 -*-
from math import exp
import random
import data_tool
#y = x1*a1 + x2*a2 + x3*a3 + ... + xn*an + b
def predict(data,
            coef,
            bias):
    # bias is added once, then each feature contributes weight * value
    pred = bias
    for index in range(len(coef)):
        pred += data[index] * coef[index]
    return sigmoid(pred)
def sigmoid(x):
res = 0.0
try :
if x > 60:
res = 1.0 / (1.0 + exp(-60))
elif x < -60:
res = 1.0 / (1.0 + exp(60))
else:
res = 1.0 / (1.0 + exp(-x))
except:
print 'over math.exp range ', x
return res
def sgd(train,
        labels,
        coef,
        bias,
        learn_rate,
        nepoch):
    for epoch in range(nepoch):
        for index in range(len(train)):
            pred = predict(train[index], coef, bias)
            # per-sample error drives each stochastic update
            error = labels[index] - pred
            bias = bias + learn_rate * error * pred * (1 - pred)
            for i in range(len(coef)):
                coef[i] = (coef[i] + learn_rate * error * pred * (1 - pred) * train[index][i])
    return coef, bias
#generate standard normal distribution
def param_gauss(size):
param = []
for i in range(size):
param.append(random.gauss(mu=0, sigma=0.05))
return param
def logistic_regression(features_train, labels_train,
features_test, labels_test,
learn_rate, nepoch):
coef = param_gauss(len(features_train[0]))
bias = param_gauss(1)[0]
coef, bias = sgd(features_train, labels_train, coef, bias, learn_rate, nepoch)
pred = []
for index in range(len(features_test)):
pred.append(predict(features_test[index], coef, bias=bias))
return pred, coef, bias
def accuracy(pred, y_true):
    correct = 0.0
    for index in range(len(pred)):
        # predictions are sigmoid probabilities; round to compare with 0/1 labels
        if round(pred[index]) == y_true[index]:
            correct += 1.0
    return correct / len(pred)
#test
features_train, labels_train, features_test, labels_test = data_tool.train_test_split(
data_tool.load_data(),
test_rate=0.3)
for i in range(5):
print 'cycle +++++++++++++++++++++++++++++++++++++++++++++++++++++ ', i
pred, coef, bias = logistic_regression(features_train, labels_train, features_test, labels_test,
learn_rate=0.02, nepoch=100)
score = accuracy(pred, labels_test)
print 'coef is: ', coef
print 'bias is: ', bias
print 'accuracy is: ', score
| apache-2.0 | Python |
|
8ee7798af73f374485c1a97e82a98fd5ff8b3c48 | Add module for loading specific classes | n0ano/gantt,n0ano/gantt | nova/loadables.py | nova/loadables.py | # Copyright (c) 2011-2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generic Loadable class support.
Meant to be used by such things as scheduler filters and weights where we
want to load modules from certain directories and find certain types of
classes within those modules. Note that this is quite different than
generic plugins and the pluginmanager code that exists elsewhere.
Usage:
Create a directory with an __init__.py with code such as:
class SomeLoadableClass(object):
pass
class MyLoader(nova.loadables.BaseLoader):
def __init__(self):
super(MyLoader, self).__init__(SomeLoadableClass)
If you create modules in the same directory and subclass SomeLoadableClass
within them, MyLoader().get_all_classes() will return a list
of such classes.
"""
import inspect
import os
import sys
from nova import exception
from nova.openstack.common import importutils
class BaseLoader(object):
def __init__(self, loadable_cls_type):
mod = sys.modules[self.__class__.__module__]
self.path = mod.__path__[0]
self.package = mod.__package__
self.loadable_cls_type = loadable_cls_type
def _is_correct_class(self, obj):
"""Return whether an object is a class of the correct type and
is not prefixed with an underscore.
"""
return (inspect.isclass(obj) and
(not obj.__name__.startswith('_')) and
issubclass(obj, self.loadable_cls_type))
def _get_classes_from_module(self, module_name):
"""Get the classes from a module that match the type we want."""
classes = []
module = importutils.import_module(module_name)
for obj_name in dir(module):
# Skip objects that are meant to be private.
if obj_name.startswith('_'):
continue
itm = getattr(module, obj_name)
if self._is_correct_class(itm):
classes.append(itm)
return classes
def get_all_classes(self):
"""Get the classes of the type we want from all modules found
in the directory that defines this class.
"""
classes = []
for dirpath, dirnames, filenames in os.walk(self.path):
relpath = os.path.relpath(dirpath, self.path)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
for fname in filenames:
root, ext = os.path.splitext(fname)
if ext != '.py' or root == '__init__':
continue
module_name = "%s%s.%s" % (self.package, relpkg, root)
mod_classes = self._get_classes_from_module(module_name)
classes.extend(mod_classes)
return classes
def get_matching_classes(self, loadable_class_names):
"""Get loadable classes from a list of names. Each name can be
a full module path or the full path to a method that returns
classes to use. The latter behavior is useful to specify a method
that returns a list of classes to use in a default case.
"""
classes = []
for cls_name in loadable_class_names:
obj = importutils.import_class(cls_name)
if self._is_correct_class(obj):
classes.append(obj)
elif inspect.isfunction(obj):
# Get list of classes from a function
for cls in obj():
classes.append(cls)
else:
error_str = 'Not a class of the correct type'
raise exception.ClassNotFound(class_name=cls_name,
exception=error_str)
return classes
| apache-2.0 | Python |
|
5211117033f596bd506e81e8825ddfb08634c25e | Create battery.py | cclauss/In-Harms-Way,cclauss/In-Harms-Way,cclauss/In-Harms-Way | client/iOS/battery.py | client/iOS/battery.py | # coding: utf-8
import collections, objc_util
battery_info = collections.namedtuple('battery_info', 'level state')
def get_battery_info():
device = objc_util.ObjCClass('UIDevice').currentDevice()
device.setBatteryMonitoringEnabled_(True)
try:
return battery_info(int(device.batteryLevel() * 100),
'unknown unplugged charging full'.split()[device.batteryState()])
finally:
device.setBatteryMonitoringEnabled_(False)
def battery_is_low(threshold = 20):
battery_info = get_battery_info()
return (battery_info.level <= threshold
and battery_info.state.startswith('un'))
__all__ = 'get_battery_info battery_is_low'.split()
if __name__ == '__main__':
print(get_battery_info())
print(battery_is_low(15))
| apache-2.0 | Python |
|
840d4d555b7b2858ca593251f1593943b10b135b | Add setup_egg.py | wanderine/nipype,rameshvs/nipype,sgiavasis/nipype,rameshvs/nipype,dgellis90/nipype,mick-d/nipype,christianbrodbeck/nipype,carolFrohlich/nipype,glatard/nipype,gerddie/nipype,fprados/nipype,JohnGriffiths/nipype,wanderine/nipype,grlee77/nipype,JohnGriffiths/nipype,mick-d/nipype,christianbrodbeck/nipype,blakedewey/nipype,carolFrohlich/nipype,rameshvs/nipype,mick-d/nipype_source,fprados/nipype,dgellis90/nipype,gerddie/nipype,dmordom/nipype,satra/NiPypeold,sgiavasis/nipype,arokem/nipype,JohnGriffiths/nipype,FredLoney/nipype,FCP-INDI/nipype,pearsonlab/nipype,gerddie/nipype,grlee77/nipype,arokem/nipype,dmordom/nipype,mick-d/nipype,carlohamalainen/nipype,carlohamalainen/nipype,iglpdc/nipype,pearsonlab/nipype,carlohamalainen/nipype,glatard/nipype,FCP-INDI/nipype,dgellis90/nipype,carolFrohlich/nipype,glatard/nipype,pearsonlab/nipype,FCP-INDI/nipype,mick-d/nipype_source,Leoniela/nipype,blakedewey/nipype,rameshvs/nipype,Leoniela/nipype,dgellis90/nipype,glatard/nipype,grlee77/nipype,wanderine/nipype,FCP-INDI/nipype,arokem/nipype,fprados/nipype,mick-d/nipype,blakedewey/nipype,mick-d/nipype_source,sgiavasis/nipype,blakedewey/nipype,iglpdc/nipype,wanderine/nipype,FredLoney/nipype,FredLoney/nipype,pearsonlab/nipype,grlee77/nipype,dmordom/nipype,Leoniela/nipype,iglpdc/nipype,arokem/nipype,satra/NiPypeold,carolFrohlich/nipype,iglpdc/nipype,sgiavasis/nipype,JohnGriffiths/nipype,gerddie/nipype | setup_egg.py | setup_egg.py | #!/usr/bin/env python
"""Wrapper to run setup.py using setuptools."""
from setuptools import setup
################################################################################
# Call the setup.py script, injecting the setuptools-specific arguments.
extra_setuptools_args = dict(
tests_require=['nose'],
test_suite='nose.collector',
zip_safe=False,
)
if __name__ == '__main__':
execfile('setup.py', dict(__name__='__main__',
extra_setuptools_args=extra_setuptools_args))
| bsd-3-clause | Python |
|
71a6c671f802e3b1c123b083ef34f81efeb55750 | Create MakeMaskfiles.py | LauritsSkov/Introgression-detection | MakeMaskfiles.py | MakeMaskfiles.py | import gzip
import sys
from collections import defaultdict
def readFasta(infile):
sequence = ''
if '.gz' in infile:
with gzip.open(infile) as data:
for line in data:
if '>' in line:
seqname = line.strip().replace('>','')
else:
sequence += line.strip().replace(' ','')
else:
with open(infile) as data:
for line in data:
if '>' in line:
seqname = line.strip().replace('>','')
else:
sequence += line.strip().replace(' ','')
return sequence
_, repeatmask_file, callable_mask_file, window_size, chrom, outprefix = sys.argv
window_size = int(window_size)
#repeatmask_file = "helperfiles/RepeatMasks/chr{}.fa.masked"
#callable_mask_file = "helperfiles/AccessibilityMasks/20140520.chr{}.strict_mask.fasta.gz"
bases_called = 0
# Mask file for repetitive regions
repeatmask = readFasta(repeatmask_file)
callable_mask = readFasta(callable_mask_file)
with open(outprefix + '.bed','w') as outbed, open (outprefix + '.txt','w') as out:
d = defaultdict(int)
prev_base = 'Notcalled'
start = 0
for i in range(len(callable_mask)):
repeat_base = repeatmask[i]
callable_base = callable_mask[i]
# Round down to nearest window start
window = i - i%window_size
d[window] += 0
if repeat_base != 'N' and callable_base == 'P':
current_base = 'Called'
d[window] += 1
else:
current_base = 'Notcalled'
# extend
if current_base == prev_base:
end = i
# Make a new one
if current_base != prev_base:
if prev_base == 'Called':
outbed.write('{}\t{}\t{}\t{}\n'.format(chrom, start, end, prev_base))
start = i
end = i
prev_base = current_base
if prev_base == 'Called':
outbed.write('{}\t{}\t{}\t{}\n'.format(chrom, start, end, prev_base))
# Write output files
for window in range(0, max(d)+window_size, window_size):
out.write('{}\t{}\t{}\n'.format(chrom, window, d[window] / float(window_size)))
| mit | Python |
|
8e4cbd3dd09aac90cf2d71adb5ad841274b60575 | Create convolution_digit_recognition.py | rupertsmall/machine-learning,rupertsmall/machine-learning | Convolution_Neural_Networks/convolution_digit_recognition.py | Convolution_Neural_Networks/convolution_digit_recognition.py | # Optimise a neural network
import threading
from Queue import Queue
from numpy import *
from create_MEGA_THETA import *
from RLU_forward_backward import *
from get_overlaps import *
# get data from csv file into array
data = genfromtxt('train2.csv', delimiter=',')
# get a subset for testing
num_cpus = 4
N = 5*num_cpus # batch size of data subset
y_vals = data[:,0] # outputs
x_vals = data[:,1:].T # inputs
#x_vals = (x_vals > 0)*ones(shape(x_vals)) # make inputs 0/1
data_size = size(y_vals)
# define some NN layers
base_in = 783
xi = array([1525,10])
mangle_upper = 30
# randomly initiate NN values
MT = create_MEGA_THETA(xi)
#MT = genfromtxt('convltn_to_10_backup.csv', delimiter=',')
MTDIM = shape(MT)
DELTA = zeros([MTDIM[0],MTDIM[1]])
# learning rate
alpha = 1*N**2
beta = .02
# create some Queues for DELTA, time, good
time_queue = Queue()
good_queue = Queue()
DELTA_queue = Queue()
# initiate queues
time_queue.put(1.0)
good_queue.put(0.0)
success = 0 # ouch
counter = 0 # meh
# execute backprop procedure until accuracy is high
while success < .9999999: # success rate goal
selection = random.random_integers(0,data_size-1,N) # stochastic gradient descent
# get some pixels labels to mangle
mangle0 = random.randint(0,mangle_upper)
mangle1 = random.randint(0,mangle_upper)
random_mangle0 = random.random_integers(0,base_in,mangle0)
random_mangle1 = random.random_integers(0,base_in,mangle1)
batch_x_vals = x_vals[:,selection] # SGD batch
batch_y_vals = y_vals[selection] # SGD batch
# mangle pixels, some --> 0 some --> 1
batch_x_vals[random_mangle0,:] = 0
batch_x_vals[random_mangle1,:] = 1
DELTA_queue.put(DELTA)
DELTA_queue.task_done()
# run backprop optimisation
threads = [] # use multi-threading
for i in range(0,N):
# input vector
x = batch_x_vals[:,i]
overlaps = get() # from get_overlaps import
x1 = x[overlaps[0]]
x2 = x[overlaps[1]]
x4 = x[overlaps[2]]
x = hstack((x1, x2, x2, x4, x4, x4, x4))
y = zeros(10) # output of NN (row-less)
        y[int(batch_y_vals[i])] = 1
thr = threading.Thread(target=forwardBackward,
args=(xi, x, y, MT, time_queue, good_queue, DELTA_queue))
threads.append(thr)
print 'StArTiNg ThReAdS'
# start all threads
for i in range(0,N):
threads[i].start()
# spin until all threads finish
for i in range(0,N):
threads[i].join()
good = good_queue.get()
time = time_queue.get()
success = good/time
print 'Success: ',success
good_queue.put(good)
good_queue.task_done()
time_queue.put(time)
time_queue.task_done()
DELTA = DELTA_queue.get()
print 'Queue size: ',DELTA_queue.qsize()
MT = MT - (alpha/N)*DELTA #- N*beta*MT
DELTA = 0*DELTA
if counter == 50:
savetxt('convltn_to_10.csv', MT, delimiter=',')
print 'Success: ',success
print '\n### SAVED DATA to convltn_to_10.csv ###\n'
counter = 0 # reset
counter += 1
savetxt('convltn_to_10.csv', MT, delimiter=',')
| mit | Python |
|
675de92e16e268badd8c6f5de992c3901cc8f2ce | Update Category Model | samitnuk/online_shop,samitnuk/online_shop,samitnuk/online_shop | apps/shop/migrations/0004_category_parent_category.py | apps/shop/migrations/0004_category_parent_category.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-11 19:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('shop', '0003_product_model_name'),
]
operations = [
migrations.AddField(
model_name='category',
name='parent_category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category'),
),
]
| mit | Python |
|
f2d1421555f00f7bcb77f43cd010c221045c6bfd | Add tests for nd-shifty | mjoblin/netdumplings,mjoblin/netdumplings,mjoblin/netdumplings | tests/console/test_shifty.py | tests/console/test_shifty.py | import click.testing
from netdumplings.console.shifty import shifty
from netdumplings.exceptions import NetDumplingsError
class TestShifty:
"""
Test the nd-shifty commandline tool.
"""
def test_shifty(self, mocker):
"""
Test that the DumplingHub is instantiated as expected and that run()
is called.
"""
mock_hub = mocker.patch('netdumplings.DumplingHub')
runner = click.testing.CliRunner()
result = runner.invoke(
shifty,
[
'--address', 'testhost',
'--in-port', 1001,
'--out-port', 1002,
'--status-freq', 99,
],
)
mock_hub.assert_called_once_with(
address='testhost',
in_port=1001,
out_port=1002,
status_freq=99,
)
mock_hub.return_value.run.assert_called_once()
assert result.exit_code == 0
def test_shifty_with_error(self, mocker):
"""
Test that a NetDumplingsError in DumplingHub.run() results in shifty
exiting with status code 1.
"""
mock_hub = mocker.patch('netdumplings.DumplingHub')
mock_hub.return_value.run.side_effect = NetDumplingsError
runner = click.testing.CliRunner()
result = runner.invoke(shifty)
assert result.exit_code == 1
| mit | Python |
|
dd983ae232829559766bcdf4d2ea58861b8a47ad | Bring your own daemon. | bearstech/varnishstatd | varnish_statd.py | varnish_statd.py | #!/usr/bin/env python
import time
import os
from pprint import pprint
import varnishapi
def stat(name=None):
if name is None:
vsc = varnishapi.VarnishStat()
else:
vsc = varnishapi.VarnishStat(opt=["-n", name])
r = vsc.getStats()
values = dict(((k, v['val']) for k, v in r.iteritems()))
vsc.Fini()
return values
names = os.getenv('VARNISH_STATD_NAMES')
if names:
names = names.split(',')
else:
names = (None,)
wait = int(os.getenv('VARNISH_STATD_WAIT', 60))
carbon = os.getenv('CARBON_HOST', '127.0.0.1')
stats = os.getenv("VARNISH_STATD_STATS", "hitmisspass").split(',')
while True:
for n in names:
s = stat(n)
if 'hitmisspass' in stats:
for k in ['cache_hit', 'cache_hitpass', 'cache_miss']:
v = s['MAIN.%s' % k]
print("%s: %s" % (k, v))
#pprint(s)
time.sleep(wait)
| bsd-2-clause | Python |
|
c1d4525d5f43a5c2bfbfd88ab0dd943eb2452574 | add 127 | EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler | vol3/127.py | vol3/127.py | from fractions import gcd
if __name__ == "__main__":
LIMIT = 120000
rad = [1] * LIMIT
for i in range(2, LIMIT):
if rad[i] == 1:
for j in range(i, LIMIT, i):
rad[j] *= i
ele = []
for i in range(1, LIMIT):
ele.append([rad[i], i])
ele = sorted(ele)
ans = 0
for c in range(3, LIMIT):
chalf = c / 2
for [ra, a] in ele:
if ra * rad[c] > chalf:
break
b = c - a
if a >= b:
continue
if ra * rad[b] * rad[c] >= c:
continue
if gcd(ra, rad[b]) != 1:
continue
ans += c
print ans
| mit | Python |
|
8e7b57c8bc7be6a061d0c841700291a7d85df989 | add 174 | zeyuanxy/project-euler,zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler | vol4/174.py | vol4/174.py | if __name__ == "__main__":
L = 10 ** 6
count = [0] * (L + 1)
for inner in range(1, L / 4 + 1):
outer = inner + 2
used = outer * outer - inner * inner
while used <= L:
count[used] += 1
outer += 2
used = outer * outer - inner * inner
print sum(map(lambda x: 1 if 1 <= x <= 10 else 0, count))
| mit | Python |
|
a5b4fa261750fa79d61fc16b6061d449aa7e3523 | Add missing block.py | brendan-ward/rasterio,brendan-ward/rasterio,brendan-ward/rasterio | rasterio/block.py | rasterio/block.py | """Raster Blocks"""
from collections import namedtuple
BlockInfo = namedtuple('BlockInfo', ['row', 'col', 'window', 'size'])
| bsd-3-clause | Python |
|
65843b537e45b98068566c6cc57e4a3ad139d607 | add variant.py | AndersenLab/cegwas-web,AndersenLab/CeNDR,AndersenLab/cegwas-web,AndersenLab/cegwas-web,AndersenLab/CeNDR,AndersenLab/CeNDR,AndersenLab/CeNDR,AndersenLab/cegwas-web | cendr/views/api/variant.py | cendr/views/api/variant.py | # NEW API
from cendr import api, cache, app
from cyvcf2 import VCF
from flask import jsonify
import re
import sys
from subprocess import Popen, PIPE
def get_region(region):
m = re.match("^([0-9A-Za-z]+):([0-9]+)-([0-9]+)$", region)
if not m:
return msg(None, "Invalid region", 400)
chrom = m.group(1)
start = int(m.group(2))
end = int(m.group(3))
return chrom, start, end
@app.route('/api/variant/<region>')
def variant_from_region(region):
vcf = "http://storage.googleapis.com/elegansvariation.org/releases/{version}/WI.{version}.vcf.gz".format(version = 20170312)
m = re.match("^([0-9A-Za-z]+):([0-9]+)-([0-9]+)$", region)
if not m:
return "Error - malformed region.", 400
start = int(m.group(2))
end = int(m.group(3))
if start >= end:
return "Invalid start and end region values", 400
if end - start > 1e5:
return "Maximum region size is 100 kb", 400
comm = ["bcftools", "view", vcf, region]
out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()
#if err:
# return err, 400
#v = VCF(out)
return jsonify({"out": out.splitlines(), "comm": ' '.join(comm)})
| mit | Python |
|
d2bcba204d36a8ffd1e6a1ed79b89fcb6f1c88c5 | Add file to test out kmc approach. Dump training k-mers to fasta file | dkoslicki/CMash,dkoslicki/CMash | ideas/test_kmc.py | ideas/test_kmc.py | # This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
file_names = []
with open(training_file_names, 'r') as fid:
iter = 0
for line in fid.readlines():
line = line.strip()
file_names.append(os.path.basename(line))
iter += 1
if iter > 1000:
break
all_kmers = blist.blist()
for file_name in file_names:
sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
all_kmers += sketch._kmers
all_kmers_set = set(all_kmers)
with open(training_out_file, 'w') as fid:
iter = 0
for kmer in all_kmers_set:
fid.write(">seq_%d\n" % iter)
fid.write("%s\n" % kmer)
iter += 1
##########################################################################
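# Steps 1-2 of the plan above could then be driven through the kmc CLI.
# A minimal, untested sketch -- the k-mer size, 'reads.fa', the temp
# directory, and the exact kmc/kmc_tools invocations are assumptions:
#
#   import subprocess
#   subprocess.check_call(['kmc', '-k60', '-fm', 'reads.fa', 'reads_db', 'kmc_tmp/'])
#   subprocess.check_call(['kmc', '-k60', '-fm', training_out_file, 'train_db', 'kmc_tmp/'])
#   subprocess.check_call(['kmc_tools', 'simple', 'reads_db', 'train_db', 'intersect', 'shared_db'])
#   subprocess.check_call(['kmc_tools', 'transform', 'shared_db', 'dump', 'shared_kmers.txt'])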
| bsd-3-clause | Python |
|
c584bca2f9ac7bc005128d22b4e81a6b4885724c | allow Fabric to infrastructure config from YAML data files | kamiljsokolowski/LAB,kamiljsokolowski/LAB,kamiljsokolowski/LAB | templates/fabfile.py | templates/fabfile.py | import yaml
from fabric.api import env, run
def import_inf(data='web_app_basic.yml'):
    # use a context manager so the file is closed before returning
    with open(data, 'r') as inf_data:
        inf = yaml.load(inf_data)
    # for box in inf:
    #     print '\n'
    #     for parameter in box:
    #         print parameter, ':', box[parameter]
    return inf
inf = import_inf()
env.hosts = [inf[1]['ip']]
env.user = 'vagrant'
env.password = 'vagrant'
def hostinf():
run('hostname')
run('ip a sh dev eth1')
run('uname -a')
| mit | Python |
|
f657a02a560af1a5860f9a532052f54330018620 | Build "shell" target with chromium_code set. | Jonekee/chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,anirudhSK/chromium,dushu1203/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,patrickm/chromium.src,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,anirudhSK/chromium,Just-D/chromium-1,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,M4sse/chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,Just-D/chromium-1,Chilledheart/chromium,M4sse/chromium.src,jaruba/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,ltilve/chromium,dushu1203/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,Chilledheart/chromium,markYoungH/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,jaruba/chromium.src,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,littlstar/chromium.src,axinging/chromium-crosswalk,patrickm/chromium.src,dushu1203/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,ondra-novak/chromium.src,ondra-novak/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,ondra-novak/chromium.src,
patrickm/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,anirudhSK/chromium,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,littlstar/chromium.src,patrickm/chromium.src,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,jaruba/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,anirudhSK/chromium,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,dednal/chromium.src,anirudhSK/chromium,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,anirudhSK/chromium,markYoungH/chromium.src,Jonekee/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,markYoungH/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,M4sse/chromium.src,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,dednal/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,ChromiumWebApps/chromium,Jonekee/chromium.src,dednal/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,littlstar/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,anirudhSK/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,ltilve/chromium,dednal/chromium.src | ui/shell/shell.gyp | ui/shell/shell.gyp | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'shell',
'type': 'static_library',
'dependencies': [
'../../skia/skia.gyp:skia',
'../aura/aura.gyp:aura',
'../views/views.gyp:views',
],
'sources': [
'minimal_shell.cc',
'minimal_shell.h',
],
},
],
}
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'shell',
'type': 'static_library',
'dependencies': [
'../aura/aura.gyp:aura',
'../views/views.gyp:views',
'../../skia/skia.gyp:skia',
],
'sources': [
'minimal_shell.cc',
'minimal_shell.h',
],
},
],
}
| bsd-3-clause | Python |
bddfeeec193d9fb61d99c70be68093c854e541f7 | Add initial check thorium state | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/thorium/check.py | salt/thorium/check.py | '''
The check Thorium state is used to create gateways to commands, the checks
make it easy to make states that watch registers for changes and then just
succeed or fail based on the state of the register, this creates the pattern
of having a command execution get gated by a check state via a requisite.
'''
def gt(name, value):
'''
Only succeed if the value in the given register location is greater than
the given value
'''
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
if name not in __reg__:
ret['result'] = None
ret['comment'] = 'Value {0} not in register'.format(name)
return ret
if __reg__[name]['val'] > value:
ret['result'] = True
return ret
def lt(name, value):
'''
    Only succeed if the value in the given register location is less than
    the given value
'''
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
if name not in __reg__:
ret['result'] = None
ret['comment'] = 'Value {0} not in register'.format(name)
return ret
if __reg__[name]['val'] < value:
ret['result'] = True
return ret
def contains(name, value):
'''
    Only succeed if the value in the given register location is contained
    within the given value
'''
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
if name not in __reg__:
ret['result'] = None
ret['comment'] = 'Value {0} not in register'.format(name)
return ret
try:
if __reg__[name]['val'] in value:
ret['result'] = True
except TypeError:
pass
return ret
| apache-2.0 | Python |
|
6c08a3d795f9bd2f2d0850fb4c2b7f20474908a9 | Add test for scrunch block | ledatelescope/bifrost,ledatelescope/bifrost,ledatelescope/bifrost,ledatelescope/bifrost | test/test_scrunch.py | test/test_scrunch.py | # Copyright (c) 2016, The Bifrost Authors. All rights reserved.
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test all aspects of `bifrost.blocks.scrunch`"""
import unittest
import bifrost as bf
import bifrost.pipeline as bfp
import bifrost.blocks as blocks
class CallbackBlock(blocks.CopyBlock):
"""Testing-only block which calls user-defined
functions on sequence and on data"""
def __init__(self, iring, seq_callback, data_callback, *args, **kwargs):
super(CallbackBlock, self).__init__(iring, *args, **kwargs)
self.seq_callback = seq_callback
self.data_callback = data_callback
def on_sequence(self, iseq):
self.seq_callback(iseq)
return super(CallbackBlock, self).on_sequence(iseq)
def on_data(self, ispan, ospan):
self.data_callback(ispan, ospan)
return super(CallbackBlock, self).on_data(ispan, ospan)
class TestScrunchBlock(unittest.TestCase):
def setUp(self):
"""Create settings shared between tests"""
self.fil_file = "./data/2chan4bitNoDM.fil"
self.gulp_nframe = 101
self.shape_settings = [-1, 1, 2]
def check_sequence_before(self, seq):
"""Function passed to `CallbackBlock`, which
checks sequence before scrunch"""
tensor = seq.header['_tensor']
self.assertEqual(tensor['shape'], [-1,1,2])
self.assertEqual(tensor['dtype'], 'u8')
self.assertEqual(tensor['labels'], ['time', 'pol', 'freq'])
self.assertEqual(tensor['units'], ['s', None, 'MHz'])
def check_data_before(self, ispan, ospan):
"""Function passed to `CallbackBlock`, which
checks data before scrunch"""
self.assertLessEqual(ispan.nframe, self.gulp_nframe)
self.assertEqual(ospan.nframe, ispan.nframe)
self.assertEqual(ispan.data.shape, (ispan.nframe,1,2))
self.assertEqual(ospan.data.shape, (ospan.nframe,1,2))
def check_sequence_after(self, seq):
"""Function passed to `CallbackBlock`, which
checks sequence after scrunch"""
tensor = seq.header['_tensor']
self.assertEqual(tensor['shape'], self.shape_settings)
self.assertEqual(tensor['dtype'], 'u8')
self.assertEqual(tensor['labels'], ['time', 'pol', 'freq'])
self.assertEqual(tensor['units'], ['s', None, 'MHz'])
def check_data_after(self, ispan, ospan):
"""Function passed to `CallbackBlock`, which
checks data after scrunch"""
self.assertLessEqual(ispan.nframe, self.gulp_nframe)
self.assertEqual(ospan.nframe, ispan.nframe)
self.assertEqual(ispan.data.shape, (ispan.nframe,1,2))
self.assertEqual(ospan.data.shape, (ospan.nframe,1,2))
def test_null_scrunch(self):
"""Check that scrunching no spans leaves header intact"""
self.shape_settings = [-1, 1, 2]
with bfp.Pipeline() as pipeline:
data = blocks.sigproc.read_sigproc([self.fil_file], self.gulp_nframe)
call_data = CallbackBlock(
data, self.check_sequence_before, self.check_data_before)
scrunched = blocks.scrunch(data, 1)
call_data = CallbackBlock(
scrunched, self.check_sequence_after, self.check_data_after)
pipeline.run()
| bsd-3-clause | Python |
|
0b0647a0537c3c325f5cf57cae933e06f7997ea9 | add "_" prefix to plot names | probcomp/crosscat,fivejjs/crosscat,probcomp/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,probcomp/crosscat,fivejjs/crosscat,fivejjs/crosscat,fivejjs/crosscat,fivejjs/crosscat,probcomp/crosscat,fivejjs/crosscat,mit-probabilistic-computing-project/crosscat | crosscat/tests/timing_analysis.py | crosscat/tests/timing_analysis.py | import argparse
def _generate_parser():
default_num_rows = [100, 400, 1000, 4000]
default_num_cols = [8, 16, 32]
default_num_clusters = [1, 2]
default_num_views = [1, 2]
#
parser = argparse.ArgumentParser()
parser.add_argument('--dirname', default='timing_analysis', type=str)
parser.add_argument('--num_rows', nargs='+', default=default_num_rows, type=int)
parser.add_argument('--num_cols', nargs='+', default=default_num_cols, type=int)
parser.add_argument('--num_clusters', nargs='+', default=default_num_clusters, type=int)
parser.add_argument('--num_views', nargs='+', default=default_num_views, type=int)
parser.add_argument('--no_plots', action='store_true')
return parser
def _munge_args(args):
kwargs = args.__dict__.copy()
dirname = kwargs.pop('dirname')
generate_plots = not kwargs.pop('no_plots')
return kwargs, dirname, generate_plots
if __name__ == '__main__':
from crosscat.utils.general_utils import Timer, MapperContext, NoDaemonPool
from crosscat.utils.timing_test_utils import reader, read_all_configs, \
read_results, writer, runner, gen_configs
import crosscat.utils.timing_test_utils as ttu
import experiment_runner.experiment_utils as eu
# parse args
parser = _generate_parser()
args = parser.parse_args()
kwargs, dirname, generate_plots = _munge_args(args)
config_list = ttu.gen_configs(
kernel_list = ttu._kernel_list,
n_steps=[10],
**kwargs
)
with Timer('experiments') as timer:
with MapperContext(Pool=NoDaemonPool) as mapper:
# use non-daemonic mapper since run_geweke spawns daemonic processes
eu.do_experiments(config_list, runner, writer, dirname, mapper)
pass
pass
if generate_plots:
# read the data back in
all_configs = read_all_configs(dirname)
_all_results = read_results(all_configs, dirname)
is_same_shape = lambda result: result['start_dims'] == result['end_dims']
use_results = filter(is_same_shape, _all_results)
# add plot_prefix so plots show up at top of list of files/folders
ttu.plot_results(use_results, plot_prefix='_', dirname=dirname)
| import argparse
def _generate_parser():
default_num_rows = [100, 400, 1000, 4000]
default_num_cols = [8, 16, 32]
default_num_clusters = [1, 2]
default_num_views = [1, 2]
#
parser = argparse.ArgumentParser()
parser.add_argument('--dirname', default='timing_analysis', type=str)
parser.add_argument('--num_rows', nargs='+', default=default_num_rows, type=int)
parser.add_argument('--num_cols', nargs='+', default=default_num_cols, type=int)
parser.add_argument('--num_clusters', nargs='+', default=default_num_clusters, type=int)
parser.add_argument('--num_views', nargs='+', default=default_num_views, type=int)
parser.add_argument('--no_plots', action='store_true')
return parser
def _munge_args(args):
kwargs = args.__dict__.copy()
dirname = kwargs.pop('dirname')
generate_plots = not kwargs.pop('no_plots')
return kwargs, dirname, generate_plots
if __name__ == '__main__':
from crosscat.utils.general_utils import Timer, MapperContext, NoDaemonPool
from crosscat.utils.timing_test_utils import reader, read_all_configs, \
read_results, writer, runner, gen_configs
import crosscat.utils.timing_test_utils as ttu
import experiment_runner.experiment_utils as eu
# parse args
parser = _generate_parser()
args = parser.parse_args()
kwargs, dirname, generate_plots = _munge_args(args)
config_list = ttu.gen_configs(
kernel_list = ttu._kernel_list,
n_steps=[10],
**kwargs
)
with Timer('experiments') as timer:
with MapperContext(Pool=NoDaemonPool) as mapper:
# use non-daemonic mapper since run_geweke spawns daemonic processes
eu.do_experiments(config_list, runner, writer, dirname, mapper)
pass
pass
if generate_plots:
# read the data back in
all_configs = read_all_configs(dirname)
_all_results = read_results(all_configs, dirname)
is_same_shape = lambda result: result['start_dims'] == result['end_dims']
use_results = filter(is_same_shape, _all_results)
ttu.plot_results(use_results, dirname=dirname)
| apache-2.0 | Python |
42e0504933d6b9e55cdb6edb9931ba080baab136 | add 408, replace print in test cases with assert | ufjfeng/leetcode-jf-soln,ufjfeng/leetcode-jf-soln
Given a non-empty string s and an abbreviation abbr, return whether the string
matches with the given abbreviation.
A string such as "word" contains only the following valid abbreviations:
["word", "1ord", "w1rd", "wo1d", "wor1", "2rd", "w2d", "wo2", "1o1d", "1or1",
"w1r1", "1o2", "2r1", "3d", "w3", "4"]
Notice that only the above abbreviations are valid abbreviations of the string
"word". Any other string is not a valid abbreviation of "word".
Note:
Assume s contains only lowercase letters and abbr contains only lowercase
letters and digits.
Example 1:
Given s = "internationalization", abbr = "i12iz4n":
Return true.
Example 2:
Given s = "apple", abbr = "a2e":
Return false.
"""
class Solution(object):
def validWordAbbreviation(self, word, abbr):
"""
:type word: str
:type abbr: str
:rtype: bool
"""
nums = set([str(i) for i in range(10)])
digits = []
loc = -1
for c in abbr:
if c in nums:
if c == '0' and digits == []:
return False
digits.append(c)
else:
if digits:
loc += int("".join(digits))
digits = []
loc += 1
if loc >= len(word):
return False
if c != word[loc]:
return False
if digits:
loc += int("".join(digits))
return loc == len(word) - 1
assert Solution().validWordAbbreviation("a", "2") == False
assert Solution().validWordAbbreviation("word", "w2d") == True
assert Solution().validWordAbbreviation("internationalization", "i12iz4n") == True
assert Solution().validWordAbbreviation("apple", "a3e") == True
assert Solution().validWordAbbreviation("apple", "a2e") == False
print("all cases passed")
| mit | Python |
|
5e91e3b2c7e4cbc9f14067a832b87c336c0811e7 | update add test for c4 | enixdark/im-r-e-d-i-s,enixdark/im-r-e-d-i-s,enixdark/im-r-e-d-i-s,enixdark/im-r-e-d-i-s | redis_i_action/c4-process-log-and-replication/test.py | redis_i_action/c4-process-log-and-replication/test.py | import unittest
# NOTE: list_item, purchase_item and benchmark_update_token are assumed to be
# defined in the chapter's module before these tests run.
class TestCh04(unittest.TestCase):
def setUp(self):
import redis
self.conn = redis.Redis(db=15)
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
del self.conn
print
print
def test_list_item(self):
import pprint
conn = self.conn
print "We need to set up just enough state so that a user can list an item"
seller = 'userX'
item = 'itemX'
conn.sadd('inventory:' + seller, item)
i = conn.smembers('inventory:' + seller)
print "The user's inventory has:", i
self.assertTrue(i)
print
print "Listing the item..."
l = list_item(conn, item, seller, 10)
print "Listing the item succeeded?", l
self.assertTrue(l)
r = conn.zrange('market:', 0, -1, withscores=True)
print "The market contains:"
pprint.pprint(r)
self.assertTrue(r)
self.assertTrue(any(x[0] == 'itemX.userX' for x in r))
def test_purchase_item(self):
self.test_list_item()
conn = self.conn
print "We need to set up just enough state so a user can buy an item"
buyer = 'userY'
conn.hset('users:userY', 'funds', 125)
r = conn.hgetall('users:userY')
print "The user has some money:", r
self.assertTrue(r)
self.assertTrue(r.get('funds'))
print
print "Let's purchase an item"
p = purchase_item(conn, 'userY', 'itemX', 'userX', 10)
print "Purchasing an item succeeded?", p
self.assertTrue(p)
r = conn.hgetall('users:userY')
print "Their money is now:", r
self.assertTrue(r)
i = conn.smembers('inventory:' + buyer)
print "Their inventory is now:", i
self.assertTrue(i)
self.assertTrue('itemX' in i)
self.assertEquals(conn.zscore('market:', 'itemX.userX'), None)
def test_benchmark_update_token(self):
benchmark_update_token(self.conn, 5)
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
5fd556bc01fdd5d3c9690a56a70557fbd6eb73f8 | print the to calc statistical test | jadsonjs/DataScience,jadsonjs/DataScience | MachineLearning/print_ensemble_precisions.py | MachineLearning/print_ensemble_precisions.py | #
# This program is distributed without any warranty and it
# can be freely redistributed for research, classes or private studies,
# since the copyright notices are not removed.
#
# This file just reads the data to calculate the statistical test
#
# Jadson Santos - [email protected]
#
# to run this exemple install pyhton modules:
#
# pip3 install pandas
#
# Python Data Analysis Library
# https://pandas.pydata.org
import pandas as pd
# This module provides functions for calculating mathematical statistics of numeric (Real-valued) data.
# https://docs.python.org/3/library/statistics.html
import statistics
#
# PUT THE RESULT DIRECTORY AND ENSEMBLE ALGORITHM GENERATED BY WEKA HERE
#
# read the CSV file with your database and put it into a Pandas DataFrame
# https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/
#
directory = '/Users/jadson/tmp/results/' # where are the files generated by weka
#
# prints the data of all homogeneous ensemble
#
def printHomogeneo():
for model in ['knn', 'ad', 'nb', 'mlp']:
for ensemble in ['bagging', 'boosting', 'stacking_homogeneo']:
print(' -------------------- ')
print(model+' --> '+ensemble)
print(' -------------------- ')
for num_classifiers in [10, 15, 20]:
df = pd.read_csv( directory+ensemble+'_'+model+'_'+str(num_classifiers)+'.csv' )
#Getting the precision data
precision = df['IR_precision'].values
                # {0} is the positional argument index for format(); .4 limits the output to 4 significant digits.
for p in range(len(precision)):
print('{0:.4}'.format(precision[p]))
#
# prints the data of all heterogeneous ensemble
#
def printHeterogeneo():
for ensemble in ['stacking_heterogeneo']:
print(' -------------------- ')
print(ensemble)
print(' -------------------- ')
for model in ['MLP_AD', 'MLP_NB', 'MLP_NB_AD', 'NB_AD']:
for num_classifiers in [10, 15, 20]:
df = pd.read_csv( directory+ensemble+'_'+model+'_'+str(num_classifiers)+'.csv' )
#Getting the precision data
precision = df['IR_precision'].values
                # {0} is the positional argument index for format(); .4 limits the output to 4 significant digits.
for p in range(len(precision)):
print('{0:.4}'.format(precision[p]))
printHomogeneo()
printHeterogeneo()
| apache-2.0 | Python |
|
88d2ad776518d62a66fa3b8f7dd7520cff3debfc | Create bulk_parse.py | haruka-YNU/email_parser | scripts/bulk_parse.py | scripts/bulk_parse.py | mit | Python |
||
10d71b1208175eac4af0a20d7ee0a8176c7829ef | add new rename script to prepend to *.c files | sg-/scripts,sg-/scripts | rename/prepend.py | rename/prepend.py | import os
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'usage: <path> <prepend>'
sys.exit()
exts=['.c']
change_count = 0
for root, dirs, files in os.walk(sys.argv[1]):
for filename in files:
if any(filename.lower().endswith(ext) for ext in exts):
                if sys.argv[2] not in filename:
os.rename(os.path.join(root, filename), os.path.join(root, sys.argv[2] + filename))
print os.path.join(root, sys.argv[2] + filename)
change_count += 1
print 'files changed: ', change_count
| apache-2.0 | Python |
|
b20b8bc06b6141fad1fbab9befa184644821351f | add joblib02.py | devlights/try-python | trypython/extlib/joblib02.py | trypython/extlib/joblib02.py | # coding: utf-8
"""
joblibモジュールについてのサンプルです。
joblib.Parallel の利用にて joblib側のログを出力する方法について。
"""
import datetime
import os
import random
import time
import joblib as job
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
NOW = datetime.datetime.now
RND = random.Random()
CPU_COUNT = -1
# ログ出力 (簡易)
LOG_VERBOSE = 1
# ログ出力 (詳細)
LOG_VERBOSE_ALL_ITERATION_REPORT = 10
class Sample(SampleBase):
def exec(self):
start_dt = NOW()
# ----------------------------------------------------------
# joblibのParallel() は、CPU数とは別にいくつかのオプション引数
# を持つ。verboseもその一つで、値を指定することでjoblibの内部ログを
# 出力することが出来る。値は、intとなっており、以下の値域を持つ。
#
# [verboseの値域]
# 0以外: 進捗ログを出力する (簡易)
# 10以上: 進捗ログを出力する (各イテレーション毎に出力してくれる)
# ----------------------------------------------------------
results = job.Parallel(n_jobs=CPU_COUNT, verbose=LOG_VERBOSE)(
[
job.delayed(heavy_proc)(f'value-{i}', RND.randrange(1, 3), True)
for i in range(1, 5)
]
)
end_dt = NOW()
pr('job-results', results)
pr('total elapsed', (end_dt - start_dt).seconds)
hr('log-verbose-all-iteration-report')
start_dt = NOW()
results = job.Parallel(n_jobs=CPU_COUNT, verbose=LOG_VERBOSE_ALL_ITERATION_REPORT)(
[
job.delayed(heavy_proc)(f'value-{i}', RND.randrange(1, 3), True)
for i in range(1, 5)
]
)
end_dt = NOW()
pr('job-results', results)
pr('total elapsed', (end_dt - start_dt).seconds)
def heavy_proc(value: str, sleep_seconds: int, silent: bool) -> dict:
start_dt = NOW()
pid = os.getpid()
if not silent:
pr('start', f'pid: {pid} [{value}] sleep: {sleep_seconds}')
time.sleep(sleep_seconds)
if not silent:
pr('end', f'pid: {pid} [{value}]')
end_dt = NOW()
return {
'pid': pid,
'elapsed': (end_dt - start_dt).seconds
}
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
| mit | Python |
|
02ad029840b2e770bc802fd7f8504498cb0f756d | Add `issubset` and `issuperset` tests | thaim/ansible,thaim/ansible | lib/ansible/plugins/test/mathstuff.py | lib/ansible/plugins/test/mathstuff.py | # (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
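# Illustrative playbook usage of the tests defined above (the expression is
# an assumption for demonstration, not taken from Ansible's docs):
#
#   when: "[1, 2] is issubset([1, 2, 3])"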
class TestModule(object):
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'issuperset': issuperset,
}
| mit | Python |
|
adede4415e36830485429f49b8476f655f3d4929 | Add environment.py | vrutkovs/mysql,vrutkovs/mysql | tests/environment.py | tests/environment.py | # -*- coding: UTF-8 -*-
import shutil
from steps.common_steps.common_environment import docker_setup
def before_all(context):
docker_setup(context)
context.build_or_pull_image(skip_pull=True, skip_build=True)
def after_scenario(context, scenario):
if 'KEEP_CONTAINER_AFTER_TEST' in context.config.userdata:
return
context.remove_container()
def after_all(context):
if hasattr(context, 'temp_dir'):
shutil.rmtree(context.temp_dir) # FIXME catch exception
| apache-2.0 | Python |
|
bf86584829f56f91b363f251d77f3157f952db0f | Add tests for masking of data based on being within a range of values | ceholden/yatsm,c11/yatsm,jmorton/yatsm,valpasq/yatsm,ceholden/yatsm,valpasq/yatsm,jmorton/yatsm,c11/yatsm,jmorton/yatsm | tests/test_cyprep.py | tests/test_cyprep.py | import unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
unittest.main()
| mit | Python |
|
9c249d3f9d202632b7fd2241d39dfc2e180fd358 | Add ledger tests | Storj/accounts | tests/test_ledger.py | tests/test_ledger.py | # -*- coding: utf-8 -*-
import pytest
from accounts.ledger import Ledger
# Database migrations run for each test in this module.
# See `conftest.pytest_runtest*`.
DB_MIGRATIONS = ['0003-create-balances', '0004-create-movements']
# Fixtures ###
@pytest.fixture
def ledger(db):
return Ledger(db.connection)
# Tests ###
TOKEN = 'test'
AMOUNT = 100
def _get_balance(db, token):
db.execute("SELECT amount FROM balances WHERE token = %s", [token])
res = db.fetchone()
return res and res[0]
def test_balance(db, ledger):
assert ledger.balance(TOKEN) == 0
db.execute("INSERT INTO balances (token, amount) VALUES (%s, %s)", [TOKEN, AMOUNT])
db.connection.commit()
assert ledger.balance(TOKEN) == AMOUNT
def test_deposit(db, ledger):
# Account doesn't exist yet
assert _get_balance(db, TOKEN) is None
assert ledger.deposit(TOKEN, AMOUNT) is True
assert _get_balance(db, TOKEN) == AMOUNT
db.execute("SELECT amount FROM movements WHERE token = %s", [TOKEN])
assert db.fetchone()[0] == AMOUNT
def test_withdraw(db, ledger):
assert _get_balance(db, TOKEN) is None
# Insufficient funds
assert ledger.withdraw(TOKEN, AMOUNT) is False
assert _get_balance(db, TOKEN) is None
db.execute("INSERT INTO balances (token, amount) VALUES (%s, %s)", [TOKEN, AMOUNT+10])
db.connection.commit()
assert ledger.withdraw(TOKEN, AMOUNT) is True
assert _get_balance(db, TOKEN) == 10
db.execute("SELECT amount FROM movements WHERE token = %s", [TOKEN])
assert db.fetchone()[0] == -AMOUNT
| mit | Python |
|
65f6b1101aba2086654f2ff0ff3e942f69d584b2 | Add an application that returns spaCy similarity query | paopow/word_similarity_api,paopow/word_similarity_api | app/app.py | app/app.py | from flask import Flask, jsonify
import spacy.en
from numpy import dot
from numpy.linalg import norm
app = Flask(__name__)
nlp = spacy.en.English()
def cossim(a, b):
return dot(a, b) / (norm(a) * norm(b))
@app.route('/')
def index():
return "Hello, World!"
@app.route('/spaCy/api/similarity/<word1>/<word2>', methods=['GET'])
def get_spacy_sim(word1, word2):
tok1 = nlp(word1)[0]
tok2 = nlp(word2)[0]
sim = cossim(tok1.repvec, tok2.repvec)
print type(sim)
return jsonify({'word1': word1, 'word2': word2, 'similarity': float(sim)})
if __name__ == '__main__':
app.run(debug=True)
| mit | Python |
|
ddd4473f8edc4e7cfc503fc6cdbb570f33f224a4 | Add Preprocessor module Edges to generate possible edges between two entities given the relation type | Rostlab/nalaf | nala/preprocessing/edges.py | nala/preprocessing/edges.py | import abc
from nala.structures.data import Edge
class EdgeGenerator:
"""
Abstract class for generating edges between two entities. Each edge represents
a possible relationship between the two entities
Subclasses that inherit this class should:
* Be named [Name]EdgeGenerator
* Implement the abstract method generate
* Append new items to the list field "edges" of each Part in the dataset
"""
@abc.abstractmethod
def generate(self, dataset):
"""
:type dataset: nala.structures.data.Dataset
"""
return
class SimpleEdgeGenerator(EdgeGenerator):
"""
Simple implementation of generating edges between the two entities
if they are contained in the same sentence.
Implements the abstract class EdgeGenerator.
:type entity1_class: str
:type entity2_class: str
:type relation_type: str
"""
def __init__(self, entity1_class, entity2_class, relation_type):
self.entity1_class = entity1_class
self.entity2_class = entity2_class
self.relation_type = relation_type
def generate(self, dataset):
from itertools import product
for part in dataset.parts():
for ann_1, ann_2 in product(
(ann for ann in part.annotations if ann.class_id == self.entity1_class),
(ann for ann in part.annotations if ann.class_id == self.entity2_class)):
index_1 = part.get_sentence_index_for_annotation(ann_1)
index_2 = part.get_sentence_index_for_annotation(ann_2)
                if index_1 is None or index_2 is None:
                    # annotation falls outside every sentence; skip this pair
                    # (replaces leftover interactive print/input() debugging)
                    continue
                if index_1 == index_2:
part.edges.append(
Edge(ann_1, ann_2, self.relation_type,
part.get_sentence_string_array()[index_1]))
| apache-2.0 | Python |
|
0377cf9cc3c2460c2936ec9153edbdb196cff5bf | Add zdt agent | wairton/zephyrus-mas | zephyrus/examples/zdt/agent.py | zephyrus/examples/zdt/agent.py | import sys
from itertools import islice
from math import sqrt
from zephyrus.agent import Agent
from zephyrus.message import Message
class ZDTAgent(Agent):
def mainloop(self):
msg = self.socket_receive.recv()
action = self.perceive(msg.content)
self.socket_send(str(action))
def act(self, perceived):
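        # ZDT1-style evaluation (assuming the standard benchmark definition):
        # f1 = x1 and g = 1 + 9 * sum(x_2..x_n) / (n - 1); the value computed
        # below is h = 1 - sqrt(f1 / g), i.e. the f2/g ratio of ZDT1.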
f1 = perceived[0]
g = 1 + 9 * sum(islice(perceived, 1, None)) / (len(perceived) - 1)
zdt = 1 - sqrt(f1 / g)
return Message("agent", "RESULT", zdt)
if __name__ == '__main__':
ZDTAgent(1, *sys.argv[1:]).start()
| mit | Python |
|
bc812daf7c99b34a3952d933666f240597eb835d | add a spider for Xin Shi Dai board, Ya Zhou catagory. | Nymphet/t66y-spider | t66ySpider/t66ySpider/spiders/t66yXinshidaiYazhouSpider.py | t66ySpider/t66ySpider/spiders/t66yXinshidaiYazhouSpider.py | # -*- coding: utf-8 -*-
import scrapy
from t66ySpider.items import T66YspiderXinshidaiItem
class t66yXinshidaiYazhouSpider(scrapy.Spider):
name = 'XinShiDaiYaZhou'
allowed_domains = ['t66y.com']
start_urls = ["http://t66y.com/thread0806.php?fid=8&type=1"]
unicode_next_page = u'\u4e0b\u4e00\u9801'
def parse(self, response):
thread_hrefs = response.selector.xpath('//h3/a/@href')
for thread_href in thread_hrefs:
thread_url = response.urljoin(thread_href.extract())
yield scrapy.Request(thread_url, callback=self.parse_thread)
next_page_href = response.selector.xpath(
"//a[text()='%s']/@href" % self.unicode_next_page)[0]
next_page_url = response.urljoin(next_page_href.extract())
yield scrapy.Request(next_page_url, callback=self.parse)
def parse_thread(self, response):
item = T66YspiderXinshidaiItem()
item['t_title'] = response.selector.xpath(
'string(//title)')[0].extract()
item['t_image_list'] = response.selector.xpath(
'//input/@src').extract()
yield item
| apache-2.0 | Python |
|
25d8cbfd4b59166ba748d5cd42fbcd7ffe925f0e | Allow using exogenous data in hierachical models #124 | antoinecarme/pyaf,antoinecarme/pyaf,antoinecarme/pyaf | tests/hierarchical/test_hierarchy_AU_AllMethods_Exogenous_all_nodes.py | tests/hierarchical/test_hierarchy_AU_AllMethods_Exogenous_all_nodes.py | import pandas as pd
import numpy as np
import pyaf.HierarchicalForecastEngine as hautof
import pyaf.Bench.TS_datasets as tsds
import datetime
#get_ipython().magic('matplotlib inline')
def create_exog_data(b1):
# fake exog data based on date variable
lDate1 = b1.mPastData['Date']
    lDate2 = b1.mFutureData['Date'] # not strictly needed: exogenous data are treated as missing when not available.
lDate = lDate1.append(lDate2)
lExogenousDataFrame = pd.DataFrame()
lExogenousDataFrame['Date'] = lDate
lExogenousDataFrame['Date_second'] = lDate.dt.second
lExogenousDataFrame['Date_minute'] = lDate.dt.minute
lExogenousDataFrame['Date_hour'] = lDate.dt.hour
lExogenousDataFrame['Date_dayofweek'] = lDate.dt.dayofweek
lExogenousDataFrame['Date_day'] = lDate.dt.day
lExogenousDataFrame['Date_dayofyear'] = lDate.dt.dayofyear
lExogenousDataFrame['Date_month'] = lDate.dt.month
lExogenousDataFrame['Date_week'] = lDate.dt.week
# a column in the exog data can be of any type
lExogenousDataFrame['Date_day_name'] = lDate.dt.day_name()
lExogenousDataFrame['Date_month_name'] = lDate.dt.month_name()
lExogenousVariables = [col for col in lExogenousDataFrame.columns if col.startswith('Date_')]
lExogenousData = (lExogenousDataFrame , lExogenousVariables)
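    # pyaf consumes exogenous data as a (dataframe, variable_names) pair; the
    # frame built here spans both past and future dates, although (per the
    # comment above) future rows are optional and treated as missing.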
return lExogenousData
b1 = tsds.load_AU_hierarchical_dataset();
df = b1.mPastData;
lEngine = hautof.cHierarchicalForecastEngine()
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lEngine.mOptions.mNbCores = 16
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lExogenousData = create_exog_data(b1)
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H, b1.mHierarchy, iExogenousData = lExogenousData);
lEngine.getModelInfo();
#lEngine.standardPlots("outputs/AU");
dfapp_in = df.copy();
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/Hierarchical_AU_apply_out.csv")
| bsd-3-clause | Python |
|
b135e8e473837909c6847f8a52711527409b5224 | Add windows build tools | yuvipanda/mwparserfromhell,wikimedia/operations-debs-python-mwparserfromhell,gencer/mwparserfromhell,earwig/mwparserfromhell,gencer/mwparserfromhell,jayvdb/mwparserfromhell,SunghanKim/mwparserfromhell,earwig/mwparserfromhell,yuvipanda/mwparserfromhell,earwig/mwparserfromhell,jayvdb/mwparserfromhell,kumasento/mwparserfromhell,gencer/mwparserfromhell,SunghanKim/mwparserfromhell,kumasento/mwparserfromhell,yuvipanda/mwparserfromhell,wikimedia/operations-debs-python-mwparserfromhell,SunghanKim/mwparserfromhell,jayvdb/mwparserfromhell,wikimedia/operations-debs-python-mwparserfromhell,kumasento/mwparserfromhell,SunghanKim/mwparserfromhell,gencer/mwparserfromhell,wikimedia/operations-debs-python-mwparserfromhell,jayvdb/mwparserfromhell,yuvipanda/mwparserfromhell | tools/build_mwpfh.py | tools/build_mwpfh.py | from __future__ import print_function
import subprocess
import sys
import os
path = os.path.split(__file__)[0]
if path:
os.chdir(path)
environments = ['26', '27', '32', '33', '34']
target = "pypi" if "--push" in sys.argv else "test"
returnvalues = {}
def run(pyver, cmds, target=None):
cmd = [r"C:\Python%s\Python.exe" % pyver, "setup.py"] + cmds
if target:
cmd += ["-r", target]
print(" ".join(cmd), end=" ")
retval = subprocess.call(cmd, stdout=open("%s%s.log" % (cmds[0], pyver), 'w'), stderr=subprocess.STDOUT, cwd="..")
if not retval:
print("[OK]")
else:
print("[FAILED (%i)]" % retval)
return retval
run("27", ["register"], target)
if 'failed' in open('register27.log').read():
raise Exception
for pyver in environments:
print()
try:
os.unlink('mwparserfromhell/parser/_tokenizer.pyd')
except WindowsError:
pass
if run(pyver, ["test"]) == 0:
run(pyver, ["bdist_wheel", "upload"], target) | mit | Python |
|
012acdc7a280b307bbb110449dcfee5d05a77e38 | Create new package (#6379) | iulian787/spack,matthiasdiener/spack,LLNL/spack,matthiasdiener/spack,tmerrick1/spack,EmreAtes/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,mfherbst/spack,krafczyk/spack,krafczyk/spack,krafczyk/spack,tmerrick1/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,mfherbst/spack,krafczyk/spack,LLNL/spack,LLNL/spack,EmreAtes/spack,mfherbst/spack,LLNL/spack,tmerrick1/spack,iulian787/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,tmerrick1/spack | var/spack/repos/builtin/packages/r-chemometrics/package.py | var/spack/repos/builtin/packages/r-chemometrics/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RChemometrics(RPackage):
"""R companion to the book "Introduction to Multivariate Statistical Analysis
in Chemometrics" written by K. Varmuza and P. Filzmoser (2009)."""
homepage = "https://cran.r-project.org/web/packages/chemometrics/index.html"
url = "https://cran.r-project.org/src/contrib/chemometrics_1.4.2.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/chemometrics"
version('1.4.2', '8137b0ca4004add9cc2ea81d2c54427f')
version('1.4.1', '1e5a89442bb4a61db0da884eedd74fc2')
version('1.3.9', '2b619791896db1513ca3d714acb68af3')
version('1.3.8', '7fad828bd094b5485fbf20bdf7d3d0d1')
version('1.3.7', 'a9e2f32efb1545421dd96185fd849184')
depends_on('[email protected]:3.4.9')
depends_on('r-rpart', type=('build', 'run'))
depends_on('r-mclust', type=('build', 'run'))
depends_on('r-lars', type=('build', 'run'))
depends_on('r-robustbase', type=('build', 'run'))
depends_on('r-e1071', type=('build', 'run'))
depends_on('r-pls', type=('build', 'run'))
depends_on('r-som', type=('build', 'run'))
depends_on('r-pcapp', type=('build', 'run'))
| lgpl-2.1 | Python |
|
aa78a2670766b0a5e093a1876cb402ed513573bd | Add script to explore parameters units | antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france | openfisca_france/scripts/parameters/explore_parameters_unit.py | openfisca_france/scripts/parameters/explore_parameters_unit.py | # -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
if parameters_by_unit is None:
parameters_by_unit = dict(
scale = list(),
none = list(),
currency = list(),
rate = list(),
year = list(),
)
for name, sub_parameter in parameter.children.items():
if isinstance(sub_parameter, ParameterNode):
get_parameters_by_unit(sub_parameter, parameters_by_unit)
else:
if isinstance(sub_parameter, Scale):
parameters_by_unit['scale'].append(sub_parameter)
elif sub_parameter.unit is None:
parameters_by_unit['none'].append(sub_parameter)
elif sub_parameter.unit == "/1":
parameters_by_unit['rate'].append(sub_parameter)
elif sub_parameter.unit == "currency":
parameters_by_unit['currency'].append(sub_parameter)
elif sub_parameter.unit == "year":
parameters_by_unit['year'].append(sub_parameter)
else:
                raise ValueError("Parameter {} has a strange unit {}".format(
sub_parameter.name, sub_parameter.unit))
return parameters_by_unit
if __name__ == '__main__':
parameters_by_unit = get_parameters_by_unit(parameters)
print('Distribution of parameters types:')
for type_, sub_parameters in parameters_by_unit.items():
print(type_, len(parameters_by_unit[type_]))
print('\n')
print('List of parameters with no units')
for param in parameters_by_unit['none']:
print (param.name)
| agpl-3.0 | Python |
|
e095b6a76ac36255983d8c69d4899d64178e0ef3 | Add segment_euclidean_length tests module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/morphology/test_segment_euclidean_length.py | tests/plantcv/morphology/test_segment_euclidean_length.py | import pytest
import cv2
import numpy as np
from plantcv.plantcv import outputs
from plantcv.plantcv.morphology import segment_euclidean_length
def test_segment_euclidean_length(morphology_test_data):
# Clear previous outputs
outputs.clear()
skeleton = cv2.imread(morphology_test_data.skel_img, -1)
_ = segment_euclidean_length(segmented_img=skeleton,
objects=morphology_test_data.load_segments(morphology_test_data.segments_file, "leaves"))
assert len(outputs.observations['default']['segment_eu_length']['value']) == 4
def test_segment_euclidean_length_bad_input():
skel = np.zeros((10, 10), dtype=np.uint8)
edges = [np.array([[[5, 3]], [[4, 4]], [[3, 5]], [[4, 6]], [[5, 7]], [[6, 6]], [[7, 5]], [[6, 4]]], dtype=np.int32)]
with pytest.raises(RuntimeError):
_ = segment_euclidean_length(segmented_img=skel, objects=edges)
| mit | Python |
|
26ab37868e67b5b815cf8df67cc04876ff44c148 | Add file for Nongrammar entities tests | PatrikValkovic/grammpy | tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py | tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
pass
if __name__ == '__main__':
main() | mit | Python |
|
e80ec7adc6fe71310e1c2adba720be9640a49d0f | test code for midiGenerator | anassinator/beethoven | src/test4.py | src/test4.py | import midiGenerator
generator = midiGenerator.MidiGenerator(200,1)
channel = midiGenerator.Channel()
note = midiGenerator.Note(43,100,200)
channel.addNote(note)
channel.addNote(midiGenerator.Note(45,200,300))
channel.addNote(midiGenerator.Note(57,300,400))
channel.addNote(midiGenerator.Note(38,400,500))
channel.addNote(midiGenerator.Note(33,500,600))
channel.addNote(midiGenerator.Note(45,600,700))
channel.endTrack()
generator.addChannel(channel)
generator.save("t.midi")
| mit | Python |
|
63065390fca52045db0665bbb8f2b4df7a7b57d4 | Implement pivoted Cholesky decomposition and code to do Woodbury solves with them. | jrg365/gpytorch,jrg365/gpytorch,jrg365/gpytorch | gpytorch/utils/pivoted_cholesky.py | gpytorch/utils/pivoted_cholesky.py | import torch
def pivoted_cholesky(matrix, max_iter, error_tol=1e-5):
matrix_size = matrix.size(-1)
matrix_diag = matrix.diag()
# TODO: This check won't be necessary in PyTorch 0.4
if isinstance(matrix_diag, torch.autograd.Variable):
matrix_diag = matrix_diag.data
error = torch.norm(matrix_diag, 1)
permutation = matrix_diag.new(matrix_size).long()
torch.arange(0, matrix_size, out=permutation)
m = 0
# TODO: pivoted_cholesky should take tensor_cls and use that here instead
L = matrix_diag.new(max_iter, matrix_size).zero_()
while m < max_iter and error > error_tol:
max_diag_value, max_diag_index = torch.max(matrix_diag[permutation][m:], 0)
max_diag_index = max_diag_index + m
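        # Pivot step: swap the index of the largest remaining diagonal entry
        # into position m, so rows are eliminated in decreasing-pivot order.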
pi_m = permutation[m]
permutation[m] = permutation[max_diag_index][0]
permutation[max_diag_index] = pi_m
pi_m = permutation[m]
L_m = L[m] # Will be all zeros -- should we use torch.zeros?
L_m[pi_m] = torch.sqrt(max_diag_value)[0]
row = matrix[pi_m]
if isinstance(row, torch.autograd.Variable):
row = row.data
pi_i = permutation[m + 1:]
L_m[pi_i] = row[pi_i]
if m > 0:
L_prev = L[:m].index_select(1, pi_i)
L_m[pi_i] -= torch.sum(L[:m, pi_m].unsqueeze(1) * L_prev, dim=0)
L_m[pi_i] /= L_m[pi_m]
matrix_diag[pi_i] = matrix_diag[pi_i] - (L_m[pi_i] ** 2)
L[m] = L_m
error = torch.sum(matrix_diag[permutation[m + 1:]])
m = m + 1
return L[:m, :]
def woodbury_factor(low_rank_mat, shift):
"""
Given a low rank (k x n) matrix V and a shift, returns the
matrix R so that
R = (I_k + 1/shift VV')^{-1}V
to be used in solves with (V'V + shift I) via the Woodbury formula
"""
n = low_rank_mat.size(-1)
k = low_rank_mat.size(-2)
shifted_mat = (1 / shift) * low_rank_mat.matmul(low_rank_mat.t())
shifted_mat = shifted_mat + shifted_mat.new(k).fill_(1).diag()
R = torch.potrs(low_rank_mat, shifted_mat.potrf())
return R
def woodbury_solve(vector, low_rank_mat, woodbury_factor, shift):
"""
Solves the system of equations:
(sigma*I + VV')x = b
Using the Woodbury formula.
Input:
- vector (size n) - right hand side vector b to solve with.
- woodbury_factor (k x n) - The result of calling woodbury_factor on V
and the shift, \sigma
- shift (scalar) - shift value sigma
"""
right = (1 / shift) * low_rank_mat.t().matmul(woodbury_factor.matmul(vector))
return (1 / shift) * (vector - right)
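if __name__ == '__main__':
    # Minimal usage sketch (an addition, not part of the original module):
    # verify that woodbury_solve really solves (V'V + shift*I) x = b for a
    # random low-rank V, using only tensor ops the module already relies on.
    k, n, shift = 3, 8, 0.5
    V = torch.randn(k, n)
    b = torch.randn(n)
    R = woodbury_factor(V, shift)
    x = woodbury_solve(b, V, R, shift)
    A = V.t().matmul(V) + shift * torch.eye(n)
    print((A.matmul(x) - b).abs().max())  # should be close to zero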
| mit | Python |
|
e195aef0fa870bf0f471be99a0144a59fdcc5b97 | Create norm_distri_of_proj_valu.py | vi3k6i5/AV_H3_Benchmark_RF_1 | norm_distri_of_proj_valu.py | norm_distri_of_proj_valu.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import math
x_train = pd.read_csv("Train.csv")
x_test = pd.read_csv("Test.csv")
def log_method(x):
if x == 0:
return 0
return math.log(x,2)
test = x_train["Project_Valuation"].order()
test = test.apply(lambda x: log_method(x))
mean = sum(test) / len(test)
variance = sum((mean - value) ** 2 for value in test) / len(test)
sigma = math.sqrt(variance)
plt.plot(test, mlab.normpdf(test, mean, sigma))
plt.show()
| mit | Python |
|
3724e828ea7c0aa2a910db16c1392390f7c9f7a8 | add a simple schema building tool | arskom/spyne,arskom/spyne,arskom/spyne | spyne/test/interface/build_schema.py | spyne/test/interface/build_schema.py | #!/usr/bin/env python
# This can be used to debug invalid Xml Schema documents.
import sys
from lxml import etree
if len(sys.argv) != 2:
print "Usage: %s <path_to_xsd_file>" % sys.argv[0]
sys.exit(1)
f = open(sys.argv[1])
etree.XMLSchema(etree.parse(f))
| lgpl-2.1 | Python |
|
0a5167807d615f59808195aed6114cfa9b293eda | Update migrations to work with Django 1.9. | webu/pybbm,hovel/pybbm,webu/pybbm,hovel/pybbm,artfinder/pybbm,webu/pybbm,hovel/pybbm,artfinder/pybbm,artfinder/pybbm | pybb/migrations/0005_auto_20151108_1528.py | pybb/migrations/0005_auto_20151108_1528.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9b1 on 2015-11-08 23:28
from __future__ import unicode_literals
from django.db import migrations, models
import pybb.util
class Migration(migrations.Migration):
dependencies = [
('pybb', '0004_slugs_required'),
]
operations = [
migrations.AlterField(
model_name='post',
name='user_ip',
field=models.GenericIPAddressField(blank=True, default='0.0.0.0', null=True, verbose_name='User IP'),
),
migrations.AlterField(
model_name='profile',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to=pybb.util.FilePathGenerator(to='pybb/avatar'), verbose_name='Avatar'),
),
migrations.AlterField(
model_name='profile',
name='language',
field=models.CharField(blank=True, choices=[('af', 'Afrikaans'), ('ar', 'Arabic'), ('ast', 'Asturian'), ('az', 'Azerbaijani'), ('bg', 'Bulgarian'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('br', 'Breton'), ('bs', 'Bosnian'), ('ca', 'Catalan'), ('cs', 'Czech'), ('cy', 'Welsh'), ('da', 'Danish'), ('de', 'German'), ('el', 'Greek'), ('en', 'English'), ('en-au', 'Australian English'), ('en-gb', 'British English'), ('eo', 'Esperanto'), ('es', 'Spanish'), ('es-ar', 'Argentinian Spanish'), ('es-mx', 'Mexican Spanish'), ('es-ni', 'Nicaraguan Spanish'), ('es-ve', 'Venezuelan Spanish'), ('et', 'Estonian'), ('eu', 'Basque'), ('fa', 'Persian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy', 'Frisian'), ('ga', 'Irish'), ('gl', 'Galician'), ('he', 'Hebrew'), ('hi', 'Hindi'), ('hr', 'Croatian'), ('hu', 'Hungarian'), ('ia', 'Interlingua'), ('id', 'Indonesian'), ('io', 'Ido'), ('is', 'Icelandic'), ('it', 'Italian'), ('ja', 'Japanese'), ('ka', 'Georgian'), ('kk', 'Kazakh'), ('km', 'Khmer'), ('kn', 'Kannada'), ('ko', 'Korean'), ('lb', 'Luxembourgish'), ('lt', 'Lithuanian'), ('lv', 'Latvian'), ('mk', 'Macedonian'), ('ml', 'Malayalam'), ('mn', 'Mongolian'), ('mr', 'Marathi'), ('my', 'Burmese'), ('nb', 'Norwegian Bokmal'), ('ne', 'Nepali'), ('nl', 'Dutch'), ('nn', 'Norwegian Nynorsk'), ('os', 'Ossetic'), ('pa', 'Punjabi'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('pt-br', 'Brazilian Portuguese'), ('ro', 'Romanian'), ('ru', 'Russian'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('sq', 'Albanian'), ('sr', 'Serbian'), ('sr-latn', 'Serbian Latin'), ('sv', 'Swedish'), ('sw', 'Swahili'), ('ta', 'Tamil'), ('te', 'Telugu'), ('th', 'Thai'), ('tr', 'Turkish'), ('tt', 'Tatar'), ('udm', 'Udmurt'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('vi', 'Vietnamese'), ('zh-hans', 'Simplified Chinese'), ('zh-hant', 'Traditional Chinese')], default='en', max_length=10, verbose_name='Language'),
),
migrations.AlterField(
model_name='profile',
name='time_zone',
field=models.FloatField(choices=[(-12.0, '-12'), (-11.0, '-11'), (-10.0, '-10'), (-9.5, '-09.5'), (-9.0, '-09'), (-8.5, '-08.5'), (-8.0, '-08 PST'), (-7.0, '-07 MST'), (-6.0, '-06 CST'), (-5.0, '-05 EST'), (-4.0, '-04 AST'), (-3.5, '-03.5'), (-3.0, '-03 ADT'), (-2.0, '-02'), (-1.0, '-01'), (0.0, '00 GMT'), (1.0, '+01 CET'), (2.0, '+02'), (3.0, '+03'), (3.5, '+03.5'), (4.0, '+04'), (4.5, '+04.5'), (5.0, '+05'), (5.5, '+05.5'), (6.0, '+06'), (6.5, '+06.5'), (7.0, '+07'), (8.0, '+08'), (9.0, '+09'), (9.5, '+09.5'), (10.0, '+10'), (10.5, '+10.5'), (11.0, '+11'), (11.5, '+11.5'), (12.0, '+12'), (13.0, '+13'), (14.0, '+14')], default=3.0, verbose_name='Time zone'),
),
]
| bsd-2-clause | Python |
|
56a8250baa197285a5727dfbca12adaab81238ab | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/tkinter/python3/menu_checkbutton.py | python/tkinter/python3/menu_checkbutton.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See also: http://effbot.org/tkinterbook/checkbutton.htm
import tkinter as tk
root = tk.Tk()
test_var = tk.IntVar()
#test_var.set(1) # Initialize
def callback():
print("var = ", test_var.get())
# CREATE A TOPLEVEL MENU ######################################################
menubar = tk.Menu(root)
# CREATE A PULLDOWN MENU ######################################################
#
# tearoff:
# "tearoff=1" permet à l'utilisateur de détacher le sous menu dans une
#    "tearoff=1" lets the user detach the submenu into a separate window.
file_menu.add_checkbutton(label="Checkbutton test", variable=test_var, command=callback)
menubar.add_cascade(label="Test", menu=file_menu)
# DISPLAY THE MENU ############################################################
#
# The config method is used to attach the menu to the root window. The
# contents of that menu is used to create a menubar at the top of the root
# window. There is no need to pack the menu, since it is automatically
# displayed by Tkinter.
root.config(menu=menubar)
root.mainloop()
| mit | Python |
|
fa3a02e6660ce556defc2f2c6008c6eb24eb71c1 | Add a simple sampler for playing wav files triggered by note on messages | sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py | import time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
bufferSize = 1024
def __init__(self, fileName, **argd):
super(WavVoice, self).__init__(**argd)
self.on = False
self.wavFile = wave.open(fileName)
self.sampleRate = self.wavFile.getframerate()
self.period = float(self.bufferSize)/self.sampleRate
self.frame = 0
self.lastSendTime = time.time()
self.scheduleAbs("Send", self.lastSendTime + self.period)
def main(self):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
self.on = True
self.wavFile.rewind()
self.frame = 0
if address == "Off":
self.on = False
if self.dataReady("event"):
self.recv("event")
if self.on:
if self.frame < self.wavFile.getnframes():
sample = self.wavFile.readframes(self.bufferSize)
sample = numpy.frombuffer(sample, dtype="int16")
self.frame += len(sample)
if len(sample) < self.bufferSize:
# Pad with zeroes
padSize = self.bufferSize - len(sample)
sample = numpy.append(sample, numpy.zeros(padSize))
# Convert to float
sample = sample.astype("float64")
# Scale to -1 - 1
sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
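                    # e.g. a 16-bit wav has sampwidth 2, so samples are
                    # divided by 2**15 = 32768, landing in [-1.0, 1.0)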
else:
sample = numpy.zeros(self.bufferSize)
else:
sample = numpy.zeros(self.bufferSize)
self.send(sample, "outbox")
self.lastSendTime += self.period
self.scheduleAbs("Send", self.lastSendTime + self.period)
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
| apache-2.0 | Python |
|
bff1e954213fb7592505c94294eb3800a8b199c3 | Update patternMatch.py | christieewen/Algorithms,christieewen/Algorithms | TechInterviews/Python/patternMatch.py | TechInterviews/Python/patternMatch.py | import sys
import re
# Strip only the beginning and ending slashes
def stripSlashes(path):
if path.startswith('/'):
path = path[1:]
if path.endswith('/'):
path = path[:-1]
return path
def findBestWildCardMatch(patterns):
#The best match is wildcards that are rightmost
#Get the positions of the * and add them to get the largest number to figure out which is rightmost
pass
def getRePattern(pattern):
return pattern.replace(',', '/').replace('*', '[a-zA-Z0-9_]*')
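# For example, getRePattern('a,*,c') yields 'a/[a-zA-Z0-9_]*/c': commas line
# up with the path's slashes, and '*' matches word characters within a single
# segment (it cannot cross a '/').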
def findBestMatch(patterns, paths):
result = []
temp = []
for path in paths:
temp.clear()
for pattern in patterns:
rePattern = getRePattern(pattern)
if re.search(rePattern, stripSlashes(path)):
temp.append(pattern)
        if len(temp) > 1:
            result.append(findBestWildCardMatch(temp))
        elif len(temp) == 1:
            result.append(temp[0])
        else:
            result.append("NO MATCH FOUND")
return result
#['foot', 'fell', 'fastest']
# Example to call this program: python34 patternMatch.py <input_file> output_file
def main(args):
input_file = open(args[1], 'r')
output_file = open(args[2], 'w')
pattern_list = []
path_list = []
# Expects correct format in file: int N followed by pattern lines then int M followed by path lines.
N = int(input_file.readline())
for j in range(N):
pattern_list.append(input_file.readline())
M = int(input_file.readline())
for i in range(M):
path_list.append(input_file.readline())
print(findBestMatch(pattern_list, path_list))
input_file.close()
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| import sys
import re
def stripSlashes(path):
if path.startswith('/'):
path = path[1:]
if path.endswith('/'):
path = path[:-1]
return path
def findBestWildCardMatch(patterns):
pass
def getRePattern(pattern):
return pattern.replace(',', '/').replace('*', '[a-zA-Z0-9_]*')
def findBestMatch(patterns, paths):
result = []
temp = []
for path in paths:
temp.clear()
for pattern in patterns:
rePattern = getRePattern(pattern)
if re.search(rePattern, stripSlashes(path)):
temp.append(pattern)
if len(temp) > 1:
result.append(findBestWildCardMatch(temp))
elif len(temp) == 0:
result.append("NO MATCH FOUND")
return result
#['foot', 'fell', 'fastest']
# Example to call this program: python34 patternMatch.py <input_file> output_file
def main(args):
input_file = open(args[1], 'r')
output_file = open(args[2], 'w')
pattern_list = []
path_list = []
# Expects correct format in file: int N followed by pattern lines then int M followed by path lines.
N = int(input_file.readline())
for j in range(N):
pattern_list.append(input_file.readline())
M = int(input_file.readline())
for i in range(M):
path_list.append(input_file.readline())
print(findBestMatch(pattern_list, path_list))
input_file.close()
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| mit | Python |
bbe0cf1666b4706973bfba73ed77126581026057 | add new test case to test add image from local file system. | zstackio/zstack-woodpecker,zstackorg/zstack-woodpecker,zstackio/zstack-woodpecker,zstackorg/zstack-woodpecker,quarkonics/zstack-woodpecker,quarkonics/zstack-woodpecker,zstackio/zstack-woodpecker,zstackorg/zstack-woodpecker | integrationtest/vm/virt_plus/other/test_add_local_image.py | integrationtest/vm/virt_plus/other/test_add_local_image.py | '''
New Integration Test for add image from MN local URI.
The file should be placed in MN.
@author: Youyk
'''
import os
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
test_image = '/tmp/zstack_wp_test_local_uri.img'
def test():
os.system('dd if=/dev/zero of=%s bs=1M count=1 seek=300' % test_image)
time.sleep(10)
image_name = 'test-image-%s' % time.time()
image_option = test_util.ImageOption()
image_option.set_name(image_name)
image_option.set_description('test image which is upload from local filesystem.')
image_option.set_url('file://%s' % test_image)
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
image_option.set_backup_storage_uuid_list([bs.uuid])
image_option.set_format('raw')
image_option.set_mediaType('RootVolumeTemplate')
image_inv = img_ops.add_root_volume_template(image_option)
time.sleep(10)
image = zstack_image_header.ZstackTestImage()
image.set_creation_option(image_option)
image.set_image(image_inv)
test_obj_dict.add_image(image)
image.check()
vm = test_stub.create_vm(image_name = image_name)
vm.destroy()
image.delete()
os.system('rm -f %s' % test_image)
    test_util.test_pass('Test adding image from local storage pass.')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
os.system('rm -f %s' % test_image)
| apache-2.0 | Python |
|
bfdcebfb287b6c3495e74888ace0409f47b530c9 | add testGroup script | USC-ACTLab/crazyswarm,USC-ACTLab/crazyswarm,USC-ACTLab/crazyswarm,USC-ACTLab/crazyswarm | ros_ws/src/crazyswarm/scripts/testGroup.py | ros_ws/src/crazyswarm/scripts/testGroup.py | #!/usr/bin/env python
import numpy as np
from pycrazyswarm import *
Z = 1.5
if __name__ == "__main__":
swarm = Crazyswarm()
timeHelper = swarm.timeHelper
allcfs = swarm.allcfs
allcfs.crazyfliesById[9].setGroup(1)
allcfs.crazyfliesById[10].setGroup(2)
allcfs.takeoff(targetHeight=Z, duration=1.0 + Z, group = 1)
timeHelper.sleep(1.5 + Z)
allcfs.land(targetHeight=0.06, duration=1.0 + Z)
timeHelper.sleep(1.5 + Z)
| mit | Python |
|
ae3374305bad49c358a173e26490c5c90b219208 | test for multiple open-read-close cycle | yandex-load/volta,yandex-load/volta,yandex-load/volta,yandex-load/volta | tests/multiple_readings.py | tests/multiple_readings.py | import serial
import struct
import time
import pandas as pd
import numpy as np
def measure():
start_time = time.time()
with serial.Serial('/dev/cu.usbmodem14121', 1000000, timeout=1) as inport:
open_time = time.time()
data = inport.read(100)
read_time = time.time()
close_time = time.time()
return (open_time - start_time, read_time - open_time, close_time - read_time, len(data))
df = pd.DataFrame.from_records(
(measure() for i in range(100)),
columns=["open", "read", "close", "datalength"])
print(df)
print(df.describe())
| mpl-2.0 | Python |
|
4e3644234fab9cb14a3d511b24bce3ed8a1446e0 | Add in a minor testcase. | paultag/python-muse | tests/scales/test_minor.py | tests/scales/test_minor.py | # Copyright (c) Paul R. Tagliamonte <[email protected]>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
for _ in range(count):
yield next(it)
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_ascending_iteration():
cs = NaturalMinorScale(Tone(100)) # Bb4
series = list(take(cs.acending(), 8))
assert [x._tone_name() for x in series] == SCALE
def test_scale_descending_iteration():
cs = NaturalMinorScale(Tone(1300)) # Bb5
series = list(take(cs.decending(), 8))
assert [x._tone_name() for x in series] == list(reversed(SCALE))
| mit | Python |
|
e98065e04cfd52bb369d3b07d29f37fb458baa91 | add solution for Merge Intervals | zhyu/leetcode,zhyu/leetcode | src/mergeIntervals.py | src/mergeIntervals.py | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
# @param intervals, a list of Interval
# @return a list of Interval
def merge(self, intervals):
if not intervals:
return []
res = []
intervals = sorted([[i.start, i.end] for i in intervals])
prev = intervals[0]
for i in xrange(1, len(intervals)):
if intervals[i][0] <= prev[1]:
prev[1] = max(prev[1], intervals[i][1])
else:
res.append(prev)
prev = intervals[i]
res.append(prev)
return res
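# Worked example: [[1,3],[2,6],[8,10],[15,18]] -> [[1,6],[8,10],[15,18]].
# After sorting by start, each interval either extends `prev` (when its
# start <= prev's current end) or closes it out and starts a new one.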
| mit | Python |
|
6f8c64ed6f99493811cab54137a1eed44d851260 | Add python script to get group and module given a class name | InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples | scripts/GetGroupAndModuleFromClassName.py | scripts/GetGroupAndModuleFromClassName.py | #!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
| apache-2.0 | Python |
|
3ff18745a561ab28e04d9218e00fc0aa367631f5 | add `solution` module | scott-maddox/obpds | src/obpds/solution.py | src/obpds/solution.py | #
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
import numpy
__all__ = ['Solution', 'EquilibriumSolution']
class Solution(object):
pass
class FlatbandSolution(Solution):
def __init__(self, T, N, x, Ev, Ec, Ei):
self.T = T
self.N = N
self.x = x
self.Ev = Ev
self.Ec = Ec
self.Ei = Ei
class EquilibriumSolution(Solution):
def __init__(self, T, N, x, Na, Nd, Ev, Ec, Ei, psi, n, p):
self.T = T
self.N = N
self.x = x
self.Na = Na
self.Nd = Nd
self.Ev = Ev
self.Ec = Ec
self.Ei = Ei
self.psi = psi
self.n = n
self.p = p
self.Ef = numpy.zeros(N) | agpl-3.0 | Python |
|
9a2f68d14ae2d576c59035c67ffa12c96b4f748a | Add provider tests | Mause/statistical_atlas_of_au | test_saau.py | test_saau.py | from saau.loading import load_image_providers, load_service_providers
def test_load_service_providers():
assert load_service_providers(None)
def test_load_image_providers():
assert load_image_providers(None) | mit | Python |
|
018be657ea3e088b3116e8a78fe81713a2a30e29 | Add tifftopdf, a frontend for tiff2pdf and tiffinfo. | rsmith-nl/scripts,rsmith-nl/scripts | tifftopdf.py | tifftopdf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <[email protected]>
# 2012-06-29
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to NAME. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
'''Description.'''
import os
import sys
import subprocess
from multiprocessing import Pool, Lock
globallock = Lock()
def checkfor(args):
"""Make sure that a program necessary for using this script is available."""
try:
f = open('/dev/null')
subprocess.call(args, stderr=subprocess.STDOUT, stdout=f)
f.close()
except:
print "Required program '{}' not found! exiting.".format(args[0])
sys.exit(1)
def process(fname):
"""Process the file named fname."""
try:
args = ['tiffinfo', fname]
# Gather information about the TIFF file.
txt = subprocess.check_output(args).split()
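        # tiffinfo output is whitespace-split, so the fields below sit at
        # fixed token offsets ("... Image Width: <w> Image Length: <l>
        # Resolution: <x>, <y> ..."); simple, but fragile across versions.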
if not txt[7] == 'Width:':
raise ValueError
width = float(txt[8])
length = float(txt[11])
xres = float(txt[13][:-1])
yres = float(txt[14])
# Create the output file name.
if fname.endswith(('.tif', '.TIF')):
outname = fname[:-4]
elif fname.endswith(('.tiff', '.TIFF')):
outname = fname[:-5]
outname = outname.replace(' ', '_') + '.pdf'
        args = ['tiff2pdf', '-w', str(width/xres), '-l', str(length/yres),
'-x', str(xres), '-y', str(yres), '-o', outname, fname]
subprocess.call(args)
globallock.acquire()
print "File '{}' converted to '{}'.".format(fname, outname)
globallock.release()
except:
globallock.acquire()
print "Converting {} failed.".format(fname)
globallock.release()
## This is the main program ##
if __name__ == '__main__':
if len(sys.argv) == 1:
path, binary = os.path.split(sys.argv[0])
print "Usage: {} [file ...]".format(binary)
sys.exit(0)
checkfor(['tiffinfo'])
checkfor(['tiff2pdf'])
p = Pool()
p.map(process, sys.argv[1:])
p.close()
| mit | Python |
|
6dcbb2004271860b7d2e8bf0d12da46c925f151c | add a utility to show/set/clear software write protect on a lun | rosjat/python-scsi,AHelper/python-scsi,AHelper/python-scsi | tools/swp.py | tools/swp.py | #!/usr/bin/env python
# coding: utf-8
#
# A simple example to show/set/clear the software write protect flag SWP
#
import sys
from pyscsi.pyscsi.scsi import SCSI
from pyscsi.pyscsi.scsi_device import SCSIDevice
from pyscsi.pyscsi import scsi_enum_modesense6 as MODESENSE6
def usage():
print 'Usage: swp.py [--help] [--on|--off] <device>'
def main():
swp_on = 0
swp_off = 0
i = 1
while i < len(sys.argv):
if sys.argv[i] == '--help':
return usage()
if sys.argv[i] == '--on':
del sys.argv[i]
swp_on = 1
continue
if sys.argv[i] == '--off':
del sys.argv[i]
swp_off = 1
continue
i += 1
if len(sys.argv) < 2:
return usage()
device = sys.argv[1]
sd = SCSIDevice(device)
s = SCSI(sd)
i = s.modesense6(page_code=MODESENSE6.PAGE_CODE.CONTROL).result
if swp_on:
i['mode_pages'][0]['swp'] = 1
s.modeselect6(i)
print 'Set SWP ON'
return
if swp_off:
i['mode_pages'][0]['swp'] = 0
s.modeselect6(i)
print 'Set SWP OFF'
return
print 'SWP is %s' % ("ON" if i['mode_pages'][0]['swp'] else "OFF")
if __name__ == "__main__":
main()
| lgpl-2.1 | Python |
|
80d2fa29185e9c3c54ed1e173122bbe5a78624a4 | Create tutorial4.py | mcfey/ggame-tutorials | tutorial4.py | tutorial4.py | mit | Python |
||
f8d4596db159f143d51c62ea2a097a72f9877ee6 | Add test for clusqmgr | fbraem/mqweb,fbraem/mqweb,fbraem/mqweb | test/clusqmgr.py | test/clusqmgr.py | import unittest
from testbase import MQWebTest
class TestQueueActions(MQWebTest):
def testInquire(self):
data = self.getJSON('/api/clusqmgr/inquire/' + self.qmgr)
self.assertFalse('mqweb' not in data, 'No mqweb data returned')
if 'error' in data:
self.assertFalse(True, 'Received a WebSphere MQ error:' + str(data['error']['reason']['code']))
self.assertFalse('clusqmgrs' not in data, 'No clusqmgrs array returned')
self.assertFalse(len(data['clusqmgrs']) == 0, 'No cluster information found')
self.assertTrue(self.checkIds(data['clusqmgrs'][0]), 'There are unmapped Websphere MQ attributes')
suite = unittest.TestLoader().loadTestsFromTestCase(TestQueueActions)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit | Python |
|
e21e04436f0596f25ca3fb75a9fe15916687c955 | Add utils.py tests | openstack/monasca-persister,stackforge/monasca-persister,stackforge/monasca-persister,openstack/monasca-persister,openstack/monasca-persister,stackforge/monasca-persister | monasca_persister/tests/test_utils.py | monasca_persister/tests/test_utils.py | # (C) Copyright 2019 Fujitsu Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
from oslotest import base
from monasca_persister.repositories import utils
class TestUtils(base.BaseTestCase):
def setUp(self):
super(TestUtils, self).setUp()
def tearDown(self):
super(TestUtils, self).tearDown()
def test_parse_measurement_message(self):
message = Mock()
message.message.value = """{
"metric": {
"name": "metric_name",
"timestamp": "metric_timestamp",
"value": "0.0",
"value_meta": {
},
"dimensions": {}
},
"meta": {
"region": "meta_region",
"tenantId": "meta_tenantId"
}
}"""
data = utils.parse_measurement_message(message)
self.assertEqual(data[0], {})
self.assertEqual(data[1], 'metric_name')
self.assertEqual(data[2], 'meta_region')
self.assertEqual(data[3], 'meta_tenantId')
self.assertEqual(data[4], 'metric_timestamp')
self.assertEqual(data[5], 0.0)
self.assertEqual(data[6], {})
def test_parse_alarm_state_hist_message(self):
message = Mock()
message.message.value = """{
"alarm-transitioned": {
"alarmId": "dummyid",
"metrics": "dummymetrics",
"newState": "dummynewState",
"oldState": "dummyoldState",
"link": "dummylink",
"lifecycleState": "dummylifecycleState",
"stateChangeReason": "dummystateChangeReason",
"tenantId": "dummytenantId",
"timestamp": "dummytimestamp",
"subAlarms": {
"subAlarmExpression": "dummy_sub_alarm",
"currentValues": "dummy_values",
"metricDefinition": "dummy_definition",
"subAlarmState": "dummy_state"
}
}
}"""
output = ['"sub_alarm_expression":"dummy_sub_alarm"',
'"current_values":"dummy_values"',
'"metric_definition":"dummy_definition"',
'"sub_alarm_state":"dummy_state"']
data = utils.parse_alarm_state_hist_message(message)
self.assertEqual(data[0], 'dummyid')
self.assertEqual(data[1], 'dummymetrics')
self.assertEqual(data[2], 'dummynewState')
self.assertEqual(data[3], 'dummyoldState')
self.assertEqual(data[4], 'dummylink')
self.assertEqual(data[5], 'dummylifecycleState')
self.assertEqual(data[6], "dummystateChangeReason")
for elem in output:
self.assertIn(elem, data[7])
self.assertEqual(data[8], 'dummytenantId')
self.assertEqual(data[9], 'dummytimestamp')
def test_parse_events_message(self):
message = Mock()
message.message.value = """{
"event": {
"event_type": "dummy_event_type",
"timestamp": "dummy_timestamp",
"payload": "dummy_payload",
"dimensions": "dummy_dimensions"
},
"meta": {
"project_id": "dummy_project_id"
}
}"""
project_id, timestamp, event_type, payload, dimensions = utils.parse_events_message(message)
self.assertEqual(project_id, "dummy_project_id")
self.assertEqual(timestamp, "dummy_timestamp")
self.assertEqual(event_type, "dummy_event_type")
self.assertEqual(payload, "dummy_payload")
self.assertEqual(dimensions, "dummy_dimensions")
| apache-2.0 | Python |
|
66443f49c932fba9203b878b7be5f8c1a99a4e9e | make pacbio like names | jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public | iron/utilities/rename_to_pacbio.py | iron/utilities/rename_to_pacbio.py | #!/usr/bin/python
import sys,argparse
from SequenceBasics import FastaHandleReader, FastqHandleReader
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input',help="Use - for STDIN")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--fasta',action='store_true')
group.add_argument('--fastq',action='store_true')
args = parser.parse_args()
if args.input=='-': args.input = sys.stdin
else: args.input= open(args.input)
if args.fasta:
args.input = FastaHandleReader(args.input)
elif args.fastq:
args.input = FastqHandleReader(args.input)
z = 0
while True:
e = args.input.read_entry()
if not e: break
z+=1
name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
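        # Fake PacBio CCS read id: <movie_name>/<hole_number>/ccs, with a
        # dummy m<date>_<time>_<instrument>_c<cell>_s1_p0 movie name; only
        # the running hole number z differs between records.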
if args.fastq:
print '@'+name
print e['seq']
print '+'
print e['qual']
elif args.fasta:
print '>'+name
print e['seq']
if __name__=="__main__":
main()
| apache-2.0 | Python |
|
b3633655ce700adfe3bd5390735edf799fd56624 | add missing migration | senkal/gunnery,gunnery/gunnery,gunnery/gunnery,senkal/gunnery,gunnery/gunnery,senkal/gunnery,senkal/gunnery,gunnery/gunnery | gunnery/core/migrations/0003_auto__add_field_server_port.py | gunnery/core/migrations/0003_auto__add_field_server_port.py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Server.port'
db.add_column(u'core_server', 'port',
self.gf('django.db.models.fields.IntegerField')(default=22),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Server.port'
db.delete_column(u'core_server', 'port')
models = {
u'core.application': {
'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'Application'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': u"orm['core.Department']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.department': {
'Meta': {'object_name': 'Department'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.environment': {
'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Environment'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environments'", 'to': u"orm['core.Application']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_production': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.server': {
'Meta': {'unique_together': "(('environment', 'name'),)", 'object_name': 'Server'},
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['core.Environment']"}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '22'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'servers'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.serverrole': {
'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'ServerRole'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'serverroles'", 'to': u"orm['core.Department']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['core'] | apache-2.0 | Python |
|
dddac1090fae15edb9a8d2a2781bb80989a0bc84 | add eventrange control | mlassnig/pilot2,mlassnig/pilot2,PalNilsson/pilot2,PalNilsson/pilot2 | pilot/control/eventrange.py | pilot/control/eventrange.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, [email protected], 2018
import json
import Queue
import os
import time
from pilot.util import https
from pilot.util.config import config
import logging
logger = logging.getLogger(__name__)
def download_event_ranges(job, num_ranges=None):
"""
Download event ranges
:param job:
:param num_ranges:
:return: List of event ranges.
"""
log = logger.getChild(str(job['PandaID']))
try:
        if num_ranges is None:
# ToBeFix num_ranges with corecount
num_ranges = 1
data = {'pandaID': job['PandaID'],
'jobsetID': job['jobsetID'],
'taskID': job['taskID'],
'nRanges': num_ranges}
log.info("Downloading new event ranges: %s" % data)
res = https.request('{pandaserver}/server/panda/getEventRanges'.format(pandaserver=config.Pilot.pandaserver),
data=data)
log.info("Downloaded event ranges: %s" % res)
if res['StatusCode'] == 0 or str(res['StatusCode']) == '0':
return res['eventRanges']
return []
except Exception, e:
        log.error("Failed to download event ranges: %s" % str(e))
return None
def update_event_ranges(job, event_ranges, version=1):
"""
Update an event range on the Event Server
:param event_ranges:
"""
log = logger.getChild(str(job['PandaID']))
log.info("Updating event ranges: %s" % event_ranges)
try:
if version:
data = {'eventRanges': json.dumps(event_ranges), 'version': 1}
else:
data = {'eventRanges': json.dumps(event_ranges)}
log.info("Updating event ranges: %s" % data)
res = https.request('{pandaserver}/server/panda/updateEventRanges'.format(pandaserver=config.Pilot.pandaserver),
data=data)
log.info("Updated event ranges status: %s" % res)
except Exception, e:
        log.error("Failed to update event ranges: %s" % str(e))
| apache-2.0 | Python |
|
42d6f1d17ea0f0117a82eb1933a5150b5eb1e29a | add missing is_context_manager | enthought/pikos,enthought/pikos,enthought/pikos | pikos/_internal/util.py | pikos/_internal/util.py | import inspect
def is_context_manager(obj):
""" Check if the obj is a context manager """
# FIXME: this should work for now.
return hasattr(obj, '__enter__') and hasattr(obj, '__exit__')
| bsd-3-clause | Python |
|
ec2310dc42ccdeaafc74c232fad3199dcd22e252 | Create EPICLocSearch_parse-intron.py | RetelC/PDra_Phylogeography,RetelC/PDra_Phylogeography | EPICLocSearch_parse-intron.py | EPICLocSearch_parse-intron.py | " " " this file was created in november 2014
as part of a de novo search for EPIC loci in
the chaetognath species Pterosagitta draco
property of dr. Ferdinand Marlétaz
" " "
#!/usr/bin/env python
import sys
import re
from collections import defaultdict
def reverse(ali,taxa,clust):
alen=len(ali[taxa[0]])
#print alen
positions=['']*alen
for tax in taxa:
seq=ali[tax]
#print tax, len(seq)
for i,res in enumerate(seq):
positions[i]+=res
#print taxa
n_int=0
onset=50
maxgaps=20
    #We select introns with at least 30 flanking positions out of 50
#print ','.join(taxa)
msk=[tax for tax in taxa if tax.startswith('Lgi')]
id_msk=''.join(msk[0]) if len(msk)>0 else 'NA'
for i,pos in enumerate(positions):
if ''.join(set(pos))=='(':
#print '('*len(taxa)
items=dict((e,positions[i+1].count(e)) for e in set(positions[i+1]))
sum_pres=sum([items[v] for v in ['0','1','2'] if v in items])
sum_tot=sum(items.values())
if sum_pres>sum_tot-5:
cons_left=ali['cons'][i-onset:i]
cons_right=ali['cons'][i+3:i+onset+3]
cons_left_sc=cons_left.count('+')
cons_right_sc=cons_right.count('+')
seq_left=ali[id_msk][i-onset:i].replace(']',')').split(')')[-1] if ')' in ali[id_msk][i-onset:i].replace(']',')') else ali[id_msk][i-onset:i]
seq_right=ali[id_msk][i+3:i+onset+3].replace('[','(').split('(')[0]
gap_left=cons_left.count('-')
gap_right=cons_right.count('-')
if len(seq_left.replace('-',''))>=onset-maxgaps and len(seq_right.replace('-',''))>=onset-maxgaps:
if gap_left<maxgaps and gap_right<maxgaps:
print "{0}\t{1}\t{2}/{3}\t{4}/{5}\t{6} / {7}\t{8} / {9}".format(clust,i,sum_pres,sum_tot,cons_left_sc,cons_right_sc,cons_left,cons_right,seq_left,seq_right)
out.write('>{0}_{1}_left\n{2}\n'.format(clust,i,seq_left))
out.write('>{0}_{1}_right\n{2}\n'.format(clust,i,seq_right))
#print '\n'.join(positions[i-10:i+11])
n_int+=1
#print i,pos[0:50]
#print n_int
gene=''
aliSet=defaultdict(str)
taxa=[]
elt=['cons','insert','sites','intron','sfilt']
out=open('flanks.fa','w')
for line in open(sys.argv[1]):
if line.startswith('./METFAM'):
clust=line.rstrip().split('/')[3]
gene=''
if len(aliSet)>0:
#print "\n",clust
#for tax in taxa:
#print "{0}\t{1}".format(tax,len(aliSet[tax]))
rev=reverse(aliSet,taxa,clust.split('.')[0])
aliSet=defaultdict(str)
taxa=[]
alin=re.search(r'([^\s]+)(\s+)(.+)\n', line)
if alin:
name=line[0:40].split()[0]
seq=line.rstrip()[40:]
#print name,seq[0:40]
aliSet[name]+=seq
if not name in taxa and not name in elt:
taxa.append(name)
#print alin.groups()
#Bfl-G461809
#cons
| mit | Python |
|
c0a809ff79d90712a5074d208193ac9fd2af9901 | Add haproxy parser | jiasir/playback,nofdev/playback | playback/cli/haproxy.py | playback/cli/haproxy.py | import sys
from playback.api import HaproxyInstall
from playback.api import HaproxyConfig
from playback.templates.haproxy_cfg import conf_haproxy_cfg
from playback.cliutil import priority
def install(args):
try:
target = HaproxyInstall(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
except AttributeError as e:
sys.stderr.write(e.message)
sys.exit(1)
target.install()
def config(args):
try:
target = HaproxyConfig(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
except AttributeError:
sys.stderr.write('No hosts found. Please using --hosts param.')
sys.exit(1)
if args.upload_conf:
target.upload_conf(args.upload_conf)
if args.configure_keepalived:
target.configure_keepalived(args.router_id, args.priority,
args.state, args.interface, args.vip)
def gen_conf():
with open('haproxy.cfg', 'w') as f:
f.write(conf_haproxy_cfg)
@priority(12)
def make(parser):
"""provision HAProxy with Keepalived"""
s = parser.add_subparsers(
title='commands',
metavar='COMMAND',
help='description',
)
def install_f(args):
install(args)
install_parser = s.add_parser('install', help='install HAProxy')
install_parser.set_defaults(func=install_f)
def config_f(args):
config(args)
config_parser = s.add_parser('config', help='configure HAProxy')
config_parser.add_argument('--upload-conf', help='upload configuration file to the target host',
action='store', default=False, dest='upload_conf')
config_parser.add_argument('--configure-keepalived', help='configure keepalived',
action='store_true', default=False, dest='configure_keepalived')
config_parser.add_argument('--router_id', help='Keepalived router id e.g. lb1',
action='store', default=False, dest='router_id')
config_parser.add_argument('--priority', help='Keepalived priority e.g. 150',
action='store', default=False, dest='priority')
config_parser.add_argument('--state', help='Keepalived state e.g. MASTER',
action='store', default=False, dest='state')
config_parser.add_argument('--interface', help='Keepalived binding interface e.g. eth0',
action='store', default=False, dest='interface')
config_parser.add_argument('--vip', help='Keepalived virtual ip e.g. CONTROLLER_VIP',
action='store', default=False, dest='vip')
config_parser.set_defaults(func=config_f)
def gen_conf_f(args):
gen_conf()
gen_conf_parser = s.add_parser('gen-conf', help='generate the example configuration to the current location')
gen_conf_parser.set_defaults(func=gen_conf_f)
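# Illustrative CLI flow (the top-level command name depends on how playback
# registers its entry points; `playback haproxy` is assumed here):
#   playback haproxy install --hosts 10.0.0.10,10.0.0.11 --user ubuntu
#   playback haproxy gen-conf
#   playback haproxy config --upload-conf haproxy.cfg
#   playback haproxy config --configure-keepalived --router_id lb1 \
#       --priority 150 --state MASTER --interface eth0 --vip 10.0.0.100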
| mit | Python |
|
acc5c52011db4c8edc615ae3e0cad9cea4fe58b8 | Add basic test for filesystem observer source | znerol/spreadflow-observer-fs,spreadflow/spreadflow-observer-fs | spreadflow_observer_fs/test/test_source.py | spreadflow_observer_fs/test/test_source.py | # -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn process method signature.
"""
class SpreadflowSourceIntegrationTestCase(TestCase):
"""
Integration tests for spreadflow filesystem observer source.
"""
@run_test_with(AsynchronousDeferredRunTest)
@defer.inlineCallbacks
def test_source_process(self):
source = FilesystemObserverSource('*.txt', '/some/directory')
reactor = Mock()
reactor.spawnProcess = Mock(spec=_spawnProcess)
scheduler = Mock()
scheduler.send = Mock(spec=Scheduler.send)
# Attach source to the scheduler.
yield source.attach(scheduler, reactor)
self.assertEquals(reactor.spawnProcess.call_count, 1)
# Simulate a message directed to the source.
msg = {
'port': 'default',
'item': {
'type': 'delta',
'date': datetime(2010, 10, 20, 20, 10),
'inserts': ['abcdefg'],
'deletes': ['hiklmno'],
'data': {
'abcdefg': {
'path': '/some/directory/xyz.txt'
}
}
}
}
matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
source.peer.dataReceived(BSON.encode(msg))
self.assertEquals(scheduler.send.call_count, 1)
self.assertThat(scheduler.send.call_args, matches)
| mit | Python |
|
4d1c81af1d028b2d0fd58f8bab7e7e0246c04f3b | Create alternative_matching.py | JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking,JsWatt/Free-Parking | hacker_rank/regex/grouping_and_capturing/alternative_matching.py | hacker_rank/regex/grouping_and_capturing/alternative_matching.py | Regex_Pattern = r'^(Mr\.|Mrs\.|Ms\.|Dr\.|Er\.)[a-zA-Z]{1,}$' # Do not delete 'r'.
| mit | Python |
|
8033f8a033ddc38c3f1e2276c8c2b4f50c8360fb | Add Python template | nathanielng/code-templates,nathanielng/code-templates,nathanielng/code-templates | src/template.py | src/template.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
def main(filename=None):
print("Hello world!")
    if not os.path.isfile(filename):
        file_status = ' (file does not exist)'
    else:
        file_status = ''
    print("Input File = '{}'{}".format(filename, file_status))
    _, file_ext = os.path.splitext(filename)
    if file_ext not in ['.txt', '.text']:
        print("File extension '{}' is invalid".format(file_ext))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Starting Template for Python 3 Programs')
parser.add_argument('file',help='Input file')
args = parser.parse_args()
main(args.file)
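# Example invocation (assuming this file is saved as template.py):
#   $ python template.py notes.txt
# prints the greeting, reports whether notes.txt exists, and warns when the
# extension is not .txt/.text.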
| apache-2.0 | Python |
|
11603040c58e27ebb109275bd4454a54e0c61d42 | Test examples | miniworld-project/miniworld_core,miniworld-project/miniworld_core | tests/acceptance/test_examples.py | tests/acceptance/test_examples.py | from typing import Dict
from miniworld.util import JSONConfig
# TODO: examples/batman_adv.json, problem is configurator
def test_snapshot_boot_single_scenario(image_path, runner):
with runner() as r:
for _ in range(5):
scenario = JSONConfig.read_json_config('examples/nb_bridged_lan.json') # type: Dict
r.start_scenario(scenario)
r.step()
r.step()
r.stop(hard=False)
# TODO: test stop/step
def test_snapshot_boot_multiple_scenarios(image_path, runner):
with runner() as r:
scenario = JSONConfig.read_json_config('examples/batman_adv.json') # type: Dict
r.start_scenario(scenario)
for _ in range(5):
r.step()
r.stop(hard=False)
scenario = JSONConfig.read_json_config('examples/nb_bridged_lan.json') # type: Dict
r.start_scenario(scenario)
for _ in range(5):
r.step()
r.stop(hard=False)
scenario = JSONConfig.read_json_config('examples/nb_bridged_wifi.json') # type: Dict
r.start_scenario(scenario)
for _ in range(5):
r.step()
r.stop(hard=False)
| mit | Python |
|
d2c5462c5677d7674921f02687017f4128f219f7 | Create while_loop_else.py | joshavenue/python_notebook | while_loop_else.py | while_loop_else.py | # You can write a while loop that ends with an else:
# the else block runs once the loop condition becomes false,
# and is skipped only if the loop exits via break.
n = 0
while n < 3:
    print(n)
    n += 1
else:
    print('Loop condition became false; no break occurred')
| unlicense | Python |
|
0322e1c51fe07cc9707a687ab309a00ca374a1af | Add a cleanup_test_data management command to remove old test data from dev and stage | mozilla/moztrap,mozilla/moztrap,mccarrmb/moztrap,mccarrmb/moztrap,mccarrmb/moztrap,mozilla/moztrap,mccarrmb/moztrap,mccarrmb/moztrap,mozilla/moztrap,mozilla/moztrap | moztrap/model/core/management/commands/cleanup_test_data.py | moztrap/model/core/management/commands/cleanup_test_data.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand
from moztrap.model.core import models as core_models
from moztrap.model.environments import models as env_models
class Command(BaseCommand):
help = 'Deletes old test data'
option_list = BaseCommand.option_list + (
make_option('--permanent',
action='store_true',
dest='permanent',
default=True,
help='Permanently delete records?'),)
def handle(self, *args, **options):
for model in (core_models.Product,
env_models.Category,
env_models.Element,
env_models.Profile):
obj_type = model._meta.object_name
objects_to_delete = model.everything.filter(name__startswith='Test %s ' % obj_type)
obj_count = objects_to_delete.count()
objects_to_delete.delete(permanent=options['permanent'])
self.stdout.write('%s: %s test %s object(s) deleted\n' %
(datetime.now().isoformat(), obj_count, obj_type))
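# Run through Django's manage.py, e.g.:
#   $ python manage.py cleanup_test_data
# Note: with default=True above, deletion is already permanent; passing
# --permanent merely restates that behaviour.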
| bsd-2-clause | Python |
|
e2004076b1e04df21d9122d94e8ac00776542483 | Create new package. (#6044) | LLNL/spack,skosukhin/spack,skosukhin/spack,EmreAtes/spack,tmerrick1/spack,EmreAtes/spack,mfherbst/spack,iulian787/spack,LLNL/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,matthiasdiener/spack,skosukhin/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,EmreAtes/spack,EmreAtes/spack,iulian787/spack,mfherbst/spack,mfherbst/spack,mfherbst/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,matthiasdiener/spack,tmerrick1/spack,skosukhin/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,krafczyk/spack,mfherbst/spack,skosukhin/spack,krafczyk/spack | var/spack/repos/builtin/packages/r-allelicimbalance/package.py | var/spack/repos/builtin/packages/r-allelicimbalance/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAllelicimbalance(RPackage):
"""Provides a framework for allelic specific expression
investigation using RNA-seq data."""
homepage = "http://bioconductor.org/packages/AllelicImbalance/"
url = "https://git.bioconductor.org/packages/AllelicImbalance"
version('1.14.0', git='https://git.bioconductor.org/packages/AllelicImbalance', commit='35958534945819baafde0e13d1eb4d05a514142c')
depends_on('[email protected]:3.4.9', when='@1.14.0')
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-bsgenome', type=('build', 'run'))
depends_on('r-variantannotation', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-genomicfeatures', type=('build', 'run'))
depends_on('r-gviz', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-latticeextra', type=('build', 'run'))
depends_on('r-gridextra', type=('build', 'run'))
depends_on('r-seqinr', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-nlme', type=('build', 'run'))
| lgpl-2.1 | Python |
|
483cdf6b4dd846d9da11788ae98d86d373fb5c49 | add analyze script | petuum/public,petuum/public,petuum/public,petuum/public,petuum/public | app/lda/scripts/analyze.py | app/lda/scripts/analyze.py | from __future__ import print_function
import numpy as np
import sys
import pandas as pd
phi_path = '/users/wdai/bosen/app/lda/output/lda.S0.M4.T32/lda_out.phi'
num_topics = 100
num_words = 52210
top_k = 10
dict_path = '/users/wdai/bosen/app/lda/datasets/words_freq.tsv'
topk_file = '/users/wdai/bosen/app/lda/output/topk.tsv'
def read_dict():
df = pd.read_csv(dict_path, sep='\t')
min_occur = 10
df = df[df['count'] >= min_occur]
df = df[df['count'] <= 1e6] # remove super frequent words
print('# of words occuring at least 10 times:', len(df.index))
words = df['word'].tolist()
id = df['id'].as_matrix()
# TODO(wdai): remap the word ID after truncation.
return dict(zip(id, words))
if __name__ == '__main__':
phi = np.zeros((num_topics, num_words))
with open(phi_path, 'r') as f:
lines = f.readlines()
for topic, line in enumerate(lines):
fields = [float(field.strip()) for field in line.split()]
assert len(fields) == num_words, 'len(fields): %d vs num_words %d' % \
(len(fields), num_words)
phi[topic, :] = fields
# top-k words
#topk = np.zeros((num_topics, top_k))
i2w = read_dict()
with open(topk_file, 'w') as f:
for t in range(num_topics):
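            # np.argpartition pulls out the top_k entries in O(n) without a
            # full sort; the argsort then orders just those k indices by
            # descending weight.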
ind = np.argpartition(phi[t,:], -top_k, axis=0)[-top_k:]
ind = ind[np.argsort(phi[t,ind])[::-1]]
for n in ind:
f.write('%s:%.2f\t' % (i2w[n], phi[t,n]))
f.write('\n')
print('Output top %d words to %s' % (top_k, topk_file))
| bsd-3-clause | Python |
|
fcb02edeb8fafa8c297d48edc8ebf6b389321430 | add test | fukatani/stacked_generalization | test_iris.py | test_iris.py | import unittest
from sklearn import datasets
from sklearn.utils.validation import check_random_state
from stacked_generalization import StackedClassifier, FWLSClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.tree.tree import SPARSE_SPLITTERS
class TestStackedClassfier(unittest.TestCase):
def setUp(self):
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
self.iris = iris
def test_stacked_classfier(self):
bclf = LogisticRegression(random_state=1)
clfs = [RandomForestClassifier(n_estimators=50, criterion = 'gini', random_state=1),
ExtraTreesClassifier(n_estimators=50, criterion = 'gini', random_state=2),
ExtraTreesClassifier(n_estimators=40, criterion = 'gini', random_state=3),
GradientBoostingClassifier(n_estimators=25, random_state=1),
GradientBoostingClassifier(n_estimators=40, random_state=1),
RidgeClassifier(random_state=1)]
sl = StackedClassifier(bclf, clfs, n_folds=3, verbose=0, stack_by_proba=True, oob_score_flag=True)
sl.fit(self.iris.data, self.iris.target)
score = sl.score(self.iris.data, self.iris.target)
self.assertGreater(score, 0.8, "Failed with score = {0}".format(score))
self.assertGreater(score, 0.8, "Failed with score = {0}".format(sl.oob_score_))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
|
2d0f76538f8927a85a2c51b0b6c34f54c775b883 | Add kmeans receiver | WheatonCS/Lexos,WheatonCS/Lexos,WheatonCS/Lexos | lexos/receivers/kmeans_receiver.py | lexos/receivers/kmeans_receiver.py | from lexos.receivers.base_receiver import BaseReceiver
class KmeansOption:
    def __init__(self):
        pass  # option fields to be added once the front end is wired up
class KmeansReceiver(BaseReceiver):
def options_from_front_end(self) -> KmeansOption:
"""Get the Kmeans option from front end.
:return: a KmeansOption object to hold all the options.
"""
| mit | Python |
|
ab87f960ecb6f330f4574d2e8dc6b3d4cc96c40f | add solution for Spiral Matrix II | zhyu/leetcode,zhyu/leetcode | src/spiralMatrixII.py | src/spiralMatrixII.py | class Solution:
# @return a list of lists of integer
def generateMatrix(self, n):
if n == 0:
return []
dirs = [[0, 1], [1, 0], [0, -1], [-1, 0]]
cur = cur_d = 0
cur_x = cur_y = 0
matrix = [[0 for col in xrange(n)] for row in xrange(n)]
while cur != n*n:
cur += 1
matrix[cur_x][cur_y] = cur
nx = cur_x + dirs[cur_d][0]
ny = cur_y + dirs[cur_d][1]
if nx < 0 or ny < 0 or nx == n or ny == n or matrix[nx][ny]:
cur_d = (cur_d+1) % 4
nx = cur_x + dirs[cur_d][0]
ny = cur_y + dirs[cur_d][1]
cur_x, cur_y = nx, ny
return matrix
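# Illustrative check (not part of the original solution):
#   Solution().generateMatrix(3) == [[1, 2, 3],
#                                    [8, 9, 4],
#                                    [7, 6, 5]]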
| mit | Python |
|
69a031db7d83254291349804ee5f59fe9972f181 | Add simple jitclass example | sklam/numba,gmarkall/numba,gmarkall/numba,stonebig/numba,stuartarchibald/numba,seibert/numba,stonebig/numba,numba/numba,stuartarchibald/numba,seibert/numba,sklam/numba,gmarkall/numba,jriehl/numba,sklam/numba,jriehl/numba,IntelLabs/numba,IntelLabs/numba,stefanseefeld/numba,numba/numba,seibert/numba,sklam/numba,numba/numba,sklam/numba,cpcloud/numba,cpcloud/numba,stuartarchibald/numba,cpcloud/numba,stefanseefeld/numba,cpcloud/numba,jriehl/numba,stonebig/numba,IntelLabs/numba,stonebig/numba,IntelLabs/numba,stefanseefeld/numba,gmarkall/numba,jriehl/numba,seibert/numba,stonebig/numba,IntelLabs/numba,stuartarchibald/numba,stefanseefeld/numba,seibert/numba,jriehl/numba,cpcloud/numba,stuartarchibald/numba,numba/numba,stefanseefeld/numba,gmarkall/numba,numba/numba | examples/jitclass.py | examples/jitclass.py | """
A simple jitclass example.
"""
import numpy as np
from numba import jitclass # import the decorator
from numba import int32, float32 # import the types
spec = [
('value', int32), # a simple scalar field
('array', float32[:]), # an array field
]
@jitclass(spec)
class Bag(object):
def __init__(self, value):
self.value = value
self.array = np.zeros(value, dtype=np.float32)
@property
def size(self):
return self.array.size
def increment(self, val):
for i in range(self.size):
self.array[i] += val
return self.array
mybag = Bag(21)
print('isinstance(mybag, Bag)', isinstance(mybag, Bag))
print('mybag.value', mybag.value)
print('mybag.array', mybag.array)
print('mybag.size', mybag.size)
print('mybag.increment(3)', mybag.increment(3))
print('mybag.increment(6)', mybag.increment(6))
| bsd-2-clause | Python |
|
5273a97ab1da4b809573617d3fc01705c322992f | Add tests for form mixin. | thecut/thecut-authorship | thecut/authorship/tests/test_forms.py | thecut/authorship/tests/test_forms.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
class Meta:
model = AuthorshipModel
fields = []
class DummyUser(object):
pass
class TestAuthorshipMixin(TestCase):
def test_requires_an_extra_argument_on_creating_an_instance(self):
self.assertRaises(TypeError, AuthorshipModelForm)
def test_sets_user_attribute(self):
dummy_user = DummyUser()
form = AuthorshipModelForm(user=dummy_user)
self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
def __init__(self):
self.pk = None
class TestAuthorshipMixinSave(TestCase):
@patch('django.forms.ModelForm.save')
def test_calls_super_class_save_method(self, superclass_save):
form = AuthorshipModelForm(user=UserFactory())
form.instance = DummyUnsavedModel()
form.save()
self.assertTrue(superclass_save.called)
@patch('django.forms.ModelForm.save')
def test_sets_updated_by_to_given_user(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.updated_by)
@patch('django.forms.ModelForm.save')
def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.created_by)
@patch('django.forms.ModelForm.save')
def test_does_not_set_created_by_if_instance_is_saved(self,
superclass_save):
class DummySavedModel(object):
def __init__(self):
self.pk = 'arbitrary-value'
self.created_by = 'arbitrary-value'
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummySavedModel()
form.cleaned_data = {}
form.save()
self.assertNotEqual(user, form.instance.created_by)
| apache-2.0 | Python |
|
e838b6d53f131badfbb7b51b4eb268ebb5d7c450 | Add tests for using the new Entity ID tracking in the rule matcher | banglakit/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,raphael0202/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,banglakit/spaCy,raphael0202/spaCy,honnibal/spaCy,banglakit/spaCy,raphael0202/spaCy,recognai/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,banglakit/spaCy,banglakit/spaCy,aikramer2/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,banglakit/spaCy,aikramer2/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,recognai/spaCy,explosion/spaCy,raphael0202/spaCy,recognai/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,recognai/spaCy,Gregory-Howard/spaCy | spacy/tests/matcher/test_entity_id.py | spacy/tests/matcher/test_entity_id.py | from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
matcher = Matcher(en_vocab)
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
entity = matcher.get_entity('TestEntity')
assert entity == {}
matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
entity = matcher.get_entity('TestEntity2')
assert entity == {'Hello': 'World'}
assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
assert matcher.n_patterns == 1
matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
assert len(matches) == 1
assert len(matches[0]) == 4
ent_id, label, start, end = matches[0]
assert ent_id == matcher.vocab.strings[u'TestEntity']
assert label == 0
assert start == 0
assert end == 2
attrs = matcher.get_entity(ent_id)
assert attrs == {u'Hello': u'World'}
| mit | Python |
|
2cf812ba2015bfcc392a2f401c253850b31060c7 | Make sure all tags are alphanumeric | 0x90sled/catapult,dstockwell/catapult,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,dstockwell/catapult,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,danbeam/catapult,catapult-project/catapult-csm,danbeam/catapult,modulexcite/catapult,sahiljain/catapult,zeptonaut/catapult,catapult-project/catapult-csm,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult,benschmaus/catapult,scottmcmaster/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,catapult-project/catapult,catapult-project/catapult-csm,dstockwell/catapult,SummerLW/Perf-Insight-Report,modulexcite/catapult,danbeam/catapult,scottmcmaster/catapult,catapult-project/catapult-csm,modulexcite/catapult,dstockwell/catapult,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,zeptonaut/catapult,sahiljain/catapult,zeptonaut/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,scottmcmaster/catapult,0x90sled/catapult,catapult-project/catapult,sahiljain/catapult,danbeam/catapult,SummerLW/Perf-Insight-Report,0x90sled/catapult,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult | perf_insights/perf_insights/upload.py | perf_insights/perf_insights/upload.py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import webapp2
import uuid
from perf_insights import trace_info
sys.path.append('third_party')
import cloudstorage as gcs
default_retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)
class UploadPage(webapp2.RequestHandler):
def get(self):
self.response.out.write("""
<html><body>
<head><title>Performance Insights - Trace Uploader</title></head>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><input type="file" name="trace"/></div>
<div><input type="submit" value="Upload"></div>
</form><hr>
</body></html>""")
def post(self):
trace_uuid = str(uuid.uuid4())
bucket_name = ('/performance-insights/' + trace_uuid)
gcs_file = gcs.open(bucket_name,
'w',
content_type='application/octet-stream',
options={},
retry_params=default_retry_params)
gcs_file.write(self.request.get('trace'))
gcs_file.close()
trace_object = trace_info.TraceInfo(id=trace_uuid)
trace_object.prod = self.request.get('prod')
trace_object.remote_addr = os.environ["REMOTE_ADDR"]
tags_string = self.request.get('tags')
if re.match('^[a-zA-Z0-9,]+$', tags_string): # ignore non alpha-numeric tags
trace_object.tags = tags_string.split(',')
trace_object.user_agent = self.request.headers.get('User-Agent')
trace_object.ver = self.request.get('product_version')
trace_object.put()
self.response.write(trace_uuid)
app = webapp2.WSGIApplication([('/upload', UploadPage)])
| # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import webapp2
import uuid
from perf_insights import trace_info
sys.path.append('third_party')
import cloudstorage as gcs
default_retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)
class UploadPage(webapp2.RequestHandler):
def get(self):
self.response.out.write("""
<html><body>
<head><title>Performance Insights - Trace Uploader</title></head>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><input type="file" name="trace"/></div>
<div><input type="submit" value="Upload"></div>
</form><hr>
</body></html>""")
def post(self):
trace_uuid = str(uuid.uuid4())
bucket_name = ('/performance-insights/' + trace_uuid)
gcs_file = gcs.open(bucket_name,
'w',
content_type='application/octet-stream',
options={},
retry_params=default_retry_params)
gcs_file.write(self.request.get('trace'))
gcs_file.close()
trace_object = trace_info.TraceInfo(id=trace_uuid)
trace_object.prod = self.request.get('prod')
trace_object.remote_addr = os.environ["REMOTE_ADDR"]
tags_string = self.request.get('tags')
if len(tags_string):
trace_object.tags = tags_string.split(',')
trace_object.user_agent = self.request.headers.get('User-Agent')
trace_object.ver = self.request.get('product_version')
trace_object.put()
self.response.write(trace_uuid)
app = webapp2.WSGIApplication([('/upload', UploadPage)])
| bsd-3-clause | Python |
084f9bb8333a7cfb3f4247afbcae62375060fa2b | Add rude graphics mode tester | Jartza/octapentaveega,Jartza/octapentaveega,Jartza/attiny85-vga,Jartza/attiny85-vga | tests/testpic.py | tests/testpic.py | import serial
import time
import random
import sys
# Give port name of your UART as first argument. No error checking
# here, sorry.
#
ser = serial.Serial(sys.argv[1], 9600, timeout = 1)
serwrite = lambda x: ser.write(bytearray(map(ord, x)))
move_to = lambda x, y: serwrite("\x1B[{0};{1}H".format(y, x))
serwrite("xxxxx\x08") # dismiss if we're left in ANSI mode...
serwrite("\x1B[0]") # Text mode
serwrite("\x1B[2J") # Clear screen
serwrite("\x1B[m") # Reset colors
serwrite("\x1B[?7l") # disable wrap
image=[
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"............................... ...............................",
".............................. ..............................",
"............................. .............................",
"............................. .............................",
"............................ ............................",
"........................... ...........................",
"........................... ...........................",
".......................... ..........................",
".......................... .. ..........................",
"......................... .. .........................",
"........................ .... ........................",
"........................ ...... ........................",
"....................... ...... .......................",
"...................... ........ ......................",
"...................... ........ ......................",
"..................... .......... .....................",
".................... ............ ....................",
".................... ............ ....................",
"................... ........ ... ...................",
"................... ........ ... ...................",
".................. ........ ... ..................",
"................. ......... ..... .................",
"................. ............ ....... .................",
"................ .... ......... ................",
"............... .... ......... ...............",
"............... ... ........ ...............",
".............. ... . ......... ..............",
".............. ... .. .......... ..............",
"............. ... . ......... .............",
"............ ..... . .......... ............",
"............ ........ ... ........... ............",
"........... .......... ... ........... ...........",
".......... ........ ... . ............ ..........",
".......... ........ ... ........... ..........",
"......... ......... ... ..... ..... .........",
"........ ......... .... .. ..... ........",
"........ ......... ... ..... ..... ........",
"....... .......... .... ....... ..... .......",
"....... .......... .... ...... ...... .......",
"...... .......... ..... .... ..... ......",
"..... ........... ..... ... ...... .....",
"..... ............ ...... ... ...... .....",
".... ........... ...... ... ...... ....",
"... ............ ....... .. ...... ...",
"... ............. ........ ... ...... ...",
".. ................................................ ..",
".. .................................................. ..",
". .",
" ",
" ",
" ",
". .",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................"
]
nums = [ "..", ". ", " .", " " ]
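# Each output byte packs four 2-bit pixel-pair codes (one per scanline),
# most significant first: e.g. rows "..", "..", " .", "  " give indices
# 0, 0, 2, 3 -> 0b00001011 = 0x0B.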
for row in range(16):
for char in range(32):
num = 0
for index in range (row * 4, row * 4 + 4):
num <<= 2
num |= nums.index(image[index][(char * 2):(char * 2) + 2][:2])
if num in [8,10,13,27,127]:
serwrite(chr(27) + chr(num))
else:
serwrite(chr(num))
if row < 15:
serwrite(chr(13))
ser.flush()
time.sleep(2)
for i in range(5):
for x in range(17):
serwrite("\x1B[{0}]".format(x))
ser.flush()
time.sleep(0.05)
for x in range(16,-1,-1):
serwrite("\x1B[{0}]".format(x))
ser.flush()
time.sleep(0.05)
for x in range(17):
serwrite("\x1B[{0}]".format(x))
ser.flush()
time.sleep(0.05)
ser.flush()
ser.close()
| apache-2.0 | Python |
|
a1eff713339d528720ed5999d05a85066018f070 | Add visualise.py | rstebbing/bspline-regression | visualise.py | visualise.py | # visualise.py
# Imports
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from uniform_bspline import Contour
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_path')
parser.add_argument('--num-samples', type=int, default=1024)
args = parser.parse_args()
print 'Input:', args.input_path
with open(args.input_path, 'rb') as fp:
z = json.load(fp)
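    # Expected input shape (inferred from the keys read below):
    #   {"degree": 3, "num_control_points": 8, "dim": 2, "is_closed": false,
    #    "Y": [...], "w": [...], "u": [...], "X": [...]}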
degree, num_control_points, dim, is_closed = (
z['degree'], z['num_control_points'], z['dim'], z['is_closed'])
print ' degree:', degree
print ' num_control_points:', num_control_points
print ' dim:', dim
print ' is_closed:', is_closed
c = Contour(degree, num_control_points, dim, is_closed=is_closed)
Y, w, u, X = map(np.array, [z['Y'], z['w'], z['u'], z['X']])
print ' num_data_points:', Y.shape[0]
kw = {}
if Y.shape[1] == 3:
kw['projection'] = '3d'
f = plt.figure()
ax = f.add_subplot(111, **kw)
ax.set_aspect('equal')
def plot(X, *args, **kwargs):
ax.plot(*(tuple(X.T) + args), **kwargs)
plot(Y, 'ro')
for m, y in zip(c.M(u, X), Y):
plot(np.r_['0,2', m, y], 'k-')
plot(X, 'bo--', ms=8.0)
plot(c.M(c.uniform_parameterisation(args.num_samples), X), 'b-', lw=2.0)
plt.show()
if __name__ == '__main__':
main()
| mit | Python |
|
ee5089a6a16c5a6142444a0ad312fdb641aa845c | Fix tests | neizod/argcomplete,neizod/argcomplete,lisongmin/argcomplete,kislyuk/argcomplete,lisongmin/argcomplete,douglas-larocca/argcomplete,landonb/argcomplete,landonb/argcomplete,douglas-larocca/argcomplete,kislyuk/argcomplete | test/test.py | test/test.py | #!/usr/bin/env python
import locale
import os
import sys
import unittest
from tempfile import TemporaryFile
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from argparse import ArgumentParser
from argcomplete import *
IFS = '\013'
class TestArgcomplete(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['_ARGCOMPLETE'] = "yes"
os.environ['_ARC_DEBUG'] = "yes"
os.environ['IFS'] = IFS
def setUp(self):
pass
def tearDown(self):
pass
def run_completer(self, parser, command, point=None):
with TemporaryFile() as t:
os.environ['COMP_LINE'] = command
os.environ['COMP_POINT'] = point if point else str(len(command))
os.environ['COMP_WORDBREAKS'] = '"\'@><=;|&(:'
with self.assertRaises(SystemExit):
autocomplete(parser, output_stream=t, exit_method=sys.exit)
t.seek(0)
return t.read().decode(locale.getpreferredencoding()).split(IFS)
def test_basic_completion(self):
p = ArgumentParser()
p.add_argument("--foo")
p.add_argument("--bar")
completions = self.run_completer(p, "prog ")
assert(set(completions) == set(['-h', '--help', '--foo', '--bar']))
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
import locale
import os
import sys
import unittest
from tempfile import TemporaryFile
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from argparse import ArgumentParser
from argcomplete import *
IFS = '\013'
class TestArgcomplete(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['_ARGCOMPLETE'] = "yes"
os.environ['_ARC_DEBUG'] = "yes"
os.environ['IFS'] = IFS
def setUp(self):
pass
def tearDown(self):
pass
def run_completer(self, parser, command, point=None):
with TemporaryFile() as t:
os.environ['COMP_LINE'] = command
os.environ['COMP_POINT'] = point if point else str(len(command))
with self.assertRaises(SystemExit):
autocomplete(parser, output_stream=t, exit_method=sys.exit)
t.seek(0)
return t.read().decode(locale.getpreferredencoding()).split(IFS)
def test_basic_completion(self):
p = ArgumentParser()
p.add_argument("--foo")
p.add_argument("--bar")
completions = self.run_completer(p, "prog ")
assert(set(completions) == set(['-h', '--help', '--foo', '--bar']))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
a10648569bbd5dca44adc3cfd5a128703325932b | Create dihedral_tent.py | msmbuilder/mdentropy,msmbuilder/mdentropy | dihedral_tent.py | dihedral_tent.py | import numpy as np
import mdtraj as md
import argparse, cPickle
from multiprocessing import Pool
from itertools import product
from itertools import combinations_with_replacement as combinations
from contextlib import closing
def rbins(n=30):
return np.linspace(-np.pi, np.pi, n+3)[1:-1]
def ent(H):
H /= H.sum()
return -np.sum(H*np.nan_to_num(np.log2(H)))
def ent1D(X, r=rbins()):
H = np.histogram(X, r)[0]
return ent(H)
def ent2D(X, Y, r=rbins()):
H = np.histogram2d(X, Y, 2*[r])[0]
return ent(H)
def ent3D(X, Y, Z, r=rbins()):
W = np.vstack((X, Y, Z)).T
H = np.histogramdd(W, 3*[r])[0]
return ent(H)
def ce(X,Y):
return ent2D(X, Y) - ent1D(Y)
def cmi(X, Y, Z):
return ent2D(X, Z) + ent2D(Y, Z) - ent3D(X, Y, Z) - ent1D(Z)
def dihedrals(traj):
kinds = [md.compute_phi,
md.compute_psi]
#md.compute_chi1,
#md.compute_chi2]
return [kind(traj)[1].T for kind in kinds]
def f(cD, pD):
g = lambda i: sum([cmi(cD[d[0]][i[0]], pD[d[1]][i[1]], pD[d[0]][i[0]]) for d in combinations(range(len(cD)), 2)])
g.__name__ = 'g'
return g
def h(cD, pD):
q = lambda i: sum([ce(cD[d[0]][i[0]], pD[d[0]][i[0]]) for d in combinations(range(len(cD)), 2)])
q.__name__ = 'q'
return q
def run(current, past, iter, N):
cD = dihedrals(current)
pD = dihedrals(past)
n = cD[0].shape[0]
R = []
q = h(cD, pD)
for i in range(iter+1):
g = f(cD, pD)
with closing(Pool(processes=N)) as pool:
R.append(np.reshape(pool.map(g, product(range(n), range(n))), (n, n)))
pool.terminate()
[np.random.shuffle(d) for d in cD]
[np.random.shuffle(d) for d in pD]
CMI = R[0] - np.mean(R[1:], axis = 0)
with closing(Pool(processes=N)) as pool:
CH = (pool.map(q, zip(*(2*[range(n)])))*np.ones((n,n))).T
pool.terminate()
T = CMI/CH
return T.T - T
def parse_cmdln():
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-c', '--current', dest='current',help='File containing current step states.')
parser.add_argument('-p', '--past', dest='past',help='File containing past step states.')
parser.add_argument('-t', '--topology', dest='top',help='File containing topology.', default=None)
parser.add_argument('-s', '--shuffle-iter', dest='iter', help='Number of shuffle iterations.', default=100, type=int)
parser.add_argument('-n', '--n-proc', dest='N',help='Number of processors', default=4, type=int)
parser.add_argument('-o', '--output', dest='out', help='Name of output file.', default='tent.pkl')
args = parser.parse_args()
return args
if __name__=="__main__":
options = parse_cmdln()
current = md.load(options.current, top = options.top)
past = md.load(options.past, top = options.top)
D = run(current, past, options.iter, options.N)
cPickle.dump(D, open(options.out, 'wb'))
| mit | Python |
|
13e45a8578e57e2cb55b29980b0f3326dd393a20 | Create sump_monitor.py | danodemano/monitoring-scripts,danodemano/monitoring-scripts | sump_monitor.py | sump_monitor.py | #Import the required modules
import RPi.GPIO as GPIO
import time
import requests
import math
#Setup the GPIO
GPIO.setmode(GPIO.BCM)
#Define the TRIG and ECO pins - these are labeled on the sensor
TRIG = 23
ECHO = 24
#Number of readings we are going to take to avoid issues
numreadings = 7
#Alert that we are starting the measurement
print "Distance Measurement In Progress"
#Loop based on the above number
distancearray=[]
count = 0
while (count < numreadings):
#Setup the two pins for reading
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
GPIO.output(TRIG, False)
print "Waiting For Sensor To Settle"
time.sleep(2)
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO)==0:
pulse_start = time.time()
while GPIO.input(ECHO)==1:
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start
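    #17150 = half the speed of sound in cm/s (~34300 / 2): the pulse travels
    #to the water surface and back, so only half the round trip counts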
distance = pulse_duration * 17150
distance = round(distance, 2)
print "Distance:",distance,"cm"
distancearray.append(distance)
count = count + 1
#Index of the median reading: integer division floors numreadings / 2,
#which picks the middle element of the odd-length sorted list
mid = numreadings // 2
#Sort the array
distancearray.sort()
#Just for debugging
print distancearray
print distancearray[mid]
#Put the middle value back into the distance variable
distance = distancearray[mid]
#Write the data to the influxdn instance
data = 'environment,host=rpi1,location=basement,type=sumppump value=' + str(distance)
print data
output = requests.post('http://192.168.9.42:8086/write?db=home', data=data)
print output
#Release connections to the GPIO pins
GPIO.cleanup()
| mit | Python |
|
9a6ca54f7cca0bd5f21f0bc590a034e7e3e05b6e | Add migration to add userprofiles to existing users | project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app | src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py | src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('user', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial')
]
operations = [
migrations.RunPython(create_user_profiles_for_existing_users)
]
| apache-2.0 | Python |
|
a47d2654a5e23417c9e23f2ad19ed1b150524337 | add new mtc script | legoktm/legobot-old,legoktm/legobot-old | trunk/mtc.py | trunk/mtc.py | #!/usr/bin/python
"""
(C) Legoktm, 2008
Distributed under the terms of the MIT license.
__version__ = '$Id: $'
"""
import urllib, re, time
import os, sys
sys.path.append(os.environ['HOME'] + '/stuffs/pywiki/pylegoktm')
import wikipedia, pagegenerators, catlib
from image import *
from upload import UploadRobot
def delink(name):
name = str(name)
return re.compile(r'\[\[(.*?)\]\]', re.IGNORECASE).sub(r'\1', name)
def defilelink(name):
name = str(name)
return re.compile(r'\[\[File:(.*?)\]\]', re.IGNORECASE).sub(r'\1', name)
#SITES
wikien = wikipedia.getSite(code = 'en', fam = 'wikipedia')
commons = wikipedia.getSite(code = 'commons', fam = 'commons')
#FIX ERRORS that CommonsHelper makes
def fixdescrip(des):
# des = re.compile(r'\[\[wikipedia:Category:(.*?)\]\]', re.IGNORECASE).sub(r'[[Category:\1]]', name)
des = re.compile(r'\[\[wikipedia:commons:Category:(.*?)\]\]', re.IGNORECASE).sub(r'[[Category:\1]]', des)
des = re.compile(r'\[\[commons:Category:(.*?)\]\]', re.IGNORECASE).sub(r'[[Category:\1]]', des)
des = re.compile(r'\[\[wikipedia:commons:(.*?)\]\]', re.IGNORECASE).sub(r'[[\1]]', des)
des = re.compile(r'\[\[:en:commons:(.*?)\]\]', re.IGNORECASE).sub(r'[[\1]]', des)
des = re.compile(r'index.php\?title=Image', re.IGNORECASE).sub(r'index.php?title=File', des)
des = re.compile(r'\[http://en.wikipedia.org en.wikipedia\]', re.IGNORECASE).sub(r'[[:en:w|en.wikipedia]]', des)
des = re.compile(r'was stated to be made', re.IGNORECASE).sub(r'was made', des)
if re.search('category', des, re.I):
des = re.compile(r'\{\{subst:Unc\}\} <!\-\- Remove this line once you have added categories \-\->', re.IGNORECASE).sub(r'', des)
return des
#Get the description from CH
def ch2(name):
params = {
'language' : 'en',
'image' : defilelink(name),
'project' : 'wikipedia',
'username' : 'Legoktm',
'doit' : 'Get_text',
}
print 'The parameters are:\n%s' %(str(params))
params = urllib.urlencode(params)
f = urllib.urlopen("http://toolserver.org/~magnus/commonshelper.php", params)
ch2text = f.read()
f.close()
tablock = ch2text.split('<textarea ')[1].split('>')[0]
descrip = ch2text.split('<textarea '+tablock+'>')[1].split('</textarea>')[0]
    print 'Received info from CommonsHelper about %s:' %(delink(name))
descrip = fixdescrip(descrip)
print descrip
time.sleep(15)
return descrip
#Upload the image
def upload(name):
descrip = ch2(name)
print 'Uploading %s to commons:commons.' %(delink(name))
#wikipedia.showDiff('', descrip)
time.sleep(20)
bot = UploadRobot(name.fileUrl(), description=descrip, useFilename=name.fileUrl(), keepFilename=True, verifyDescription=False, targetSite = commons)
bot.run()
print '%s was uploaded to commons:commons.' %(delink(name))
#Edit enwiki page to reflect movement
def ncd(name):
name = delink(name)
page = wikipedia.Page(wikien, name)
wikitext = page.get()
state0 = wikitext
moveToCommonsTemplate = [r'Commons ok', r'Copy to Wikimedia Commons', r'Move to commons', r'Movetocommons', r'To commons', r'Copy to Wikimedia Commons by BotMultichill']
for moveTemplate in moveToCommonsTemplate:
wikitext = re.sub(r'\{\{' + moveTemplate + r'\}\}', u'', wikitext)
wikitext = '{{subst:ncd}}\n' + wikitext
print 'about to ncd'
wikipedia.showDiff(state0, wikitext)
time.sleep(15)
page.put(wikitext, u'File is now available on Wikimedia Commons.')
def moveimage(name):
#HACK
name = str(name)
name = re.compile(r'\[\[(.*?)\]\]', re.IGNORECASE).sub(r'\1', name)
name = wikipedia.ImagePage(wikien, name)
if wikipedia.Page(commons, delink(name)).exists():
print 'pre ncd'
print delink(name)
ncd(name)
return
upload(name)
    ncd(name)
#Use the gen and go!
def findimages():
wikien = wikipedia.getSite(code = 'en', fam = 'wikipedia')
commons = wikipedia.getSite(code = 'commons', fam = 'commons')
transclusionPage = wikipedia.Page(wikien, 'Template:Commons ok')
gen = pagegenerators.ReferringPageGenerator(transclusionPage, onlyTemplateInclusion = True)
# category = catlib.Category(wikien, 'Copy to Wikimedia Commons')
# gen = pagegenerators.CategorizedPageGenerator(category, recurse=True)
for page in gen:
print page
moveimage(page)
if __name__ == "__main__":
try:
findimages()
finally:
wikipedia.stopme() | mit | Python |
|
c2d658ed1caa91eb963a3df850b5cf9b99633f69 | Add missing transpose.py | ledatelescope/bifrost,ledatelescope/bifrost,ledatelescope/bifrost,ledatelescope/bifrost | python/bifrost/transpose.py | python/bifrost/transpose.py |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from libbifrost import _bf, _check, _get, _string2space
from ndarray import asarray
import ctypes
def transpose(dst, src, axes=None):
if axes is None:
axes = reversed(range(len(dst.shape)))
dst_bf = asarray(dst).as_BFarray()
src_bf = asarray(src).as_BFarray()
array_type = ctypes.c_int*src.ndim
axes_array = array_type(*axes)
_check(_bf.Transpose(src_bf, dst_bf, axes_array))
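# Illustrative use (bifrost.ndarray construction shown here is an assumption;
# any bifrost-backed arrays with matching, permuted shapes work):
#   a = bifrost.ndarray(np.zeros((2, 3, 4), dtype=np.float32))
#   b = bifrost.ndarray(np.zeros((4, 3, 2), dtype=np.float32))
#   transpose(b, a)               # axes default to fully reversed order
#   transpose(b, a, axes=[2, 1, 0])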
| bsd-3-clause | Python |
|
f46b08ce3d45b44d3f71759705e8045322c6155d | Create __init__.py | PyThaiNLP/pythainlp | pythainlp/spell/__init__.py | pythainlp/spell/__init__.py | # TODO
| apache-2.0 | Python |
|
1d70b3600ed7e56ad610787d1d5f8c7980121b8f | Add lzyf compreesion | madhuri2k/fantastic-spoon,madhuri2k/fantastic-spoon,madhuri2k/fantastic-spoon | yay0/lzyf.py | yay0/lzyf.py | # Compressor for LZYF
import yay0, logging, struct
maxOffsets = [16, 32, 1024]
maxLengths = {16: 513, 32: 4, 1024: 17}
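# Three back-reference encodings are emitted below (A/B/C match the comments
# in compress): A = 1 byte, offset <= 32, length <= 4; B = 2 bytes,
# offset <= 1024, length <= 17; C = 2 bytes, offset <= 16, length <= 513.
# Literal runs are stored as a count byte (<= 0x1F) followed by raw bytes.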
log = logging.getLogger("lzyf")
def compress(src):
src_size = len(src)
dst_size = 0
dst = bytearray()
src_pos = 0
rl = 0
ctrl_byte = 0
buf = bytearray()
# Start a copy-run
buf.append(src[src_pos])
src_pos += 1
rl += 1
while src_pos < src_size:
pos1, len1 = yay0.checkRunlength(src_pos, src_size, src, maxOffsets[0], maxLengths[maxOffsets[0]])
pos2, len2 = yay0.checkRunlength(src_pos, src_size, src, maxOffsets[2], maxLengths[maxOffsets[2]])
if len1 < 2 and len2 < 2:
# No repeat pattern, add to or create copy run
buf.append(src[src_pos])
rl += 1
src_pos +=1
if rl == 0x1F:
log.info("Copy run of {} ({}) from {} to {} at {} to {}".format(rl, len(buf), src_pos-rl, src_pos, dst_size, dst_size+rl+1))
dst.append(rl)
dst.extend(buf)
dst_size += len(buf) + 1
buf = bytearray()
rl = 0
else:
# output existing copy run, if any
if rl != 0:
log.info("Copy run of {} ({}) from {} to {} at {} to {}".format(rl, len(buf), src_pos-rl, src_pos, dst_size, dst_size+rl+1))
dst.append(rl)
dst.extend(buf)
dst_size += len(buf) + 1
buf = bytearray()
rl = 0
# log
if len1 > len2:
# encode pos1, len1 using C
v = src_pos-pos1-1
ctrl_byte = 0x2000 | ((v & 0x0F) << 9) | ((len1-2) & 0x1FF)
dst.extend(ctrl_byte.to_bytes(2, byteorder='big'))
dst_size += 2
src_pos += len1
elif len2 <= maxLengths[maxOffsets[1]] and pos2 <= maxOffsets[1]:
# encode pos2, len2 using A
v = src_pos - pos2 - 1
ctrl_byte = 0x80 | ((v<<2) & 0x7c) | ((len2-1) & 0x03)
dst.append(ctrl_byte)
dst_size += 1
src_pos += len2
else:
# encode pos2, len2 using B
v = src_pos - pos2 - 1
ctrl_byte = 0x4000 | ((v<<4) & 0x3FF0) | ((len2-2) & 0x0F)
dst.extend(ctrl_byte.to_bytes(2, byteorder='big'))
dst_size += 2
src_pos += len2
if rl != 0:
log.info("Copy run of {} ({}) from {} to {} at {} to {}".format(rl, len(buf), src_pos-rl, src_pos, dst_size, dst_size+rl+1))
dst.append(rl)
dst.extend(buf)
dst_size += len(buf) + 1
buf = bytearray()
rl = 0
log.info("Encoded {} into {} bytes.".format(src_size, dst_size))
return (dst_size, src_size, dst)
def analyzeRuns(data):
for i in range(len(data)):
p, l = yay0.checkRunlength(i, len(data), data, 1024, 513)
if l>1:
log.info("{}: Found run of {} at {}".format(i, l, p))
# i += l
| mit | Python |
|
82069f44f8b8bcb9f7b4df9a267a8641c54b0442 | convert dwt_idwt doctests to nose tests. | kwohlfahrt/pywt,kwohlfahrt/pywt,PyWavelets/pywt,aaren/pywt,rgommers/pywt,grlee77/pywt,aaren/pywt,michelp/pywt,eriol/pywt,grlee77/pywt,PyWavelets/pywt,michelp/pywt,rgommers/pywt,ThomasA/pywt,eriol/pywt,rgommers/pywt,rgommers/pywt,ThomasA/pywt,Dapid/pywt,michelp/pywt,kwohlfahrt/pywt,aaren/pywt,eriol/pywt,Dapid/pywt,ThomasA/pywt,Dapid/pywt | pywt/tests/test_dwt_idwt.py | pywt/tests/test_dwt_idwt.py | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (run_module_suite, assert_allclose, assert_,
assert_raises, dec)
import pywt
def test_dwt_idwt_basic():
x = [3, 7, 1, 1, -2, 5, 4, 6]
cA, cD = pywt.dwt(x, 'db2')
cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_wavelet_kwd():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
cA, cD = pywt.dwt(x, wavelet=w, mode='cpd')
cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
7.81994027]
cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
-0.09722957, -0.07045258]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
ln = pywt.dwt_coeff_len(data_len=len(x), filter_len=w.dec_len, mode='sym')
assert_(ln == 6)
ln_modes = [pywt.dwt_coeff_len(len(x), w.dec_len, mode) for mode in
pywt.MODES.modes]
assert_allclose(ln_modes, [6, 6, 6, 6, 6, 4])
@dec.knownfailureif(True, "None input not yet working")
def test_idwt_none_input():
# None input equals arrays of zeros of the right length
res1 = pywt.idwt([1,2,0,1], None, 'db2', 'sym')
res2 = pywt.idwt([1, 2, 0, 1], [0, 0, 0, 0], 'db2', 'sym')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
res1 = pywt.idwt(None, [1, 2, 0, 1], 'db2', 'sym')
res2 = pywt.idwt([0, 0, 0, 0], [1, 2, 0, 1], 'db2', 'sym')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
# Only one argument at a time can be None
assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'sym')
def test_idwt_correct_size_kw():
res = pywt.idwt([1, 2, 3, 4, 5], [1, 2, 3, 4], 'db2', 'sym',
correct_size=True)
expected = [1.76776695, 0.61237244, 3.18198052, 0.61237244, 4.59619408,
0.61237244]
assert_allclose(res, expected)
assert_raises(ValueError, pywt.idwt,
[1, 2, 3, 4, 5], [1, 2, 3, 4], 'db2', 'sym')
assert_raises(ValueError, pywt.idwt, [1, 2, 3, 4], [1, 2, 3, 4, 5], 'db2',
'sym', correct_size=True)
def test_idwt_invalid_input():
# Too short, min length is 4 for 'db4':
assert_raises(ValueError, pywt.idwt, [1,2,4], [4,1,3], 'db4', 'sym')
if __name__ == '__main__':
run_module_suite()
| mit | Python |
|
34001081c2cfaa86d85f7a5b51925dca4a6e1a9f | Use Python 3 type syntax in `zerver/webhooks/yo/view.py`. | tommyip/zulip,synicalsyntax/zulip,eeshangarg/zulip,rht/zulip,andersk/zulip,showell/zulip,kou/zulip,zulip/zulip,punchagan/zulip,eeshangarg/zulip,kou/zulip,andersk/zulip,rht/zulip,punchagan/zulip,rht/zulip,jackrzhang/zulip,andersk/zulip,shubhamdhama/zulip,kou/zulip,rishig/zulip,andersk/zulip,shubhamdhama/zulip,rishig/zulip,hackerkid/zulip,tommyip/zulip,rishig/zulip,dhcrzf/zulip,kou/zulip,timabbott/zulip,punchagan/zulip,timabbott/zulip,brainwane/zulip,shubhamdhama/zulip,andersk/zulip,brainwane/zulip,hackerkid/zulip,eeshangarg/zulip,tommyip/zulip,showell/zulip,timabbott/zulip,shubhamdhama/zulip,hackerkid/zulip,eeshangarg/zulip,brainwane/zulip,zulip/zulip,jackrzhang/zulip,jackrzhang/zulip,punchagan/zulip,rht/zulip,rht/zulip,brainwane/zulip,dhcrzf/zulip,rishig/zulip,andersk/zulip,dhcrzf/zulip,hackerkid/zulip,eeshangarg/zulip,kou/zulip,punchagan/zulip,kou/zulip,jackrzhang/zulip,showell/zulip,timabbott/zulip,zulip/zulip,tommyip/zulip,zulip/zulip,hackerkid/zulip,jackrzhang/zulip,timabbott/zulip,shubhamdhama/zulip,synicalsyntax/zulip,dhcrzf/zulip,zulip/zulip,jackrzhang/zulip,showell/zulip,kou/zulip,brainwane/zulip,timabbott/zulip,synicalsyntax/zulip,dhcrzf/zulip,rishig/zulip,brainwane/zulip,timabbott/zulip,showell/zulip,tommyip/zulip,punchagan/zulip,zulip/zulip,shubhamdhama/zulip,dhcrzf/zulip,rishig/zulip,eeshangarg/zulip,synicalsyntax/zulip,rishig/zulip,tommyip/zulip,shubhamdhama/zulip,andersk/zulip,tommyip/zulip,dhcrzf/zulip,jackrzhang/zulip,showell/zulip,zulip/zulip,rht/zulip,brainwane/zulip,punchagan/zulip,hackerkid/zulip,synicalsyntax/zulip,showell/zulip,eeshangarg/zulip,synicalsyntax/zulip,synicalsyntax/zulip,hackerkid/zulip,rht/zulip | zerver/webhooks/yo/view.py | zerver/webhooks/yo/view.py | # Webhooks for external integrations.
from typing import Optional
import ujson
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_private_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_user
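# Handler for the Yo webhook: sends the recipient named by the 'email' request
# parameter a private message of the form "Yo from <username>".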
@api_key_only_webhook_view('Yo')
@has_request_variables
def api_yo_app_webhook(request: HttpRequest, user_profile: UserProfile,
email: str = REQ(default=""),
username: str = REQ(default='Yo Bot'),
topic: Optional[str] = REQ(default=None),
user_ip: Optional[str] = REQ(default=None)) -> HttpResponse:
body = ('Yo from %s') % (username,)
receiving_user = get_user(email, user_profile.realm)
check_send_private_message(user_profile, request.client, receiving_user, body)
return json_success()
| # Webhooks for external integrations.
from typing import Optional
import ujson
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_private_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_user
@api_key_only_webhook_view('Yo')
@has_request_variables
def api_yo_app_webhook(request, user_profile, email=REQ(default=""),
username=REQ(default='Yo Bot'), topic=REQ(default=None),
user_ip=REQ(default=None)):
# type: (HttpRequest, UserProfile, str, str, Optional[str], Optional[str]) -> HttpResponse
body = ('Yo from %s') % (username,)
receiving_user = get_user(email, user_profile.realm)
check_send_private_message(user_profile, request.client, receiving_user, body)
return json_success()
| apache-2.0 | Python |
8c98d12a08617b9a1ab1a264b826f5e9046eca05 | Add getHWND/getAllWindows utility functions for bots. | brainbots/assistant | assisstant/bots/utility.py | assisstant/bots/utility.py | import subprocess
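# Thin wrappers around wmctrl: getAllWindows() parses `wmctrl -l -p -x` output
# into dicts, and getHWND() returns the first window whose entries contain all
# of the criteria pairs (dict-view subset test: criteria.items() <= window.items()).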
# criteria: dictionary that has key/values to match against.
# e.g. {"wm_class": "Navigator.Firefox"}
def getHWND(criteria):
windows = getAllWindows()
for window in windows:
if criteria.items() <= window.items():
return window
return None
def getAllWindows():
windows = []
with subprocess.Popen(["wmctrl", "-l", "-p", "-x"], stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:
for line in p.stdout:
tokens = line.split()
windows.append({"hwnd": tokens[0], "workspace": tokens[1], "pid": tokens[2], "wm_class": tokens[3], "title": " ".join(tokens[5:])})
return windows
| apache-2.0 | Python |
|
4912c8261dba456e8e4a62051afdf01565f20ae9 | Add first iteration of raw_to_average_jpegs.py. | nth10sd/raw-images-to-average-jpegs | raw_to_average_jpegs.py | raw_to_average_jpegs.py | #! /usr/bin/env python
#
# Tested on Macs. First run `brew install ufraw exiftool`
import argparse
import glob
import multiprocessing as mp
import os
import subprocess
def parseArgs():
desc = 'Auto-white-balance raw images and create average-sized JPEG files with their EXIF info.'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-p', '--path', dest='imagesPath', default=os.getcwd(),
help='Sets the path containing the DNG images. Default is the current ' + \
'working directory, which is: %(default)s')
return parser, parser.parse_args()
def processFiles(fname):
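    # Two-step pipeline: ufraw-batch auto-white-balances the DNG into a 2048px
    # JPEG, then exiftool copies the EXIF tags over, excluding Orientation
    # (presumably already applied during conversion).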
subprocess.check_call(['ufraw-batch', '--wb=auto', '--overwrite',
'--size=2048', '--out-type=jpeg', fname])
subprocess.check_call(['exiftool', '-overwrite_original', '-q', '-x', 'Orientation',
'-TagsFromFile', fname, fname.replace('.DNG', '.jpg')])
def workingProgramCheck(prog):
'''Checks whether the program is accessible on the system.'''
try:
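        # 'which -s' is the silent BSD/macOS variant: no output, non-zero exit
        # status when prog is missing from PATH.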
subprocess.check_call(['which', '-s', prog])
except Exception:
raise Exception(prog + ' is not accessible on the system.')
def main():
parser, args = parseArgs()
# Check whether ufraw and exiftool are working properly.
workingProgramCheck('ufraw-batch')
workingProgramCheck('exiftool')
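    # Fan out one conversion task per DNG file, with one worker per CPU core.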
pool = mp.Pool(mp.cpu_count())
for fname in glob.glob(os.path.normpath(os.path.join(args.imagesPath, '*.DNG'))):
pool.apply_async(processFiles, [fname])
pool.close()
pool.join()
if __name__ == '__main__':
main()
| apache-2.0 | Python |