commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
cc89c5222ec7f6d6f95b5efdce3958b3ca33814e | Add basic functionality and regression tests for ACA dark cal module | sot/mica,sot/mica | mica/archive/tests/test_aca_dark_cal.py | mica/archive/tests/test_aca_dark_cal.py | """
Basic functionality and regression tests for ACA dark cal module.
"""
import numpy as np
from ..aca_dark import dark_cal
def test_date_to_dark_id():
    """A full date-time string maps to the 7-char YYYYDOY dark-cal ID."""
    dark_id = dark_cal.date_to_dark_id('2011-01-15T12:00:00')
    assert dark_id == '2011015'
def test_dark_id_to_date():
    """A YYYYDOY dark-cal ID maps back to the colon-separated YYYY:DOY date."""
    date = dark_cal.dark_id_to_date('2011015')
    assert date == '2011:015'
def test_dark_temp_scale():
    """Scaling dark current from -10 C down to -14 C gives a factor near 0.70."""
    assert np.allclose(dark_cal.dark_temp_scale(-10., -14), 0.70)
def test_get_dark_cal_id():
    """'nearest'/'before' pick the 2007006 cal around 2007:008; 'after' picks 2007069."""
    expected = (('nearest', '2007006'),
                ('before', '2007006'),
                ('after', '2007069'))
    for select, dark_id in expected:
        assert dark_cal.get_dark_cal_id('2007:008', select) == dark_id
def test_get_dark_cal_image():
    """A dark cal image is the full 1024x1024 CCD frame."""
    assert dark_cal.get_dark_cal_image('2007:008').shape == (1024, 1024)
def test_get_dark_cal_props():
    """Props have replicas and start time; include_image adds the 1024x1024 image."""
    def check_common(props):
        assert len(props['replicas']) == 5
        assert props['start'] == '2007:006:01:56:46.817'

    check_common(dark_cal.get_dark_cal_props('2007:008'))

    props = dark_cal.get_dark_cal_props('2007:008', include_image=True)
    check_common(props)
    assert props['image'].shape == (1024, 1024)
def test_get_dark_cal_props_table():
    """Table over calendar year 2007 has the four expected 'eb' column values."""
    props = dark_cal.get_dark_cal_props_table('2007:001', '2008:001')
    expected_eb = [24.6, 25.89, 51.13, 1.9]
    assert np.allclose(props['eb'], expected_eb)
| bsd-3-clause | Python |
|
dd3ed1c8fdf9024a7978a1443baf8ca101f21642 | add demo object for channel | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | server/Mars/MarsRpc/ChannelObjs.py | server/Mars/MarsRpc/ChannelObjs.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from MarsLog.LogManager import LogManager
from Utils.Const import *
class EchoChannelObj(object):
    """Demo channel object that echoes every received payload back to the peer."""

    def __init__(self, connector):
        super(EchoChannelObj, self).__init__()
        self.logger = LogManager.getLogger('MarsRpc.EchoChannelObj')
        # Transport object used to write replies back to the remote end.
        self.connector = connector

    def onDisconnected(self):
        """Callback invoked when the underlying connector is closed."""
        self.logger.info('connector closed')

    def onRead(self, data):
        """Callback for inbound data: log it, echo it back unchanged, report success."""
        self.logger.info('received data: %s', data)
        self.connector.writeData(data)
        return MARS_RC_SUCCESSED
class LoggingChannelObj(object):
    """Demo channel object that only logs received payloads (no reply)."""

    def __init__(self, connector):
        super(LoggingChannelObj, self).__init__()
        # BUGFIX: the attribute was misspelled 'loggero', so onDisconnected()
        # and onRead() raised AttributeError when they accessed self.logger.
        self.logger = LogManager.getLogger('MarsRpc.LoggingChannelObj')
        self.connector = connector

    def onDisconnected(self):
        """Callback invoked when the underlying connector is closed."""
        self.logger.info('connector closed')

    def onRead(self, data):
        """Callback for inbound data: log it and report success."""
        self.logger.info('received data: %s', data)
        return MARS_RC_SUCCESSED
| bsd-2-clause | Python |
|
160a3012db5513b4d3a45098a9b0b72e1f117b20 | add a constant coefficient test for sanity | zingale/pyro2,zingale/pyro2,harpolea/pyro2,harpolea/pyro2 | lowmach/mg_constant_test.py | lowmach/mg_constant_test.py | #!/usr/bin/env python
"""
Test the variable coefficient MG solver with a CONSTANT coefficient
problem -- the same on from the multigrid class test. This ensures
we didn't screw up the base functionality here.
We solve:
u_xx + u_yy = -2[(1-6x**2)y**2(1-y**2) + (1-6y**2)x**2(1-x**2)]
u = 0 on the boundary
this is the example from page 64 of the book `A Multigrid Tutorial, 2nd Ed.'
The analytic solution is u(x,y) = (x**2 - x**4)(y**4 - y**2)
"""
from __future__ import print_function
import sys
import numpy
import mesh.patch as patch
import variable_coeff_MG as MG
import pylab
# the analytic solution
def true(x, y):
    """Analytic solution u(x, y) = (x**2 - x**4)(y**4 - y**2)."""
    return x**2 * (1.0 - x**2) * y**2 * (y**2 - 1.0)
# the coefficients
def alpha(x, y):
    """Constant coefficient field: 1 everywhere, with x's shape and dtype."""
    return numpy.full_like(x, 1)
# the L2 error norm
def error(myg, r):
    """Return the grid-weighted L2 norm of r over the interior cells of myg.

    The sum is multiplied by dx*dy so the norm is resolution-independent.
    """
    interior = r[myg.ilo:myg.ihi + 1, myg.jlo:myg.jhi + 1]
    return numpy.sqrt(myg.dx * myg.dy * numpy.sum(interior**2))
# the righthand side
def f(x, y):
    """RHS: -2[(1 - 6x^2) y^2 (1 - y^2) + (1 - 6y^2) x^2 (1 - x^2)]."""
    term_x = (1.0 - 6.0 * x**2) * y**2 * (1.0 - y**2)
    term_y = (1.0 - 6.0 * y**2) * x**2 * (1.0 - x**2)
    return -2.0 * (term_x + term_y)
# test the multigrid solver
nx = 256
ny = nx

# create the coefficient variable -- note we don't want Dirichlet here,
# because that will try to make alpha = 0 on the interface.  alpha can
# have different BCs than phi
g = patch.Grid2d(nx, ny, ng=1)
d = patch.CellCenterData2d(g)

bc_c = patch.BCObject(xlb="neumann", xrb="neumann",
                      ylb="neumann", yrb="neumann")
d.register_var("c", bc_c)
d.create()

# fill the coefficient array with the (constant) alpha field
c = d.get_var("c")
c[:,:] = alpha(g.x2d, g.y2d)

# plot the coefficient field as a sanity check (should be uniform = 1)
pylab.clf()
pylab.figure(num=1, figsize=(5.0,5.0), dpi=100, facecolor='w')
pylab.imshow(numpy.transpose(c[g.ilo:g.ihi+1,g.jlo:g.jhi+1]),
             interpolation="nearest", origin="lower",
             extent=[g.xmin, g.xmax, g.ymin, g.ymax])
pylab.xlabel("x")
pylab.ylabel("y")
pylab.title("nx = {}".format(nx))
pylab.colorbar()
pylab.savefig("mg_alpha.png")

# check whether the RHS sums to zero (necessary for periodic data)
rhs = f(g.x2d, g.y2d)
print("rhs sum: {}".format(numpy.sum(rhs[g.ilo:g.ihi+1,g.jlo:g.jhi+1])))

# create the multigrid object
a = MG.VarCoeffCCMG2d(nx, ny,
                      xl_BC_type="dirichlet", yl_BC_type="dirichlet",
                      xr_BC_type="dirichlet", yr_BC_type="dirichlet",
                      nsmooth=10,
                      nsmooth_bottom=50,
                      coeffs=c, coeffs_bc=bc_c,
                      verbose=1)

# debugging: dump the restricted coefficients at each multigrid level
# for i in range(a.nlevels):
#     print(i)
#     print(a.grids[i].get_var("coeffs"))

# initialize the solution to 0
a.init_zeros()

# initialize the RHS using the function f
rhs = f(a.x2d, a.y2d)
a.init_RHS(rhs)

# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)

# alternately, we can just use smoothing by uncommenting the following
# a.smooth(a.nlevels-1, 50000)

# get the solution
v = a.get_solution()

# compute the error from the analytic solution
b = true(a.x2d,a.y2d)
e = v - b

print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
      (error(a.soln_grid, e), a.relative_error, a.num_cycles))

# plot the solution and the pointwise error side by side
pylab.clf()
pylab.figure(num=1, figsize=(10.0,5.0), dpi=100, facecolor='w')

pylab.subplot(121)
pylab.imshow(numpy.transpose(v[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
             interpolation="nearest", origin="lower",
             extent=[a.xmin, a.xmax, a.ymin, a.ymax])
pylab.xlabel("x")
pylab.ylabel("y")
pylab.title("nx = {}".format(nx))
pylab.colorbar()

pylab.subplot(122)
pylab.imshow(numpy.transpose(e[a.ilo:a.ihi+1,a.jlo:a.jhi+1]),
             interpolation="nearest", origin="lower",
             extent=[a.xmin, a.xmax, a.ymin, a.ymax])
pylab.xlabel("x")
pylab.ylabel("y")
pylab.title("error")
pylab.colorbar()

pylab.tight_layout()
pylab.savefig("mg_test.png")

# store the output for later comparison
my_data = a.get_solution_object()
my_data.write("mg_test")
| bsd-3-clause | Python |
|
81b9d141295ee2a8b31974aa86d89b80dfefe3ca | Create question5.py | pythonzhichan/DailyQuestion,pythonzhichan/DailyQuestion | chengjun/question5.py | chengjun/question5.py | #!usr/bin/python
import re
class extrac_url():
    """Split a URL into its parts using regular expressions.

    pater() returns [scheme, netloc, path, query_params, fragment].
    Note: the scheme keeps its trailing ':' (e.g. 'http:'), matching the
    regex-based extraction this class has always done.
    """

    def __init__(self, url):
        self.url = url

    def pater(self):
        """Return [scheme, netloc, path, query_params, fragment] for self.url."""
        url = self.url
        # Everything after the '//' separator (raises if '//' is absent or
        # appears more than once, as before).
        scheme_part, after_scheme = url.split('//')
        scheme = re.search(r'(.+)//', url).group(1)
        netloc = re.search(r'//(.+)/', url).group(1)
        path = re.search(r'(/.+)\?', after_scheme).group(1)
        raw_query = re.search(r'\?(.+)#', url).group(1)
        # Split on the FIRST '=' only, so base64-ish values like 'a==' survive.
        query_params = {}
        for pair in re.split(r'&', raw_query):
            eq = pair.find('=')
            query_params[pair[:eq]] = pair[eq + 1:]
        fragment = re.search(r'#(.+)', url).group(1)
        return [scheme, netloc, path, query_params, fragment]
if __name__=="__main__":
    # Demo: parse a sample WeChat article URL and print each component.
    # Python-2-only `print` statements replaced with print() calls; the
    # single-argument form behaves identically on Python 2 and 3.
    ttt = extrac_url("http://mp.weixin.qq.com/s?__biz=MzA4MjEyNTA5Mw==&mid=2652566513#wechat_redirect").pater()
    print("scheme is %s " % ttt[0])
    print("netloc is %s " % ttt[1])
    print('path is %s' % ttt[2])
    print('query_params is %s' % ttt[3])
    print('fragment is %s' % ttt[4])
| mit | Python |
|
5eefc407b8f51c017a3f4193c88f6dc188a88601 | Include OpenCV based Python CLAHE script | seung-lab/Julimaps,seung-lab/Julimaps | src/CLAHE_dir.py | src/CLAHE_dir.py | from PIL import Image
import numpy as np
import h5py
import os
import sys
import cv2
# Maybe consider implemeting more involved auto-balancing
# http://wiki.cmci.info/documents/120206pyip_cooking/python_imagej_cookbook#automatic_brightnesscontrast_button
def apply_clahe_to_H5(fn, clahe):
    """Apply CLAHE to the /img dataset of HDF5 file `fn`, in place.

    After equalization the pixel values are linearly stretched to cover the
    full 0-255 range and written back as uint8.

    :param fn: path to an HDF5 file containing an "/img" dataset
    :param clahe: a cv2 CLAHE object (from cv2.createCLAHE)
    """
    # Context manager guarantees the file is closed even if CLAHE fails
    # (the original code leaked the handle on any exception).
    with h5py.File(fn, "r+") as f:
        img = f["/img"]
        # apply clahe
        arr = clahe.apply(np.array(img))
        # stretch distribution across 0-255 range
        max_a = np.max(arr)
        min_a = np.min(arr)
        alpha = 255.0 / (max_a - min_a)
        beta = -alpha * min_a
        arr = (alpha * arr + beta).astype(np.uint8)
        # resave image
        img[...] = arr
def get_H5_array(fn):
    """Return the /img dataset of HDF5 file `fn` as an in-memory numpy array.

    Uses a context manager so the file handle is closed promptly (the
    original version never closed it). np.array() copies the data, so the
    returned array remains valid after the file is closed.
    """
    with h5py.File(fn, "r") as f:
        return np.array(f["/img"])
def main():
    """Apply CLAHE in place to every matching H5 image in the current directory.

    NOTE(review): the old docstring said "Make TIF images" but no TIFs are
    written here -- the H5 files themselves are modified.
    """
    dir = os.getcwd()
    # file = sys.argv[1]
    files = os.listdir(dir)
    # Tile size 63x63 with clip limit 2.0; applied to all selected files.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(63,63))
    for file in files:
        # Only process pre-aligned tiles (filename convention).
        if file.endswith("1,1_prealigned.h5"):
            print "Applying CLAHE to " + file  # NOTE: Python 2 print statement
            # if file == 'Tile_r1-c7_S2-W001_sec15.h5':
            fn = os.path.join(dir, file)
            apply_clahe_to_H5(fn, clahe)

# Entry point deliberately left commented out in the original.
# if __name__ == '__main__':
#     main()
|
f0af14b8fcd420b63a47e18938664e14cf9ea968 | Add generic asynchronous/synchronous run command | CanonicalLtd/subiquity,CanonicalLtd/subiquity | subiquity/utils.py | subiquity/utils.py | # Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import subprocess
import os
import codecs
import pty
from tornado.process import Subprocess
from subiquity.async import Async
import shlex
import logging
log = logging.getLogger("subiquity.utils")
STREAM = Subprocess.STREAM
def run_command_async(cmd, streaming_callback=None):
    """Run `cmd` via run_command on the Async worker pool.

    Returns the future produced by the pool; resolve it to get
    run_command's return value (or its raised exception).
    """
    return Async.pool.submit(run_command, cmd, streaming_callback)
def run_command(cmd, streaming_callback=None):
    """ Executes `cmd` sending its output to `streaming_callback`

    :param cmd: command to run; a string is split with shlex, otherwise an
                argv-style list is used as-is.
    :param streaming_callback: optional callable invoked with (roughly) the
                               last ten lines of decoded output as it arrives,
                               and once more after the command finishes.
    :returns: the command's full stdout (decoded, stripped) on success.
    :raises Exception: if the command exits non-zero; the message carries
                       the command's stderr.
    """
    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    log.debug("Running command: {}".format(cmd))
    # Give the child a pty for stdout so it flushes like an interactive
    # terminal; stderr goes through an ordinary pipe.
    stdoutm, stdouts = pty.openpty()
    proc = subprocess.Popen(cmd,
                            stdout=stdouts,
                            stderr=subprocess.PIPE)
    # Parent only reads from the master side of the pty.
    os.close(stdouts)
    # Incremental decoder copes with UTF-8 sequences split across reads.
    decoder = codecs.getincrementaldecoder('utf-8')()

    def last_ten_lines(s):
        # Only inspect the tail (~1500 chars) to keep this cheap; strip
        # carriage returns so progress-bar style output reads cleanly.
        chunk = s[-1500:]
        lines = chunk.splitlines(True)
        return ''.join(lines[-10:]).replace('\r', '')

    decoded_output = ""
    try:
        while proc.poll() is None:
            try:
                b = os.read(stdoutm, 512)
            except OSError as e:
                if e.errno != errno.EIO:
                    raise
                # EIO from a pty master means the slave end was closed.
                break
            else:
                final = False
                if not b:
                    final = True
                decoded_chars = decoder.decode(b, final)
                if decoded_chars is None:
                    continue
                decoded_output += decoded_chars
                if streaming_callback:
                    ls = last_ten_lines(decoded_output)
                    streaming_callback(ls)
                if final:
                    break
    finally:
        # Always release the pty fd and make sure the child is reaped.
        os.close(stdoutm)
        if proc.poll() is None:
            proc.kill()
        proc.wait()
    errors = [l.decode('utf-8') for l in proc.stderr.readlines()]
    if streaming_callback:
        # Final callback with the last lines of the complete output.
        streaming_callback(last_ten_lines(decoded_output))
    errors = ''.join(errors)
    if proc.returncode == 0:
        return decoded_output.strip()
    else:
        log.debug("Error with command: "
                  "[Output] '{}' [Error] '{}'".format(
                      decoded_output.strip(),
                      errors.strip()))
        raise Exception("Problem running command: [Error] '{}'".format(
            errors.strip()))
| agpl-3.0 | Python |
|
2aab90ab9e4a32bef1496149a2780b7385318043 | Add tests | bjodah/symengine.py,symengine/symengine.py,bjodah/symengine.py,symengine/symengine.py,symengine/symengine.py,bjodah/symengine.py | symengine/tests/test_cse.py | symengine/tests/test_cse.py | from symengine import cse, sqrt, symbols
def test_cse_single():
    """The repeated subexpression (x + y) is factored out of one expression."""
    x, y, x0 = symbols("x, y, x0")
    expr = pow(x + y, 2) + sqrt(x + y)
    substs, reduced = cse([expr])
    assert substs == [(x0, x + y)]
    assert reduced == [sqrt(x0) + x0**2]
def test_multiple_expressions():
    """A subexpression shared across expressions is substituted in each one."""
    w, x, y, z, x0 = symbols("w, x, y, z, x0")
    common = x + y
    substs, reduced = cse([common * z, common * w])
    assert substs == [(x0, x + y)]
    assert reduced == [x0 * z, x0 * w]
| mit | Python |
|
5a21b66f7ab77f419245d8c07d7473a6e1600fc4 | Add crawler for 'Hark, A Vagrant' | jodal/comics,jodal/comics,klette/comics,datagutten/comics,datagutten/comics,datagutten/comics,klette/comics,klette/comics,jodal/comics,jodal/comics,datagutten/comics | comics/crawler/crawlers/harkavagrant.py | comics/crawler/crawlers/harkavagrant.py | from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
    """Metadata for the 'Hark, A Vagrant!' comic by Kate Beaton."""
    name = 'Hark, A Vagrant!'
    language = 'en'
    url = 'http://www.harkavagrant.com/'
    start_date = '2008-05-01'
    history_capable_days = 120  # presumably how far back crawling works -- see BaseComicMeta
    schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
    time_zone = -8  # UTC offset (US Pacific)
    rights = 'Kate Beaton'
class ComicCrawler(BaseComicCrawler):
    def _get_url(self):
        """Find the strip's image URL (and title text) for self.pub_date.

        Scans the RSS feed for the entry published on self.pub_date, then
        picks the src= and title= attribute values out of the entry summary
        by splitting on double quotes (the value follows its attribute name
        in the resulting list).
        """
        self.parse_feed('http://www.rsspect.com/rss/vagrant.xml')
        for entry in self.feed.entries:
            if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
                pieces = entry.summary.split('"')
                for i, piece in enumerate(pieces):
                    if piece.count('src='):
                        self.url = pieces[i + 1]
                    if piece.count('title='):
                        self.title = pieces[i + 1]
                    # Stop as soon as both attributes have been found.
                    # NOTE(review): indentation reconstructed from a
                    # flattened source -- confirm this early-return sits
                    # inside the attribute-scanning loop.
                    if self.url and self.title:
                        return
| agpl-3.0 | Python |
|
2f188d3d43741821126e381af9753e0e3d7be231 | test hello python file | rryqszq4/ngx_python,rryqszq4/ngx_python,rryqszq4/ngx_python | t/library/hello.py | t/library/hello.py | import ngx
ngx.echo("Hello, Ngx_python\n") | bsd-2-clause | Python |
|
c2a0b66ec1ad7f32e1291fc6a2312d2a4a06a6e3 | Add class-file to right location | python-technopark/MoneyMoney | src/mmhandler.py | src/mmhandler.py | class MmHandler:
pass
| mit | Python |
|
aa096865f425a57ccbde51d0586be8a07403a6bd | Add migration for BoundarySet start/end see #25 | datamade/represent-boundaries,datamade/represent-boundaries,opencorato/represent-boundaries,datamade/represent-boundaries,opencorato/represent-boundaries,opencorato/represent-boundaries | boundaries/south_migrations/0005_add_set_start_end_date.py | boundaries/south_migrations/0005_add_set_start_end_date.py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add nullable start_date/end_date to BoundarySet."""

    def forwards(self, orm):
        """Apply: add the two nullable DateField columns."""
        # Adding field 'BoundarySet.start_date'
        db.add_column(u'boundaries_boundaryset', 'start_date',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'BoundarySet.end_date'
        db.add_column(u'boundaries_boundaryset', 'end_date',
                      self.gf('django.db.models.fields.DateField')(null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the two columns added by forwards()."""
        # Deleting field 'BoundarySet.start_date'
        db.delete_column(u'boundaries_boundaryset', 'start_date')

        # Deleting field 'BoundarySet.end_date'
        db.delete_column(u'boundaries_boundaryset', 'end_date')

    # Frozen ORM snapshot auto-generated by South; do not edit by hand.
    models = {
        u'boundaries.boundary': {
            'Meta': {'unique_together': "((u'slug', u'set'),)", 'object_name': 'Boundary'},
            'centroid': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
            'extent': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label_point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'spatial_index': 'False', 'blank': 'True'}),
            'metadata': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '192', 'db_index': 'True'}),
            'set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'boundaries'", 'to': u"orm['boundaries.BoundarySet']"}),
            'set_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shape': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'simple_shape': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'})
        },
        u'boundaries.boundaryset': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'BoundarySet'},
            'authority': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'extent': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'extra': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateField', [], {}),
            'licence_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'singular': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'primary_key': 'True'}),
            'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['boundaries']
|
4f155252bf9d9508b955d7eecf589da347bff817 | Add a setup.cfg. | adam-incuna/imperial-painter,adam-thomas/imperial-painter,adam-incuna/imperial-painter,adam-thomas/imperial-painter | setup.py | setup.py | import setuptools
# Read the README so PyPI can show it as the long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="imperial-painter-adam-thomas",
    version="1.0.0",
    author="Adam Thomas",
    author_email="[email protected]",
    description="A tool for generating prototype cards from Excel files and Django templates",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/adam-thomas/imperial-painter",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
|
f874c337e0d0bb8cce8cfe6523c0d06c37b93198 | add basic setup.py definition | juannyG/django-rest-framework-saasy,pombredanne/django-rest-framework-saasy | setup.py | setup.py | from distutils.core import setup
setup(
    name='SaasyDjangoRestFramework',
    version='0.1dev',
    packages=[],  # NOTE(review): empty -- no packages are installed yet
    license='Creative Commons Attribution-Noncommercial-Share Alike license',
    description="SaaS plugin for the django rest framework",
)
| mit | Python |
|
63143c94cef353d7bae13f7b13650801bb901c94 | Test for explicit start/end args to str methods for unicode. | hiway/micropython,martinribelotta/micropython,tdautc19841202/micropython,supergis/micropython,torwag/micropython,pfalcon/micropython,galenhz/micropython,TDAbboud/micropython,kerneltask/micropython,heisewangluo/micropython,kerneltask/micropython,pozetroninc/micropython,ericsnowcurrently/micropython,ernesto-g/micropython,omtinez/micropython,misterdanb/micropython,Peetz0r/micropython-esp32,praemdonck/micropython,xyb/micropython,MrSurly/micropython-esp32,ryannathans/micropython,redbear/micropython,infinnovation/micropython,rubencabrera/micropython,EcmaXp/micropython,xhat/micropython,cwyark/micropython,xhat/micropython,micropython/micropython-esp32,swegener/micropython,firstval/micropython,xhat/micropython,emfcamp/micropython,torwag/micropython,ernesto-g/micropython,pramasoul/micropython,toolmacher/micropython,noahwilliamsson/micropython,lbattraw/micropython,infinnovation/micropython,tuc-osg/micropython,slzatz/micropython,blmorris/micropython,pramasoul/micropython,pramasoul/micropython,Peetz0r/micropython-esp32,alex-robbins/micropython,EcmaXp/micropython,ericsnowcurrently/micropython,noahwilliamsson/micropython,tralamazza/micropython,jimkmc/micropython,ryannathans/micropython,micropython/micropython-esp32,PappaPeppar/micropython,kerneltask/micropython,ruffy91/micropython,emfcamp/micropython,dxxb/micropython,ChuckM/micropython,TDAbboud/micropython,aethaniel/micropython,ganshun666/micropython,oopy/micropython,ChuckM/micropython,ruffy91/micropython,suda/micropython,neilh10/micropython,AriZuu/micropython,neilh10/micropython,henriknelson/micropython,ganshun666/micropython,dinau/micropython,pozetroninc/micropython,blmorris/micropython,puuu/micropython,ernesto-g/micropython,ahotam/micropython,cloudformdesign/micropython,mpalomer/micropython,dhylands/micropython,dxxb/micropython,matthewelse/micropython,tobbad/micropython,galenhz/micropython,praemdonck/micropython,EcmaXp/micr
opython,swegener/micropython,jmarcelino/pycom-micropython,misterdanb/micropython,tuc-osg/micropython,galenhz/micropython,mgyenik/micropython,oopy/micropython,ahotam/micropython,toolmacher/micropython,stonegithubs/micropython,lbattraw/micropython,emfcamp/micropython,drrk/micropython,heisewangluo/micropython,ganshun666/micropython,dmazzella/micropython,PappaPeppar/micropython,jmarcelino/pycom-micropython,tdautc19841202/micropython,dhylands/micropython,pramasoul/micropython,trezor/micropython,alex-robbins/micropython,mhoffma/micropython,stonegithubs/micropython,Peetz0r/micropython-esp32,misterdanb/micropython,cloudformdesign/micropython,ganshun666/micropython,suda/micropython,PappaPeppar/micropython,pramasoul/micropython,tobbad/micropython,noahwilliamsson/micropython,tdautc19841202/micropython,SHA2017-badge/micropython-esp32,kostyll/micropython,ericsnowcurrently/micropython,kostyll/micropython,supergis/micropython,Timmenem/micropython,pozetroninc/micropython,adafruit/circuitpython,hiway/micropython,micropython/micropython-esp32,mianos/micropython,slzatz/micropython,selste/micropython,tobbad/micropython,kostyll/micropython,warner83/micropython,alex-march/micropython,adafruit/micropython,dxxb/micropython,tobbad/micropython,vriera/micropython,omtinez/micropython,hiway/micropython,dhylands/micropython,ceramos/micropython,drrk/micropython,vriera/micropython,vitiral/micropython,lowRISC/micropython,methoxid/micropystat,utopiaprince/micropython,orionrobots/micropython,lbattraw/micropython,mpalomer/micropython,kostyll/micropython,cnoviello/micropython,chrisdearman/micropython,pozetroninc/micropython,martinribelotta/micropython,lbattraw/micropython,noahwilliamsson/micropython,MrSurly/micropython-esp32,Peetz0r/micropython-esp32,MrSurly/micropython,xyb/micropython,neilh10/micropython,hiway/micropython,PappaPeppar/micropython,EcmaXp/micropython,cnoviello/micropython,martinribelotta/micropython,jimkmc/micropython,suda/micropython,cwyark/micropython,dxxb/micropython,martinribelotta/m
icropython,heisewangluo/micropython,orionrobots/micropython,paul-xxx/micropython,suda/micropython,SungEun-Steve-Kim/test-mp,jmarcelino/pycom-micropython,skybird6672/micropython,oopy/micropython,bvernoux/micropython,lowRISC/micropython,tdautc19841202/micropython,deshipu/micropython,galenhz/micropython,adafruit/circuitpython,alex-robbins/micropython,ericsnowcurrently/micropython,turbinenreiter/micropython,alex-march/micropython,SungEun-Steve-Kim/test-mp,matthewelse/micropython,matthewelse/micropython,Vogtinator/micropython,mgyenik/micropython,ceramos/micropython,paul-xxx/micropython,oopy/micropython,methoxid/micropystat,feilongfl/micropython,warner83/micropython,utopiaprince/micropython,cnoviello/micropython,chrisdearman/micropython,deshipu/micropython,adamkh/micropython,methoxid/micropystat,pfalcon/micropython,ceramos/micropython,Timmenem/micropython,HenrikSolver/micropython,xyb/micropython,skybird6672/micropython,mpalomer/micropython,vitiral/micropython,warner83/micropython,xuxiaoxin/micropython,emfcamp/micropython,henriknelson/micropython,swegener/micropython,mpalomer/micropython,noahwilliamsson/micropython,ruffy91/micropython,KISSMonX/micropython,aethaniel/micropython,ericsnowcurrently/micropython,Vogtinator/micropython,galenhz/micropython,methoxid/micropystat,KISSMonX/micropython,TDAbboud/micropython,deshipu/micropython,praemdonck/micropython,adamkh/micropython,Timmenem/micropython,AriZuu/micropython,cloudformdesign/micropython,skybird6672/micropython,hiway/micropython,henriknelson/micropython,xyb/micropython,jlillest/micropython,aethaniel/micropython,SHA2017-badge/micropython-esp32,drrk/micropython,dhylands/micropython,SHA2017-badge/micropython-esp32,methoxid/micropystat,kerneltask/micropython,tuc-osg/micropython,TDAbboud/micropython,matthewelse/micropython,bvernoux/micropython,mgyenik/micropython,alex-march/micropython,ceramos/micropython,ChuckM/micropython,trezor/micropython,micropython/micropython-esp32,adafruit/micropython,orionrobots/micropython,kerneltask/
micropython,cnoviello/micropython,dmazzella/micropython,jlillest/micropython,Timmenem/micropython,ernesto-g/micropython,xuxiaoxin/micropython,vriera/micropython,MrSurly/micropython-esp32,martinribelotta/micropython,orionrobots/micropython,vitiral/micropython,toolmacher/micropython,Vogtinator/micropython,dhylands/micropython,tdautc19841202/micropython,MrSurly/micropython,adafruit/circuitpython,danicampora/micropython,alex-robbins/micropython,noahchense/micropython,ChuckM/micropython,dinau/micropython,feilongfl/micropython,omtinez/micropython,jlillest/micropython,puuu/micropython,adafruit/micropython,jlillest/micropython,adafruit/micropython,stonegithubs/micropython,hosaka/micropython,mhoffma/micropython,mpalomer/micropython,micropython/micropython-esp32,blmorris/micropython,SungEun-Steve-Kim/test-mp,noahchense/micropython,hosaka/micropython,chrisdearman/micropython,drrk/micropython,MrSurly/micropython,selste/micropython,feilongfl/micropython,selste/micropython,blazewicz/micropython,infinnovation/micropython,hosaka/micropython,swegener/micropython,warner83/micropython,tralamazza/micropython,MrSurly/micropython,MrSurly/micropython-esp32,torwag/micropython,neilh10/micropython,cloudformdesign/micropython,pfalcon/micropython,xuxiaoxin/micropython,mhoffma/micropython,blmorris/micropython,turbinenreiter/micropython,ruffy91/micropython,ceramos/micropython,xhat/micropython,torwag/micropython,PappaPeppar/micropython,supergis/micropython,puuu/micropython,omtinez/micropython,xuxiaoxin/micropython,redbear/micropython,turbinenreiter/micropython,mhoffma/micropython,adamkh/micropython,dmazzella/micropython,chrisdearman/micropython,toolmacher/micropython,paul-xxx/micropython,slzatz/micropython,torwag/micropython,noahchense/micropython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,suda/micropython,hosaka/micropython,blazewicz/micropython,adafruit/micropython,mianos/micropython,noahchense/micropython,xyb/micropython,ahotam/micropython,deshipu/micropython,ernesto-g/micropython,
turbinenreiter/micropython,ryannathans/micropython,omtinez/micropython,dmazzella/micropython,lowRISC/micropython,vitiral/micropython,feilongfl/micropython,matthewelse/micropython,chrisdearman/micropython,noahchense/micropython,xhat/micropython,blmorris/micropython,tralamazza/micropython,bvernoux/micropython,puuu/micropython,ryannathans/micropython,alex-march/micropython,vriera/micropython,skybird6672/micropython,firstval/micropython,deshipu/micropython,paul-xxx/micropython,henriknelson/micropython,danicampora/micropython,supergis/micropython,lbattraw/micropython,utopiaprince/micropython,tuc-osg/micropython,AriZuu/micropython,Vogtinator/micropython,mianos/micropython,danicampora/micropython,feilongfl/micropython,matthewelse/micropython,dinau/micropython,warner83/micropython,lowRISC/micropython,tobbad/micropython,HenrikSolver/micropython,adamkh/micropython,MrSurly/micropython-esp32,praemdonck/micropython,oopy/micropython,jimkmc/micropython,adafruit/circuitpython,dinau/micropython,Timmenem/micropython,skybird6672/micropython,toolmacher/micropython,blazewicz/micropython,adamkh/micropython,redbear/micropython,rubencabrera/micropython,jmarcelino/pycom-micropython,rubencabrera/micropython,infinnovation/micropython,tralamazza/micropython,heisewangluo/micropython,KISSMonX/micropython,adafruit/circuitpython,pfalcon/micropython,ganshun666/micropython,slzatz/micropython,trezor/micropython,jmarcelino/pycom-micropython,HenrikSolver/micropython,ahotam/micropython,turbinenreiter/micropython,pozetroninc/micropython,ruffy91/micropython,heisewangluo/micropython,mianos/micropython,supergis/micropython,trezor/micropython,redbear/micropython,cwyark/micropython,hosaka/micropython,danicampora/micropython,aethaniel/micropython,cloudformdesign/micropython,orionrobots/micropython,firstval/micropython,utopiaprince/micropython,trezor/micropython,slzatz/micropython,vitiral/micropython,xuxiaoxin/micropython,emfcamp/micropython,firstval/micropython,selste/micropython,cwyark/micropython,pfalcon/mic
ropython,alex-robbins/micropython,aethaniel/micropython,HenrikSolver/micropython,puuu/micropython,SungEun-Steve-Kim/test-mp,selste/micropython,SHA2017-badge/micropython-esp32,ryannathans/micropython,AriZuu/micropython,mianos/micropython,mgyenik/micropython,lowRISC/micropython,jimkmc/micropython,blazewicz/micropython,ChuckM/micropython,kostyll/micropython,Peetz0r/micropython-esp32,dinau/micropython,misterdanb/micropython,SungEun-Steve-Kim/test-mp,TDAbboud/micropython,alex-march/micropython,firstval/micropython,mgyenik/micropython,swegener/micropython,drrk/micropython,vriera/micropython,bvernoux/micropython,cnoviello/micropython,MrSurly/micropython,blazewicz/micropython,paul-xxx/micropython,utopiaprince/micropython,dxxb/micropython,praemdonck/micropython,ahotam/micropython,bvernoux/micropython,misterdanb/micropython,KISSMonX/micropython,Vogtinator/micropython,neilh10/micropython,henriknelson/micropython,cwyark/micropython,mhoffma/micropython,KISSMonX/micropython,rubencabrera/micropython,redbear/micropython,danicampora/micropython,jlillest/micropython,jimkmc/micropython,HenrikSolver/micropython,stonegithubs/micropython,EcmaXp/micropython,rubencabrera/micropython,tuc-osg/micropython,stonegithubs/micropython,AriZuu/micropython,infinnovation/micropython | tests/unicode/unicode_pos.py | tests/unicode/unicode_pos.py | # str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
| mit | Python |
|
601fd8a7b4fea5db2f23741735e6e7f1332b4417 | Fix issue #949 - Add mock as dependency | spulec/moto,Affirm/moto,Affirm/moto,botify-labs/moto,Affirm/moto,botify-labs/moto,kefo/moto,whummer/moto,dbfr3qs/moto,william-richard/moto,Brett55/moto,william-richard/moto,botify-labs/moto,botify-labs/moto,spulec/moto,ZuluPro/moto,Brett55/moto,william-richard/moto,ZuluPro/moto,kefo/moto,spulec/moto,Brett55/moto,whummer/moto,whummer/moto,kefo/moto,Affirm/moto,botify-labs/moto,ZuluPro/moto,rocky4570/moto,ZuluPro/moto,kefo/moto,okomestudio/moto,kefo/moto,spulec/moto,dbfr3qs/moto,rocky4570/moto,rocky4570/moto,rocky4570/moto,ZuluPro/moto,Brett55/moto,spulec/moto,Affirm/moto,spulec/moto,dbfr3qs/moto,Brett55/moto,Affirm/moto,okomestudio/moto,Brett55/moto,rocky4570/moto,dbfr3qs/moto,dbfr3qs/moto,ZuluPro/moto,william-richard/moto,botify-labs/moto,whummer/moto,whummer/moto,whummer/moto,william-richard/moto,william-richard/moto,okomestudio/moto,dbfr3qs/moto,okomestudio/moto,okomestudio/moto,rocky4570/moto,okomestudio/moto | setup.py | setup.py | #!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2>=2.8",
"boto>=2.36.0",
"boto3>=1.2.1",
"cookies",
"requests>=2.0",
"xmltodict",
"dicttoxml",
"six",
"werkzeug",
"pyaml",
"pytz",
"python-dateutil",
"mock",
]
extras_require = {
'server': ['flask'],
}
setup(
name='moto',
version='1.0.0',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='[email protected]',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| #!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2>=2.8",
"boto>=2.36.0",
"boto3>=1.2.1",
"cookies",
"requests>=2.0",
"xmltodict",
"dicttoxml",
"six",
"werkzeug",
"pyaml",
"pytz",
"python-dateutil",
]
extras_require = {
'server': ['flask'],
}
setup(
name='moto',
version='1.0.0',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='[email protected]',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| apache-2.0 | Python |
84c1ee14e1717ec63782dd5a159fe5848fce1cc4 | Add Python 3.6 and 3.7 to PyPI page | fkazimierczak/bottle,oz123/bottle,oz123/bottle,bottlepy/bottle,bottlepy/bottle,defnull/bottle,fkazimierczak/bottle,ifduyue/bottle,ifduyue/bottle,defnull/bottle | setup.py | setup.py | #!/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (2, 7):
raise NotImplementedError("Sorry, you need at least Python 2.7 or Python 3.2+ to use bottle.")
import bottle
setup(name='bottle',
version=bottle.__version__,
description='Fast and simple WSGI-framework for small web-applications.',
long_description=bottle.__doc__,
author=bottle.__author__,
author_email='[email protected]',
url='http://bottlepy.org/',
py_modules=['bottle'],
scripts=['bottle.py'],
license='MIT',
platforms='any',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| #!/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (2, 7):
raise NotImplementedError("Sorry, you need at least Python 2.7 or Python 3.2+ to use bottle.")
import bottle
setup(name='bottle',
version=bottle.__version__,
description='Fast and simple WSGI-framework for small web-applications.',
long_description=bottle.__doc__,
author=bottle.__author__,
author_email='[email protected]',
url='http://bottlepy.org/',
py_modules=['bottle'],
scripts=['bottle.py'],
license='MIT',
platforms='any',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| mit | Python |
f539736b563fb6859a6bffb03aed42b57880744f | create module | danzipie/aau-community-detection | test_vbn_parser.py | test_vbn_parser.py | import networkx as nx
import matplotlib.pyplot as plt
import vbn_parser as p
# initialize
G = nx.Graph()
link = 'http://vbn.aau.dk/da/organisations/antennas-propagation-and-radio-networking(c2c38bb3-3d28-4b2c-8bc4-949211d2d486)/publications.rss?altordering=publicationOrderByPublicationYearThenCreated&pageSize=500'
# populate the graph
p.parse_vbn(link, G)
# visualize the graph
labels = nx.get_node_attributes(G, 'name')
nx.draw(G, labels=labels)
plt.show()
nx.write_graphml(G,"test1.graphml")
| mit | Python |
|
30fc52d77170844c5b3820d997286df744eb56db | Add setup.py for packaging and PyPI submission. | kozz/hsalf | setup.py | setup.py | from setuptools import setup
name = 'hsalf'
setup(
name=name,
version='0.0.1',
author='Nam T. Nguyen',
author_email='[email protected]',
url='https://bitbucket.org/namn/hsalf/overview',
description='Hsalf is a pure Python library to read and write Flash files (SWF).',
long_description='Hsalf is a pure Python library to read and write Flash files (SWF).',
platforms='Any',
package_dir={'':'.'},
packages=['hsalf'],
package_data={'': ['README', 'LICENSE']},
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Multimedia',
'Topic :: Security',
'Topic :: Software Development :: Assemblers',
'Topic :: Software Development :: Disassemblers',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| mit | Python |
|
d433d16a0375669c1664bbe8f20a8db5924fa92e | Add basic benchmark for length | alvinlindstam/grapheme | tests/benchmark.py | tests/benchmark.py | from random import choice
from string import ascii_lowercase
import timeit
import grapheme
def random_ascii_string(n):
return "".join(choice(ascii_lowercase) for i in range(n))
long_ascii_string = random_ascii_string(1000)
statements = [
"len(long_ascii_string)",
"grapheme.length(long_ascii_string)",
]
for statement in statements:
n = 100
result = timeit.timeit(statement, setup="from __main__ import long_ascii_string; import grapheme", number=n) / 100
print("{}: {} seconds".format(statement, result))
| mit | Python |
|
dd015a7bf9c69e2f96488c9239be694303b30176 | Create setup.py | shenweichen/DeepCTR | setup.py | setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="deepctr",
version="0.1.3",
author="Weichen Shen",
author_email="[email protected]",
description="DeepCTR is a Easy-to-use,Modular and Extendible package of deep-learning based CTR models ,including serval DNN-based CTR models and lots of core components layer of the models which can be used to build your own custom model.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/shenweichen/deepctr",
packages=setuptools.find_packages(),
install_requires=[],
extras_require={
"tf": ["tensorflow>=1.4.0,<1.7.0"],
"tf_gpu": ["tensorflow-gpu>=1.4.0,<1.7.0"],
},
entry_points={
},
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| apache-2.0 | Python |
|
5a2a2aa33a2e206042b3d28a830d00bdae2f5ad8 | Add setup.py for distribution | Pringley/rw | setup.py | setup.py | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name = "rw",
version = "0.0.1",
packages = find_packages(),
scripts = ['scripts/rw'],
install_requires = ['docopt'],
author = "Ben Pringle",
author_email = "[email protected]",
url = "http://github.com/Pringley/rw",
description = "Generate random words (e.g. for passwords)",
license = "MIT",
)
| mit | Python |
|
91affa8b785e0b5261f69448c1c08de429460bb9 | Add setup.py | lamby/django-yadt | setup.py | setup.py | from setuptools import setup
setup(
name='django-yadt',
packages=(
'django_yadt',
'django_yadt.management',
'django_yadt.management.commands',
),
)
| bsd-3-clause | Python |
|
631afff160077cc629054613d59cb47747f6c20d | Fix setup to exclude tests | superdesk/superdesk-core,nistormihai/superdesk-core,superdesk/superdesk-core,sivakuna-aap/superdesk-core,ancafarcas/superdesk-core,akintolga/superdesk-core,petrjasek/superdesk-core,petrjasek/superdesk-core,mdhaman/superdesk-core,ioanpocol/superdesk-core,ioanpocol/superdesk-core,mdhaman/superdesk-core,superdesk/superdesk-core,petrjasek/superdesk-core,nistormihai/superdesk-core,hlmnrmr/superdesk-core,ancafarcas/superdesk-core,sivakuna-aap/superdesk-core,akintolga/superdesk-core,mugurrus/superdesk-core,plamut/superdesk-core,mugurrus/superdesk-core,superdesk/superdesk-core,mugurrus/superdesk-core,marwoodandrew/superdesk-core,marwoodandrew/superdesk-core,ioanpocol/superdesk-core,hlmnrmr/superdesk-core,plamut/superdesk-core,petrjasek/superdesk-core,mdhaman/superdesk-core | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession
LONG_DESCRIPTION = open('README.md').read()
REQUIREMENTS = [str(ir.req) for ir in parse_requirements('requirements.txt', session=PipSession())
if not (getattr(ir, 'link', False) or getattr(ir, 'url', False))]
setup(
name='Superdesk-Core',
version='0.0.1-dev',
description='Superdesk Core library',
long_description=LONG_DESCRIPTION,
author='petr jasek',
author_email='[email protected]',
url='https://github.com/superdesk/superdesk-core',
license='GPLv3',
platforms=['any'],
packages=find_packages(exclude=['tests']),
install_requires=REQUIREMENTS,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession
LONG_DESCRIPTION = open('README.md').read()
REQUIREMENTS = [str(ir.req) for ir in parse_requirements('requirements.txt', session=PipSession())
if not (getattr(ir, 'link', False) or getattr(ir, 'url', False))]
setup(
name='Superdesk-Core',
version='0.0.1-dev',
description='Superdesk Core library',
long_description=LONG_DESCRIPTION,
author='petr jasek',
author_email='[email protected]',
url='https://github.com/superdesk/superdesk-core',
license='GPLv3',
platforms=['any'],
packages=find_packages(),
install_requires=REQUIREMENTS,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| agpl-3.0 | Python |
5b251911d112abea610477a3f552a78be6b5b1e1 | add utils module | ojengwa/gfe | utils.py | utils.py | from flask import Response, request
def add_basic_auth(blueprint, username, password, realm='RQ Dashboard'):
'''Add HTTP Basic Auth to a blueprint.
Note this is only for casual use!
'''
@blueprint.before_request
def basic_http_auth(*args, **kwargs):
auth = request.authorization
if (auth is None or auth.password != password or auth
.username != username):
return Response(
'Please login',
401,
{'WWW-Authenticate': 'Basic realm="{0}"'.format(realm)})
| mit | Python |
|
70ad81a24e218fd2b5fed03224611eae63e0d58f | add main argument processing file | Zumium/boxes | boxes/argsParse.py | boxes/argsParse.py | import argparse
| apache-2.0 | Python |
|
a633cc0b4ee376ff02af101154e60b8b33dfda08 | add migration for old logs | mfraezz/osf.io,alexschiller/osf.io,laurenrevere/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,pattisdr/osf.io,crcresearch/osf.io,Nesiehr/osf.io,saradbowman/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,mluo613/osf.io,felliott/osf.io,baylee-d/osf.io,leb2dg/osf.io,felliott/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,sloria/osf.io,monikagrabowska/osf.io,chennan47/osf.io,rdhyee/osf.io,acshi/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,aaxelb/osf.io,crcresearch/osf.io,acshi/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,sloria/osf.io,hmoco/osf.io,erinspace/osf.io,brianjgeiger/osf.io,felliott/osf.io,adlius/osf.io,cslzchen/osf.io,cwisecarver/osf.io,erinspace/osf.io,pattisdr/osf.io,mattclark/osf.io,brianjgeiger/osf.io,alexschiller/osf.io,mfraezz/osf.io,pattisdr/osf.io,leb2dg/osf.io,acshi/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,mluo613/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,sloria/osf.io,hmoco/osf.io,baylee-d/osf.io,caneruguz/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,icereval/osf.io,cwisecarver/osf.io,caneruguz/osf.io,baylee-d/osf.io,aaxelb/osf.io,cwisecarver/osf.io,chrisseto/osf.io,icereval/osf.io,mluo613/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,hmoco/osf.io,chrisseto/osf.io,alexschiller/osf.io,chrisseto/osf.io,mattclark/osf.io,TomBaxter/osf.io,rdhyee/osf.io,aaxelb/osf.io,Nesiehr/osf.io,aaxelb/osf.io,adlius/osf.io,mluo613/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,acshi/osf.io,crcresearch/osf.io,mfraezz/osf.io,saradbowman/osf.io,icereval/osf.io,hmoco/osf.io,mfraezz/osf.io,chennan47/osf.io,chrisseto/osf.io,leb2dg/osf.io,Nesiehr/osf.io,caneruguz/osf.io,rdhyee/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,moni
kagrabowska/osf.io,mluo613/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,adlius/osf.io,binoculars/osf.io,leb2dg/osf.io,rdhyee/osf.io,binoculars/osf.io,adlius/osf.io,cslzchen/osf.io | scripts/migrate_preprint_logs.py | scripts/migrate_preprint_logs.py | import sys
import logging
from datetime import datetime
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from website.app import init_app
from website.models import NodeLog, PreprintService
logger = logging.getLogger(__name__)
def main(dry):
if dry:
logging.warn('DRY mode running')
now = datetime.utcnow()
initiated_logs = NodeLog.find(Q('action', 'eq', NodeLog.PREPRINT_INITIATED) & Q('date', 'lt', now))
for log in initiated_logs:
try:
preprint = PreprintService.find_one(Q('node', 'eq', log.node))
log.params.update({
'preprint': {
'id': preprint._id
},
'service': {
'name': preprint.provider.name
}
})
logging.info('Updating log {} from node {}, with preprint id: {}'.format(log._id, log.node.title, preprint._id))
if not dry:
log.save()
except NoResultsFound:
pass
updated_logs = NodeLog.find(Q('action', 'eq', NodeLog.PREPRINT_FILE_UPDATED) & Q('date', 'lt', now))
for log in updated_logs:
try:
preprint = PreprintService.find_one(Q('node', 'eq', log.node))
log.params.update({
'preprint': {
'id': preprint._id
}
})
logging.info('Updating log {} from node {}, with preprint id: {}'.format(log._id, log.node.title, preprint._id))
if not dry:
log.save()
except NoResultsFound:
pass
if __name__ == '__main__':
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
main(dry)
| apache-2.0 | Python |
|
caf2d7108d7329da562a012775bac0a87d4c62b6 | Create db_create.py | rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python,rockwolf/python | fade/db_create.py | fade/db_create.py | #!flask/bin/python
"""
See LICENSE.txt file for copyright and license details.
"""
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
api.version(SQLALCHEMY_MIGRATE_REPO))
| bsd-3-clause | Python |
|
f6f75172b1b8a41fc5ae025416ea665258d4ff4c | Add script for updating favicon from gh avatar | Sorashi/sorashi.github.io | favicon-update.py | favicon-update.py | from PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)]) | mit | Python |
|
75031595de8726dcd21535b13385c4e6c89aa190 | Add run meter task | impactlab/oeem-energy-datastore,impactlab/oeem-energy-datastore,impactlab/oeem-energy-datastore | datastore/tasks.py | datastore/tasks.py | from __future__ import absolute_import
from celery import shared_task
from datastore.models import Project
@shared_task
def run_meter(project_pk):
project = Project.objects.get(pk=project_pk):
project.run_meter()
| mit | Python |
|
3ed6a0e337c99d12fb4abd96b2230e13388289e7 | Add tests for Process functionality. | Jarn/jarn.mkrelease | jarn/mkrelease/tests/test_process.py | jarn/mkrelease/tests/test_process.py | import unittest
import os
from jarn.mkrelease.process import Process
from jarn.mkrelease.testing import JailSetup
from jarn.mkrelease.testing import quiet
class PopenTests(unittest.TestCase):
@quiet
def test_simple(self):
process = Process()
rc, lines = process.popen('echo "Hello world"')
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_quiet(self):
process = Process(quiet=True)
rc, lines = process.popen('echo "Hello world"')
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_env(self):
env = os.environ.copy()
env['HELLO'] = 'Hello world'
process = Process(quiet=True, env=env)
rc, lines = process.popen('echo ${HELLO}')
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_echo(self):
process = Process()
rc, lines = process.popen('echo "Hello world"', echo=False)
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_echo2(self):
process = Process()
rc, lines = process.popen('$ "Hello world"', echo2=False)
self.assertEqual(rc, 127)
self.assertEqual(lines, [])
@quiet
def test_bad_cmd(self):
process = Process()
rc, lines = process.popen('$ "Hello world"')
self.assertEqual(rc, 127)
self.assertEqual(lines, [])
class PipeTests(unittest.TestCase):
def test_simple(self):
process = Process()
value = process.pipe('echo "Hello world"')
self.assertEqual(value, 'Hello world')
def test_quiet(self):
process = Process(quiet=True)
value = process.pipe('echo "Hello world"')
self.assertEqual(value, 'Hello world')
def test_env(self):
env = os.environ.copy()
env['HELLO'] = 'Hello world'
process = Process(quiet=True, env=env)
value = process.pipe('echo ${HELLO}')
self.assertEqual(value, 'Hello world')
@quiet
def test_bad_cmd(self):
process = Process()
value = process.pipe('$ "Hello world"')
self.assertEqual(value, '')
class SystemTests(JailSetup):
def test_simple(self):
process = Process()
rc = process.system('echo "Hello world" > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_quiet(self):
process = Process(quiet=True)
rc = process.system('echo "Hello world"')
self.assertEqual(rc, 0)
def test_env(self):
env = os.environ.copy()
env['HELLO'] = 'Hello world'
process = Process(env=env)
rc = process.system('echo ${HELLO} > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_bad_cmd(self):
process = Process()
rc = process.system('$ "Hello world" 2> output')
self.assertEqual(rc, 127)
class OsSystemTests(JailSetup):
def test_simple(self):
process = Process()
rc = process.os_system('echo "Hello world" > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_quiet(self):
process = Process(quiet=True)
rc = process.os_system('echo "Hello world"')
self.assertEqual(rc, 0)
def test_env(self):
env = {'HELLO': 'Hello world'}
process = Process(env=env)
rc = process.os_system('echo ${HELLO} > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_bad_cmd(self):
process = Process()
rc = process.os_system('$ "Hello world" 2> output')
self.assertNotEqual(rc, 0)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| bsd-2-clause | Python |
|
27c5a09ddbe2ddf14b2f4c84ebb668adbdfd7070 | ADD example.basicserver for test | thislight/wood,thislight/wood | example/basicserver.py | example/basicserver.py |
from wood import Wood
w = Wood(__name__,debug=True)
IndexHandler = w.empty(uri='/',name='IndexHandler')
@IndexHandler.get
def index_page(self):
self.write('滑稽,这里什么都没有\n(HuajiEnv)')
if __name__ == '__main__':
w.start(port=6000)
| apache-2.0 | Python |
|
537bb46c6806ef69ab3022641a76f50f97630e11 | Add first migration: Create the database tables. | klmitch/boson | boson/db/sqlalchemy/alembic/versions/1f22e3c5ff66_initial_revision.py | boson/db/sqlalchemy/alembic/versions/1f22e3c5ff66_initial_revision.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Initial revision
Revision ID: 1f22e3c5ff66
Revises: None
Create Date: 2012-10-26 17:37:18.592202
"""
# revision identifiers, used by Alembic.
revision = '1f22e3c5ff66'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Create the tables.
"""
op.create_table(
'services',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('name', sa.String(64), nullable=False),
sa.Column('auth_fields', sa.Text),
)
op.create_table(
'categories',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('service_id', sa.String(36), sa.ForeignKey('services.id'),
nullable=False),
sa.Column('name', sa.String(64), nullable=False),
sa.Column('usage_fset', sa.Text),
sa.Column('quota_fsets', sa.Text),
)
op.create_table(
'resources',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('service_id', sa.String(36), sa.ForeignKey('services.id'),
nullable=False),
sa.Column('category_id', sa.String(36), sa.ForeignKey('categories.id'),
nullable=False),
sa.Column('name', sa.String(64), nullable=False),
sa.Column('parameters', sa.Text),
sa.Column('absolute', sa.Boolean, nullable=False),
)
op.create_table(
'usages',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('resource_id', sa.String(36), sa.ForeignKey('resources.id'),
nullable=False),
sa.Column('parameter_data', sa.Text),
sa.Column('auth_data', sa.Text),
sa.Column('used', sa.BigInteger, nullable=False),
sa.Column('reserved', sa.BigInteger, nullable=False),
sa.Column('until_refresh', sa.Integer),
sa.Column('refresh_id', sa.String(36)),
)
op.create_table(
'quotas',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('resource_id', sa.String(36), sa.ForeignKey('resources.id'),
nullable=False),
sa.Column('auth_data', sa.Text),
sa.Column('limit', sa.BigInteger),
)
op.create_table(
'reservations',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('expire', sa.DateTime, nullable=False),
)
op.create_table(
'reserved_items',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('reservation_id', sa.String(36),
sa.ForeignKey('reservations.id'), nullable=False),
sa.Column('resource_id', sa.String(36), sa.ForeignKey('resources.id'),
nullable=False),
sa.Column('usage_id', sa.String(36), sa.ForeignKey('usages.id'),
nullable=False),
sa.Column('delta', sa.BigInteger, nullable=False),
)
def downgrade():
"""
Drop the tables.
"""
op.drop_table('services')
op.drop_table('categories')
op.drop_table('resources')
op.drop_table('usages')
op.drop_table('quotas')
op.drop_table('reservations')
op.drop_table('reserved_items')
| apache-2.0 | Python |
|
8049e2f0bb0a12bb301ab4390c3e4da3d90f0369 | Move stagingsettings to new 'cosmos' project tree | telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform,telefonicaid/fiware-cosmos-platform | cosmos/platform/frontend/src/bdp_fe/conf/stagingsettings.py | cosmos/platform/frontend/src/bdp_fe/conf/stagingsettings.py | """
Module testsettings
These settings allow Django unittests to setup a temporary databse and run the
tests of the installed applications.
"""
DEBUG = True
TEMPLATE_DEBUG = DEBUG
from bdp_fe.conf.base_settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/bdp_fe.db'
}
}
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
ADMINS = (
('admin', '[email protected]'),
)
MANAGERS = ADMINS
LANDING_ROOT = '/tmp/landing/'
CLUSTER_CONF = {
'host': 'localhost',
'port': 9888,
'mongobase': 'mongodb://pshdp04',
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
'django.request': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'bdp_fe': {
'handlers': ['console', 'mail_admins'],
'level': 'DEBUG',
'propagate': True,
}
}
}
| apache-2.0 | Python |
|
1b8edd0d6ce3f66c8bacad8bad62de0d40284799 | Add dodge game example | JoeGlancy/micropython,JoeGlancy/micropython,JoeGlancy/micropython | examples/dodge_game.py | examples/dodge_game.py | """
Dodge game
Get the player back and forth across the screen while dodging the enemy
"""
from microbit import *
import music
class Enemy:
"""
Enemy which moves vertically down the screen
"""
def __init__(self):
self.x, self.y = 2, -1
def get_positions(self):
return ((self.x, self.y), (self.x, self.y + 1 if self.y < 4 else 0))
def move(self):
# Rotate back round to the top
self.y = (self.y + 1) % 5
def draw(self):
for x, y in self.get_positions():
display.set_pixel(x, y, 9)
class Player:
"""
Left-right moving player which can be controlled with buttons
"""
RIGHT = 1
LEFT = -1
STOPPED = 0
LEFT_EDGE = 0
RIGHT_EDGE = 4
def __init__(self):
self.alive = True
self.score = 0
self.just_scored = False
self.x, self.y = self.LEFT_EDGE, 2
self.direction = self.STOPPED
def get_position(self):
return (self.x, self.y)
def die(self):
"""
Player dies - show their score and play sad music
"""
self.alive = False
display.show(str(self.score))
music.play(music.WAWAWAWAA)
def move(self):
"""
Move the player one step further in their
current direction
"""
self.just_scored = False
self.x += self.direction
if self.x in (self.LEFT_EDGE, self.RIGHT_EDGE):
# Player reached the edge - another run survived!
if self.direction != self.STOPPED:
self.score += 1
self.just_scored = True
self.direction = self.STOPPED
def draw(self):
"""
Draw the player
"""
display.set_pixel(self.x, self.y, 9)
if self.just_scored:
music.pitch(400, 40)
def act_on_input(self):
# If we're standing still, look for a button press.
if self.direction == self.STOPPED:
if button_b.was_pressed() and self.x == self.LEFT_EDGE:
self.direction = self.RIGHT
elif button_a.was_pressed() and self.x == self.RIGHT_EDGE:
self.direction = self.LEFT
class Game:
def __init__(self):
self.enemy = Enemy()
self.player = Player()
self.frame_rate = 1
def detect_collisions(self):
"""
Have the player and the enemy collided?
"""
return self.player.get_position() in self.enemy.get_positions()
def do_frame(self):
"""
Called once per frame to advance the game state
"""
# Adjust the speed as the player's score gets higher
# (But don't let it exceed the actual frame rate)
self.frame_rate = max(1, min(100, self.player.score))
if self.player.alive:
display.clear()
self.enemy.move()
self.player.act_on_input()
self.player.move()
if self.detect_collisions():
self.player.die()
else:
self.enemy.draw()
self.player.draw()
game = Game()
while True:
timestamp = running_time()
game.do_frame()
# Keep the frame rate consistent
new_timestamp = running_time()
time_taken = (new_timestamp - timestamp)
interval = 1000 // game.frame_rate
if time_taken < interval:
sleep(interval - time_taken)
timestamp = new_timestamp
| mit | Python |
|
70d912bfb1ccec03edfe92b9b2c87610346c8f42 | Add blocking migration for new domain db | dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py | corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
    """Blocking migration for the new domains doc-type database.

    NOTE(review): assert_initial_complete(domains_migration) presumably
    returns a callable that raises until the domains doc-type migration's
    initial phase has completed, making this migration act as a deploy
    gate -- confirm in corehq.doctypemigrations.djangomigrations.
    """

    # Must be applied after the previous doctypemigrations migration.
    dependencies = [
        ('doctypemigrations', '0005_auto_20151013_0819'),
    ]

    # RunPython executes the guard when the migration is applied.
    operations = [
        migrations.RunPython(assert_initial_complete(domains_migration))
    ]
| bsd-3-clause | Python |
|
0378f8cde69a18d954341b861a724592ef7a5949 | Extend RANSAC example with comparison to BaggingRegressor | murali-munna/scikit-learn,gotomypc/scikit-learn,mattilyra/scikit-learn,fzalkow/scikit-learn,Srisai85/scikit-learn,ChanderG/scikit-learn,3manuek/scikit-learn,loli/sklearn-ensembletrees,jorge2703/scikit-learn,trungnt13/scikit-learn,mugizico/scikit-learn,schets/scikit-learn,terkkila/scikit-learn,sanketloke/scikit-learn,nesterione/scikit-learn,cl4rke/scikit-learn,hsiaoyi0504/scikit-learn,jereze/scikit-learn,ZenDevelopmentSystems/scikit-learn,sonnyhu/scikit-learn,spallavolu/scikit-learn,nvoron23/scikit-learn,equialgo/scikit-learn,sergeyf/scikit-learn,hugobowne/scikit-learn,wzbozon/scikit-learn,chrsrds/scikit-learn,Barmaley-exe/scikit-learn,espg/scikit-learn,kaichogami/scikit-learn,amueller/scikit-learn,giorgiop/scikit-learn,vibhorag/scikit-learn,rajat1994/scikit-learn,anirudhjayaraman/scikit-learn,wazeerzulfikar/scikit-learn,kagayakidan/scikit-learn,kagayakidan/scikit-learn,jereze/scikit-learn,IshankGulati/scikit-learn,saiwing-yeung/scikit-learn,zihua/scikit-learn,davidgbe/scikit-learn,0asa/scikit-learn,mjudsp/Tsallis,vibhorag/scikit-learn,ChanderG/scikit-learn,vermouthmjl/scikit-learn,yask123/scikit-learn,hlin117/scikit-learn,potash/scikit-learn,mehdidc/scikit-learn,herilalaina/scikit-learn,khkaminska/scikit-learn,yanlend/scikit-learn,vinayak-mehta/scikit-learn,fengzhyuan/scikit-learn,ningchi/scikit-learn,harshaneelhg/scikit-learn,quheng/scikit-learn,pypot/scikit-learn,kylerbrown/scikit-learn,ChanChiChoi/scikit-learn,olologin/scikit-learn,ephes/scikit-learn,JPFrancoia/scikit-learn,bthirion/scikit-learn,shyamalschandra/scikit-learn,jereze/scikit-learn,btabibian/scikit-learn,lesteve/scikit-learn,zorojean/scikit-learn,alexsavio/scikit-learn,huzq/scikit-learn,nrhine1/scikit-learn,ningchi/scikit-learn,pv/scikit-learn,simon-pepin/scikit-learn,bikong2/scikit-learn,OshynSong/scikit-learn,ZenDevelopmentSystems/scikit-learn,justincassidy/scikit-learn,MechCoder/scikit-learn,JPFra
ncoia/scikit-learn,vshtanko/scikit-learn,deepesch/scikit-learn,Akshay0724/scikit-learn,aflaxman/scikit-learn,thientu/scikit-learn,trungnt13/scikit-learn,billy-inn/scikit-learn,madjelan/scikit-learn,xubenben/scikit-learn,sumspr/scikit-learn,ycaihua/scikit-learn,nelson-liu/scikit-learn,idlead/scikit-learn,Clyde-fare/scikit-learn,iismd17/scikit-learn,dsullivan7/scikit-learn,CforED/Machine-Learning,abhishekgahlot/scikit-learn,huobaowangxi/scikit-learn,appapantula/scikit-learn,vibhorag/scikit-learn,harshaneelhg/scikit-learn,andrewnc/scikit-learn,madjelan/scikit-learn,pompiduskus/scikit-learn,samzhang111/scikit-learn,treycausey/scikit-learn,appapantula/scikit-learn,xavierwu/scikit-learn,nikitasingh981/scikit-learn,xyguo/scikit-learn,MohammedWasim/scikit-learn,toastedcornflakes/scikit-learn,Lawrence-Liu/scikit-learn,fyffyt/scikit-learn,jseabold/scikit-learn,alvarofierroclavero/scikit-learn,shenzebang/scikit-learn,fbagirov/scikit-learn,ilo10/scikit-learn,IssamLaradji/scikit-learn,stylianos-kampakis/scikit-learn,belltailjp/scikit-learn,Srisai85/scikit-learn,davidgbe/scikit-learn,ycaihua/scikit-learn,RomainBrault/scikit-learn,wanggang3333/scikit-learn,vigilv/scikit-learn,rishikksh20/scikit-learn,djgagne/scikit-learn,IssamLaradji/scikit-learn,shenzebang/scikit-learn,iismd17/scikit-learn,Achuth17/scikit-learn,lin-credible/scikit-learn,zhenv5/scikit-learn,fbagirov/scikit-learn,djgagne/scikit-learn,pypot/scikit-learn,vortex-ape/scikit-learn,mwv/scikit-learn,nhejazi/scikit-learn,massmutual/scikit-learn,ivannz/scikit-learn,hitszxp/scikit-learn,hlin117/scikit-learn,abimannans/scikit-learn,justincassidy/scikit-learn,loli/sklearn-ensembletrees,tmhm/scikit-learn,nikitasingh981/scikit-learn,liberatorqjw/scikit-learn,vinayak-mehta/scikit-learn,3manuek/scikit-learn,gotomypc/scikit-learn,mblondel/scikit-learn,ChanChiChoi/scikit-learn,clemkoa/scikit-learn,hrjn/scikit-learn,kevin-intel/scikit-learn,pianomania/scikit-learn,samuel1208/scikit-learn,cybernet14/scikit-learn,zaxtax/scikit-learn,jm
-begon/scikit-learn,alexsavio/scikit-learn,mlyundin/scikit-learn,quheng/scikit-learn,ClimbsRocks/scikit-learn,poryfly/scikit-learn,eickenberg/scikit-learn,f3r/scikit-learn,jkarnows/scikit-learn,stylianos-kampakis/scikit-learn,AlexanderFabisch/scikit-learn,bhargav/scikit-learn,shangwuhencc/scikit-learn,bhargav/scikit-learn,sumspr/scikit-learn,arahuja/scikit-learn,luo66/scikit-learn,pratapvardhan/scikit-learn,BiaDarkia/scikit-learn,hitszxp/scikit-learn,arahuja/scikit-learn,xavierwu/scikit-learn,altairpearl/scikit-learn,zorroblue/scikit-learn,spallavolu/scikit-learn,fredhusser/scikit-learn,dsquareindia/scikit-learn,jkarnows/scikit-learn,vivekmishra1991/scikit-learn,meduz/scikit-learn,fabianp/scikit-learn,vermouthmjl/scikit-learn,icdishb/scikit-learn,lazywei/scikit-learn,cybernet14/scikit-learn,CforED/Machine-Learning,bnaul/scikit-learn,mattgiguere/scikit-learn,henridwyer/scikit-learn,Achuth17/scikit-learn,h2educ/scikit-learn,jayflo/scikit-learn,kjung/scikit-learn,ndingwall/scikit-learn,fredhusser/scikit-learn,henrykironde/scikit-learn,yunfeilu/scikit-learn,luo66/scikit-learn,tosolveit/scikit-learn,rvraghav93/scikit-learn,pratapvardhan/scikit-learn,jakobworldpeace/scikit-learn,qifeigit/scikit-learn,rvraghav93/scikit-learn,joernhees/scikit-learn,BiaDarkia/scikit-learn,mojoboss/scikit-learn,moutai/scikit-learn,Obus/scikit-learn,chrsrds/scikit-learn,zihua/scikit-learn,jpautom/scikit-learn,r-mart/scikit-learn,schets/scikit-learn,zuku1985/scikit-learn,glennq/scikit-learn,imaculate/scikit-learn,glemaitre/scikit-learn,rrohan/scikit-learn,nomadcube/scikit-learn,Akshay0724/scikit-learn,hsiaoyi0504/scikit-learn,icdishb/scikit-learn,rishikksh20/scikit-learn,ishanic/scikit-learn,Windy-Ground/scikit-learn,sonnyhu/scikit-learn,alvarofierroclavero/scikit-learn,vinayak-mehta/scikit-learn,terkkila/scikit-learn,mhue/scikit-learn,AlexRobson/scikit-learn,robin-lai/scikit-learn,yask123/scikit-learn,khkaminska/scikit-learn,yyjiang/scikit-learn,nhejazi/scikit-learn,vortex-ape/scikit-learn,abh
ishekgahlot/scikit-learn,jzt5132/scikit-learn,mlyundin/scikit-learn,rishikksh20/scikit-learn,liberatorqjw/scikit-learn,victorbergelin/scikit-learn,CforED/Machine-Learning,MechCoder/scikit-learn,madjelan/scikit-learn,siutanwong/scikit-learn,huobaowangxi/scikit-learn,MartinSavc/scikit-learn,voxlol/scikit-learn,potash/scikit-learn,AlexRobson/scikit-learn,Fireblend/scikit-learn,RayMick/scikit-learn,ChanderG/scikit-learn,LohithBlaze/scikit-learn,Lawrence-Liu/scikit-learn,waterponey/scikit-learn,khkaminska/scikit-learn,AlexRobson/scikit-learn,RomainBrault/scikit-learn,kevin-intel/scikit-learn,ashhher3/scikit-learn,qifeigit/scikit-learn,loli/semisupervisedforests,imaculate/scikit-learn,murali-munna/scikit-learn,aminert/scikit-learn,liangz0707/scikit-learn,xiaoxiamii/scikit-learn,thientu/scikit-learn,ilo10/scikit-learn,maheshakya/scikit-learn,ldirer/scikit-learn,bikong2/scikit-learn,zhenv5/scikit-learn,lenovor/scikit-learn,chrsrds/scikit-learn,maheshakya/scikit-learn,ivannz/scikit-learn,betatim/scikit-learn,rishikksh20/scikit-learn,ankurankan/scikit-learn,thilbern/scikit-learn,pratapvardhan/scikit-learn,nesterione/scikit-learn,poryfly/scikit-learn,pkruskal/scikit-learn,Adai0808/scikit-learn,mwv/scikit-learn,liyu1990/sklearn,joshloyal/scikit-learn,ogrisel/scikit-learn,ephes/scikit-learn,robin-lai/scikit-learn,andrewnc/scikit-learn,maheshakya/scikit-learn,pnedunuri/scikit-learn,justincassidy/scikit-learn,poryfly/scikit-learn,shenzebang/scikit-learn,HolgerPeters/scikit-learn,themrmax/scikit-learn,hainm/scikit-learn,HolgerPeters/scikit-learn,moutai/scikit-learn,glouppe/scikit-learn,eickenberg/scikit-learn,mfjb/scikit-learn,mfjb/scikit-learn,belltailjp/scikit-learn,huobaowangxi/scikit-learn,zorroblue/scikit-learn,lesteve/scikit-learn,DSLituiev/scikit-learn,IshankGulati/scikit-learn,rahul-c1/scikit-learn,mhdella/scikit-learn,schets/scikit-learn,dsullivan7/scikit-learn,sinhrks/scikit-learn,nmayorov/scikit-learn,anurag313/scikit-learn,thilbern/scikit-learn,YinongLong/scikit-learn,P
atrickOReilly/scikit-learn,waterponey/scikit-learn,kylerbrown/scikit-learn,smartscheduling/scikit-learn-categorical-tree,victorbergelin/scikit-learn,cybernet14/scikit-learn,glemaitre/scikit-learn,untom/scikit-learn,idlead/scikit-learn,Lawrence-Liu/scikit-learn,imaculate/scikit-learn,MohammedWasim/scikit-learn,dingocuster/scikit-learn,xuewei4d/scikit-learn,vybstat/scikit-learn,Nyker510/scikit-learn,quheng/scikit-learn,dsquareindia/scikit-learn,massmutual/scikit-learn,espg/scikit-learn,tawsifkhan/scikit-learn,vybstat/scikit-learn,ilo10/scikit-learn,trankmichael/scikit-learn,abhishekkrthakur/scikit-learn,hdmetor/scikit-learn,f3r/scikit-learn,eg-zhang/scikit-learn,mikebenfield/scikit-learn,aflaxman/scikit-learn,kjung/scikit-learn,sarahgrogan/scikit-learn,trungnt13/scikit-learn,3manuek/scikit-learn,thientu/scikit-learn,huzq/scikit-learn,ndingwall/scikit-learn,Nyker510/scikit-learn,dsquareindia/scikit-learn,shikhardb/scikit-learn,rrohan/scikit-learn,hrjn/scikit-learn,joshloyal/scikit-learn,fzalkow/scikit-learn,bnaul/scikit-learn,vshtanko/scikit-learn,jm-begon/scikit-learn,mxjl620/scikit-learn,q1ang/scikit-learn,cainiaocome/scikit-learn,fabianp/scikit-learn,plissonf/scikit-learn,hsuantien/scikit-learn,kashif/scikit-learn,Djabbz/scikit-learn,manhhomienbienthuy/scikit-learn,aetilley/scikit-learn,iismd17/scikit-learn,clemkoa/scikit-learn,pythonvietnam/scikit-learn,xavierwu/scikit-learn,nmayorov/scikit-learn,tmhm/scikit-learn,untom/scikit-learn,LiaoPan/scikit-learn,aabadie/scikit-learn,imaculate/scikit-learn,yonglehou/scikit-learn,LiaoPan/scikit-learn,voxlol/scikit-learn,JsNoNo/scikit-learn,jjx02230808/project0223,LohithBlaze/scikit-learn,lin-credible/scikit-learn,LohithBlaze/scikit-learn,Garrett-R/scikit-learn,shusenl/scikit-learn,eickenberg/scikit-learn,jpautom/scikit-learn,hrjn/scikit-learn,sanketloke/scikit-learn,aminert/scikit-learn,lin-credible/scikit-learn,r-mart/scikit-learn,quheng/scikit-learn,henridwyer/scikit-learn,fbagirov/scikit-learn,fyffyt/scikit-learn,robbymeal
s/scikit-learn,pianomania/scikit-learn,jaidevd/scikit-learn,larsmans/scikit-learn,Garrett-R/scikit-learn,evgchz/scikit-learn,MatthieuBizien/scikit-learn,ZENGXH/scikit-learn,russel1237/scikit-learn,joshloyal/scikit-learn,joernhees/scikit-learn,zorojean/scikit-learn,herilalaina/scikit-learn,dsullivan7/scikit-learn,ssaeger/scikit-learn,lazywei/scikit-learn,meduz/scikit-learn,xwolf12/scikit-learn,amueller/scikit-learn,DonBeo/scikit-learn,henridwyer/scikit-learn,aetilley/scikit-learn,Myasuka/scikit-learn,mattgiguere/scikit-learn,akionakamura/scikit-learn,jmetzen/scikit-learn,themrmax/scikit-learn,jseabold/scikit-learn,raghavrv/scikit-learn,chrisburr/scikit-learn,saiwing-yeung/scikit-learn,NelisVerhoef/scikit-learn,JsNoNo/scikit-learn,PrashntS/scikit-learn,sanketloke/scikit-learn,idlead/scikit-learn,mayblue9/scikit-learn,yanlend/scikit-learn,zorojean/scikit-learn,Windy-Ground/scikit-learn,mjgrav2001/scikit-learn,kaichogami/scikit-learn,0x0all/scikit-learn,beepee14/scikit-learn,gotomypc/scikit-learn,phdowling/scikit-learn,Titan-C/scikit-learn,procoder317/scikit-learn,hugobowne/scikit-learn,eickenberg/scikit-learn,fengzhyuan/scikit-learn,hlin117/scikit-learn,PatrickOReilly/scikit-learn,CVML/scikit-learn,thilbern/scikit-learn,betatim/scikit-learn,eickenberg/scikit-learn,larsmans/scikit-learn,andrewnc/scikit-learn,hugobowne/scikit-learn,mxjl620/scikit-learn,rajat1994/scikit-learn,vshtanko/scikit-learn,walterreade/scikit-learn,aetilley/scikit-learn,mhdella/scikit-learn,victorbergelin/scikit-learn,marcocaccin/scikit-learn,rexshihaoren/scikit-learn,larsmans/scikit-learn,arabenjamin/scikit-learn,yask123/scikit-learn,fredhusser/scikit-learn,ky822/scikit-learn,shangwuhencc/scikit-learn,tomlof/scikit-learn,petosegan/scikit-learn,hitszxp/scikit-learn,bnaul/scikit-learn,glouppe/scikit-learn,xiaoxiamii/scikit-learn,PatrickOReilly/scikit-learn,mattgiguere/scikit-learn,voxlol/scikit-learn,ilyes14/scikit-learn,fabioticconi/scikit-learn,mfjb/scikit-learn,pv/scikit-learn,ltiao/scikit-learn,
saiwing-yeung/scikit-learn,mehdidc/scikit-learn,hdmetor/scikit-learn,mjgrav2001/scikit-learn,akionakamura/scikit-learn,heli522/scikit-learn,xuewei4d/scikit-learn,waterponey/scikit-learn,rahuldhote/scikit-learn,xubenben/scikit-learn,JsNoNo/scikit-learn,yask123/scikit-learn,ElDeveloper/scikit-learn,NunoEdgarGub1/scikit-learn,pompiduskus/scikit-learn,smartscheduling/scikit-learn-categorical-tree,manashmndl/scikit-learn,lin-credible/scikit-learn,phdowling/scikit-learn,arabenjamin/scikit-learn,larsmans/scikit-learn,tawsifkhan/scikit-learn,ChanChiChoi/scikit-learn,MartinSavc/scikit-learn,zuku1985/scikit-learn,treycausey/scikit-learn,LiaoPan/scikit-learn,xwolf12/scikit-learn,466152112/scikit-learn,jjx02230808/project0223,hugobowne/scikit-learn,macks22/scikit-learn,Garrett-R/scikit-learn,shangwuhencc/scikit-learn,fzalkow/scikit-learn,bhargav/scikit-learn,ahoyosid/scikit-learn,rexshihaoren/scikit-learn,ilo10/scikit-learn,shikhardb/scikit-learn,sumspr/scikit-learn,anurag313/scikit-learn,michigraber/scikit-learn,tomlof/scikit-learn,etkirsch/scikit-learn,pv/scikit-learn,procoder317/scikit-learn,ltiao/scikit-learn,liyu1990/sklearn,theoryno3/scikit-learn,untom/scikit-learn,bikong2/scikit-learn,xavierwu/scikit-learn,abhishekkrthakur/scikit-learn,eg-zhang/scikit-learn,ishanic/scikit-learn,arahuja/scikit-learn,voxlol/scikit-learn,thientu/scikit-learn,Aasmi/scikit-learn,jlegendary/scikit-learn,hsiaoyi0504/scikit-learn,lenovor/scikit-learn,kjung/scikit-learn,hainm/scikit-learn,carrillo/scikit-learn,ngoix/OCRF,PatrickChrist/scikit-learn,MartinSavc/scikit-learn,pythonvietnam/scikit-learn,yunfeilu/scikit-learn,scikit-learn/scikit-learn,billy-inn/scikit-learn,bikong2/scikit-learn,hdmetor/scikit-learn,spallavolu/scikit-learn,hdmetor/scikit-learn,liberatorqjw/scikit-learn,cwu2011/scikit-learn,devanshdalal/scikit-learn,cauchycui/scikit-learn,fyffyt/scikit-learn,zuku1985/scikit-learn,mfjb/scikit-learn,elkingtonmcb/scikit-learn,jakirkham/scikit-learn,ningchi/scikit-learn,Garrett-R/scikit-learn
,nmayorov/scikit-learn,xiaoxiamii/scikit-learn,clemkoa/scikit-learn,ilyes14/scikit-learn,UNR-AERIAL/scikit-learn,pnedunuri/scikit-learn,RPGOne/scikit-learn,davidgbe/scikit-learn,clemkoa/scikit-learn,billy-inn/scikit-learn,sergeyf/scikit-learn,wlamond/scikit-learn,RPGOne/scikit-learn,qifeigit/scikit-learn,samzhang111/scikit-learn,nvoron23/scikit-learn,AnasGhrab/scikit-learn,nhejazi/scikit-learn,DonBeo/scikit-learn,aminert/scikit-learn,IssamLaradji/scikit-learn,scikit-learn/scikit-learn,macks22/scikit-learn,CVML/scikit-learn,anurag313/scikit-learn,IndraVikas/scikit-learn,akionakamura/scikit-learn,mojoboss/scikit-learn,ZENGXH/scikit-learn,appapantula/scikit-learn,thilbern/scikit-learn,ltiao/scikit-learn,aminert/scikit-learn,potash/scikit-learn,huzq/scikit-learn,ivannz/scikit-learn,liyu1990/sklearn,Myasuka/scikit-learn,jayflo/scikit-learn,akionakamura/scikit-learn,Akshay0724/scikit-learn,huobaowangxi/scikit-learn,ElDeveloper/scikit-learn,rexshihaoren/scikit-learn,jlegendary/scikit-learn,olologin/scikit-learn,evgchz/scikit-learn,andaag/scikit-learn,0asa/scikit-learn,massmutual/scikit-learn,sarahgrogan/scikit-learn,siutanwong/scikit-learn,rohanp/scikit-learn,simon-pepin/scikit-learn,frank-tancf/scikit-learn,lbishal/scikit-learn,roxyboy/scikit-learn,vivekmishra1991/scikit-learn,alexsavio/scikit-learn,hrjn/scikit-learn,0x0all/scikit-learn,jmschrei/scikit-learn,manashmndl/scikit-learn,f3r/scikit-learn,bigdataelephants/scikit-learn,JosmanPS/scikit-learn,phdowling/scikit-learn,tdhopper/scikit-learn,abimannans/scikit-learn,abhishekgahlot/scikit-learn,sergeyf/scikit-learn,AnasGhrab/scikit-learn,fredhusser/scikit-learn,zhenv5/scikit-learn,3manuek/scikit-learn,ishanic/scikit-learn,AlexandreAbraham/scikit-learn,ogrisel/scikit-learn,deepesch/scikit-learn,spallavolu/scikit-learn,tawsifkhan/scikit-learn,NunoEdgarGub1/scikit-learn,appapantula/scikit-learn,joernhees/scikit-learn,AlexRobson/scikit-learn,olologin/scikit-learn,sonnyhu/scikit-learn,trankmichael/scikit-learn,AlexandreAbraham
/scikit-learn,Djabbz/scikit-learn,jakobworldpeace/scikit-learn,mikebenfield/scikit-learn,liyu1990/sklearn,vigilv/scikit-learn,yyjiang/scikit-learn,madjelan/scikit-learn,vivekmishra1991/scikit-learn,michigraber/scikit-learn,rahul-c1/scikit-learn,aewhatley/scikit-learn,arjoly/scikit-learn,wzbozon/scikit-learn,rrohan/scikit-learn,NelisVerhoef/scikit-learn,TomDLT/scikit-learn,iismd17/scikit-learn,murali-munna/scikit-learn,trankmichael/scikit-learn,ngoix/OCRF,mojoboss/scikit-learn,frank-tancf/scikit-learn,manashmndl/scikit-learn,phdowling/scikit-learn,jblackburne/scikit-learn,cybernet14/scikit-learn,anurag313/scikit-learn,sonnyhu/scikit-learn,xuewei4d/scikit-learn,trungnt13/scikit-learn,robin-lai/scikit-learn,hitszxp/scikit-learn,justincassidy/scikit-learn,AlexanderFabisch/scikit-learn,saiwing-yeung/scikit-learn,jblackburne/scikit-learn,tosolveit/scikit-learn,dsullivan7/scikit-learn,etkirsch/scikit-learn,Achuth17/scikit-learn,0x0all/scikit-learn,jpautom/scikit-learn,TomDLT/scikit-learn,IshankGulati/scikit-learn,ClimbsRocks/scikit-learn,jmetzen/scikit-learn,deepesch/scikit-learn,mhdella/scikit-learn,Barmaley-exe/scikit-learn,bigdataelephants/scikit-learn,AnasGhrab/scikit-learn,dhruv13J/scikit-learn,equialgo/scikit-learn,Sentient07/scikit-learn,idlead/scikit-learn,sinhrks/scikit-learn,pompiduskus/scikit-learn,h2educ/scikit-learn,rvraghav93/scikit-learn,mattilyra/scikit-learn,cauchycui/scikit-learn,jaidevd/scikit-learn,xwolf12/scikit-learn,mehdidc/scikit-learn,xwolf12/scikit-learn,kylerbrown/scikit-learn,khkaminska/scikit-learn,cl4rke/scikit-learn,rvraghav93/scikit-learn,ephes/scikit-learn,kevin-intel/scikit-learn,Barmaley-exe/scikit-learn,abhishekgahlot/scikit-learn,ElDeveloper/scikit-learn,loli/semisupervisedforests,jayflo/scikit-learn,glouppe/scikit-learn,jakirkham/scikit-learn,OshynSong/scikit-learn,theoryno3/scikit-learn,Adai0808/scikit-learn,nomadcube/scikit-learn,shenzebang/scikit-learn,sergeyf/scikit-learn,devanshdalal/scikit-learn,MatthieuBizien/scikit-learn,herila
laina/scikit-learn,jblackburne/scikit-learn,AlexandreAbraham/scikit-learn,anirudhjayaraman/scikit-learn,kagayakidan/scikit-learn,abimannans/scikit-learn,Titan-C/scikit-learn,mhdella/scikit-learn,ashhher3/scikit-learn,UNR-AERIAL/scikit-learn,Djabbz/scikit-learn,liangz0707/scikit-learn,yyjiang/scikit-learn,fengzhyuan/scikit-learn,schets/scikit-learn,rohanp/scikit-learn,ycaihua/scikit-learn,jblackburne/scikit-learn,carrillo/scikit-learn,fbagirov/scikit-learn,scikit-learn/scikit-learn,Windy-Ground/scikit-learn,ogrisel/scikit-learn,Fireblend/scikit-learn,manhhomienbienthuy/scikit-learn,YinongLong/scikit-learn,JosmanPS/scikit-learn,bigdataelephants/scikit-learn,dhruv13J/scikit-learn,raghavrv/scikit-learn,aewhatley/scikit-learn,carrillo/scikit-learn,wlamond/scikit-learn,AlexandreAbraham/scikit-learn,DSLituiev/scikit-learn,kashif/scikit-learn,YinongLong/scikit-learn,vigilv/scikit-learn,RayMick/scikit-learn,MatthieuBizien/scikit-learn,aabadie/scikit-learn,aabadie/scikit-learn,jkarnows/scikit-learn,MartinDelzant/scikit-learn,cainiaocome/scikit-learn,ssaeger/scikit-learn,samuel1208/scikit-learn,russel1237/scikit-learn,Aasmi/scikit-learn,plissonf/scikit-learn,petosegan/scikit-learn,untom/scikit-learn,mikebenfield/scikit-learn,nmayorov/scikit-learn,Djabbz/scikit-learn,IshankGulati/scikit-learn,samzhang111/scikit-learn,wazeerzulfikar/scikit-learn,jorge2703/scikit-learn,jorik041/scikit-learn,jakobworldpeace/scikit-learn,kaichogami/scikit-learn,mwv/scikit-learn,mehdidc/scikit-learn,mattilyra/scikit-learn,AIML/scikit-learn,icdishb/scikit-learn,wzbozon/scikit-learn,shikhardb/scikit-learn,q1ang/scikit-learn,jmetzen/scikit-learn,jjx02230808/project0223,jzt5132/scikit-learn,Jimmy-Morzaria/scikit-learn,deepesch/scikit-learn,elkingtonmcb/scikit-learn,alexeyum/scikit-learn,simon-pepin/scikit-learn,r-mart/scikit-learn,ahoyosid/scikit-learn,marcocaccin/scikit-learn,ltiao/scikit-learn,UNR-AERIAL/scikit-learn,gotomypc/scikit-learn,yyjiang/scikit-learn,wanggang3333/scikit-learn,costypetrisor/sc
ikit-learn,chrisburr/scikit-learn,cauchycui/scikit-learn,tomlof/scikit-learn,aflaxman/scikit-learn,fabioticconi/scikit-learn,robbymeals/scikit-learn,466152112/scikit-learn,lenovor/scikit-learn,alexsavio/scikit-learn,Clyde-fare/scikit-learn,yonglehou/scikit-learn,kylerbrown/scikit-learn,jpautom/scikit-learn,plissonf/scikit-learn,ogrisel/scikit-learn,costypetrisor/scikit-learn,nhejazi/scikit-learn,Jimmy-Morzaria/scikit-learn,NunoEdgarGub1/scikit-learn,aabadie/scikit-learn,stylianos-kampakis/scikit-learn,terkkila/scikit-learn,kjung/scikit-learn,fabianp/scikit-learn,nesterione/scikit-learn,vybstat/scikit-learn,bigdataelephants/scikit-learn,xzh86/scikit-learn,amueller/scikit-learn,abimannans/scikit-learn,shyamalschandra/scikit-learn,JsNoNo/scikit-learn,yonglehou/scikit-learn,ankurankan/scikit-learn,marcocaccin/scikit-learn,Obus/scikit-learn,loli/semisupervisedforests,ngoix/OCRF,r-mart/scikit-learn,arjoly/scikit-learn,ivannz/scikit-learn,Vimos/scikit-learn,pv/scikit-learn,themrmax/scikit-learn,shangwuhencc/scikit-learn,OshynSong/scikit-learn,cauchycui/scikit-learn,luo66/scikit-learn,NelisVerhoef/scikit-learn,vigilv/scikit-learn,ldirer/scikit-learn,JeanKossaifi/scikit-learn,dingocuster/scikit-learn,zaxtax/scikit-learn,ashhher3/scikit-learn,loli/sklearn-ensembletrees,stylianos-kampakis/scikit-learn,pianomania/scikit-learn,macks22/scikit-learn,anntzer/scikit-learn,jmschrei/scikit-learn,lbishal/scikit-learn,pythonvietnam/scikit-learn,pnedunuri/scikit-learn,ndingwall/scikit-learn,hitszxp/scikit-learn,evgchz/scikit-learn,amueller/scikit-learn,Myasuka/scikit-learn,Aasmi/scikit-learn,jkarnows/scikit-learn,andaag/scikit-learn,shahankhatch/scikit-learn,ahoyosid/scikit-learn,btabibian/scikit-learn,shyamalschandra/scikit-learn,Jimmy-Morzaria/scikit-learn,zorojean/scikit-learn,RPGOne/scikit-learn,sumspr/scikit-learn,roxyboy/scikit-learn,CVML/scikit-learn,hsiaoyi0504/scikit-learn,Titan-C/scikit-learn,kashif/scikit-learn,nomadcube/scikit-learn,bthirion/scikit-learn,anirudhjayaraman/scik
it-learn,ephes/scikit-learn,anirudhjayaraman/scikit-learn,AIML/scikit-learn,mhue/scikit-learn,YinongLong/scikit-learn,plissonf/scikit-learn,jseabold/scikit-learn,yanlend/scikit-learn,smartscheduling/scikit-learn-categorical-tree,Srisai85/scikit-learn,ngoix/OCRF,mlyundin/scikit-learn,pianomania/scikit-learn,PrashntS/scikit-learn,henrykironde/scikit-learn,sarahgrogan/scikit-learn,theoryno3/scikit-learn,mhue/scikit-learn,pnedunuri/scikit-learn,ZenDevelopmentSystems/scikit-learn,ZENGXH/scikit-learn,glemaitre/scikit-learn,ZENGXH/scikit-learn,Sentient07/scikit-learn,Titan-C/scikit-learn,anntzer/scikit-learn,yunfeilu/scikit-learn,poryfly/scikit-learn,henridwyer/scikit-learn,heli522/scikit-learn,vortex-ape/scikit-learn,giorgiop/scikit-learn,Aasmi/scikit-learn,MartinSavc/scikit-learn,lesteve/scikit-learn,Sentient07/scikit-learn,wanggang3333/scikit-learn,tosolveit/scikit-learn,luo66/scikit-learn,xzh86/scikit-learn,ngoix/OCRF,CforED/Machine-Learning,cl4rke/scikit-learn,petosegan/scikit-learn,giorgiop/scikit-learn,mjudsp/Tsallis,ky822/scikit-learn,jereze/scikit-learn,Barmaley-exe/scikit-learn,xzh86/scikit-learn,AnasGhrab/scikit-learn,0x0all/scikit-learn,ChanderG/scikit-learn,smartscheduling/scikit-learn-categorical-tree,Vimos/scikit-learn,tdhopper/scikit-learn,btabibian/scikit-learn,pkruskal/scikit-learn,rahuldhote/scikit-learn,rajat1994/scikit-learn,giorgiop/scikit-learn,jseabold/scikit-learn,xuewei4d/scikit-learn,jorik041/scikit-learn,loli/sklearn-ensembletrees,NunoEdgarGub1/scikit-learn,mjudsp/Tsallis,arjoly/scikit-learn,mayblue9/scikit-learn,nelson-liu/scikit-learn,alexeyum/scikit-learn,pratapvardhan/scikit-learn,alexeyum/scikit-learn,tawsifkhan/scikit-learn,andrewnc/scikit-learn,belltailjp/scikit-learn,JeanKossaifi/scikit-learn,HolgerPeters/scikit-learn,jakobworldpeace/scikit-learn,maheshakya/scikit-learn,tmhm/scikit-learn,0asa/scikit-learn,jorge2703/scikit-learn,PatrickChrist/scikit-learn,russel1237/scikit-learn,LohithBlaze/scikit-learn,gclenaghan/scikit-learn,lazywei/sci
kit-learn,manhhomienbienthuy/scikit-learn,MechCoder/scikit-learn,glennq/scikit-learn,mattilyra/scikit-learn,nrhine1/scikit-learn,q1ang/scikit-learn,ClimbsRocks/scikit-learn,mlyundin/scikit-learn,0x0all/scikit-learn,frank-tancf/scikit-learn,olologin/scikit-learn,jorik041/scikit-learn,evgchz/scikit-learn,liberatorqjw/scikit-learn,potash/scikit-learn,kashif/scikit-learn,lbishal/scikit-learn,xyguo/scikit-learn,AlexanderFabisch/scikit-learn,ldirer/scikit-learn,shahankhatch/scikit-learn,harshaneelhg/scikit-learn,Nyker510/scikit-learn,shusenl/scikit-learn,nrhine1/scikit-learn,Myasuka/scikit-learn,shyamalschandra/scikit-learn,hsuantien/scikit-learn,adamgreenhall/scikit-learn,ishanic/scikit-learn,cainiaocome/scikit-learn,tdhopper/scikit-learn,liangz0707/scikit-learn,heli522/scikit-learn,murali-munna/scikit-learn,vinayak-mehta/scikit-learn,JeanKossaifi/scikit-learn,RayMick/scikit-learn,Obus/scikit-learn,pkruskal/scikit-learn,kaichogami/scikit-learn,equialgo/scikit-learn,russel1237/scikit-learn,toastedcornflakes/scikit-learn,RachitKansal/scikit-learn,gclenaghan/scikit-learn,jorge2703/scikit-learn,kagayakidan/scikit-learn,ClimbsRocks/scikit-learn,hainm/scikit-learn,Nyker510/scikit-learn,bnaul/scikit-learn,harshaneelhg/scikit-learn,UNR-AERIAL/scikit-learn,Vimos/scikit-learn,aetilley/scikit-learn,jlegendary/scikit-learn,henrykironde/scikit-learn,procoder317/scikit-learn,ndingwall/scikit-learn,aewhatley/scikit-learn,zaxtax/scikit-learn,MohammedWasim/scikit-learn,chrisburr/scikit-learn,pypot/scikit-learn,andaag/scikit-learn,q1ang/scikit-learn,RachitKansal/scikit-learn,huzq/scikit-learn,procoder317/scikit-learn,jm-begon/scikit-learn,AIML/scikit-learn,toastedcornflakes/scikit-learn,espg/scikit-learn,0asa/scikit-learn,pkruskal/scikit-learn,glemaitre/scikit-learn,devanshdalal/scikit-learn,dhruv13J/scikit-learn,rrohan/scikit-learn,Garrett-R/scikit-learn,Fireblend/scikit-learn,theoryno3/scikit-learn,walterreade/scikit-learn,krez13/scikit-learn,dingocuster/scikit-learn,rajat1994/scikit-le
arn,IssamLaradji/scikit-learn,waterponey/scikit-learn,mblondel/scikit-learn,mattilyra/scikit-learn,shahankhatch/scikit-learn,beepee14/scikit-learn,carrillo/scikit-learn,raghavrv/scikit-learn,dhruv13J/scikit-learn,Achuth17/scikit-learn,altairpearl/scikit-learn,wanggang3333/scikit-learn,jjx02230808/project0223,ahoyosid/scikit-learn,cwu2011/scikit-learn,OshynSong/scikit-learn,arjoly/scikit-learn,victorbergelin/scikit-learn,manhhomienbienthuy/scikit-learn,eg-zhang/scikit-learn,ycaihua/scikit-learn,MatthieuBizien/scikit-learn,jlegendary/scikit-learn,mxjl620/scikit-learn,vybstat/scikit-learn,ldirer/scikit-learn,zaxtax/scikit-learn,costypetrisor/scikit-learn,adamgreenhall/scikit-learn,ankurankan/scikit-learn,samuel1208/scikit-learn,Clyde-fare/scikit-learn,wlamond/scikit-learn,JosmanPS/scikit-learn,tomlof/scikit-learn,pythonvietnam/scikit-learn,altairpearl/scikit-learn,PatrickChrist/scikit-learn,hsuantien/scikit-learn,etkirsch/scikit-learn,rahuldhote/scikit-learn,nomadcube/scikit-learn,anntzer/scikit-learn,JeanKossaifi/scikit-learn,jzt5132/scikit-learn,Jimmy-Morzaria/scikit-learn,sanketloke/scikit-learn,DSLituiev/scikit-learn,jakirkham/scikit-learn,costypetrisor/scikit-learn,liangz0707/scikit-learn,xyguo/scikit-learn,IndraVikas/scikit-learn,abhishekgahlot/scikit-learn,f3r/scikit-learn,xubenben/scikit-learn,petosegan/scikit-learn,RachitKansal/scikit-learn,nesterione/scikit-learn,JosmanPS/scikit-learn,zorroblue/scikit-learn,jaidevd/scikit-learn,LiaoPan/scikit-learn,espg/scikit-learn,robin-lai/scikit-learn,walterreade/scikit-learn,IndraVikas/scikit-learn,samuel1208/scikit-learn,pypot/scikit-learn,vshtanko/scikit-learn,djgagne/scikit-learn,btabibian/scikit-learn,robbymeals/scikit-learn,simon-pepin/scikit-learn,PatrickChrist/scikit-learn,aflaxman/scikit-learn,krez13/scikit-learn,BiaDarkia/scikit-learn,mugizico/scikit-learn,krez13/scikit-learn,xzh86/scikit-learn,mayblue9/scikit-learn,trankmichael/scikit-learn,ilyes14/scikit-learn,alexeyum/scikit-learn,wzbozon/scikit-learn,cainiao
come/scikit-learn,sarahgrogan/scikit-learn,adamgreenhall/scikit-learn,andaag/scikit-learn,vivekmishra1991/scikit-learn,walterreade/scikit-learn,krez13/scikit-learn,Adai0808/scikit-learn,mayblue9/scikit-learn,roxyboy/scikit-learn,ilyes14/scikit-learn,glennq/scikit-learn,meduz/scikit-learn,nikitasingh981/scikit-learn,yunfeilu/scikit-learn,beepee14/scikit-learn,MechCoder/scikit-learn,larsmans/scikit-learn,h2educ/scikit-learn,RomainBrault/scikit-learn,zihua/scikit-learn,zuku1985/scikit-learn,alvarofierroclavero/scikit-learn,hainm/scikit-learn,mjgrav2001/scikit-learn,loli/semisupervisedforests,fengzhyuan/scikit-learn,belltailjp/scikit-learn,MohammedWasim/scikit-learn,michigraber/scikit-learn,Akshay0724/scikit-learn,shahankhatch/scikit-learn,ankurankan/scikit-learn,tdhopper/scikit-learn,fabioticconi/scikit-learn,themrmax/scikit-learn,MartinDelzant/scikit-learn,meduz/scikit-learn,ky822/scikit-learn,zihua/scikit-learn,ChanChiChoi/scikit-learn,terkkila/scikit-learn,sinhrks/scikit-learn,vermouthmjl/scikit-learn,eg-zhang/scikit-learn,ashhher3/scikit-learn,zorroblue/scikit-learn,treycausey/scikit-learn,466152112/scikit-learn,joernhees/scikit-learn,billy-inn/scikit-learn,qifeigit/scikit-learn,hlin117/scikit-learn,djgagne/scikit-learn,henrykironde/scikit-learn,RPGOne/scikit-learn,Fireblend/scikit-learn,altairpearl/scikit-learn,mugizico/scikit-learn,mblondel/scikit-learn,NelisVerhoef/scikit-learn,RayMick/scikit-learn,moutai/scikit-learn,marcocaccin/scikit-learn,samzhang111/scikit-learn,ningchi/scikit-learn,RomainBrault/scikit-learn,cwu2011/scikit-learn,rahul-c1/scikit-learn,chrisburr/scikit-learn,loli/sklearn-ensembletrees,glennq/scikit-learn,massmutual/scikit-learn,xubenben/scikit-learn,scikit-learn/scikit-learn,sinhrks/scikit-learn,toastedcornflakes/scikit-learn,wazeerzulfikar/scikit-learn,dingocuster/scikit-learn,tosolveit/scikit-learn,abhishekkrthakur/scikit-learn,arabenjamin/scikit-learn,JPFrancoia/scikit-learn,Windy-Ground/scikit-learn,Adai0808/scikit-learn,PatrickOReilly/sc
ikit-learn,TomDLT/scikit-learn,nelson-liu/scikit-learn,gclenaghan/scikit-learn,jmschrei/scikit-learn,Vimos/scikit-learn,jmetzen/scikit-learn,shusenl/scikit-learn,maheshakya/scikit-learn,mwv/scikit-learn,nrhine1/scikit-learn,wlamond/scikit-learn,glouppe/scikit-learn,DonBeo/scikit-learn,adamgreenhall/scikit-learn,PrashntS/scikit-learn,vermouthmjl/scikit-learn,nikitasingh981/scikit-learn,mblondel/scikit-learn,CVML/scikit-learn,robbymeals/scikit-learn,mattgiguere/scikit-learn,ssaeger/scikit-learn,PrashntS/scikit-learn,MartinDelzant/scikit-learn,elkingtonmcb/scikit-learn,roxyboy/scikit-learn,mugizico/scikit-learn,evgchz/scikit-learn,AIML/scikit-learn,jorik041/scikit-learn,ssaeger/scikit-learn,mxjl620/scikit-learn,rohanp/scikit-learn,lazywei/scikit-learn,manashmndl/scikit-learn,JPFrancoia/scikit-learn,cwu2011/scikit-learn,bthirion/scikit-learn,ElDeveloper/scikit-learn,fyffyt/scikit-learn,vortex-ape/scikit-learn,ankurankan/scikit-learn,rahul-c1/scikit-learn,HolgerPeters/scikit-learn,rexshihaoren/scikit-learn,bthirion/scikit-learn,jaidevd/scikit-learn,nvoron23/scikit-learn,mikebenfield/scikit-learn,fzalkow/scikit-learn,lbishal/scikit-learn,dsquareindia/scikit-learn,gclenaghan/scikit-learn,DSLituiev/scikit-learn,nelson-liu/scikit-learn,Obus/scikit-learn,0asa/scikit-learn,AlexanderFabisch/scikit-learn,joshloyal/scikit-learn,cl4rke/scikit-learn,TomDLT/scikit-learn,mjgrav2001/scikit-learn,mjudsp/Tsallis,arahuja/scikit-learn,lesteve/scikit-learn,jayflo/scikit-learn,BiaDarkia/scikit-learn,jm-begon/scikit-learn,MartinDelzant/scikit-learn,RachitKansal/scikit-learn,icdishb/scikit-learn,Srisai85/scikit-learn,tmhm/scikit-learn,lenovor/scikit-learn,moutai/scikit-learn,kevin-intel/scikit-learn,betatim/scikit-learn,rohanp/scikit-learn,davidgbe/scikit-learn,jmschrei/scikit-learn,abhishekkrthakur/scikit-learn,siutanwong/scikit-learn,arabenjamin/scikit-learn,h2educ/scikit-learn,raghavrv/scikit-learn,yonglehou/scikit-learn,jzt5132/scikit-learn,equialgo/scikit-learn,mojoboss/scikit-learn,nvor
on23/scikit-learn,herilalaina/scikit-learn,treycausey/scikit-learn,vibhorag/scikit-learn,michigraber/scikit-learn,fabianp/scikit-learn,rahuldhote/scikit-learn,xiaoxiamii/scikit-learn,Lawrence-Liu/scikit-learn,shusenl/scikit-learn,IndraVikas/scikit-learn,wazeerzulfikar/scikit-learn,mhue/scikit-learn,ycaihua/scikit-learn,anntzer/scikit-learn,elkingtonmcb/scikit-learn,betatim/scikit-learn,bhargav/scikit-learn,etkirsch/scikit-learn,mjudsp/Tsallis,beepee14/scikit-learn,siutanwong/scikit-learn,zhenv5/scikit-learn,jakirkham/scikit-learn,466152112/scikit-learn,ky822/scikit-learn,ngoix/OCRF,frank-tancf/scikit-learn,pompiduskus/scikit-learn,Clyde-fare/scikit-learn,aewhatley/scikit-learn,yanlend/scikit-learn,alvarofierroclavero/scikit-learn,xyguo/scikit-learn,macks22/scikit-learn,heli522/scikit-learn,shikhardb/scikit-learn,devanshdalal/scikit-learn,Sentient07/scikit-learn,chrsrds/scikit-learn,treycausey/scikit-learn,DonBeo/scikit-learn,ZenDevelopmentSystems/scikit-learn,hsuantien/scikit-learn,fabioticconi/scikit-learn | examples/linear_model/plot_ransac.py | examples/linear_model/plot_ransac.py | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, ensemble
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSAC(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Robustly fit linear model with bagged linear regressor
model_bagged = ensemble.BaggingRegressor(linear_model.LinearRegression())
model_bagged.fit(X, y)
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
line_y_bagged = model_bagged.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='RANSAC inliers')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='RANSAC outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.plot(line_X, line_y_bagged, '-y', label='Bagging regressor')
plt.legend(loc='lower left')
plt.show()
| """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.RANSAC(linear_model.LinearRegression())
model_robust.fit(X, y)
inlier_mask = model_robust.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause | Python |
d0e5ea752912b10e473b2a05da9196800eb6ca86 | Add an example for the RedisLock | dieseldev/diesel | examples/redis_lock.py | examples/redis_lock.py | import random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
global counter
client = RedisClient('localhost', 6379)
try:
with RedisLock(client, key, timeout=lock_timeout) as lock:
v = client.get(incr_key)
sleep(random.random() * sleep_factor)
client.set(incr_key, int(v) + 1)
counter += 1
except LockNotAcquired:
pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
| bsd-3-clause | Python |
|
d0b8c68ae3c8acbc3d5dfe13842e3c41a198b978 | Add script to fix all notions | l-vincent-l/alignements_backend | fix_notions_db.py | fix_notions_db.py | from alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
| mit | Python |
|
ad6e0bad22b0c5b0e6f97ceb13694ab804041443 | Add model resources. | GeneralMaximus/secondhand | tracker/api.py | tracker/api.py | from tastypie.resources import ModelResource
from tracker.models import Task, WorkSession
from django.contrib.auth.models import User
from tastypie import fields
class UserResource(ModelResource):
class Meta:
queryset = User.objects.all()
resource_name = 'user'
class TaskResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
queryset = Task.objects.all()
resource_name = 'task'
class WorkSessionResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
task = fields.ForeignKey(TaskResource, 'task')
class Meta:
queryset = WorkSession.objects.all()
resource_name = 'work_session'
| mit | Python |
|
8f05695a153eef415f752c42c2f737de3f120b55 | Create dgen_red_a.py | kyanyoga/iot_kafka_datagen | bin/dgen_red_a.py | bin/dgen_red_a.py | #!/usr/bin/env python
"""
Created on March 4, 2016
@author: Gus Segura
"""
# imports
import time
import random
import base64
import os
import sys
import math
import json
import redis
from collections import OrderedDict
# redis server connection
# Connection to Database: this is how we work with Redis
database = redis.StrictRedis(host='localhost', port=6379, db=0)
# Output file for testing when not pushing to Kafka, Redis, SparkStreaming.
# pwd = os.path.dirname(__file__)
# print pwd
# outputpath = os.path.normpath(pwd + '/../sample_data/' + sys.argv[1])
# outputpath = os.path.normpath(sys.argv[1])
# outputpath = "file.log"
# print outputpath
# startng values for simulated sensors
start = time.time()
start_value = 0
baseTemp = 32.0
basePresure = 1000
baseLevel = 10
jmsg = {}
# create new redis cache object : TODO - Migrate Object Creation
def Sensmsg(object):
""" Holds the model for Sensor Message
Performs sensor message data storage management using the next data structure:
next-sensor-msg-id: <uid> - holds the next sensor message id to generate
"sensmsg:<uid>": {
timestamp:<timestamp>
timezone:<timezone>
millisec:<millis>
sensname:<sensor_name>
senstype:<sensor_type>
metric:<value>
}
"""
pass
# create dictionary
def create_jmsg(timestamp, timezone, millis, sensor, senstype, metric):
msg = OrderedDict() # ordered dictionary
if(timestamp != ""):
msg["timestamp"] = timestamp
if(timezone != ""):
msg["timezone"] = timezone
if(millis != ""):
msg["millis"] = millis
if(sensor != ""):
msg["sensor"] = sensor
if(senstype != ""):
msg["senstype"] = senstype
if(metric != ""):
msg["metric"] = metric
print(json.dumps(msg))
return msg
# main infinite loop
while (True):
t = time.strftime('%Y-%m-%dT%H:%M:%S')
timezone = time.strftime('%z')
millis = "%.3d" % (time.time() % 1 * 1000)
sin_value = math.sin(start_value)
start_value += 1
#open file for append
# outputfile = open(outputpath, 'a+')
# sensor name
#create random values - well match sensor id to type for now.
sensor = random.sample(['sen/1', 'sen/2', 'sen/3', 'send/4'], 1)[0]
# metric type
metric = random.sample(['currentTemp', 'currentPresure', 'currentLevel'], 1)[0]
# case -- yuk: python uses if else.
# -------------------------------- #
if metric == 'currentTemp':
baseTemp = baseTemp + sin_value
if baseTemp <= 0:
baseTemp = 32.0 # reset if sin function takes you negative
# create message dictionary
jmsg = create_jmsg(t,timezone,millis, "sen/1", metric, baseTemp)
if metric == 'currentPresure':
basePresure = basePresure + sin_value*10
if basePresure <= 0:
basePresure = 1000 # reset if sin function takes you negative
# create message dictionary
jmsg = create_jmsg(t,timezone,millis, "sen/2", metric, basePresure)
if metric == 'currentLevel':
baseLevel = baseLevel + sin_value*.10
if baseLevel <= 0:
baseLevel = 10
# create message dictionary
jmsg = create_jmsg(t,timezone,millis, "sen/3", metric, baseLevel)
# TODO: Push to Redis
msg_id = database.incr("next-senmsg-id")
print (msg_id)
# "HMSET" allows to set many keys for hash map
database.hmset("sensmsg:{0}".format(msg_id),
{
"timestamp":jmsg.get("timestamp"),
"timezone":jmsg.get("timezone"),
"millisec":jmsg.get("millis"),
"sensname":jmsg.get("sensor"),
"senstype":jmsg.get("senstype"),
"metric":jmsg.get("metric")
}
)
# sleep to slow down generation
time.sleep( .7750 / 1000.0 )
# reset values for next cycle
jmsg = {}
| mit | Python |
|
c97680113fb25ed43e96c26d02bfd57e15e427b8 | Add missing migrations | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/billing/migrations/0004_invoice_usage_pdf.py | nodeconductor/billing/migrations/0004_invoice_usage_pdf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('billing', '0003_invoice_status'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='usage_pdf',
field=models.FileField(null=True, upload_to=b'invoices_usage', blank=True),
preserve_default=True,
),
]
| mit | Python |
|
e456e4799f5cee13ce1b5f93a9cc91b28059db16 | Remove tests that use real data from the individual inferred test | e-mission/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server | emission/individual_tests/TestMetricsInferredSections.py | emission/individual_tests/TestMetricsInferredSections.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import logging
import arrow
import os
import emission.core.get_database as edb
import emission.core.wrapper.localdate as ecwl
import emission.tests.common as etc
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
import emission.storage.decorations.local_date_queries as esdldq
from emission.net.api import metrics
class TestMetricsInferredSections(unittest.TestCase):
def setUp(self):
self.seed_mode_path = etc.copy_dummy_seed_for_inference()
etc.setupRealExample(self,
"emission/tests/data/real_examples/shankari_2015-aug-21")
self.testUUID1 = self.testUUID
etc.setupRealExample(self,
"emission/tests/data/real_examples/shankari_2015-aug-27")
etc.runIntakePipeline(self.testUUID1)
etc.runIntakePipeline(self.testUUID)
logging.info(
"After loading, timeseries db size = %s" % edb.get_timeseries_db().count())
self.aug_start_ts = 1438387200
self.aug_end_ts = 1441065600
self.day_start_dt = esdldq.get_local_date(self.aug_start_ts, "America/Los_Angeles")
self.day_end_dt = esdldq.get_local_date(self.aug_end_ts, "America/Los_Angeles")
def tearDown(self):
self.clearRelatedDb()
os.remove(self.seed_mode_path)
def clearRelatedDb(self):
edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID})
edb.get_timeseries_db().delete_many({"user_id": self.testUUID1})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.testUUID1})
edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID1})
def testCountNoEntries(self):
# Ensure that we don't crash if we don't find any entries
# Should return empty array instead
# Unlike in https://amplab.cs.berkeley.edu/jenkins/job/e-mission-server-prb/591/
met_result_ld = metrics.summarize_by_local_date(self.testUUID,
ecwl.LocalDate({'year': 2000}),
ecwl.LocalDate({'year': 2001}),
'MONTHLY', ['count'], True)
self.assertEqual(list(met_result_ld.keys()), ['aggregate_metrics', 'user_metrics'])
self.assertEqual(met_result_ld['aggregate_metrics'][0], [])
self.assertEqual(met_result_ld['user_metrics'][0], [])
met_result_ts = metrics.summarize_by_timestamp(self.testUUID,
arrow.get(2000,1,1).timestamp,
arrow.get(2001,1,1).timestamp,
'm', ['count'], True)
self.assertEqual(list(met_result_ts.keys()), ['aggregate_metrics', 'user_metrics'])
self.assertEqual(met_result_ts['aggregate_metrics'][0], [])
self.assertEqual(met_result_ts['user_metrics'][0], [])
if __name__ == '__main__':
import emission.tests.common as etc
etc.configLogging()
unittest.main()
| bsd-3-clause | Python |
|
321463a5d7f102431ed286d57d1a8fa8c576cca7 | add plotting fns | dharhas/terrapin | terrapin/plot.py | terrapin/plot.py | import matplotlib.pyplot as plt
def flow_grid(dem, angles):
pass | bsd-2-clause | Python |
|
ed23fb301503d331af243a37d1b0a934d5d2f21c | add laser plugin object | b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril | mythril/laser/ethereum/plugins/plugin.py | mythril/laser/ethereum/plugins/plugin.py | from mythril.laser.ethereum.svm import LaserEVM
class LaserPlugin:
""" Base class for laser plugins
Functionality in laser that the symbolic execution process does not need to depend on
can be implemented in the form of a laser plugin.
Laser plugins implement the function initialize(symbolic_vm) which is called with the laser virtual machine
when they are loaded.
Regularly a plugin will introduce several hooks into laser in this function
Plugins can direct actions by raising Signals defined in mythril.laser.ethereum.plugins.signals
For example, a pruning plugin might raise the PluginSkipWorldState signal.
"""
def initialize(self, symbolic_vm: LaserEVM):
""" Initializes this plugin on the symbolic virtual machine
:param symbolic_vm: symbolic virtual machine to initialize the laser plugin on
"""
raise NotImplementedError
| mit | Python |
|
550469032843eb2af3b4a9faaed34d9754f00700 | Add command to test managers emails | mabhub/Geotrek,camillemonchicourt/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,makinacorpus/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,Anaethelion/Geotrek,Anaethelion/Geotrek,camillemonchicourt/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,johan--/Geotrek,makinacorpus/Geotrek,camillemonchicourt/Geotrek,GeotrekCE/Geotrek-admin,mabhub/Geotrek | geotrek/common/management/commands/test_managers_emails.py | geotrek/common/management/commands/test_managers_emails.py | from django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test if email settings are OK by sending mail to site managers"
def execute(self, *args, **options):
subject = u'Test email for managers'
message = u'If you receive this email, it seems that conf is OK !'
mail_managers(subject, message, fail_silently=False)
| bsd-2-clause | Python |
|
edb9500824faffd9f1d0d1b59ca29966e3b18282 | Customize behave formatter to output json | avidas/reliability-demo | modules/formatter_record.py | modules/formatter_record.py | from behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
name = "super"
description = "Formatter for adding REST calls to JSON output."
jsteps = {} # Contains an array of features, that contains array of steps in each feature
# Overriding Background Function. This runs evertime a Background is ran.
# This step
def background(self, background):
# Let the parent run first
super(RecordFormatter, self).background(background)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Remove all the background steps from our jsteps, as they are not required
for step in background.steps:
self.jsteps[self.current_feature_element.name].pop(0)
# Overriding Step feature. This is called everytime a step is found in feature file. This happens before the feature/scenario are executed.
def step(self, step):
# Let the parent run first
super(RecordFormatter, self).step(step)
# Check if the current feature has a name - Could be removed
if (self.isnotBackground()):
# Append the step into our own collection of jsteps.
self.jsteps[self.current_feature_element['name']].append(step);
# Overriding End of Feature. This is ran once the entire feature has completed running
def eof(self):
# Iterate through each scenarios
for scenario in self.current_feature_data['elements']:
# Check if Scenario valid
if (scenario['name'] != ''):
steps = scenario['steps']
jscenariosteps = self.jsteps[scenario['name']]
status = "passed" # Add Scenario status
# Iterate through the jstep, and step results
for (j, jstep) in enumerate(jscenariosteps):
# Check if any of the above status failed, if so, mark the status as failed
if ('result' in steps[j]):
if steps[j]['result']['status'] == 'failed':
status = 'failed'
# Add configurations in scenario level. generally used for sdk_language and sdk_version
if (hasattr(jstep, "details")):
scenario['details'] = jstep.details
if (hasattr(jstep, "date")):
steps[j]['date'] = jstep.date
# Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
if (hasattr(jstep, "calls") and 'result' in steps[j]):
# add the calls to our step object, that would be later added to json output.
steps[j]['result']['calls'] = jstep.calls
# Add feature name and Status as a part of scenario
scenario['feature'] = self.current_feature.name
scenario['status'] = status
# Let the parent run last here
super(RecordFormatter, self).eof()
def isnotBackground(self):
if(self.current_feature_element['name'] != ''):
if(self.current_feature_element['name'] not in self.jsteps):
self.jsteps[self.current_feature_element['name']] = []
return True
return False
| mit | Python |
|
def7e3aeaf3b0cd1a6486c72c68a3baad77ef3e5 | Create leetcode-50.py | jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm | python_practice/leetCode/leetcode-50.py | python_practice/leetCode/leetcode-50.py | class Solution:
def myPow(self, x: 'float', n: 'int') -> 'float':
return x**n
def myPow2(self, x: 'float', n: 'int') -> 'float':
if n == 0:
return 1
if n < 0:
n = 0-n
x = 1/x
return x**(n%2)*myPow2(x*x, n//2)
| mit | Python |
|
71e431a5eccc6483847888fb0f8f5f30f182913a | add a script to convert xml documentation into json | GodotExplorer/godot-tools,GodotExplorer/godot-tools | doc/xmldoc2json.py | doc/xmldoc2json.py | #!/usr/bin/python
import sys
import xml.etree.ElementTree as ET
import json
def parseClass(data):
dictCls = dict(data.attrib)
dictCls['brief_description'] = data.find("brief_description").text.strip()
dictCls['description'] = data.find("description").text.strip()
dictCls['methods'] = []
for m in data.find("methods"):
dictCls['methods'].append(parseMethod(m))
dictCls['signals'] = []
for s in (data.find("signals") if data.find("signals") is not None else []):
dictCls['signals'].append(parseMethod(s))
dictCls['constants'] = []
for c in (data.find("constants") if data.find("constants") is not None else []):
dictCls['constants'].append(parseConstant(c))
dictCls['properties'] = []
for m in (data.find("members") if data.find("members") is not None else []):
dictCls['properties'].append(parseProperty(m))
dictCls['theme_properties'] = []
for thi in (data.find("theme_items") if data.find("theme_items") is not None else []):
dictCls['theme_properties'].append(parseProperty(thi))
return dictCls
def parseMethod(data):
dictMethod = dict(data.attrib)
dictMethod['description'] = data.find("description").text.strip()
dictMethod['return_type'] = data.find("return").attrib["type"] if data.find("return") is not None else ""
if "qualifiers" not in dictMethod: dictMethod["qualifiers"] = ""
dictMethod["arguments"] = []
for arg in data.iter('argument'):
dictMethod["arguments"].append(parseArgument(arg))
return dictMethod
def parseArgument(data):
dictArg = dict(data.attrib)
if "dictArg" in dictArg: dictArg.pop("index")
dictArg["default_value"] = dictArg["default"] if "default" in dictArg else ""
if "default" in dictArg: dictArg.pop("default")
return dictArg
def parseConstant(data):
dictConst = dict(data.attrib)
dictConst["description"] = data.text.strip()
return dictConst
def parseProperty(data):
dictProp = dict(data.attrib)
dictProp["description"] = data.text.strip()
return dictProp
def main():
if len(sys.argv) >=2 :
tree = ET.parse(open(sys.argv[1], 'r'))
classes = {}
for cls in tree.getroot():
dictCls = parseClass(cls)
classes[dictCls['name']] = dictCls
jsonContent = json.dumps({"classes": classes, "version": "2.1.3"}, ensure_ascii=False, indent=2)
print(jsonContent)
if __name__ == '__main__':
main()
| mit | Python |
|
4ca8d43d8e6ec243d9812bb313a8e7a21ad781ea | Add DB exercise. | bluedai180/PythonExercise,bluedai180/PythonExercise | Exercise/DB.py | Exercise/DB.py | import mysql.connector
conn = mysql.connector.connect(user='root', password='blue', database='test')
cursor = conn.cursor()
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
cursor.execute('insert into user (id, name) values (%s, %s)', ['1', 'Dai'])
print(cursor.rowcount)
conn.commit()
cursor.close()
cursor = conn.cursor()
cursor.execute('select * from user where id = %s', ('1',))
values = cursor.fetchall()
print(values)
cursor.close()
conn.close()
| apache-2.0 | Python |
|
c4e1e034a3f0be3590dc78c5683d9deaf44d696f | add example of escape character | AlanCoding/Ansible-inventory-file-examples,AlanCoding/Ansible-inventory-file-examples | scripts/escape/backslash.py | scripts/escape/backslash.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
'''
This tests the acceptance of backslashes
\\f should be okay
\f is not necessarily okay, because json.dumps will not dump this
'''
print json.dumps({
"foogroup": {
"hosts": [
"foobar"
]
},
"_meta": {
"hostvars": {
"foobar": {
"host_specific_var": "ba\frrr",
"from_issue": "H%]~7\f0$ and this... O'Jw\u00188\u0006\b... "
}
}
}
}, indent=4) | mit | Python |
|
c5c0d3f447b1295bdbbda60c38c4123f2e8d871c | add gnucash2ledger converter | MatzeB/pygnucash,MatzeB/pygnucash | gnucash2ledger.py | gnucash2ledger.py | #!/usr/bin/env python
import sys
import codecs
import gnucash
out = codecs.getwriter('UTF-8')(sys.stdout)
if len(sys.argv) == 1:
sys.stderr.write("Invocation: %s gnucash_filename\n" % sys.argv[0])
sys.exit(1)
data = gnucash.read_file(sys.argv[1])
def format_commodity(commodity):
mnemonic = commodity.mnemonic
try:
if mnemonic.encode('ascii').isalpha():
return mnemonic
except:
pass
return "\"%s\"" % mnemonic # TODO: escape " char in mnemonic
def full_acc_name(acc):
result = ""
if acc.parent.parent.parent is not None:
result = full_acc_name(acc.parent) + ":"
result += acc.name
return result
commodities = data.commodities.values()
for commodity in commodities:
if commodity.mnemonic == "":
continue
out.write("commodity %s\n" % format_commodity(commodity))
if commodity.fullname != "":
out.write("\tnote %s\n" % commodity.fullname)
out.write("\n")
accounts = data.accounts.values()
for acc in accounts:
# ignore "dummy" accounts
if acc.type is None or acc.type == "ROOT":
continue
if str(acc.commodity) == "template":
continue
out.write("account %s\n" % (full_acc_name(acc), ))
if acc.description != "":
out.write("\tnote %s\n" % (acc.description,))
formated_commodity = format_commodity(acc.commodity)
formated_commodity = formated_commodity.replace("\"", "\\\"")
out.write("\tcheck commodity == \"%s\"\n" % formated_commodity)
out.write("\n")
# Prices
prices = data.prices.values()
prices.sort(key = lambda x: x.date)
for price in prices:
date = price.date.strftime("%Y/%m/%d %H:%M:%S")
out.write("P %s %s %s %s\n" % (date, format_commodity(price.commodity), price.value, format_commodity(price.currency)))
out.write("\n")
transactions = data.transactions.values()
transactions.sort(key=lambda x: x.post_date)
for trans in transactions:
date = trans.post_date.strftime("%Y/%m/%d")
out.write("%s * %s\n" % (date, trans.description))
for split in trans.splits:
out.write("\t%-40s " % full_acc_name(split.account))
if split.account.commodity != trans.currency:
out.write("%10.2f %s @@ %.2f %s" % (split.quantity, format_commodity(split.account.commodity), abs(split.value), format_commodity(trans.currency)))
else:
out.write("%10.2f %s" % (split.value, format_commodity(trans.currency)))
out.write("\n")
out.write("\n")
| bsd-2-clause | Python |
|
bfbd2c792aacd307f8d7ed68ea0f2a7db681431d | add functions that generate mask image of the target bin | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_apc2016_common/python/jsk_apc2016_common/mask_bin.py | jsk_apc2016_common/python/jsk_apc2016_common/mask_bin.py | #!/usr/bin/env python
import numpy as np
from matplotlib.path import Path
import jsk_apc2016_common.segmentation_helper as helper
from tf2_geometry_msgs import do_transform_point
def get_mask_img(transform, target_bin, camera_model):
"""
:param point: point that is going to be transformed
:type point: PointStamped
:param transform: camera_frame -> bbox_frame
:type transform: Transform
"""
# check frame_id of a point and transform just in case
assert camera_model.tf_frame == transform.header.frame_id
assert target_bin.bbox.header.frame_id == transform.child_frame_id
transformed_list = [
do_transform_point(corner, transform)
for corner in target_bin.corners]
projected_points = project_points(transformed_list, camera_model)
# generate an polygon that covers the region
path = Path(projected_points)
x, y = np.meshgrid(
np.arange(camera_model.width),
np.arange(camera_model.height))
x, y = x.flatten(), y.flatten()
points = np.vstack((x, y)).T
mask_img = path.contains_points(
points).reshape(
camera_model.height, camera_model.width
).astype('bool')
return mask_img
def project_points(points, camera_model):
"""
:param points: list of geometry_msgs.msg.PointStamped
:type list of stamped points :
:param projected_points: list of camera_coordinates
:type projected_points: (u, v)
The frames of the points and the camera_model are same.
"""
# generate mask iamge
for point in points:
if point.header.frame_id != camera_model.tf_frame:
raise ValueError('undefined')
if len(points) != 4:
raise ValueError('undefined')
projected_points = []
for point in points:
projected_points.append(
camera_model.project3dToPixel(
helper.list_from_point(point.point)
)
)
return projected_points
| bsd-3-clause | Python |
|
852c6639bb0a71b9ef2dd81b2830193d0c9fe23d | Create FractalPoke.py | christopherkopic/FractalPoke-Blender3d | FractalPoke.py | FractalPoke.py | bl_info = {
"name": "FractalPoke",
"author": "Christopher Kopic",
"version": (1, 0),
"blender": (2, 7, 8),
"location": "",
"description": "Iterative Poking inspired by Simon Holmedal's Always Forever",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
import bpy
from bpy.types import Operator
from bpy.props import FloatProperty, IntProperty, BoolProperty
class FractalPoke(bpy.types.Operator):
"""Fractal Poke"""
bl_idname = "mesh.fractal_poke"
bl_label = "Fractal Poke"
bl_options = {'REGISTER', 'UNDO'}
iterations = IntProperty(
name = "Iterations",
default = 3,
min = 1,
description = "Be careful as complexity will increase exponentially"
)
start_offset = FloatProperty(
name = "Start Offset",
default = 1.0,
description = "Offset for first poke iteration"
)
offset_multiplier = FloatProperty(
name = "Offset Multiplier",
default = 0.5,
description = "Increases or decreases offset for each iteration"
)
offset_flip = BoolProperty(
name = "Flip Offset",
default = False,
description = "Flips offsetting inward or outward for each iteration"
)
grow_selection = BoolProperty(
name = "Grow Selection",
default = False,
description = "Grows selection for each iteration"
)
shrink_selection = BoolProperty(
name = "Shrink Selection",
default = False,
description = "Shrinks selection for each iteration"
)
def execute(self, context):
my_offset = self.start_offset
for i in range(self.iterations):
bpy.ops.mesh.poke(offset = my_offset)
my_offset *= self.offset_multiplier
if self.offset_flip:
my_offset *= -1
if self.grow_selection:
bpy.ops.mesh.select_more()
if self.shrink_selection:
bpy.ops.mesh.select_less()
return {'FINISHED'}
@classmethod
def poll(cls, context):
ob = context.active_object
return ob is not None and ob.mode == 'EDIT'
def register():
bpy.utils.register_class(FractalPoke)
def unregister():
bpy.utils.unregister_class(FractalPoke)
if __name__ == "__main__":
register()
| mit | Python |
|
2dff378e7f446e83aa7c105bded3f3330fe9fa20 | Add a script to generate a Javascript file encoding_<enc>.js containing encoding and decoding tables for the specified <enc> encoding. Uses Unicode table at location http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/<enc>.TXT. Related to issue #1541. | brython-dev/brython,brython-dev/brython,brython-dev/brython | scripts/make_encoding_js.py | scripts/make_encoding_js.py | """Create a Javascript script to encode / decode for a specific encoding
described in a file available at
http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/<ENCODING>.TXT
"""
import os
import re
import json
import urllib.request
line_re = re.compile("^(0x[A-Z0-9]+)\s+(0x[A-Z0-9]+)*", re.M)
tmpl = "http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/{}.TXT"
encoding = input("Encoding name: ")
req = urllib.request.urlopen(tmpl.format(encoding.upper()))
data = req.read().decode("ascii")
root_dir = os.path.dirname(os.path.dirname(__file__))
libs_dir = os.path.join(root_dir, "www", "src", "libs")
filename = os.path.join(libs_dir, f"encoding_{encoding.lower()}.js")
with open(filename, "w", encoding="utf-8") as out:
out.write("var _table = [")
for line in data.split("\n"):
mo = line_re.match(line)
if mo:
key, value = mo.groups()
out.write(f"{key}, {value or -1},")
out.write("]\n")
out.write("var decoding_table = [],\n encoding_table = []\n")
out.write("""for(var i = 0, len = _table.length; i < len; i += 2){
var value = _table[i + 1]
if(value !== null){
encoding_table[value] = _table[i]
}
decoding_table[_table[i]] = _table[i + 1]
}
$module = {encoding_table, decoding_table}
""")
| bsd-3-clause | Python |
|
f1c65cf208b4a6275214d82a765ad75c47c75715 | add example of how to use KT without defines | benvanwerkhoven/kernel_tuner | examples/cuda-c++/vector_add_defines.py | examples/cuda-c++/vector_add_defines.py | #!/usr/bin/env python
""" This is the example demonstrates how to use Kernel Tuner
to insert tunable parameters into template arguments
without using any C preprocessor defines
"""
import numpy as np
import kernel_tuner as kt
def tune():
kernel_string = """
template<typename T, int blockSize>
__global__ void vector_add(T *c, T *a, T *b, int n) {
auto i = blockIdx.x * blockSize + threadIdx.x;
if (i<n) {
c[i] = a[i] + b[i];
}
}
"""
size = 10000000
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(b)
n = np.int32(size)
args = [c, a, b, n]
tune_params = dict()
tune_params["block_size_x"] = [128+64*i for i in range(15)]
result, env = kt.tune_kernel("vector_add<float, block_size_x>", kernel_string, size, args, tune_params, defines={})
return result
if __name__ == "__main__":
tune()
| apache-2.0 | Python |
|
00cc1f17796897ca2f4351bbea74ee22aad98f14 | Create quadrants_HH_HL_LH_LL.py | jamaps/open_geo_scripts,jamaps/fun_with_gdal,jamaps/gdal_and_ogr_scripts,jamaps/shell_scripts,jamaps/open_geo_scripts,jamaps/open_geo_scripts,jamaps/gdal_and_ogr_scripts,jamaps/fun_with_gdal,jamaps/shell_scripts | quadrants_HH_HL_LH_LL.py | quadrants_HH_HL_LH_LL.py | # python3 for categorizing data into 4 quadrants from 2 numerical fields
# this case is for vis minoirty + avg income in Toronto census tracts
import csv
import statistics as st
# just the toronto cts
tor_cts = []
with open('ct_tor.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
tor_cts.append(row['ctuid'])
var_1 = [] # avg inc
var_2 = [] # perc vis min
with open('in_inc_vis.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['ctuid'] in tor_cts:
try:
var_1.append(float(row['avg_inc']))
perc_vis = float(row['vis_min_pop']) / float(row['total_pop'])
var_2.append(perc_vis)
except:
print(row['ctuid'])
print(len(var_1))
print(len(var_2))
v1b=v2b=0
print("----------------------------------")
# for var 1
print("median", st.median(var_1))
print("mean", st.mean(var_1))
print("input break value:")
v1b = float(input())
# for var 2
print("----------------------------------")
print("median", st.median(var_2))
print("mean", st.mean(var_2))
print("input break value:")
v2b = float(input())
HHc = 0
HLc = 0
LHc = 0
LLc = 0
# break the data via the set breaks
with open('in_inc_vis.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['ctuid'] in tor_cts:
try:
perc_vis = float(row['vis_min_pop']) / float(row['total_pop'])
inc = float(row['avg_inc'])
# ye olde if statements
if inc > v1b and perc_vis > v2b:
q = 'HH'
HHc += 1
elif inc > v1b and perc_vis <= v2b:
q = 'HL'
HLc += 1
elif inc <= v1b and perc_vis > v2b:
q = 'LH'
LHc += 1
elif inc <= v1b and perc_vis <= v2b:
q = 'LL'
LLc += 1
orow = [row['ctuid'],inc,perc_vis,q]
#print(orow)
except:
#print(row['ctuid'])
None
print("HH", HHc)
print("LH", LHc)
print("HL", HLc)
print("LL", LLc)
| mit | Python |
|
9dae55d2ef2e786799554ec2121cf9ecfe59eb62 | Rename file | gawkermedia/dnsdiff | dnsdiff/dnsdiff.py | dnsdiff/dnsdiff.py | '''Module to quickly look up and compare NS records for differences'''
import dns.resolver
import pprint
import sys
pp = pprint.PrettyPrinter(indent=4)
def compare_dns(nameservers, domain):
    '''Compares records between nameservers using dnspython.

    Queries the NS records of *domain* once through each server in
    *nameservers*, prints the collected answers, and exits the process
    with status 0 when all servers agree (or only one was given) and
    status 1 when a discrepancy is found.
    '''
    responses = {}
    resolver = dns.resolver.Resolver(configure=False)
    for ns in nameservers:
        ns_list = []
        # Bug fix: query through the configured resolver.  The original
        # called module-level dns.resolver.query(), which uses the system
        # default resolver, so every nameserver produced the same answer.
        # ``nameservers`` must also be a list of server addresses.
        resolver.nameservers = [ns]
        answer = resolver.query(domain, 'NS')
        for record in answer:
            ns_list.append(record.target)
        # Sort so answers compare independently of response order.
        responses[ns] = sorted(ns_list)
    pp.pprint(responses)
    print("Determining differences")
    set_list = []
    for val in responses.values():
        set_list.append(set(val))
    differences = set.difference(*set_list)
    if len(differences) == 0 or len(nameservers) == 1:
        print("No discrepancies found")
        sys.exit(0)
    else:
        print("Discrepancies found!")
        print(differences)
        sys.exit(1)
| mit | Python |
|
d3b4d53e84bdb1f50b244b282d6cb1b3b0d10ee3 | Add scraper | kqdtran/bearRec,kqdtran/bearRec | project/scrape.py | project/scrape.py | import requests
import re
from bs4 import BeautifulSoup
def main():
    """Scrape the department list, then scrape each department's courses."""
    department_scrape(scrape())
def scrape():
    """Fetch the catalog search page and return the department names.

    Side effect: writes the ASCII-sanitized department names, one per
    line, to ``list.txt``.

    Returns:
        list: department name strings taken from the page's <option> tags.
    """
    front_html = requests.get("http://general-catalog.berkeley.edu/catalog/gcc_search_menu")
    soup = BeautifulSoup(front_html.content, from_encoding="utf-8")

    text = []
    with open("list.txt", "w") as f:
        for sp in soup.find_all('option'):
            text.append(sp.string)
            # Drop non-ASCII characters so writing the name cannot fail.
            # (The original also built an unused throwaway "problem_str";
            # that dead code is removed here.)
            safe_str = sp.string.encode('ascii', 'ignore')
            f.write(safe_str + "\n")
    return text
#(TEST) testing Biology deparment
def department_scrape(d_list):
    """For each department name, POST the catalog search form and write one
    ``data/<course name>.txt`` file per course found.

    The page is parsed as a flat list of <tr> texts and walked with a small
    state machine: the row *before* a "Course Format" row is taken as the
    course title; "Course Format"/"Prerequisites"/"Description" rows are
    collected as the course body.
    """
    # set up post url
    url = "http://general-catalog.berkeley.edu/catalog/gcc_search_sends_request"
    # set up post parameter
    #iterate all
    for department in d_list:
        payload = {'p_dept_name': department}
        # posting website and constructing soup object
        r = requests.post(url, params=payload)
        soup = BeautifulSoup(r.content, from_encoding="utf-8")
        # variable for scrap object
        text = []
        # iterate the table row element
        for sp in soup.find_all("tr"):
            text.append(sp.text.strip())
        # formatting text array
        format_text = []
        class_name = []
        i = 0
        title_indicator = False
        after_format_indicator = False
        while i < len(text):
            # First time we meet "Course Format": step back one row so the
            # preceding row (the course title) is revisited and recorded.
            if ("Course Format" in text[i]) and title_indicator == False:
                i = i - 1
                title_indicator = True
                after_format_indicator = False
            elif "Course Format" in text[i]:
                format_text.append(text[i])
                title_indicator = False
                after_format_indicator = True
            if "Prerequisites" in text[i]:
                format_text.append(text[i])
            if "Description" in text[i]:
                format_text.append(text[i])
            # While the title flag is set, the current row is the title.
            if title_indicator == True:
                class_name.append(text[i])
                format_text.append(text[i])
            i = i + 1
        #### List of spliter with indicator word
        s1 = "Course Format:"
        s2 = "Prerequisites:"
        s3 = "Credit option"
        s4 = "Description:"
        save_indicator = False
        for element in class_name:
            # Course name doubles as the output file name ("/" is unsafe).
            name = element + ".txt"
            name = name.replace("/", " ")
            safe_name = name.encode('ascii', 'ignore')
            for info in format_text:
                # Start saving at the row containing the course name and
                # stop (and write the file) at its "Description:" row.
                if element in info:
                    save_indicator = True
                if save_indicator == True:
                    if s4 in info:
                        save_indicator = False
                        with open("data/" + safe_name, "w") as f:
                            problem_str = u'This is not all ascii\xf8 man'
                            safe_str = info.encode('ascii', 'ignore')
                            safe_element = element.encode('ascii', 'ignore')
                            f.write(safe_element + "\n")
                            f.write(safe_str + "\n")


if __name__ == "__main__":
    main()
| mit | Python |
|
be189d9d01f916af87b45f36ac36f7c5d302dbbf | add an experimental command for setting the login background image | lyw07/kolibri,mrpau/kolibri,benjaoming/kolibri,learningequality/kolibri,indirectlylit/kolibri,benjaoming/kolibri,lyw07/kolibri,jonboiser/kolibri,DXCanas/kolibri,mrpau/kolibri,lyw07/kolibri,DXCanas/kolibri,jonboiser/kolibri,indirectlylit/kolibri,jonboiser/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,mrpau/kolibri,benjaoming/kolibri,learningequality/kolibri,lyw07/kolibri,jonboiser/kolibri,learningequality/kolibri,learningequality/kolibri,DXCanas/kolibri,DXCanas/kolibri,mrpau/kolibri,benjaoming/kolibri | kolibri/content/management/commands/background.py | kolibri/content/management/commands/background.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import shutil
from django.conf import settings
from django.core.management.base import BaseCommand
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Experimental management command for the login background image.

    Subcommands:
        set <destination>  replace the background image, backing up the
                           original the first time.
        reset              restore the backed-up original image.
    """

    def add_arguments(self, parser):
        subparsers = parser.add_subparsers(
            dest='command',
            help="The following subcommands are available.")
        set_parser = subparsers.add_parser(
            name='set',
            cmd=self,
            help="EXPERIMENTAL: Sets the login screen background image")
        set_parser.add_argument(
            'destination',
            type=str,
            help='Image file')
        subparsers.add_parser(
            name='reset',
            cmd=self,
            help="Set default")

    def handle(self, *args, **options):
        static_dir = os.path.join(settings.STATIC_ROOT, 'user_module')
        if not os.path.exists(static_dir):
            self.stderr.write(self.style.ERROR('\nStatic directory does not exist.'))
            raise SystemExit(1)

        background = os.path.join(static_dir, 'background.jpg')
        backup = os.path.join(static_dir, 'background-backup')

        command = options['command']
        if command == 'set':
            source = os.path.abspath(os.path.expanduser(options['destination']))
            if not os.path.exists(source):
                self.stderr.write(
                    self.style.ERROR('\n{} does not exist.').format(options['destination']))
                raise SystemExit(1)
            # Back up the current image only once, so the backup always
            # holds the default image shipped with the app.
            if not os.path.exists(backup):
                shutil.copy(background, backup)
            shutil.copy(source, background)
        elif command == 'reset':
            if os.path.exists(backup):
                shutil.copy(backup, background)
| mit | Python |
|
1f48fee7ffcef3eefa6aaedb5ca963c10bb7c58c | Add test case for user creation form | wldcordeiro/cookiecutter-django-essentials,wldcordeiro/cookiecutter-django-essentials,wldcordeiro/cookiecutter-django-essentials | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py | from django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
    # NOTE: the class name contains a cookiecutter placeholder; this file
    # is only valid Python after the template has been rendered.

    def setUp(self):
        # An existing user, so a form that reuses this username must fail.
        self.test_user = User.objects.create(
            username='testuser',
            email='[email protected]',
            password='password'
        )
        # Duplicate username -> expected to be invalid.
        self.bad_form = ZionsUserCreationForm({
            'username': 'testuser',
            'password1': 'password',
            'password2': 'password',
        })
        # Fresh username -> expected to be valid.
        self.good_form = ZionsUserCreationForm({
            'username': 'testuser2',
            'password1': 'password',
            'password2': 'password',
        })

    def test_username_good(self):
        self.assertTrue(self.good_form.is_valid())

    def test_clean_username_bad(self):
        self.assertFalse(self.bad_form.is_valid())
| bsd-3-clause | Python |
|
616bb27db3daef8939fe706d1c41cf79f35b40fa | set of default rules in common module | deztructor/parsed | common.py | common.py | #/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Denis Zalevskiy
# Licensed under MIT License
import string
from parser import *
# Default grammar rules shared by parsers.  Each rule returns a
# (matcher, action) pair in the convention of the ``parser`` module;
# ``ignore`` appears to discard the matched text and ``value`` to keep
# it -- confirm against parser.py.
def vspace(): return '\n\r', ignore
def hspace(): return ' \t', ignore
def eol(): return choice(eof, vspace), ignore
def space(): return ' \n\r\t', ignore
def spaces(): return r0_inf(space), ignore
def any_char(): return ne(eof), value
def digit_dec() : return '0123456789', value
def digit_hex() : return '0123456789ABCDEFabcdef', value
def ascii(): return sym(lambda s: s in string.ascii_letters), value
| mit | Python |
|
b8c4fdc1ebba18ab832160bece4ce8b391a15b7a | add sampled stochastic games serialization tests | deepmind/open_spiel,deepmind/open_spiel,deepmind/open_spiel,deepmind/open_spiel,deepmind/open_spiel,deepmind/open_spiel | open_spiel/python/tests/sampled_stochastic_games_test.py | open_spiel/python/tests/sampled_stochastic_games_test.py | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pyspiel
# All games with kSampledStochastic chance mode.
SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST = [
    g for g in pyspiel.registered_games() if g.default_loadable
    and g.chance_mode == pyspiel.GameType.ChanceMode.SAMPLED_STOCHASTIC
]
# The parameterized test below needs at least two such games registered.
assert len(SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST) >= 2


class SampledStochasticGamesTest(parameterized.TestCase):
  """Checks that a game's internal RNG state survives (de)serialization."""

  def random_playout(self, state):
    """Plays uniformly random legal actions until the game terminates."""
    # Fixed seed: identical games must yield identical playouts.
    np.random.seed(0)
    while not state.is_terminal():
      state.apply_action(np.random.choice(state.legal_actions()))
    return state

  @parameterized.parameters(*SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST)
  def test_stateful_game_serialization(self, game_info):
    game = pyspiel.load_game(game_info.short_name,
                             {"rng_seed": pyspiel.GameParameter(0)})
    # mutate game's internal RNG state
    state = self.random_playout(game.new_initial_state())
    deserialized_game = pickle.loads(pickle.dumps(game))

    # make sure initial states are the same after game deserialization
    state1 = self.random_playout(game.new_initial_state())
    d_state1 = self.random_playout(deserialized_game.new_initial_state())
    # The mutated RNG must have advanced past the first playout...
    self.assertNotEqual(str(state1), str(state))
    # ...and the pickled copy must track the original exactly.
    self.assertEqual(str(state1), str(d_state1))

    # try one more time
    state2 = self.random_playout(game.new_initial_state())
    d_state2 = self.random_playout(deserialized_game.new_initial_state())
    self.assertNotEqual(str(state2), str(state1))
    self.assertEqual(str(state2), str(d_state2))


if __name__ == "__main__":
  absltest.main()
| apache-2.0 | Python |
|
ed9d640a11c02ca4b42e62d975e4ae9a2bd33093 | add tests for simtk! | dwhswenson/openpathsampling,openpathsampling/openpathsampling,choderalab/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling,dwhswenson/openpathsampling,dwhswenson/openpathsampling,openpathsampling/openpathsampling,choderalab/openpathsampling | openpathsampling/experimental/storage/test_simtk_unit.py | openpathsampling/experimental/storage/test_simtk_unit.py | import pytest
import numpy as np
from ..simstore.custom_json import JSONSerializerDeserializer, DEFAULT_CODECS
from .simtk_unit import *
try:
from simtk import unit
except ImportError:
HAS_SIMTK = False
else:
HAS_SIMTK = True
class TestSimtkUnitCodec(object):
def setup(self):
pytest.importorskip('simtk.unit')
my_unit = unit.nanometer / unit.picosecond**2
self.values = {
'float': 1.0 * my_unit,
'array': np.array([1.0, 2.0]) * my_unit,
}
self.serialization = JSONSerializerDeserializer(
DEFAULT_CODECS + [simtk_quantity_codec]
)
@pytest.mark.parametrize('obj_type', ['float', 'array'])
def test_serialization_cycle(self, obj_type):
obj = self.values[obj_type]
ser = self.serialization.serializer(obj)
deser = self.serialization.deserializer(ser)
reser = self.serialization.serializer(deser)
if obj_type == 'array':
np.testing.assert_array_equal(obj, deser)
else:
assert obj == deser
assert ser == reser
class TestSimtkQuantityHandler(object):
def setup(self):
pytest.importorskip('simtk.unit')
self.handlers = {
'float': SimtkQuantityHandler(
('unit.nanometer/unit.picosecond**2', 'float')
),
'array': SimtkQuantityHandler(
('unit.nanometer', 'ndarray.float32(2,3)')
),
}
self.objects = {
'float': 1.0 * unit.nanometer / unit.picosecond**2,
'array': np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]]) * unit.nanometer,
}
@pytest.mark.parametrize('type_str, expected', [
(
'simtk(unit.nanometer/unit.picosecond**2)*float',
('unit.nanometer/unit.picosecond**2', 'float')
), (
'simtk(unit.nanometer)*ndarray.float32(3,3)',
('unit.nanometer', 'ndarray.float32(3,3)')
),
])
def test_is_my_type(self, type_str, expected):
assert SimtkQuantityHandler.is_my_type(type_str) == expected
@pytest.mark.parametrize('obj_type', ['float', 'array'])
def test_serialization_cycle(self, obj_type):
handler = self.handlers[obj_type]
obj = self.objects[obj_type]
ser = handler.serialize(obj)
deser = handler.deserialize(ser)
reser = handler.serialize(deser)
assert ser == reser
if obj_type == 'array':
np.testing.assert_array_equal(obj, deser)
else:
assert obj == deser
assert obj.unit == deser.unit
| mit | Python |
|
4d85702561c000824083544de98693e244c8aab7 | Add test for decoder stack | Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide | tests/test_decoding_stack.py | tests/test_decoding_stack.py | #! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
    "Test decoder stack"

    def setUp(self):
        # None -> let the decoder use the source's native values.
        self.samplerate, self.channels, self.blocksize = None, None, None
        self.start = 0
        self.duration = None

        # Known properties of samples/sweep.wav used by the assertions.
        self.expected_samplerate = 44100
        self.expected_channels = 2
        self.expected_totalframes = 352800
        self.test_exact_duration = True
        self.source_duration = 8
        self.expected_mime_type = 'audio/x-wav'
        self.source = os.path.join(os.path.dirname(__file__),
                                   "samples/sweep.wav")

    def testProcess(self):
        "Test decoder stack: test process"
        decoder = FileDecoder(uri=self.source,
                              start=self.start,
                              duration=self.duration,
                              stack=True)
        # Before the first run, the decoder records frames to a stack...
        self.assertTrue(decoder.stack)
        self.assertFalse(decoder.from_stack)

        pipe = ProcessPipe(decoder)
        pipe.run()

        # ...after it, frames are replayed from the stack instead of the file.
        self.assertFalse(decoder.stack)
        self.assertTrue(decoder.from_stack)

        # 44 blocks for this file (352800 frames; presumably 8192-frame
        # blocks -- confirm against FileDecoder's default blocksize).
        self.assertEqual(len(pipe.frames_stack), 44)

        # Second run must succeed using only the stacked frames.
        pipe.run()

    def testResults(self):
        "Test decoder stack: test frames content"
        decoder = FileDecoder(uri=self.source,
                              start=self.start,
                              duration=self.duration,
                              stack=True)
        pitch_on_file = AubioPitch()
        pipe = (decoder | pitch_on_file)
        pipe.run()
        self.assertIsInstance(pipe.frames_stack, list)
        pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
        # If the pipe is used for a second run, the processed frames stored
        # in the stack are passed to the other processors
        # without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:
        pitch_on_stack = AubioPitch()
        pipe |= pitch_on_stack
        pipe.run()
        # to assert that the frames passed to the two analyzers are the same,
        # we check that the results of these analyzers are equivalent:
        pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
        self.assertTrue(np.array_equal(pitch_results_on_stack,
                                       pitch_results_on_file))


if __name__ == '__main__':
    unittest.main(testRunner=TestRunner())
| agpl-3.0 | Python |
|
40bf8d4773eb659ac2ac22aef50c2f63084924be | add profiler test case | tensorflow/community,tensorflow/community,tensorflow/community,tensorflow/community,tensorflow/community | rfcs/20200624-pluggable-device-for-tensorflow/sample/test_profiler.py | rfcs/20200624-pluggable-device-for-tensorflow/sample/test_profiler.py | #!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import numpy as np
import os
tf.compat.v1.disable_eager_execution()
profile_options = tf.profiler.experimental.ProfilerOptions(
host_tracer_level = 3,
device_tracer_level = 1)
logpath = os.path.join('data', 'logs', 'profiler_demo')
a = tf.random.normal(shape=[1,10, 10, 8], dtype=tf.float32, seed=1)
w = tf.random.normal(shape=[3, 3, 8, 4], dtype=tf.float32, seed=1)
a1 = tf.random.normal(shape=[1, 10, 10, 8], dtype=tf.float32, seed=1)
w1 = tf.random.normal(shape=[3, 3, 8, 4], dtype=tf.float32, seed=1)
with tf.device("/MY_DEVICE:0"):
tf.profiler.experimental.start(logpath)
b = tf.nn.relu(a)
c = tf.nn.conv2d(b, w, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')
tf.profiler.experimental.stop()
with tf.device("/CPU:0"):
b1 = tf.nn.relu(a1)
c1 = tf.nn.conv2d(b1, w1, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=False, log_device_placement=True))
print(sess.run(tf.reduce_all(tf.less(c - c1, 1e-5))))
| apache-2.0 | Python |
|
1670438ac9becf93e9ba428065e4b19b219e8ffc | Add WebSockets and SSL supports with Twisted :) | Relrin/Helenae,Relrin/Helenae,Relrin/Helenae | helenae/server.py | helenae/server.py | import sys
from json import dumps, loads
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from db.create_db import Users
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS
# TODO: Add logger (from Twisted, not original library)
# TODO: Create PLUGIN architecture (using twistd)
# TODO: Define PostgreSQL DB structure
# TODO: Authentication under PostgreSQL+SQLAlchemy ORM
# TODO: Errors/Exceptions processing
engine = sqlalchemy.create_engine('postgresql://Relrin:05909333@localhost/csan', pool_size=20, max_overflow=0)
class DFSServerProtocol(WebSocketServerProtocol):
def __init__(self):
# get object from connection pool and create session
# DONT FORGET use after all "self.sesison.close()"!!!
self.Session = sessionmaker(bind=engine)
def __del__(self):
self.session.close()
def authorization(self, data):
"""
Checking user with DB
"""
session = self.Session()
result = session.execute(sqlalchemy.select([Users]).where(Users.name == data['user']))
result = result.fetchone()
if result is None:
data['cmd'] = 'RAUT'
data['error'] = 'User not found'
else:
if result['name'] == data['user']:
# correct users info --> real user
if result['password'] == data['password']:
data['cmd'] = 'HELP'
data['auth'] = True
# incorrect password --> fake user
else:
data['cmd'] = 'RAUT'
data['error'] = 'Incorrect password. Try again...'
session.close()
return data
def onMessage(self, payload, isBinary):
"""
Processing request from user and send response
"""
json_data = loads(payload)
# for none-authorized users
if json_data['auth'] == False:
# first action with server --> authorization
if json_data['cmd'] == 'AUTH':
json_data = self.authorization(json_data)
# for authorized users
else:
pass
response = dumps(json_data)
self.sendMessage(str(response))
def readFileStructure(self):
"""
Get all files/folders/etc. structure from DB
"""
pass
def getServerParams(self):
"""
Getting (IP, PORT) of "File Server" to read/write operations
"""
pass
def fileSync(self):
"""
Synchronization files using rsync tool
"""
pass
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key', 'keys/server.crt')
factory = WebSocketServerFactory("wss://localhost:9000", debug = debug, debugCodePaths = debug)
factory.protocol = DFSServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory, contextFactory)
webdir = File(".")
webdir.contentTypes['.crt'] = 'application/x-x509-ca-cert'
web = Site(webdir)
reactor.listenSSL(8080, web, contextFactory)
#reactor.listenTCP(8080, web)
reactor.run()
| mit | Python |
|
3d0827fa805a08eaaaa07e037f6ce3da6d8e1c4e | add guess module | mrterry/yoink | yoink/guess.py | yoink/guess.py | import numpy as np
from scipy import ndimage
try:
from skimage.feature import corner_harris
from skimage.measure import approximate_polygon
except ImportError:
from yoink.mini_skimage import corner_harris, approximate_polygon
def guess_corners(bw):
"""
Infer the corners of an image using a Sobel filter to find the edges and a
Harris filter to find the corners. Takes only as single color chanel.
Parameters
----------
bw : (m x n) ndarray of ints
Returns
-------
corners : pixel coordinates of plot corners
outline : (m x n) ndarray of bools True -> plot area
"""
e_map = ndimage.sobel(bw)
markers = np.zeros_like(bw)
markers[bw < 30] = 1
markers[bw > 150] = 2
seg = ndimage.watershed_ift(e_map, np.asarray(markers, dtype=int))
outline = ndimage.binary_fill_holes(1-seg)
corners = corner_harris(np.asarray(outline, dtype=int))
corners = approximate_polygon(corners, 1)
return corners, outline
def get_angle(p1, p2):
return np.arctan2(p1[0]-p2[0], p1[1]-p2[1]) * 180./np.pi
def get_angle2(corners):
order = np.argsort(corners[:, 0])
top = corners[order[:2]]
bot = corners[order[2:]]
order = np.argsort(corners[:, 1])
left = corners[order[:2]]
right = corners[order[2:]]
angles = [get_angle(top[0, :], top[1, :]),
get_angle(bot[0, :], bot[1, :]),
get_angle(left[0, :], left[1, :]) + 90,
get_angle(right[0, :], right[1, :]) + 90,
]
angle = sum(angles) / len(angles)
return angle
def clear_border(im, outline):
im_fixed = im.copy()
im_fixed[-outline] = 255
return im_fixed
| bsd-3-clause | Python |
|
4ef2344b3abf3d8c0542ffd97425557ae092f21d | handle ZeroDivisionError | paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,yongtang/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_saved_model,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,sarvex/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,frreiss/tensorflow-fred,gautam1858/tensorflow,petewarden/tensorflow,sarvex/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,petewarden/tensorflow,p
aolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,paolodedios/tensorflow,annarev/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,petewarden/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,sarvex/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,annarev/tensorflow,tensorflow/tensorflow,sarvex/tensorflow,annarev/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,annarev/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,annarev/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywra
p_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred | tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py | tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
class MapDefunBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for MapDefunOp."""

  def _run(self, op, name=None, num_iters=3000):
    """Times *op* and reports wall time plus derived examples_per_sec."""
    wall_time = self.run_op_benchmark(
        op=op,
        iters=num_iters,
        warmup=True
    )
    # Guard against a measured wall time of exactly zero, which would make
    # the examples_per_sec division below raise ZeroDivisionError.  The
    # delta is far below timer resolution, so reported numbers are
    # effectively unchanged.
    zero_division_delta = 1e-100
    wall_time = wall_time + zero_division_delta
    self.report_benchmark(
        name=name,
        iters=num_iters,
        wall_time=wall_time,
        extras={"examples_per_sec": 1 / float(wall_time)})

  def benchmark_defun_vs_map_fn(self):
    """Benchmarks to compare the performance of MapDefun vs tf.map_fn."""

    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def defun(x):
      return array_ops.identity(x)

    def fn(x):
      return array_ops.identity(x)

    base = math_ops.range(10000)
    for input_size in [10, 100, 1000, 10000]:
      # Scale iterations down as input grows to keep total work comparable.
      num_iters = 10000 // input_size
      map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
      map_fn_op = map_fn.map_fn(fn, base)

      self._run(
          op=map_defun_op,
          name="with_defun_size_%d" % input_size,
          num_iters=num_iters
      )
      self._run(
          op=map_fn_op,
          name="without_defun_size_%d" % input_size,
          num_iters=num_iters
      )


if __name__ == "__main__":
  benchmark_base.test.main()
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
class MapDefunBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for MapDefunOp."""

  def _run(self, op, name=None, num_iters=3000):
    # Time `op` for `num_iters` iterations (with warmup) and report the
    # wall time plus a derived examples-per-second figure.
    wall_time = self.run_op_benchmark(
        op=op,
        iters=num_iters,
        warmup=True
    )
    self.report_benchmark(
        name=name,
        iters=num_iters,
        wall_time=wall_time,
        extras={"examples_per_sec": float(1 / wall_time)})

  def benchmark_defun_vs_map_fn(self):
    """Benchmarks to compare the performance of MapDefun vs tf.map_fn."""

    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def defun(x):
      return array_ops.identity(x)

    def fn(x):
      return array_ops.identity(x)

    base = math_ops.range(10000)
    for input_size in [10, 100, 1000, 10000]:
      # Scale the iteration count inversely with input size to keep the
      # total amount of work roughly constant across benchmark cases.
      num_iters = 10000 // input_size
      map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
      map_fn_op = map_fn.map_fn(fn, base)
      self._run(
          op=map_defun_op,
          name="with_defun_size_%d" % input_size,
          num_iters=num_iters
      )
      self._run(
          op=map_fn_op,
          name="without_defun_size_%d" % input_size,
          num_iters=num_iters
      )
# Standard TensorFlow benchmark entry point.
if __name__ == "__main__":
  benchmark_base.test.main()
| apache-2.0 | Python |
ef96c4e1a27289f5cdad5de78ee2a2dfc1b91bd0 | Create network-delay-time.py | tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode | Python/network-delay-time.py | Python/network-delay-time.py | # Time: O((|E| + |V|) * log|V|)
# Space: O(|E| + |V|)
# Dijkstra's algorithm
class Solution(object):
    def networkDelayTime(self, times, N, K):
        """
        Dijkstra's algorithm from node K over the weighted digraph `times`.

        :type times: List[List[int]]  -- edges as (u, v, w), 1-based node ids
        :type N: int                  -- number of nodes
        :type K: int                  -- source node (1-based)
        :rtype: int  -- time for the signal to reach every node, or -1 if
                        some node is unreachable
        """
        # Local import: this module has no top-level import section, and the
        # original code used heapq without ever importing it.
        import heapq

        # Adjacency list with 0-based ids: adj[u] = [(v, w), ...]
        adj = [[] for _ in range(N)]
        for u, v, w in times:
            adj[u - 1].append((v - 1, w))

        min_heap = [(0, K - 1)]
        lookup, result = set(), 0
        while min_heap and len(lookup) != N:
            result, u = heapq.heappop(min_heap)
            if u in lookup:
                # Stale heap entry: u was already finalized with a smaller
                # distance, so skip re-expanding its neighbors.
                continue
            lookup.add(u)
            for v, w in adj[u]:
                if v in lookup:
                    continue
                heapq.heappush(min_heap, (result + w, v))
        # Pops occur in non-decreasing distance order, so when the N-th node
        # is finalized `result` holds the largest shortest-path distance.
        return result if len(lookup) == N else -1
| mit | Python |
|
842d7337f236d94d1b7ed70aaa98eff73b4000cd | Create pyside_houdini.py | amitkhanna15/aov-manager | pyside_houdini.py | pyside_houdini.py | """
This module helps you use PyQt in Houdini's GUI by integrating PyQt's event
loop into Houdini's. Replace calls to QApplication.exec_() in your
code with calls to pyqt_houdini.exec_(app).
"""
from email.mime import image
import hou
from PySide import QtCore
from PySide import QtGui
class IntegratedEventLoop(object):
    """This class behaves like QEventLoop except it allows PyQt to run inside
    Houdini's event loop on the main thread. You probably just want to
    call exec_() below instead of using this class directly.
    """
    def __init__(self, application, dialogs):
        # We need the application to send posted events. We hold a reference
        # to any dialogs to ensure that they don't get garbage collected
        # (and thus close in the process). The reference count for this object
        # will go to zero when it removes itself from Houdini's event loop.
        self.application = application
        self.dialogs = dialogs
        self.event_loop = QtCore.QEventLoop()

    def exec_(self):
        # Hook processEvents into Houdini's UI loop; Houdini calls the
        # registered callback periodically on the main thread.
        hou.ui.addEventLoopCallback(self.processEvents)

    def processEvents(self):
        # There is no easy way to know when the event loop is done. We can't
        # use QEventLoop.isRunning() because it always returns False since
        # we're not inside QEventLoop.exec_(). We can't rely on a
        # lastWindowClosed signal because the window is usually made invisible
        # instead of closed. Instead, we need to explicitly check if any top
        # level widgets are still visible.
        if not anyQtWindowsAreOpen():
            hou.ui.removeEventLoopCallback(self.processEvents)
        self.event_loop.processEvents()
        self.application.sendPostedEvents(None, 0)
def anyQtWindowsAreOpen():
    """Return True while at least one top-level Qt widget is still visible."""
    for widget in QtGui.QApplication.topLevelWidgets():
        if widget.isVisible():
            return True
    return False
def exec_(application, *args):
    """You cannot call QApplication.exec_, or Houdini will freeze while PyQt
    waits for and processes events. Instead, call this function to allow
    Houdini's and PyQt's event loops to coexist. Pass in any dialogs as
    extra arguments, if you want to ensure that something holds a reference
    to them while the event loop runs.

    This function returns right away.
    """
    # The loop object keeps itself alive via Houdini's callback registry
    # until every Qt window has closed (see IntegratedEventLoop).
    IntegratedEventLoop(application, args).exec_()
def execSynchronously(application, *args):
    """This function is like exec_, except it will not return until all PyQt
    windows have closed. Houdini will remain responsive while the PyQt window
    is open.
    """
    exec_(application, *args)
    # Block (while Houdini keeps servicing its UI) until no Qt window remains.
    hou.ui.waitUntil(lambda: not anyQtWindowsAreOpen())
| mit | Python |
|
90ef0ed82a4d22f277ccc0c3275f0a07189fadc0 | Make title pictures. | pyoceans/seapy,pyoceans/seapy,pyoceans/seapy | title_pics.py | title_pics.py | # -*- coding: utf-8 -*-
#
# title_pics.py
#
# purpose: Create map and time-series for title
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 20-Jan-2015
# modified: Tue 20 Jan 2015 11:18:15 AM BRT
#
# obs:
#
import matplotlib
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Apply the ggplot look to every figure created below.
matplotlib.style.use('ggplot')


def make_map(projection=ccrs.PlateCarree(),
             extent=[-43.5, -32.5, -24.5, -14.5]):
    """Create a small Cartopy map figure clipped to `extent` (lon/lat degrees)."""
    # NOTE(review): the defaults are evaluated once and shared across calls;
    # harmless here because neither is mutated by this function.
    subplot_kw = dict(projection=projection)
    fig, ax = plt.subplots(figsize=(3.25, 3.25), subplot_kw=subplot_kw)
    ax.set_extent(extent)
    #gl = ax.gridlines(draw_labels=True)
    #gl.xlabels_top = gl.ylabels_right = False
    #gl.xformatter = LONGITUDE_FORMATTER
    #gl.yformatter = LATITUDE_FORMATTER
    return fig, ax
def fake_tide(t, M2amp, M2phase, S2amp, S2phase, randamp):
    """
    Generate a minimally realistic-looking fake semidiurnal tide.

    `t` is time in hours, the phases are in radians, and `randamp` scales
    white Gaussian noise added on top of the M2 and S2 constituents.
    """
    two_pi_t = 2 * np.pi * t
    m2 = M2amp * np.sin(two_pi_t / 12.42 - M2phase)
    s2 = S2amp * np.sin(two_pi_t / 12.0 - S2phase)
    noise = randamp * np.random.randn(len(t))
    return m2 + s2 + noise
if __name__ == '__main__':
    # Map: satellite/relief background fetched as WMTS tiles from NASA GIBS.
    layer = 'BlueMarble_ShadedRelief_Bathymetry'
    url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
    fig, ax = make_map()
    ax.add_wmts(url, layer)
    ax.axis('off')
    fig.savefig('map.png', format='png', dpi=72, orientation='portrait',
                transparent=True)

    # Time-series: synthetic semidiurnal tide traces for the title card.
    # (The unused `legendkw` dict was removed; no legend is ever drawn.)
    t = np.arange(500)
    u = fake_tide(t, 2.2, 0.3, 1, .3, 0.4)
    v = fake_tide(t, 1.1, 0.3 + np.pi / 2, 0.6, 0.3 + np.pi / 2, 0.4)
    fig, ax = plt.subplots(figsize=(3.25, 3.25))
    kw = dict(alpha=0.5, linewidth=2.5)
    ax.plot(t, u, label='U', color='cornflowerblue', **kw)
    ax.plot(t, v, label='V', color='lightsalmon', **kw)
    ax.axis([200, 500, -8, 8])
    # Keep the y tick labels from getting too crowded.
    ax.locator_params(axis='y', nbins=5)
    ax.axis('off')
    fig.savefig('timeSeries.png', format='png', dpi=72, orientation='portrait',
                transparent=True)
| cc0-1.0 | Python |
|
cf95ab6ee1bf53ff1a998824dc3718c1ae19336e | Create train_dots.py | davharris/leafpuppy,davharris/leafpuppy,davharris/leafpuppy | train_dots.py | train_dots.py | #!/bin/python
import pylab as pl
import cPickle
import matplotlib.pyplot as plt
from sklearn import svm, metrics
import numpy as np
import sys
# Side (px) of the square training/classification window; odd so the window
# has a well-defined centre pixel.
square = 13
imgloc = '../images/v012-penn.10-1hA5D1-cropb.png'
# Class labels used both as SVM targets and as slots in the vote tally below.
resd = {'dot': 0, 'noise': 1, 'vein': 2}
currimg = plt.imread(imgloc)

# Load the hand-labelled training patches pickled by an earlier labelling step.
pkl_file = open('dots.pkl', 'r')
dots = cPickle.load(pkl_file)
pkl_file.close()

pkl_file = open('noise.pkl', 'r')
noise = cPickle.load(pkl_file)
pkl_file.close()

pkl_file = open('veins.pkl', 'r')
veins = cPickle.load(pkl_file)
pkl_file.close()

#dots = zip(dots, [0 for i in range(len(dots))])
#noise = zip(noise, [1 for i in range(len(noise))])
#veins = zip(veins, [2 for i in range(len(veins))])

print np.shape(np.asarray(dots))
print np.shape(np.asarray(noise))
print np.shape(np.asarray(veins))

# Flatten each square patch into a 1-D feature row (one row per patch).
dots_data = np.asarray(dots).reshape((len(dots), -1))
noise_data = np.asarray(noise).reshape((len(noise), -1))
veins_data = np.asarray(veins).reshape((len(veins), -1))

data = np.concatenate((np.concatenate((dots_data, noise_data)), veins_data))
print len(data)
# Targets are aligned with the row order of `data`: dots, then noise, then veins.
target = [resd['dot'] for i in range(len(dots_data))] + [resd['noise'] for i in range(len(noise_data))] + [resd['vein'] for i in range(len(veins_data))]
print len(target)

# Train an RBF support-vector classifier on the labelled patches.
classifier = svm.SVC(gamma=0.001)
classifier.fit(data, target)
# Image width (tmpx) and height (tmpy) in pixels.
tmpx, tmpy = len(currimg[0][:]), len(currimg[:][0])
final_image = np.ones((tmpy, tmpx))
blocks = []
print 'Going through the blocks...'
sys.stdout.flush()
# Slide the square window over every interior pixel, collecting one patch per
# window centre in row-major order (the index arithmetic below relies on it).
for i in [i+square/2 for i in xrange(tmpy-square)]:
    for j in [j+square/2 for j in xrange(tmpx-square)]:
        currblock = currimg[i-square/2:i+square/2+1, j-square/2:j+square/2+1]
        blocks.append(currblock)
blocks = np.asarray(blocks)
print np.shape(blocks)
blocks = np.asarray(blocks).reshape(len(blocks), -1)
print np.shape(blocks)
print 'About to make predictions...'
sys.stdout.flush()
predicted = classifier.predict(blocks)

# Per-pixel vote tally, one slot per class in `resd`.
voting = np.zeros((tmpy, tmpx, 3))
print 'About to count votes...'
sys.stdout.flush()
for p in xrange(len(predicted)):
    # Recover the window centre (i, j) from the flat patch index p.
    # NOTE(review): this inverts the row-major enumeration above; verify the
    # derivation, since any off-by-one here shifts every vote.
    j = p%(tmpx-square)+square/2
    i = (p-j+square/2)/(tmpx-square)+square/2
    #[i,j] are the coordinates of the center of that box
    #since p=(i-s/2)(X-s)+j-s/2
    for y in range(i-square/2, i+square/2):
        for x in range(j-square/2, j+square/2):
            voting[y, x][predicted[p]] += 1

# Paint a pixel black wherever 'vein' wins its window vote.
for i in xrange(tmpy):
    for j in xrange(tmpx):
        if voting[i, j].argmax() == resd['vein']:
            final_image[i, j] = 0

plt.imshow(final_image, cmap=plt.cm.gray)
plt.show()
#for i in [i+square/2 for i in xrange(tmpx-square)]:
# for j in [j+square/2 for j in xrange(tmpy-square)]:
# for k in range(i-square/2,i+square/2+1):
# for
| bsd-3-clause | Python |
|
84153b0be78998ab8ec6914df8623c99255457b5 | Improve code for creating temporary locustfiles that can be used in tests | mbeacom/locust,locustio/locust,locustio/locust,mbeacom/locust,mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust | locust/test/mock_locustfile.py | locust/test/mock_locustfile.py | import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""

from locust import HttpLocust, TaskSet, task, between


def index(l):
    l.client.get("/")


def stats(l):
    l.client.get("/stats/requests")


class UserTasks(TaskSet):
    # one can specify tasks like this
    tasks = [index, stats]


class LocustSubclass(HttpLocust):
    host = "http://127.0.0.1:8089"
    wait_time = between(2, 5)
    task_set = UserTasks


class NotLocustSubclass():
    host = "http://localhost:8000"
'''


class MockedLocustfile:
    """Handle describing a temporary locustfile created by mock_locustfile()."""
    __slots__ = ["filename", "directory", "file_path"]


@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
    """Create a uniquely named locustfile next to this module for the duration
    of the with-block and yield a MockedLocustfile describing it.

    The file is removed on exit even when the with-block raises (previously
    an exception inside the block leaked the temp file on disk).
    """
    mocked = MockedLocustfile()
    mocked.directory = os.path.dirname(os.path.abspath(__file__))
    # Timestamp + random suffix makes concurrent/repeated use collision-free.
    mocked.filename = "%s_%s_%i.py" % (
        filename_prefix,
        str(time.time()).replace(".", "_"),
        random.randint(0, 100000),
    )
    mocked.file_path = os.path.join(mocked.directory, mocked.filename)
    with open(mocked.file_path, 'w') as f:
        f.write(content)
    try:
        yield mocked
    finally:
        os.remove(mocked.file_path)
| mit | Python |
|
fea7f350ce711d183fd9011c43ca68fff88400eb | Add cython compile util | awest1339/multiscanner,mitre/multiscanner,MITRECND/multiscanner,mitre/multiscanner,jmlong1027/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,MITRECND/multiscanner,mitre/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner | utils/cython_compile_libs.py | utils/cython_compile_libs.py | #!/bin/env python
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import sys
import shutil
from pyximport.pyxbuild import pyx_to_dll
# Repository root (parent of this utils/ directory) and its libs/ folder.
WD = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
LIBS = os.path.join(WD, 'libs')

# Adds the libs directory to the path so `common` can be imported from it.
sys.path.append(LIBS)

import common
def main():
    """Cython-compile every .py file under LIBS (plus pefile, if installed)
    in place, then clean up the _pyxbld build directories."""
    filelist = common.parseFileList([LIBS], recursive=True)
    try:
        import pefile
        # pefile.__file__ points at the .pyc; strip the trailing 'c'.
        filepath = pefile.__file__[:-1]
        filelist.append(filepath)
    except ImportError:
        # Narrowed from a bare `except:` -- only a missing module is expected.
        print('pefile not installed...')

    for filename in filelist:
        if filename.endswith('.py'):
            filename = str(filename)
            try:
                pyx_to_dll(filename, inplace=True)
                print(filename, 'successful!')
            except Exception:
                # pyxbuild can raise assorted build errors; report and move on.
                print('ERROR:', filename, 'failed')
                try:
                    os.remove(filename[:-2] + 'c')
                except OSError:
                    # Best effort: the intermediate .c file may not exist.
                    pass

    # Cleanup build dirs left behind by pyximport.
    for dirpath, _dirnames, _filenames in os.walk(LIBS):
        if os.path.basename(dirpath) == '_pyxbld' and os.path.isdir(dirpath):
            shutil.rmtree(dirpath)


if __name__ == '__main__':
    main()
| mpl-2.0 | Python |
|
d31f63a914877fe12d66497bdbc7dd6d871672fc | add solution for Best Time to Buy and Sell Stock | zhyu/leetcode,zhyu/leetcode | src/bestTimeToBuyAndSellStock.py | src/bestTimeToBuyAndSellStock.py | class Solution:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
    """Best profit from a single buy-then-sell; 0 if prices never rise.

    One pass: track the cheapest price seen so far and the best spread.
    (xrange was replaced with a direct slice iteration -- xrange is
    Python-2-only and no longer exists in Python 3.)
    """
    if len(prices) < 2:
        return 0
    min_price = prices[0]
    res = 0
    for price in prices[1:]:
        res = max(res, price - min_price)
        min_price = min(min_price, price)
    return res
| mit | Python |
|
595c8fad76696240f96e61d9a2299de3d6cda16a | Add utility for walking etree and yielding nodes if options class type match. | TamiaLab/PySkCode | skcode/utility/walketree.py | skcode/utility/walketree.py | """
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
    """
    Walk the tree and yield any tree node matching the given options class.
    Nodes are produced in depth-first pre-order starting from ``tree_node``.
    :param tree_node: The current tree node instance.
    :param opts_cls: The options class to search for.
    """
    # Explicit stack instead of recursion; children are pushed in reverse so
    # they are popped -- and therefore yielded -- in their original order.
    pending = [tree_node]
    while pending:
        node = pending.pop()
        if isinstance(node.opts, opts_cls):
            yield node
        pending.extend(reversed(node.children))
| agpl-3.0 | Python |
|
4e50597100b5e84b1ed3c304a3a7323e7bab7918 | Create removeSequence.py | edenmark/removeSequence | removeSequence.py | removeSequence.py | #!/usr/bin/python
###############################################################################
#
# removeSequence.py version 1.0
#
# Removes a specified nucleotide sequence from the beginning of a larger sequence
#
# Useful for preparing FASTA files for certain processing pipelines that do not
# allow for distal barcodes or primers
#
# Copyright (C) 2014 Evan Denmark
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import argparse

# Command-line interface: the input FASTA path and the adaptor to strip.
parser = argparse.ArgumentParser(description = ' ')
parser.add_argument('fasta', help= 'fasta file with adaptor sequences')
parser.add_argument('adaptor_sequence', help= 'string of nucleotides')

# NOTE(review): parse_args() is invoked twice; harmless, but the result
# could be parsed once and reused.
fasta = parser.parse_args().fasta
adaptor_sequence = parser.parse_args().adaptor_sequence
def remove_fusion_adaptors(fasta, adaptor_sequence):
    """
    Removes the fusion adaptor at the beginning of each sequence of a FASTA file.

    Writes the result to 'new_<fasta>.fasta'. Header lines ('>' prefix) are
    copied through unchanged. Sequence lines that do not begin with the
    adaptor are now also copied through (previously they were silently
    dropped, losing data).
    """
    fasta = str(fasta)
    length_adaptor = len(adaptor_sequence)
    # `with` guarantees both handles are closed even if an error occurs.
    with open(fasta, 'r') as old_file, open('new_' + fasta + '.fasta', 'w') as new_file:
        for each_line in old_file:
            if each_line.startswith('>'):
                # Name/header line: copy through untouched.
                new_file.write(each_line)
            else:
                # Sequence line: strip the adaptor prefix when present.
                current_line = each_line.rstrip('\n').lstrip()
                if current_line[:length_adaptor] == adaptor_sequence:
                    current_line = current_line[length_adaptor:]
                new_file.write(current_line + '\n')
remove_fusion_adaptors(fasta, adaptor_sequence)
| mit | Python |
|
b7d15547bd88c6304c5d8ceb1f74481cb4d162e7 | Add parser hacking example | Saluev/habrahabr-jinja-demo | repeat_n_times.py | repeat_n_times.py | # -*- encoding: utf-8 -*-
from jinja2 import Environment
from jinja2.ext import Extension
from jinja2 import nodes
class RepeatNTimesExtension(Extension):
    """Jinja2 extension adding a ``{% repeat N times %}...{% endrepeat %}`` tag."""

    tags = {"repeat"}

    def parse(self, parser):
        # The stream is positioned on the "repeat" token: consume it and keep
        # its line number so the generated nodes point at the template source.
        lineno = next(parser.stream).lineno
        # Loop variable "_" that the generated For node will bind (unused).
        index = nodes.Name("_", "store", lineno=lineno)
        # Expression after "repeat" -- the number of iterations.
        how_many_times = parser.parse_expression()
        # Desugar to:  for _ in range(N): <body>
        iterable = nodes.Call(nodes.Name("range", "load"), [how_many_times], [], None, None)
        parser.stream.expect("name:times")
        # Parse the tag body up to (and consuming) {% endrepeat %}.
        body = parser.parse_statements(["name:endrepeat"], drop_needle=True)
        return nodes.For(index, iterable, body, [], None, False, lineno=lineno)
if __name__ == "__main__":
    # Demo: render a template that uses the custom {% repeat %} tag.
    env = Environment()
    env.add_extension(RepeatNTimesExtension)
    template = env.from_string(u"""
{%- repeat 3 times -%}
{% if not loop.first and not loop.last %}, {% endif -%}
{% if loop.last %} и ещё раз {% endif -%}
учиться
{%- endrepeat -%}
""")
    print(template.render())
| mit | Python |
|
e3365aa8d9f5e49d3aff732d169c22a46ef22904 | Create viriback_tracker.py (#452) | yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti | plugins/feeds/public/viriback_tracker.py | plugins/feeds/public/viriback_tracker.py | import logging
from dateutil import parser
from datetime import timedelta, datetime
from core import Feed
from core.errors import ObservableValidationError
from core.observables import Url, Ip
class ViriBackTracker(Feed):
    # Feed configuration: the ViriBack C2 tracker dump, polled daily.
    default_values = {
        "frequency": timedelta(hours=24),
        "name": "ViriBackTracker",
        "source": "http://tracker.viriback.com/dump.php",
        "description":
            "Malware C2 Urls and IPs",
    }

    def update(self):
        """Fetch the CSV dump and analyze rows newer than the previous run."""
        for line in self.update_csv(delimiter=',', quotechar='"'):
            # Skip empty lines plus the header ("Family,...") / comment rows.
            if not line or line[0].startswith(("Family", "#")):
                continue
            family, url, ip, first_seen = line
            first_seen = parser.parse(first_seen)
            if self.last_run is not None:
                if self.last_run > first_seen:
                    continue
            self.analyze(family, url, ip, first_seen)

    def analyze(self, family, url, ip, first_seen):
        """Create/refresh Url and Ip observables for one CSV row and link them."""
        url_obs = False
        ip_obs = False
        family = family.lower()
        context = {
            'first_seen': first_seen,
            'source': self.name
        }
        if url:
            try:
                url_obs = Url.get_or_create(value=url)
                url_obs.add_context(context)
                url_obs.add_source(self.name)
                url_obs.tag(["c2", family])
            except ObservableValidationError as e:
                logging.error(e)
        if ip:
            try:
                ip_obs = Ip.get_or_create(value=ip)
                ip_obs.add_context(context)
                # NOTE(review): unlike the URL branch there is no add_source()
                # and no "c2" tag here -- confirm the asymmetry is intended.
                ip_obs.tag(family.lower())
            except ObservableValidationError as e:
                logging.error(e)
        if url_obs and ip_obs:
            # Record that the C2 URL is hosted on / resolves to this IP.
            url_obs.active_link_to(ip_obs, 'ip', self.name)
| apache-2.0 | Python |
|
5a5c30e701220cc874d08a442af0e81d2020aacf | bump dev version | NelleV/pyconfr-test,pyconca/2013-web,pydata/symposion,miurahr/symposion,faulteh/symposion,euroscipy/symposion,TheOpenBastion/symposion,pyconau2017/symposion,euroscipy/symposion,miurahr/symposion,pyconca/2013-web,pydata/symposion,pyohio/symposion,pyohio/symposion,mbrochh/symposion,toulibre/symposion,NelleV/pyconfr-test,pinax/symposion,python-spain/symposion,TheOpenBastion/symposion,toulibre/symposion,faulteh/symposion,mbrochh/symposion,python-spain/symposion,pyconau2017/symposion,pinax/symposion | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev43"
| __version__ = "1.0b1.dev42"
| bsd-3-clause | Python |
85336dfed46145c36307f218612db7c4d8dbf637 | bump version | mbrochh/symposion,pydata/symposion,euroscipy/symposion,pydata/symposion,euroscipy/symposion,python-spain/symposion,pinax/symposion,pyconau2017/symposion,pyohio/symposion,pyconca/2013-web,pyohio/symposion,toulibre/symposion,miurahr/symposion,mbrochh/symposion,TheOpenBastion/symposion,pyconca/2013-web,pyconau2017/symposion,pinax/symposion,TheOpenBastion/symposion,miurahr/symposion,faulteh/symposion,NelleV/pyconfr-test,faulteh/symposion,toulibre/symposion,python-spain/symposion,NelleV/pyconfr-test | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev18"
| __version__ = "1.0b1.dev17"
| bsd-3-clause | Python |
c642a32b1aff0c9adc8e62aad8ceb7e0396512ed | bump version | TheOpenBastion/symposion,pinax/symposion,TheOpenBastion/symposion,mbrochh/symposion,pyconca/2013-web,faulteh/symposion,pinax/symposion,miurahr/symposion,euroscipy/symposion,mbrochh/symposion,pyconau2017/symposion,pyohio/symposion,euroscipy/symposion,toulibre/symposion,pydata/symposion,pyconau2017/symposion,pyohio/symposion,NelleV/pyconfr-test,NelleV/pyconfr-test,faulteh/symposion,toulibre/symposion,pydata/symposion,miurahr/symposion,python-spain/symposion,pyconca/2013-web,python-spain/symposion | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev14"
| __version__ = "1.0b1.dev13"
| bsd-3-clause | Python |
c36a954dbdfcca6e520dca6b96c1c97f496880ca | Add test for forcefield_labeler | open-forcefield-group/smarty,open-forcefield-group/smarty,open-forcefield-group/smarty | smarty/tests/test_forcefield_labeler.py | smarty/tests/test_forcefield_labeler.py | from functools import partial
import smarty
import openeye
from openeye.oechem import *
import os
from smarty.utils import get_data_filename
import numpy as np
from smarty.forcefield_labeler import *
def test_read_ffxml():
    """Test reading of ffxml files.
    """
    # Smoke test: constructing the labeler parses the bundled ffxml file.
    labeler = ForceField_labeler(get_data_filename('forcefield/Frosst_AlkEtOH.ffxml'))
def test_molecule_labeling(verbose = False):
    """Test using ForceField_labeler to provide force terms applied to an oemol."""
    # Build a propane molecule with explicit hydrogens.
    mol = OEMol()
    OEParseSmiles(mol, 'CCC')
    OEAddExplicitHydrogens(mol)
    labeler = ForceField_labeler(get_data_filename('forcefield/Frosst_AlkEtOH.ffxml'))
    labels = labeler.labelMolecules( [mol], verbose = verbose)

    # Check that force terms aren't empty. Membership is tested on the dict
    # directly ("key in d") rather than the non-idiomatic "not key in d.keys()".
    if 'HarmonicBondForce' not in labels[0]:
        raise Exception("No force term assigned for harmonic bonds.")
    if 'HarmonicAngleForce' not in labels[0]:
        raise Exception("No force term assigned for harmonic angles.")
    if 'PeriodicTorsionForce' not in labels[0]:
        raise Exception("No force term assigned for periodic torsions.")
    if 'NonbondedForce' not in labels[0]:
        raise Exception("No nonbonded force term assigned.")
| mit | Python |
|
7d1fde66e0fd6b3b8cc9876e0d3271d6776b347f | convert tiffs to video added | emmettk/pvrsex | image_to_video.py | image_to_video.py | # -*- coding: utf-8 -*-
"""
Created on Tue May 15 16:11:55 2018
@author: LaVision
"""
#!/usr/local/bin/python3
import cv2
import argparse
import os
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
# Fix: the help text previously claimed the default extension is 'tif',
# but the actual default is 'png'.
ap.add_argument("-ext", "--extension", required=False, default='png', help="extension name. default is 'png'.")
ap.add_argument("-o", "--output", required=False, default='output.mp4', help="output video file")
args = vars(ap.parse_args())
# Arguments
dir_path = '.'
ext = args['extension']
output = args['output']

# Collect the frame filenames, sorted so frames are written deterministically
# (os.listdir order is arbitrary, which previously scrambled the video).
images = sorted(f for f in os.listdir(dir_path) if f.endswith(ext))
if not images:
    # Fail with a clear message instead of an AttributeError on frame.shape.
    raise SystemExit("No '{}' images found in {}".format(ext, dir_path))

# Determine the width and height from the first image
image_path = os.path.join(dir_path, images[0])
frame = cv2.imread(image_path)
cv2.imshow('video', frame)
height, width, channels = frame.shape

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Be sure to use lower case
out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))

for image in images:
    image_path = os.path.join(dir_path, image)
    print("processing", image_path)
    frame = cv2.imread(image_path)
    out.write(frame)  # Write out frame to video
    cv2.imshow('video', frame)
    if (cv2.waitKey(1) & 0xFF) == ord('q'):  # Hit `q` to exit
        break

# Release everything if job is finished
out.release()
cv2.destroyAllWindows()
print("The output video is {}".format(output)) | mit | Python |
|
f67514bf9ed193c0a8ac68c2258913bb54df8a88 | Create save_py_source.py | cclauss/Pythonista_ui | save_py_source.py | save_py_source.py | import datetime, os, zipfile
# Extensions to archive. Each entry must carry the leading dot so it can be
# compared against os.path.splitext() output; the old value 'pyui' (without
# the dot) silently excluded every .pyui file from the backup.
exts = '.py .pyui'.split()

# Timestamped archive name, e.g. aa_source_code_2015_01_02_03_04_05.zip
zip_file_name = 'aa_source_code_%Y_%m_%d_%H_%M_%S.zip'
zip_file_name = datetime.datetime.strftime(datetime.datetime.now(), zip_file_name)
def get_filenames(in_dir=None):
    """Recursively collect the paths of all files under `in_dir`.

    Defaults to the current directory. Uses os.walk instead of the
    Python-2-only os.path.walk (removed in Python 3); behavior is the same.
    """
    in_dir = in_dir or os.curdir
    filenames = []
    for dirname, _subdirs, names in os.walk(in_dir):
        for name in names:
            filename = os.path.join(dirname, name)
            if os.path.isfile(filename):
                filenames.append(filename)
    return filenames
filenames = get_filenames()
if exts:
    # Keep only files whose extension is listed in `exts`.
    filenames = [fn for fn in filenames if os.path.splitext(fn)[1] in exts]
file_count = len(filenames)
print('{} files found.'.format(file_count))

if filenames:
    with zipfile.ZipFile(zip_file_name, 'w') as zip_file:
        for i, filename in enumerate(filenames):
            zip_file.write(filename)
            if not i % 50:
                # Progress heartbeat every 50 files (also fires for i == 0).
                print('{} of {}: {}'.format(i, file_count, filename))
    print('{}\n{} files copied into zip file: "{}".'.format('=' * 13, file_count, zip_file_name))
| apache-2.0 | Python |
|
c7c02febb43eb2466484f5c99d6dcc2d60e67e09 | add docker.py | zhengze/zblog,zhengze/zblog,zhengze/zblog,zhengze/zblog | zblogsite/settings/docker.py | zblogsite/settings/docker.py | from .base import *
# Development settings for running under docker-compose; never enable
# DEBUG in production.
DEBUG = True

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'zblog', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'root',
        'PASSWORD': '1234',
        #'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'HOST': 'db', # docker mysql.
        'PORT': '3306', # Set to empty string for default.
        'OPTIONS': {
            # NOTE(review): SQL keywords are case-insensitive, so the
            # lowercase "Set" behaves the same as the conventional "SET".
            'init_command': "Set sql_mode='STRICT_TRANS_TABLES'"
        }
    }
}
| mit | Python |
|
f60f31c73deef7768af5eb45046a8848f2dc40c4 | Create draw_neural_net.py | XinDongol/warehouse | draw/draw_neural_net.py | draw/draw_neural_net.py | import matplotlib.pyplot as plt
def draw_neural_net(ax, left, right, bottom, top, layer_sizes):
    '''
    Draw a neural network cartoon using matplotlib.

    (xrange was replaced with range -- xrange is Python-2-only -- and the
    unused local `n_layers` was removed.)

    :usage:
        >>> fig = plt.figure(figsize=(12, 12))
        >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])

    :parameters:
        - ax : matplotlib.axes.AxesSubplot
            The axes on which to plot the cartoon (get e.g. by plt.gca())
        - left : float
            The center of the leftmost node(s) will be placed here
        - right : float
            The center of the rightmost node(s) will be placed here
        - bottom : float
            The center of the bottommost node(s) will be placed here
        - top : float
            The center of the topmost node(s) will be placed here
        - layer_sizes : list of int
            List of layer sizes, including input and output dimensionality
    '''
    v_spacing = (top - bottom)/float(max(layer_sizes))
    h_spacing = (right - left)/float(len(layer_sizes) - 1)

    # Nodes: one circle per unit, each layer centered vertically.
    for n, layer_size in enumerate(layer_sizes):
        layer_top = v_spacing*(layer_size - 1)/2. + (top + bottom)/2.
        for m in range(layer_size):
            circle = plt.Circle((n*h_spacing + left, layer_top - m*v_spacing), v_spacing/4.,
                                color='w', ec='k', zorder=4)
            ax.add_artist(circle)

    # Edges: fully connect every pair of units in consecutive layers.
    for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.
        layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.
        for m in range(layer_size_a):
            for o in range(layer_size_b):
                line = plt.Line2D([n*h_spacing + left, (n + 1)*h_spacing + left],
                                  [layer_top_a - m*v_spacing, layer_top_b - o*v_spacing], c='k')
                ax.add_artist(line)
| mit | Python |
|
041b55f3a9ded360146f6e2dda74a6b20b3e6f7e | Add scrape_results | Aditya8795/Python-Scripts | scrape_results.py | scrape_results.py | from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup

# Launch Firefox and open the NITT exam-results frameset.
driver = webdriver.Firefox()
driver.get("http://www.nitt.edu/prm/ShowResult.htm")
# The roll-number form lives inside the "main" child frame, so it is driven
# by injecting javascript: URLs that reach it via the frame's contentWindow:
# fill the roll-number box, then click the submit button.
driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('TextBox1').value=110113006;}());")
driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('Button1').click();}());")
sleep(1)
# Switch the result-type dropdown and fire its onchange handler.
driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('Dt1').selectedIndex = 1;document.getElementsByName('main')[0].contentWindow.document.getElementById('Dt1').onchange();}());")
print "I have got the page to a specific student's result!!"
| mit | Python |
|
5745bf81a32915cb85a60093dc2a7123e5814767 | Add personal problem 001 | aiden0z/snippets,aiden0z/snippets,aiden0z/snippets,aiden0z/snippets,aiden0z/snippets,aiden0z/snippets | problems/001_parse_navs_from_markdown.py | problems/001_parse_navs_from_markdown.py | """从 Markdown 文本中解析出目录信息
现在有格式如下的文档:
```markdown
* [目录 1](chapter1/1.md)
* [目录 1.1](chapter1/1-1.md)
* [目录 1.1.1](chapter1/1-1-1.md)
* [目录 2](chapter2/1.md)
```
要求写一个解析器,解析返回所有目录信息,并包含层级关系。返回的示例数据如下。
```python
[
    {
        'name': '目录 1',
        'path': 'chapter1/1.md',
        'children': [
            {
                'name': '目录 1.1',
                'path': 'chapter1/1-1.md',
                'children': [
                    {'name': '目录 1.1.1', 'path': 'chapter1/1-1-1.md', 'children': []}
                ]
            }
        ]
    },
    {'name': '目录 2', 'path': 'chapter2/1.md', 'children': []}
]
```
"""
import re
import pprint
class Solution:
    """Parses a Markdown nav listing into a nested table of contents."""

    def parse_navs(self, content):
        """Return a list of {'name', 'path', 'children'} dicts, nested by
        the indentation depth of each ``[name](path)`` bullet line."""
        # Sentinel root at depth -1; its children collect the top-level items.
        trail = [({'name': 'dummy', 'path': 'dummy', 'children': []}, -1)]
        for raw_line in content.splitlines():
            # Only lines containing a [name](path) link are nav entries.
            match = re.match(r'.*\[(.+)\]\((.+)\)', raw_line)
            if match is None:
                continue
            # Depth = number of leading whitespace characters.
            ws = re.match(r'^\s+', raw_line)
            depth = ws.span()[1] if ws is not None else 0
            title, target = match.groups()
            node = {'name': title, 'path': target, 'children': []}
            if depth <= trail[-1][1]:
                # Unwind until an entry at this exact depth has been popped;
                # what remains on top is this node's parent.
                while trail.pop()[1] != depth:
                    pass
            parent = trail[-1][0]
            parent['children'].append(node)
            trail.append((node, depth))
        return trail[0][0]['children']
if __name__ == '__main__':
    # Sample document: b and c are nested under a, and d under c.
    summary = """
* [a](content/preface/preface-chinese.md)
    * [b](content/chapter1/1.1-chinese.md)
    * [c](content/chapter1/1.1-chinese.md)
        * [d](content/chapter2/2.1-chinese.md)
"""
    solutions = [Solution]
    for s in solutions:
        result = s().parse_navs(summary)
        pprint.pprint(result, indent=4)
        assert result[0]['children'][1]['children'][0]['name'] == 'd'
| mit | Python |
|
dc52b5914c4d0024458eefeb3b3576aa58692345 | Remove print | Chilledheart/seahub,miurahr/seahub,madflow/seahub,miurahr/seahub,cloudcopy/seahub,madflow/seahub,cloudcopy/seahub,cloudcopy/seahub,miurahr/seahub,madflow/seahub,madflow/seahub,cloudcopy/seahub,Chilledheart/seahub,Chilledheart/seahub,madflow/seahub,miurahr/seahub,Chilledheart/seahub,Chilledheart/seahub | organizations/decorators.py | organizations/decorators.py | # encoding: utf-8
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from seaserv import get_user_current_org
def org_staff_required(func):
"""
Decorator for views that checks the user is org staff.
"""
def _decorated(request, *args, **kwargs):
user = request.user.username
url_prefix = kwargs.get('url_prefix', '')
org = get_user_current_org(user, url_prefix)
if org and org.is_staff:
return func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('myhome'))
return _decorated
| # encoding: utf-8
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from seaserv import get_user_current_org
def org_staff_required(func):
"""
Decorator for views that checks the user is org staff.
"""
def _decorated(request, *args, **kwargs):
user = request.user.username
url_prefix = kwargs.get('url_prefix', '')
org = get_user_current_org(user, url_prefix)
print url_prefix
print org._dict
if org and org.is_staff:
return func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('myhome'))
return _decorated
| apache-2.0 | Python |
55bf42057bcd9e14d964b2064f9322c164ba91ff | Test request construction (#91) | tableau/server-client-python,tableau/server-client-python,Talvalin/server-client-python,Talvalin/server-client-python | test/test_requests.py | test/test_requests.py | import unittest
import requests
import requests_mock
import tableauserverclient as TSC
class RequestTests(unittest.TestCase):
def setUp(self):
self.server = TSC.Server('http://test')
# Fake sign in
self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'
self.baseurl = self.server.workbooks.baseurl
def test_make_get_request(self):
with requests_mock.mock() as m:
m.get(requests_mock.ANY)
url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks"
opts = TSC.RequestOptions(pagesize=13, pagenumber=13)
resp = self.server.workbooks._make_request(requests.get,
url,
content=None,
request_object=opts,
auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM',
content_type='text/xml')
self.assertEquals(resp.request.query, 'pagenumber=13&pagesize=13')
self.assertEquals(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM')
self.assertEquals(resp.request.headers['content-type'], 'text/xml')
def test_make_post_request(self):
with requests_mock.mock() as m:
m.post(requests_mock.ANY)
url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks"
resp = self.server.workbooks._make_request(requests.post,
url,
content=b'1337',
request_object=None,
auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM',
content_type='multipart/mixed')
self.assertEquals(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM')
self.assertEquals(resp.request.headers['content-type'], 'multipart/mixed')
self.assertEquals(resp.request.body, b'1337')
| mit | Python |
|
69c3e33df15dca13cf310062216525dfbe98639e | add spectandus for index analysis | ebenolson/desiderata | spectandus.py | spectandus.py | #!/usr/bin/env python
# Author: Eben Olson <[email protected]>
# Licensed under the MIT license <http://opensource.org/licenses/MIT>
import logging
from colorlog import ColoredFormatter
import plac
import sys
import json
from fs import zipfs
from collections import defaultdict
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
stream = logging.StreamHandler()
stream.setFormatter(formatter)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(stream)
def convert_index(reference, outfile):
logger.info(u'Converting ZipFS directory index {} to hashmap'.format(reference))
hashes = defaultdict(list)
count = 0
encoding = sys.getfilesystemencoding()
with zipfs.ZipFS(reference, 'r', encoding=encoding) as hashfs:
for logfilename in hashfs.walkfiles('/'):
filehash = hashfs.open(logfilename).read()
hashes[filehash].append(logfilename)
count += 1
logger.info(u'{} files with {} unique hashes found in index'.format(count, len(hashes)))
logger.info(u'Writing results to {}'.format(outfile))
json.dump(hashes, open(outfile, 'w'), indent=4)
def list_duplicates(reference, outfile):
logger.info(u'Searching for duplicated files in {}'.format(reference))
hashes = json.load(open(reference))
duplicates = {}
for filehash, files in hashes.items():
if len(files) > 1:
duplicates[filehash] = files
logger.info(u'{} hashes with multiple files found out of {} in reference'.format(len(duplicates), len(hashes)))
logger.info(u'Writing results to {}'.format(outfile))
json.dump(duplicates, open(outfile, 'w'), indent=4)
def list_unmatched(reference, target, outfile):
logger.info(u'Searching for files in {} not in reference {}'.format(target, reference))
reference = json.load(open(reference))
target = json.load(open(target))
unmatched = {}
logger.info(u'Reference has {} hashes, target has {}'.format(len(reference), len(target)))
for filehash, files in target.items():
if filehash not in reference:
unmatched[filehash] = files
if len(unmatched):
logger.warn(u'{} hashes were not matched in reference'.format(len(unmatched)))
else:
logger.info(u'All hashes in target were found in reference')
logger.info(u'Writing results to {}'.format(outfile))
json.dump(unmatched, open(outfile, 'w'), indent=4)
@plac.annotations(
convert=('List all files in reference (zipfile) as json hashmap', 'flag', 'c'),
dupcheck=('Show all hashes in reference (json) with multiple files', 'flag', 'm'),
newcheck=('Show all hashes in target (json) not in reference (json)', 'option', 'n'),
debug=('Show all log output', 'flag', 'd'),
reference='Reference index',
)
def main(convert, dupcheck, newcheck, debug, reference, outfile='result.json'):
if debug:
logger.setLevel(logging.DEBUG)
if convert:
convert_index(reference, outfile)
if dupcheck:
list_duplicates(reference, outfile)
if newcheck:
list_unmatched(reference, newcheck, outfile)
if __name__ == '__main__':
plac.call(main)
| mit | Python |
|
35f98c14a74e207c616fcb57538bb176842c0d1e | Add procfile and wsgi entrypoint | openhealthcare/open-prescribing,openhealthcare/open-prescribing,openhealthcare/open-prescribing | nhs/wsgi.py | nhs/wsgi.py | """
WSGI config for Nhs Prescriptions project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nhs.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| agpl-3.0 | Python |
|
ebd41d7f264de92be19347042749ef48d5820b7d | add inner product demo | YcheLanguageStudio/PythonStudy | study/language_core_and_lib/function/functional_example.py | study/language_core_and_lib/function/functional_example.py | def demo_inner_product():
vec0 = range(10)
vec1 = [i ** 2 for i in range(10)]
print 'inner product:', reduce(lambda l, r: l + r, map(lambda ele: ele[0] * ele[1], zip(vec0, vec1)), 0)
print 'verify:', sum([i ** 3 for i in range(10)])
if __name__ == '__main__':
demo_inner_product()
| mit | Python |
|
7bd3d26427c08cf38f2f7dedbf075e1335447f70 | add config for database | lecly/pymongo-driver | config/database.py | config/database.py | mongorc = {
'host': '127.0.0.1',
'port': 27017,
'db': 'demo'
}
| mit | Python |
|
f3325695a78f528af6f3c2adb6024dc71405af8f | Create kaynaksız_sil.py | Mavrikant/WikiBots | kaynaksız_sil.py | kaynaksız_sil.py | # -*- coding: utf-8 -*-
# !/usr/bin/python
from bs4 import BeautifulSoup
import requests
import mavri
import re
import random
xx= mavri.login('tr.wikipedia','Mavrikant Bot')
wiki='tr.wikipedia'
template='Şablon:Kaynaksız'
ticontinue = ''
while ticontinue != 'DONE':
allpages= requests.get('https://' + wiki + '.org/w/api.php?action=query&utf8&format=json&tiprop=title&titles='+template+'&prop=transcludedin&tilimit=500&ticontinue='+str(ticontinue))
try:
ticontinue =allpages.json()['continue']['ticontinue']
except:
ticontinue = 'DONE'
for page in allpages.json()['query']['pages'].itervalues().next()['transcludedin']:
title = page['title']
#print title
content = mavri.content_of_page(wiki, title)
kaynak_sayisi= len(re.findall(ur'<\s?ref\s?\>', content))
print kaynak_sayisi
if (kaynak_sayisi>0):
print title
content = mavri.content_of_section(wiki,title,0,xx)
content = re.sub(ur'\{\{\s?[Kk]aynaksız[^\}]*\}\}\s?\n?', '', content)
params3 = '?format=json&action=tokens'
r3 = requests.get('https://' + wiki + '.org/w/api.php' + params3, cookies=xx.cookies)
edit_token = r3.json()['tokens']['edittoken']
edit_cookie = xx.cookies.copy()
edit_cookie.update(r3.cookies)
payload = {'action': 'edit', 'assert': 'user', 'format': 'json', 'utf8': '', 'section': str(0), 'text': content, 'summary': '-Kaynaksız şablonu, '+str(kaynak_sayisi)+' adet kaynak var', 'title': title, 'token': edit_token, 'bot': ''}
requests.post('https://' + wiki + '.org/w/api.php', data=payload, cookies=edit_cookie)
exit(0)
| mit | Python |
|
118e47c2bc307d8de447e9d37973feca44763ab5 | Create __init__.py | StackStorm/st2contrib,StackStorm/st2contrib,StackStorm/st2contrib | packs/astral/actions/lib/__init__.py | packs/astral/actions/lib/__init__.py | from .BaseAction import BaseAction
| apache-2.0 | Python |
|
f5c56152771fbafc5ac9161ccd453a240bfca5cc | Add get_history example. | supasate/PythonZabbixApi | examples/get_history.py | examples/get_history.py | import sys
sys.path.append('../')
import zabbix
from datetime import datetime
from datetime import timedelta
from calendar import timegm
# read config file
config = {}
execfile("config.py", config)
# new api instance
server = config["server"]
api = zabbix.Api(server)
# log in
username = config["user"]
password = config["password"]
api.login(username, password)
# get history
# host id
http_host_id = config["http_host_id"]
# item id
http_processor_time = config["http_processor_time_id"]
# start time and end time
time_from = timegm((datetime.now() - timedelta(minutes = 100)).utctimetuple()) - 150000
time_till = timegm(datetime.now().utctimetuple()) - 150000
print api.get_history('float', http_host_id, http_processor_time, time_from, time_till)
# log out
api.logout()
| apache-2.0 | Python |
|
26afdc032087693d274966a803a6bb3c77d17549 | add request example | squeaky-pl/japronto,squeaky-pl/japronto,squeaky-pl/japronto,squeaky-pl/japronto,squeaky-pl/japronto | examples/request/req.py | examples/request/req.py | from app import Application
def dump(request):
text = """
Method: {0.method}
Path: {0.path}
Version: {0.version}
Headers: {0.headers}
Match: {0.match_dict}
Body: {0.body}
QS: {0.query_string}
query: {0.query}
mime_type: {0.mime_type}
encoding: {0.encoding}
form: {0.form}
keep_alive: {0.keep_alive}
route: {0.route}
hostname: {0.hostname}
port: {0.port}
remote_addr: {0.remote_addr}
""".strip().format(request)
return request.Response(text=text)
if __name__ == '__main__':
app = Application()
app.router.add_route('/', dump)
app.router.add_route('/{a}/{b}', dump)
app.serve()
| mit | Python |
|
b84a2667b5071ede3eb983364195c3a2d3c97543 | Create MQTTstage.py | Anton04/MQTT-Stage,Anton04/MQTT-Stage | MQTTstage.py | MQTTstage.py | #!/usr/bin/python
#Check if the
def CheckDirectories():
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.