| commit (string, 40 chars) | subject (string, 4–1.73k chars) | repos (string, 5–127k chars) | old_file (string, 2–751 chars) | new_file (string, 2–751 chars) | new_contents (string, 1–8.98k chars) | old_contents (string, 0–6.59k chars) | license (13 classes) | lang (23 classes) |
---|---|---|---|---|---|---|---|---|
d95f24d43f3925a91176429cca1aaac30a0c55aa | Create java module main | hatchery/Genepool2,hatchery/genepool | genes/java/main.py | genes/java/main.py | from genes import apt, debconf
import platform

class Config:
    OS = platform.system()
    (DIST, _, CODE) = platform.linux_distribution()
    REPO = DIST.lower() + '-' + CODE


def main():
    if Config.OS == 'Linux':
        if Config.DIST == 'Ubuntu' or Config.DIST == 'Debian':
            # FIXME: debian needs ppa software
            apt.add_repo('ppa:webupd8team/java')
            apt.update()
            debconf.set_selections('oracle-java8-installer',
                                   'shared/accepted-oracle-license-v1-1',
                                   'select', 'true')
            apt.install('oracle-java8-installer')
        else:
            # FIXME: print failure case
            pass
    elif Config.OS == 'Darwin':
        # brew_cask.install('java8')
        pass
    else:
        # FIXME: print failure, handle windows
        pass
| mit | Python |
|
6d87badb68f2e20a3907f670b9190956ebd127e8 | Create AddBinaryNumbers.py | manikTharaka/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms | math/AddBinaryNumbers/Python/AddBinaryNumbers.py | math/AddBinaryNumbers/Python/AddBinaryNumbers.py | number1 = input("Enter the first number: ")
number2 = input("Enter the second number: ")
result = (int(number1, 2) + int(number2, 2))
result = bin(result)
print(result[2:])
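# Worked example (illustrative): inputs "1011" and "110" give
# int("1011", 2) + int("110", 2) == 11 + 6 == 17, bin(17) == "0b10001",
# and the [2:] slice strips the "0b" prefix, so the script prints 10001.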
| cc0-1.0 | Python |
|
c71a43dae259299952cec082d33f003ecaeb9eab | Add marky test. | gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com | tests/py/test_markdown.py | tests/py/test_markdown.py | from gratipay.testing import Harness
from gratipay.utils import markdown
from HTMLParser import HTMLParser
class TestMarkdown(Harness):
    def test_marky_works(self):
        md = "**Hello World!**"
        actual = HTMLParser().unescape(markdown.marky(md)).strip()
        expected = '<p><strong>Hello World!</strong></p>'
        assert actual == expected
| mit | Python |
|
722b1d55c771e628ba82bbd5b8f8f5de047112af | Add a hex dump utility class. | gvnn3/PCS,gvnn3/PCS | tests/hexdumper.py | tests/hexdumper.py | # This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
| bsd-3-clause | Python |
|
f7a8c0b6e361ce3e5f0980b539b843b33fea258d | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/4464fe2aad5cccfd7935b0f1767901eb08e99784. | tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "4464fe2aad5cccfd7935b0f1767901eb08e99784"
TFRT_SHA256 = "cc3b5b95a2da47710ade8b2d3c0046cd05750f94db5f3feb58a224ae7163db2f"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
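    # Note (illustrative, not part of the original file): TFRT_SHA256 must be
    # the checksum of the archive for TFRT_COMMIT; when bumping the commit by
    # hand it can be recomputed with e.g.
    #   curl -L https://github.com/tensorflow/runtime/archive/<commit>.tar.gz | sha256sum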
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "191a16a25cc901e12535893b94aca169916d378c"
TFRT_SHA256 = "11b5d8d41bc4a6c1c6c7f9c6958c834aef832162ca75806f799bb51c9119b93d"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
c5b0c56f53dee5577641a668019f40f9468017ea | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/83d3045fb5476bed115ae438871a228c1c682af1. | paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "83d3045fb5476bed115ae438871a228c1c682af1"
TFRT_SHA256 = "bdde8691c6a17c803de04423271b3534a421fd323627dc607b1fddf2f454e52c"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "6ca793b5d862ef6c50f242d77a811f06cce9b60a"
TFRT_SHA256 = "720b059a6b1d5757a76e56cf4a3a791b58e5d020858f6b67b077839963bffe8c"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
a3a9d4d6538b025d0c6c821a72076e084a5b597b | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/9dac1ed1ebc2350ada97b16093174a1a0bbd56d0. | karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "9dac1ed1ebc2350ada97b16093174a1a0bbd56d0"
TFRT_SHA256 = "89eea9ff0c9dfca61037c4da051a6ddf4d4598614f7ca08a240355d1635f8786"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "553df8c12e9ba5930b9b8065f1d012ea07c6044c"
TFRT_SHA256 = "477d0374b044c60cd018fdb17e7c6054e190e59e36e1a442eb5d1628efb2341d"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
2e53ae34ec03485302d5d7e6e5dd05707bbd1cf6 | Add camera tests | lillian-lemmer/sappho,lillian-lemmer/sappho,lily-seabreeze/sappho,lillian-gardenia-seabreeze/sappho,lillian-gardenia-seabreeze/sappho,lily-seabreeze/sappho | tests/test_camera.py | tests/test_camera.py | import os
import pygame
from ..sappho import Camera
from .common import compare_pygame_surfaces
class TestCamera(object):
    def test_scroll(self):
        # Create surface to render to
        output_surface = pygame.surface.Surface((1, 1))

        # Create fixtures
        red_surface = pygame.surface.Surface((1, 1))
        blue_surface = pygame.surface.Surface((1, 1))
        red_surface.fill((255, 0, 0))
        blue_surface.fill((0, 0, 255))

        # Create the camera and blit colors to it
        camera = Camera((2, 1), (1, 1), (1, 1))
        camera.blit(red_surface, (0, 0))
        camera.blit(blue_surface, (1, 0))

        # We should be at (0, 0) so blitting should get us a red pixel
        output_surface.blit(camera, (0, 0))
        assert(compare_pygame_surfaces(red_surface, output_surface))

        # Scroll one pixel to the right, and we should get a blue pixel
        # when blitting
        camera.scroll(1, 0)
        output_surface.blit(camera, (0, 0))
        assert(compare_pygame_surfaces(blue_surface, output_surface))

    def test_scale(self):
        # Create surface to render to
        output_surface = pygame.surface.Surface((10, 10))

        # Create fixtures
        red_small = pygame.surface.Surface((1, 1))
        red_large = pygame.surface.Surface((10, 10))
        red_small.fill((255, 0, 0))
        red_large.fill((255, 0, 0))

        # Create the camera with scaling enabled and blit our red pixel to it
        camera = Camera((1, 1), (10, 10), (1, 1))
        camera.blit(red_small, (0, 0))

        # Blit and compare
        output_surface.blit(camera, (0, 0))
        assert(compare_pygame_surfaces(output_surface, red_large))
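# Geometry recap (sketch): in test_scroll the camera exposes a 1x1 window onto
# a 2x1 source surface, so scroll(1, 0) moves the window from the red pixel at
# (0, 0) to the blue pixel at (1, 0); test_scale stretches a 1x1 source up to
# the 10x10 output resolution.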
| mit | Python |
|
349918610081c8c02dc75fdafd47f647814dd63c | add converter of string to format maya understands for changing setting of fps | mindbender-studio/core,getavalon/core,mindbender-studio/core,MoonShineVFX/core,MoonShineVFX/core,getavalon/core | mindbender/maya/pythonpath/mayafpsconverter.py | mindbender/maya/pythonpath/mayafpsconverter.py | def mayafpsconverter(Sfps):
    # maya.cmds is needed for the confirmDialog call below but was never
    # imported; import it locally so the rest of the module stays importable
    # outside of Maya.
    from maya import cmds

    condition = 0
    if Sfps == "":
        condition = 1
        return Sfps
    if Sfps == "15":
        condition = 1
        return "game"
    if Sfps == "24":
        condition = 1
        return "film"
    if Sfps == "25":
        condition = 1
        return "pal"
    if Sfps == "30":
        condition = 1
        return "ntsc"
    if Sfps == "48":
        condition = 1
        return "show"
    if Sfps == "50":
        condition = 1
        return "palf"
    if Sfps == "60":
        condition = 1
        return "ntscf"

    ERRORSTRING = "MINDBENDER_FPS has bad value in the bat file"
    if str(Sfps).isdigit() is False:
        cmds.confirmDialog(
            title="Environment variable error",
            message=ERRORSTRING,
            button="",
            defaultButton="",
            cancelButton="",
            dismissString="")
        return ""
    if condition == 0:
        Sfps = str(Sfps) + "fps"
    return Sfps
| mit | Python |
|
3eafac9d71f7f885f66a63218557194291c649f7 | add config test | girder/slicer_cli_web,girder/slicer_cli_web | tests/test_config.py | tests/test_config.py | import pytest
from pytest_girder.assertions import assertStatusOk, assertStatus
from slicer_cli_web.config import PluginSettings
@pytest.mark.plugin('slicer_cli_web')
def test_default_task_folder(server, admin, folder):
    # Test the setting
    resp = server.request('/system/setting', method='PUT', params={
        'key': PluginSettings.SLICER_CLI_WEB_TASK_FOLDER,
        'value': 'bad value'
    }, user=admin)
    assertStatus(resp, 400)

    resp = server.request('/system/setting', method='PUT', params={
        'key': PluginSettings.SLICER_CLI_WEB_TASK_FOLDER,
        'value': folder['_id']
    }, user=admin)
    assertStatusOk(resp)

    assert PluginSettings.has_task_folder()
    assert PluginSettings.get_task_folder()['_id'] == folder['_id']
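# The two PUTs above exercise both validation paths: a malformed value is
# rejected with HTTP 400, while a real folder id is accepted and then
# round-trips through the PluginSettings accessors.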
| apache-2.0 | Python |
|
cfc89a542ebb9b1745bb8a7ce30f79dad12a16b7 | add mslib tool to build static C libraries. | abadger/Bento,abadger/Bento,cournape/Bento,cournape/Bento,cournape/Bento,cournape/Bento,abadger/Bento,abadger/Bento | yaku/tools/mslib.py | yaku/tools/mslib.py | import yaku.utils
import yaku.task
def setup(ctx):
    env = ctx.env
    ctx.env["STLINK"] = ["lib.exe"]
    ctx.env["STLINK_TGT_F"] = ["/OUT:"]
    ctx.env["STLINK_SRC_F"] = []
    ctx.env["STLINKFLAGS"] = ["/nologo"]
    ctx.env["STATICLIB_FMT"] = "%s.lib"

    # XXX: hack
    saved = yaku.task.Task.exec_command
    def msvc_exec_command(self, cmd, cwd):
        new_cmd = []
        carry = ""
        for c in cmd:
            if c in ["/OUT:"]:
                carry = c
            else:
                c = carry + c
                carry = ""
                new_cmd.append(c)
        saved(self, new_cmd, cwd)
    yaku.task.Task.exec_command = msvc_exec_command


def detect(ctx):
    if yaku.utils.find_program("lib.exe") is None:
        return False
    else:
        return True
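# Effect of the /OUT: carry above (illustrative): an argument list like
# ["lib.exe", "/nologo", "/OUT:", "foo.lib", "a.obj"] is rewritten to
# ["lib.exe", "/nologo", "/OUT:foo.lib", "a.obj"] before the command runs,
# fusing the flag with its target the way lib.exe expects.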
| bsd-3-clause | Python |
|
367a7cdcb02d2d8c15e9a2375c5304b2ad9c89ac | Add the basic tools as functions to facilitate basic operations | vlegoff/ytranslate | ytranslate/tools.py | ytranslate/tools.py | # Copyright (c) 2015, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing different tools as functions.
They can be called to interact with the created object of the
'ytranslate' library, including the catalogs and loader.
"""
from ytranslate.fsloader import FSLoader
def init(root_dir):
    """Load the catalogs at a specified location.

    The 'root_dir', the parent directory, is sent to the FSLoader
    class which is to create a hierarchy of catalogs. The parent
    catalogs bear the name of the namespace (that is, their
    directory or their filename without the '.yml' extension).
    For instance:
        init("path/to/translations")

    Use the 'select' function to then select a catalog.

    """
    fsloader = FSLoader(root_dir)
    FSLoader.current_loader = fsloader
    fsloader.load()


def select(catalog):
    """Select the catalog from the loader.

    The catalog's name must be specified. If the loader is a
    FSLoader (the default), then the 'root_dir' directory contains
    the parent catalogs. You should use one of its contained
    directories' names, or that of a YML file without the '.yml'
    extension. For instance:
        select("en")

    """
    if FSLoader.current_loader:
        FSLoader.current_loader.select(catalog)
    else:
        raise ValueError("the current loader hasn't been selected")


def t(address, count=None, **kwargs):
    """Retrieve the translated message from the selected catalog.

    You can use this function to obtain the translated message
    corresponding to the address, which must represent the list of
    namespaces separated by '.'. For instance:
        t("ui.title")

    The hierarchy of messages is defined by the catalog's structure
    (directories and files, if it has been selected by a FSLoader,
    which is the default choice).

    You can also use placeholders as named parameters:
        t("welcome.name", user="John")

    Additionally, you can vary the message according to a number.
    For instance:
        t("notification.emails", 3)

    See the user documentation for a detailed explanation about
    the syntax and corresponding catalogs.

    """
    if FSLoader.current_catalog:
        return FSLoader.current_catalog.retrieve(address, count, **kwargs)

    raise ValueError("no catalog has been selected")
| bsd-3-clause | Python |
|
8660c7fda8cc7290fadeed7a39f06218087d9401 | Add draft test module for linter | geographika/mappyfile,geographika/mappyfile | tests/test_linter.py | tests/test_linter.py | import logging
import pytest
from mappyfile.validator import Validator
def validate(d):
    v = Validator()
    return v.validate(d)


def get_from_dict(d, keys):
    for k in keys:
        if isinstance(k, int):
            d = d[0]
        else:
            d = d[k]
    return d


def run_tests():
    pytest.main(["tests/test_linter.py"])


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # run_tests()
    print("Done!")
| mit | Python |
|
f73eaa3d1ba8c6f21fe64a4793aea7ba6b6835ca | Create tensorBoard-example.py | hpssjellis/easy-tensorflow-on-cloud9,hpssjellis/easy-tensorflow-on-cloud9,hpssjellis/easy-tensorflow-on-cloud9 | rocksetta-examples/tensorBoard-example.py | rocksetta-examples/tensorBoard-example.py | '''
Loss Visualization with TensorFlow.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
# Import MNIST data
import input_data
mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
# Use Logistic Regression from our previous example
# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1
# tf Graph Input
x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
# Create model
# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name="weights")
b = tf.Variable(tf.zeros([10]), name="bias")
# Construct model
activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
# Minimize error using cross entropy
cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
# Initializing the variables
init = tf.initialize_all_variables()
# Create a summary to monitor cost function
tf.scalar_summary("loss", cost)
# Merge all summaries to a single operator
merged_summary_op = tf.merge_all_summaries()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Set logs writer into folder /home/ubuntu/workspace/tmp5/tensorflow_logs
    summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5/tensorflow_logs', graph_def=sess.graph_def)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
            # Write logs at every iteration
            summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
            summary_writer.add_summary(summary_str, epoch*total_batch + i)
        # Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)

    print "Optimization Finished!"

    # Test model
    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
'''
Run the command line: tensorboard --logdir=/home/ubuntu/workspace/tmp5/tensorflow_logs
Open http://localhost:6006/ into your web browser
'''
| mit | Python |
|
ad7f9f785f9a4a4494127a9b2196e1fc64c9f3de | Add basic first tests for new report driven by "events" | iwoca/django-deep-collector | tests/test_report.py | tests/test_report.py | from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
    def test_report_with_no_debug_mode(self):
        obj = BaseModelFactory.create()
        collector = RelatedObjectsCollector()
        collector.collect(obj)
        report = collector.get_report()

        self.assertDictEqual(report, {
            'excluded_fields': [],
            'log': 'Set DEBUG to True if you what collector internal logs'
        })

    def test_report_with_debug_mode(self):
        self.maxDiff = None
        obj = BaseModelFactory.create()
        collector = RelatedObjectsCollector()
        collector.DEBUG = True
        collector.collect(obj)
        report = collector.get_report()

        self.assertEqual(report['excluded_fields'], [])
        # For now, just checking that the log report is not empty.
        # Some work has to be done to test it more.
        self.assertNotEqual(report['log'], [])
| bsd-3-clause | Python |
|
272371f28369cca514d90f355e7771c133d11dcf | Create __openerp__.py | gwsilva/project-surgery | project_surgery/__openerp__.py | project_surgery/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Gideoni Silva (Omnes)
# Copyright 2013-2014 Omnes Tecnologia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Participantes da Cirurgia',
    'description': 'Este módulo adiciona os campos adicionais com os participantes da cirurgia.',
    'category': 'Generic Modules/Projects & Services',
    'license': 'AGPL-3',
    'author': 'Omnes',
    'website': 'www.omnes.net.br',
    'version': '0.1',
    'depends': [
        'base',
        'project',
    ],
    'data': [
        'project_view.xml'
    ],
    'demo': [],
    'installable': True,
    'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
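# (Translation of the Portuguese strings above: name -- "Surgery
# Participants"; description -- "This module adds the extra fields with the
# surgery participants.")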
| agpl-3.0 | Python |
|
a08452c4ed3338cf43bf2647bcc17a7d66ba4d23 | call restore config directly | dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq | corehq/apps/ota/tasks.py | corehq/apps/ota/tasks.py | from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from casexml.apps.phone.restore import RestoreConfig
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(usernames_or_ids, version=V1, cache_timeout=None, overwrite_cache=False):
    total = len(usernames_or_ids)
    DownloadBase.set_progress(prime_restore, 0, total)

    ret = {'messages': []}
    for i, username_or_id in enumerate(usernames_or_ids):
        couch_user = get_user(username_or_id)
        if not couch_user:
            ret['messages'].append('User not found: {}'.format(username_or_id))
            continue

        try:
            project = couch_user.project
            commtrack_settings = project.commtrack_settings
            stock_settings = commtrack_settings.get_ota_restore_settings() if commtrack_settings else None
            restore_config = RestoreConfig(
                couch_user.to_casexml_user(), None, version, None,
                items=True,
                stock_settings=stock_settings,
                domain=project,
                force_cache=True,
                cache_timeout=cache_timeout,
                overwrite_cache=overwrite_cache
            )
            restore_config.get_payload()
            ret['messages'].append('Restore generated successfully for user: {}'.format(
                couch_user.human_friendly_name,
            ))
        except Exception as e:
            ret['messages'].append('Error processing user: {}'.format(str(e)))

        DownloadBase.set_progress(prime_restore, i + 1, total)

    return ret


def get_user(username_or_id):
    try:
        couch_user = CommCareUser.get(username_or_id)
    except ResourceNotFound:
        try:
            couch_user = CommCareUser.get_by_username(username_or_id)
        except ResourceNotFound:
            return None

    return couch_user
| from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(usernames_or_ids, version=V1, cache_timeout=None, overwrite_cache=False):
    from corehq.apps.ota.views import get_restore_response
    total = len(usernames_or_ids)
    DownloadBase.set_progress(prime_restore, 0, total)

    ret = {'messages': []}
    for i, username_or_id in enumerate(usernames_or_ids):
        couch_user = get_user(username_or_id)
        if not couch_user:
            ret['messages'].append('User not found: {}'.format(username_or_id))
            continue

        try:
            get_restore_response(
                couch_user.domain,
                couch_user,
                since=None,
                version=version,
                force_cache=True,
                cache_timeout=cache_timeout,
                overwrite_cache=overwrite_cache,
                items=True
            )
        except Exception as e:
            ret['messages'].append('Error processing user: {}'.format(str(e)))

        DownloadBase.set_progress(prime_restore, i + 1, total)

    return ret


def get_user(username_or_id):
    try:
        couch_user = CommCareUser.get(username_or_id)
    except ResourceNotFound:
        try:
            couch_user = CommCareUser.get_by_username(username_or_id)
        except ResourceNotFound:
            return None

    return couch_user
| bsd-3-clause | Python |
e1a0029488d4cbf0581c21ceb1bd5db3c19bf3eb | add readme | x522758754/XlsTools,x522758754/XlsTools | algorithms/CommonFun.py | algorithms/CommonFun.py | #!/usr/bin/env python
# coding:utf-8
import sys
import random
reload(sys)
sys.setdefaultencoding('utf-8')
def QuickSort(left, right, array):
    l = left
    r = right
    if l >= r:  # guard: without it the recursion below never terminates
        return
    base = array[r]  # pivot, read once before partitioning
    while l < r:
        while (array[l] <= base and l < r):
            l = l + 1
        if(l < r):
            array[r] = array[l]
        while (array[l] <= array[r] and l < r):
            r = r - 1
        if(l < r):
            array[l] = array[r]
    array[r] = base
    QuickSort(left, r - 1, array)
    QuickSort(r + 1, right, array)


# array must already be sorted in ascending order
def BinarySearch(left, right, array, target):
    if(left <= right):
        mid = (left + right)/2
        if(array[mid] > target):
            return BinarySearch(left, mid-1, array, target)
        elif(array[mid] < target):
            return BinarySearch(mid+1, right, array, target)
        else:
            return mid
    else:
        return -1


if __name__ == '__main__':
    array = []
    for i in range(10):
        it = random.randint(1, 100)
        array.append(it)
    QuickSort(0, len(array)-1, array)
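    # prints the index of 15 in the sorted array, or -1 when 15 was not
    # among the ten random draws (the common case)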
    print BinarySearch(0, len(array)-1, array, 15)
| mit | Python |
|
e0a7824253ae412cf7cc27348ee98c919d382cf2 | verify stderr for a failing clone into a non-empty dir | gitpython-developers/gitpython,gitpython-developers/GitPython,gitpython-developers/gitpython,gitpython-developers/GitPython | test/test_clone.py | test/test_clone.py | # -*- coding: utf-8 -*-
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from pathlib import Path
import re
import git
from .lib import (
    TestBase,
    with_rw_directory,
)


class TestClone(TestBase):
    @with_rw_directory
    def test_checkout_in_non_empty_dir(self, rw_dir):
        non_empty_dir = Path(rw_dir)
        garbage_file = non_empty_dir / 'not-empty'
        garbage_file.write_text('Garbage!')
        # Verify that cloning into the non-empty dir fails while complaining
        # about the target directory not being empty/non-existent
        try:
            self.rorepo.clone(non_empty_dir)
        except git.GitCommandError as exc:
            self.assertTrue(exc.stderr, "GitCommandError's 'stderr' is unexpectedly empty")
            expr = re.compile(r'(?is).*\bfatal:\s+destination\s+path\b.*\bexists\b.*\bnot\b.*\bempty\s+directory\b')
            self.assertTrue(expr.search(exc.stderr), '"%s" does not match "%s"' % (expr.pattern, exc.stderr))
        else:
            self.fail("GitCommandError not raised")
| bsd-3-clause | Python |
|
4683fc67d5171d8bb0391ac45f587fbc3e3c97fc | Add dependency installer for linux and mac osx | GCI-2015-GPW/DevAssist | install_dependencies.py | install_dependencies.py | import platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
"""
Installs dependencies for DevAssist
"""
# Darwin = Mac OSX
if host_platform == "Darwin":
# Installing portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio...\n")
portaudio = subprocess.Popen(["brew install portaudio"], shell=True)
portaudio.communicate()
print("\nportaudio has been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
elif host_platform == "Linux":
# Installing dependencies for portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio & dependencies...\n")
portaudio = subprocess.Popen(["apt-get install portaudio19-dev python-all-dev python3-all-dev"], shell=True)
portaudio.communicate()
print("\nportaudio & dependencies have been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib' pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
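# Note: platforms other than Darwin/Linux (e.g. Windows, where
# platform.system() == "Windows") currently fall through
# install_dependencies() without installing anything.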
| mit | Python |
|
0fd7cdee45b54551bcfc901cece2e5cc9dec4555 | Add new test setup required for py.test/django test setup | emory-libraries/eulcommon,emory-libraries/eulcommon | test/test_setup.py | test/test_setup.py | import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
    # setup doesn't like being run more than once
    try:
        django.setup()
    except RuntimeError:
        pass
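# django.setup() was introduced in Django 1.7, so the hasattr guard above
# lets this bootstrap run unchanged on older Django versions as well.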
|
2014a7e3e785c9826575846a38b4703ef19946f4 | fix path stuff | shujunqiao/cocos2d-python,shujunqiao/cocos2d-python,vyscond/cocos,shujunqiao/cocos2d-python,dangillet/cocos | test/test_tiles.py | test/test_tiles.py | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import math
import pyglet
from pyglet.window import key
import cocos
from cocos import tiles
class CarSprite(cocos.actions.ActionSprite):
    speed = 0

    def update(self, dt):
        # handle input and move the car
        self.rotation += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 150 * dt
        speed = self.speed
        speed += (keyboard[key.UP] - keyboard[key.DOWN]) * 50
        if speed > 200: speed = 200
        if speed < -100: speed = -100
        self.speed = speed
        r = math.radians(self.rotation)
        s = dt * speed
        self.x += math.sin(r) * s
        self.y += math.cos(r) * s
        manager.set_focus(self.x, self.y)


if __name__ == "__main__":
    from cocos.director import director
    #director.init(width=400, height=300)
    director.init(width=600, height=300)

    car_layer = tiles.ScrollableLayer()
    car = pyglet.image.load('car.png')
    car.anchor_x = car.width//2
    car.anchor_y = car.height//2
    car = CarSprite(car)
    pyglet.clock.schedule(car.update)
    car_layer.add(car)

    manager = tiles.ScrollingManager(director.window)
    test_layer = tiles.load('road-map.xml')['map0']
    manager.append(test_layer)
    manager.append(car_layer)

    main_scene = cocos.scene.Scene(test_layer, car_layer)

    keyboard = key.KeyStateHandler()
    director.window.push_handlers(keyboard)

    @director.window.event
    def on_close():
        pyglet.app.exit()

    director.run(main_scene)
| import math
import pyglet
from pyglet.window import key
import cocos
from cocos import tiles
class CarSprite(cocos.actions.ActionSprite):
    speed = 0

    def update(self, dt):
        # handle input and move the car
        self.rotation += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 150 * dt
        speed = self.speed
        speed += (keyboard[key.UP] - keyboard[key.DOWN]) * 50
        if speed > 200: speed = 200
        if speed < -100: speed = -100
        self.speed = speed
        r = math.radians(self.rotation)
        s = dt * speed
        self.x += math.sin(r) * s
        self.y += math.cos(r) * s
        manager.set_focus(self.x, self.y)


if __name__ == "__main__":
    from cocos.director import director
    #director.init(width=400, height=300)
    director.init(width=600, height=300)

    car_layer = tiles.ScrollableLayer()
    car = pyglet.image.load('car.png')
    car.anchor_x = car.width//2
    car.anchor_y = car.height//2
    car = CarSprite(car)
    pyglet.clock.schedule(car.update)
    car_layer.add(car)

    manager = tiles.ScrollingManager(director.window)
    test_layer = tiles.load('road-map.xml')['map0']
    manager.append(test_layer)
    manager.append(car_layer)

    main_scene = cocos.scene.Scene(test_layer, car_layer)

    keyboard = key.KeyStateHandler()
    director.window.push_handlers(keyboard)

    @director.window.event
    def on_close():
        pyglet.app.exit()

    director.run(main_scene)
| bsd-3-clause | Python |
dbfa14401c0b50eb1a3cac413652cb975ee9d41f | Add valid directory cleaner helper test | huikyole/climate,agoodm/climate,MJJoyce/climate,MBoustani/climate,agoodm/climate,MJJoyce/climate,kwhitehall/climate,MBoustani/climate,lewismc/climate,agoodm/climate,pwcberry/climate,MBoustani/climate,Omkar20895/climate,MJJoyce/climate,agoodm/climate,kwhitehall/climate,lewismc/climate,pwcberry/climate,huikyole/climate,riverma/climate,jarifibrahim/climate,apache/climate,Omkar20895/climate,jarifibrahim/climate,apache/climate,pwcberry/climate,huikyole/climate,jarifibrahim/climate,kwhitehall/climate,lewismc/climate,MBoustani/climate,kwhitehall/climate,agoodm/climate,riverma/climate,MJJoyce/climate,Omkar20895/climate,pwcberry/climate,riverma/climate,huikyole/climate,riverma/climate,pwcberry/climate,Omkar20895/climate,jarifibrahim/climate,lewismc/climate,huikyole/climate,Omkar20895/climate,apache/climate,MBoustani/climate,riverma/climate,apache/climate,MJJoyce/climate,apache/climate,jarifibrahim/climate,lewismc/climate | ocw-ui/backend/tests/test_directory_helpers.py | ocw-ui/backend/tests/test_directory_helpers.py | import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
    PATH_LEADER = '/tmp/foo'
    VALID_CLEAN_DIR = '/tmp/foo/bar'

    if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
    if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)

    def test_valid_directory_path(self):
        clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
| apache-2.0 | Python |
|
ce924c72795d342605bb4409d5217fe99c807ace | Add test cases for moments functions | michaelpacer/scikit-image,SamHames/scikit-image,Midafi/scikit-image,ajaybhat/scikit-image,youprofit/scikit-image,oew1v07/scikit-image,emon10005/scikit-image,ClinicalGraphics/scikit-image,Hiyorimi/scikit-image,robintw/scikit-image,ofgulban/scikit-image,keflavich/scikit-image,youprofit/scikit-image,rjeli/scikit-image,michaelaye/scikit-image,vighneshbirodkar/scikit-image,Midafi/scikit-image,almarklein/scikit-image,bsipocz/scikit-image,SamHames/scikit-image,SamHames/scikit-image,ajaybhat/scikit-image,bennlich/scikit-image,dpshelio/scikit-image,pratapvardhan/scikit-image,almarklein/scikit-image,juliusbierk/scikit-image,GaZ3ll3/scikit-image,warmspringwinds/scikit-image,juliusbierk/scikit-image,WarrenWeckesser/scikits-image,bennlich/scikit-image,rjeli/scikit-image,blink1073/scikit-image,SamHames/scikit-image,GaZ3ll3/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,ClinicalGraphics/scikit-image,rjeli/scikit-image,paalge/scikit-image,emon10005/scikit-image,newville/scikit-image,newville/scikit-image,jwiggins/scikit-image,keflavich/scikit-image,ofgulban/scikit-image,paalge/scikit-image,vighneshbirodkar/scikit-image,warmspringwinds/scikit-image,WarrenWeckesser/scikits-image,Britefury/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,vighneshbirodkar/scikit-image,Hiyorimi/scikit-image,chintak/scikit-image,michaelpacer/scikit-image,chriscrosscutler/scikit-image,chintak/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,Britefury/scikit-image,bsipocz/scikit-image,chriscrosscutler/scikit-image,jwiggins/scikit-image,oew1v07/scikit-image,blink1073/scikit-image,almarklein/scikit-image,chintak/scikit-image,chintak/scikit-image,robintw/scikit-image | skimage/measure/tests/test_moments.py | skimage/measure/tests/test_moments.py | from numpy.testing import assert_equal, assert_almost_equal
import numpy as np
from skimage.measure import (moments, moments_central, moments_normalized,
                             moments_hu)


def test_moments():
    image = np.zeros((20, 20), dtype=np.double)
    image[14, 14] = 1
    image[15, 15] = 1
    image[14, 15] = 0.5
    image[15, 14] = 0.5
    m = moments(image)
    assert_equal(m[0, 0], 3)
    assert_almost_equal(m[0, 1] / m[0, 0], 14.5)
    assert_almost_equal(m[1, 0] / m[0, 0], 14.5)


def test_moments_central():
    image = np.zeros((20, 20), dtype=np.double)
    image[14, 14] = 1
    image[15, 15] = 1
    image[14, 15] = 0.5
    image[15, 14] = 0.5
    mu = moments_central(image, 14.5, 14.5)

    # shift image by dx=2, dy=2
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[16, 16] = 1
    image2[17, 17] = 1
    image2[16, 17] = 0.5
    image2[17, 16] = 0.5
    mu2 = moments_central(image2, 14.5 + 2, 14.5 + 2)

    # central moments must be translation invariant
    assert_equal(mu, mu2)


def test_moments_normalized():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1
    mu = moments_central(image, 14.5, 14.5)
    nu = moments_normalized(mu)

    # shift image by dx=-3, dy=-3 and scale by 0.5
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11:13, 11:13] = 1
    mu2 = moments_central(image2, 11.5, 11.5)
    nu2 = moments_normalized(mu2)

    # normalized moments must be translation and scale invariant
    assert_almost_equal(nu, nu2, decimal=1)


def test_moments_hu():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:15, 13:17] = 1
    mu = moments_central(image, 13.5, 14.5)
    nu = moments_normalized(mu)
    hu = moments_hu(nu)

    # shift image by dx=2, dy=3, scale by 0.5 and rotate by 90deg
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11, 11:13] = 1
    image2 = image2.T
    mu2 = moments_central(image2, 11.5, 11)
    nu2 = moments_normalized(mu2)
    hu2 = moments_hu(nu2)

    # Hu moments must be translation, scale and rotation invariant
    assert_almost_equal(hu, hu2, decimal=1)


if __name__ == "__main__":
    from numpy.testing import run_module_suite
    run_module_suite()
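# Invariance recap (sketch): central moments mu_pq are translation-invariant;
# normalized moments nu_pq = mu_pq / mu_00**((p + q)/2 + 1) add scale
# invariance; and the seven Hu moments built from nu_pq add rotation
# invariance on top -- exactly the ladder the tests above climb.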
| bsd-3-clause | Python |
|
5ee4f6fd50da0a7115f8ca0ab29c4388eaef13a6 | add probabitity function decomposition | ademinn/AdaptiveIPFilter,ademinn/AdaptiveIPFilter | src/probability.py | src/probability.py | from __future__ import division
from math import log
from scipy.special import binom
import numpy as np
def C(p, p0):
    p1 = 1 - p0
    return -p0*log(p0, 2) + p0*p*log(p0*p, 2) - (p1+p0*p)*log(p1+p0*p, 2)


def P(c, p0, eps=0.00001):
    left = 0
    right = 1
    while right - left > eps:
        p = (left + right) / 2
        cp = C(p, p0)
        if cp > c:
            left = p
        else:
            right = p
    return left


def coef(i, p):
    return binom(N, i) * p**i*(1-p)**(N-i)


def A(c, N, M):
    points = (np.array(xrange(M)) + 1) / (M + 1)
    A = np.matrix([np.array([coef(i, p) for i in xrange(N)]) for p in points])
    b = np.array([P(c, p) for p in points])
    a, _, _, _ = np.linalg.lstsq(A, b)
    return a


if __name__ == '__main__':
    N = 10  # Buffer size
    M = 100  # Num of points
    c = 0.15
    a = A(c, N, M)
    p0 = 0.7
    x = np.array([coef(i, p0) for i in xrange(N)])
    print(np.dot(a, x))
    print(P(c, p0))
    print(a)
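# What this computes (sketch): P(c, p0) inverts the curve C(p, p0) by
# bisection, and A(c, N, M) least-squares fits weights a_i so that
# sum_i a_i * binom(N, i) * p**i * (1-p)**(N-i) -- a Bernstein-basis
# expansion evaluated on M interior grid points -- approximates P(c, p).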
| bsd-2-clause | Python |
|
ad5b1459bf514f7be5b39b90d0fdf627edf65f62 | Add helper table module to generate common test tables [skip ci] | StuartLittlefair/astropy,lpsinger/astropy,saimn/astropy,tbabej/astropy,mhvk/astropy,stargaser/astropy,DougBurke/astropy,astropy/astropy,kelle/astropy,larrybradley/astropy,kelle/astropy,joergdietrich/astropy,joergdietrich/astropy,astropy/astropy,MSeifert04/astropy,lpsinger/astropy,bsipocz/astropy,StuartLittlefair/astropy,StuartLittlefair/astropy,mhvk/astropy,joergdietrich/astropy,StuartLittlefair/astropy,lpsinger/astropy,pllim/astropy,dhomeier/astropy,saimn/astropy,DougBurke/astropy,larrybradley/astropy,larrybradley/astropy,pllim/astropy,MSeifert04/astropy,kelle/astropy,stargaser/astropy,dhomeier/astropy,dhomeier/astropy,bsipocz/astropy,astropy/astropy,AustereCuriosity/astropy,dhomeier/astropy,AustereCuriosity/astropy,funbaker/astropy,tbabej/astropy,mhvk/astropy,tbabej/astropy,tbabej/astropy,saimn/astropy,MSeifert04/astropy,pllim/astropy,saimn/astropy,mhvk/astropy,bsipocz/astropy,DougBurke/astropy,kelle/astropy,stargaser/astropy,DougBurke/astropy,kelle/astropy,lpsinger/astropy,pllim/astropy,mhvk/astropy,astropy/astropy,tbabej/astropy,aleksandr-bakanov/astropy,aleksandr-bakanov/astropy,joergdietrich/astropy,lpsinger/astropy,funbaker/astropy,AustereCuriosity/astropy,dhomeier/astropy,larrybradley/astropy,AustereCuriosity/astropy,funbaker/astropy,astropy/astropy,joergdietrich/astropy,larrybradley/astropy,saimn/astropy,bsipocz/astropy,AustereCuriosity/astropy,pllim/astropy,StuartLittlefair/astropy,funbaker/astropy,aleksandr-bakanov/astropy,MSeifert04/astropy,aleksandr-bakanov/astropy,stargaser/astropy | astropy/table/table_helpers.py | astropy/table/table_helpers.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from itertools import cycle
import string
import numpy as np
from .table import Table, Column
from ..extern.six.moves import zip
class TimingTables(object):
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table['i'] = np.arange(size)
self.table['a'] = np.random.random(size) # float
self.table['b'] = np.random.random(size) > 0.5 # bool
self.table['c'] = np.random.random((size,10)) # 2d column
self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)),size)
self.extra_row = {'a':1.2, 'b':True, 'c':np.repeat(1, 10), 'd':'Z'}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table['a'] > 0.9)[0]
self.table_grouped = self.table.group_by('d')
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table['i'] = np.arange(1,size,3)
self.other_table['f'] = np.random.random()
self.other_table.sort('f')
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2['g'] = np.random.random(size)
self.other_table_2['h'] = np.random.random((size, 10))
self.bool_mask = self.table['a'] > 0.6
def simple_table(size=3, cols=None, kinds='ifS', masked=False):
"""
Return a simple table for testing.
Example
--------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, default=number of kinds
Number of table columns
kinds : str
String consisting of the column dtype.kinds. This string
will be cycled through to generate the column dtype.
The allowed values are 'i', 'f', 'S', 'O'.
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
raise ValueError("Max 26 columns in SimpleTable")
columns = []
names = [chr(ord('a') + ii) for ii in xrange(cols)]
letters = np.array([c for c in string.ascii_letters])
for jj, kind in zip(xrange(cols), cycle(kinds)):
if kind == 'i':
data = np.arange(1, size + 1, dtype=int) + jj
elif kind == 'f':
data = np.arange(size, dtype=float) + jj
elif kind == 'S':
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == 'O':
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError('Unknown data kind')
columns.append(Column(data, dtype=kind))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
from ..utils.data import get_pkg_data_filename
from ..io.votable.table import parse
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'),
pedantic=False)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
| bsd-3-clause | Python |
|
8e8a1a33d8bedcb597020f9723c03d0f6af57522 | Add python script | danihuge/WhatsPy | send.py | send.py | import sys
import os
try:
    sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
    pass
from com.dtmilano.android.viewclient import ViewClient
number = sys.argv[2]
text = sys.argv[3]
print("Sending WhatsApp...")
print("Number: " + number)
print("Text: " + text)
package = 'com.android.chrome'
activity = 'com.google.android.apps.chrome.Main'
component = package + "/" + activity
uri = 'https://api.whatsapp.com/send?phone=' + number
device, serialno = ViewClient.connectToDeviceOrExit()
vc = ViewClient(device=device, serialno=serialno)
device.startActivity(component=component, uri=uri)
vc.sleep(3)
device.type(text)
vc = ViewClient(device=device, serialno=serialno)
send = vc.findViewByIdOrRaise('com.whatsapp:id/send')
send.touch()
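# Note: the fixed vc.sleep(3) above assumes Chrome and the WhatsApp chat
# finish loading within three seconds; on slower devices the
# findViewByIdOrRaise lookup may need a longer wait before it can succeed.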
| mit | Python |
|
cec1ec8367c83e540b9a9cfbfeac2a576cdf357b | add send.py | lexruee/pi-switch-python,lexruee/pi-switch-python | send.py | send.py | """
Example:
switch type A:
sudo python send.py -c off -t A -s 11111,11111 -p 0
switch type B:
sudo python send.py -c off -t B -s 1,3 -p 0
switch type C:
sudo python send.py -c off -t C -s a,1,1 -p 0
switch type D:
sudo python send.py -c off -t D -s A,1 -p 0
"""
import argparse
import sys
try:
import pi_switch
except ImportError:
print "pi_switch import error!"
#sys.exit()
def create_switch(type, settings, pin):
"""Create a switch.
Args:
        type (str): type of the switch [A,B,C,D]
        settings (str): a comma separated list
pin (int): wiringPi pin
Returns:
switch
"""
switch = None
if type == "A":
group, device = settings.split(",")
switch = pi_switch.RCSwitchA(group, device)
elif type == "B":
addr, channel = settings.split(",")
addr = int(addr)
channel = int(channel)
switch = pi_switch.RCSwitchB(addr, channel)
elif type == "C":
family, group, device = settings.split(",")
group = int(group)
device = int(device)
switch = pi_switch.RCSwitchC(family, group, device)
elif type == "D":
group, device = settings.split(",")
device = int(device)
switch = pi_switch.RCSwitchD(group, device)
else:
print "Type %s is not supported!" % type
sys.exit()
switch.enableTransmit(pin)
return switch
def toggle(switch, command):
"""Toggles a switch on or off.
Args:
switch (switch): a switch
command (str): "on" or "off"
"""
if command in ["on"]:
switch.switchOn()
if command in ["off"]:
switch.switchOff()
def main():
parser = argparse.ArgumentParser(description="Send off / on commands to a remote power socket.")
parser.add_argument("-c", dest = "command", metavar = "command", nargs = "?",
help="can be on or off")
parser.add_argument("-t", dest = "type", metavar = "type", nargs = "?",
help="type of the switch: A, B, C or D")
parser.add_argument("-s", dest = "settings", metavar = "settings", nargs = "?",
help="settings as a comma separated list: value1,value2,value2")
parser.add_argument("-p", dest = "pin", metavar = "pin", type = int, nargs = "?",
help="wriningPi pin")
args = parser.parse_args()
switch = create_switch(args.type, args.settings, args.pin)
toggle(switch, args.command)
if __name__ == "__main__":
main()
| lgpl-2.1 | Python |
|
61f806ffc68c41dfbb926ea6825292eabed46966 | Add sorting code | accre/lstore,PerilousApricot/lstore,tacketar/lstore,accre/lstore,PerilousApricot/lstore,tacketar/lstore,accre/lstore,tacketar/lstore,tacketar/lstore,accre/lstore,PerilousApricot/lstore,PerilousApricot/lstore | sort.py | sort.py | #!/usr/bin/env python
import re
import sys
sort = {}
regex = re.compile(r'TBX_API \w* \*?(\w*)\(.*')
for line in sys.stdin.readlines():
result = regex.match(line)
if not result:
sort[line] = line
else:
sort[result.group(1)] = line
for k in sorted(sort.keys()):
sys.stdout.write(sort[k])
| apache-2.0 | Python |
|
3e885137d23e7618b78f207ecd6b2f6118a4a0dc | add a test file | natcap/google-code-postcommit-webhook | test.py | test.py | #!/usr/bin/python
import cgi
cgi.test()
| bsd-3-clause | Python |
|
b6a55999cd0f6ff6a7d69b7eb59e859d415b275f | Add test.py with old-formatting test | authmillenon/python_version_test | test.py | test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Martine Lenders <[email protected]>
#
# Distributed under terms of the MIT license.
"%s" % "test"
"%d" % 2
"%.4f" % 2.0
| mit | Python |
|
f4d26567afc9185e0f9370eda43d30084437ade5 | Solve Code Fights make array consecutive 2 problem | HKuz/Test_Code | CodeFights/makeArrayConsecutive2.py | CodeFights/makeArrayConsecutive2.py | #!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
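    # Statues needed = size of the full consecutive range [min, max]
    # minus the statues already present.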
return (len(range(min(statues), max(statues) + 1)) - len(statues))
def main():
tests = [
[[6, 2, 3, 8], 3],
[[0, 3], 2],
[[5, 4, 6], 0],
[[6, 3], 2],
[[1], 0]
]
for t in tests:
res = makeArrayConsecutive2(t[0])
ans = t[1]
if ans == res:
print("PASSED: makeArrayConsecutive2({}) returned {}"
.format(t[0], res))
else:
print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
| mit | Python |
|
06d8f4290cf433a538cef4851acefd6e42c8341d | Add simple example | accepton/accepton-python | examples/client.py | examples/client.py | #!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from accepton import Client
API_KEY = 'skey_be064297e7b2db4b6ce5928e8dcad582'
accepton = Client(api_key=API_KEY, environment='development')
token = accepton.create_token(amount=1099, application_fee=99, currency='cad',
description='Test charge')
print(token)
| mit | Python |
|
96dd9b2968039be3fa87a30e8a16ed1c77be10bb | solve 94 | brettchien/LeetCode | 94_BinaryTreeInorderTraversal.py | 94_BinaryTreeInorderTraversal.py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {integer[]}
def inorderTraversal(self, root):
if not root:
return []
result = []
stack = [(False, root)]
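        # Each entry is (visited, node): visited=False means the node still
        # needs its left subtree expanded before its value is emitted.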
while stack:
read, node = stack.pop()
if read:
result.append(node.val)
else:
if node.right:
stack.append((False, node.right))
stack.append((True, node))
if node.left:
stack.append((False, node.left))
return result
| mit | Python |
|
e8c0b17bb28f1212b302959144086d72c205bf4c | store toc list in optional file to make merging easier | springcoil/euroscipy_proceedings,Stewori/euroscipy_proceedings,euroscipy/euroscipy_proceedings,mwcraig/scipy_proceedings,chendaniely/scipy_proceedings,katyhuff/scipy_proceedings,chendaniely/scipy_proceedings,mikaem/euroscipy_proceedings,dotsdl/scipy_proceedings,katyhuff/scipy_proceedings,euroscipy/euroscipy_proceedings,juhasch/euroscipy_proceedings,mjklemm/euroscipy_proceedings,mjklemm/euroscipy_proceedings,mikaem/euroscipy_proceedings,sbenthall/scipy_proceedings,euroscipy/euroscipy_proceedings,SepidehAlassi/euroscipy_proceedings,helgee/euroscipy_proceedings,juhasch/euroscipy_proceedings,dotsdl/scipy_proceedings,sbenthall/scipy_proceedings,helgee/euroscipy_proceedings,sbenthall/scipy_proceedings,michaelpacer/scipy_proceedings,michaelpacer/scipy_proceedings,chendaniely/scipy_proceedings,mwcraig/scipy_proceedings,Stewori/euroscipy_proceedings,springcoil/euroscipy_proceedings,mwcraig/scipy_proceedings,mjklemm/euroscipy_proceedings,katyhuff/scipy_proceedings,mikaem/euroscipy_proceedings,michaelpacer/scipy_proceedings,dotsdl/scipy_proceedings,SepidehAlassi/euroscipy_proceedings,helgee/euroscipy_proceedings,springcoil/euroscipy_proceedings,juhasch/euroscipy_proceedings,Stewori/euroscipy_proceedings,SepidehAlassi/euroscipy_proceedings | publisher/conf.py | publisher/conf.py | import glob
import os
work_dir = os.path.dirname(__file__)
papers_dir = os.path.join(work_dir,'../papers')
output_dir = os.path.join(work_dir,'../output')
template_dir = os.path.join(work_dir,'_templates')
static_dir = os.path.join(work_dir,'_static')
css_file = os.path.join(static_dir,'scipy-proc.css')
toc_list = os.path.join(static_dir,'toc.txt')
build_dir = os.path.join(work_dir,'_build')
pdf_dir = os.path.join(build_dir, 'pdfs')
html_dir = os.path.join(build_dir, 'html')
bib_dir = os.path.join(html_dir, 'bib')
toc_conf = os.path.join(build_dir, 'toc.json')
proc_conf = os.path.join(work_dir,'../scipy_proc.json')
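# Prefer an explicit ordering from toc.txt when it exists; otherwise fall
# back to the sorted list of paper directories.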
if os.path.isfile(toc_list):
with open(toc_list) as f:
dirs = f.read().splitlines()
else:
dirs = sorted([os.path.basename(d)
for d in glob.glob('%s/*' % papers_dir)
if os.path.isdir(d)])
| import glob
import os
work_dir = os.path.dirname(__file__)
papers_dir = os.path.join(work_dir,'../papers')
output_dir = os.path.join(work_dir,'../output')
template_dir = os.path.join(work_dir,'_templates')
static_dir = os.path.join(work_dir,'_static')
css_file = os.path.join(static_dir,'scipy-proc.css')
build_dir = os.path.join(work_dir,'_build')
pdf_dir = os.path.join(build_dir, 'pdfs')
html_dir = os.path.join(build_dir, 'html')
bib_dir = os.path.join(html_dir, 'bib')
toc_conf = os.path.join(build_dir, 'toc.json')
proc_conf = os.path.join(work_dir,'../scipy_proc.json')
dirs = sorted([os.path.basename(d)
for d in glob.glob('%s/*' % papers_dir)
if os.path.isdir(d)])
| bsd-2-clause | Python |
f046bd8982f08a31448bb5e4e10ded2a14ea95b0 | Create __init__.py | telefonicaid/iot-qa-tools,telefonicaid/iotqatools,telefonicaid/iot-qa-tools,telefonicaid/iotqatools,telefonicaid/iot-qa-tools,telefonicaid/iotqatools | iotqatools/__init__.py | iotqatools/__init__.py | agpl-3.0 | Python |
||
e4a33badd98c4c927c4128e22fd839f54711cfd6 | Create PedidoCadastrar.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Predio/PedidoCadastrar.py | backend/Models/Predio/PedidoCadastrar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoCadastrar(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoCadastrar, self).__init__(variaveis_do_ambiente)
try:
self.id = self.corpo['id']
self.nome = self.corpo['nome']
except:
raise ErroNoHTTP(400)
def getId(self):
return self.id
def setNome(self,nome):
self.nome = nome
def getNome(self):
return self.nome
| mit | Python |
|
82152af00c54ea94a4e8cd90d3cd5f45ef28ee86 | add missing unit test file | chenjiandongx/pyecharts,chenjiandongx/pyecharts,chenjiandongx/pyecharts | test/test_utils.py | test/test_utils.py | # coding=utf-8
from __future__ import unicode_literals
import os
import codecs
from nose.tools import eq_
from pyecharts.utils import (
freeze_js,
write_utf8_html_file,
get_resource_dir
)
def test_get_resource_dir():
path = get_resource_dir('templates')
expected = os.path.join(os.getcwd(), '..', 'pyecharts', 'templates')
eq_(path, os.path.abspath(expected))
def test_freeze_js():
html_content = """
</style>
<!-- build -->
<script src="js/echarts/echarts.min.js"></script>
<script src="js/echarts/echarts-wordcloud.min.js"></script>
<!-- endbuild -->
</head><body>"""
html_content = freeze_js(html_content)
assert 'exports.echarts' in html_content
assert 'echarts-wordcloud' in html_content
def test_write_utf8_html_file():
content = "柱状图数据堆叠示例"
file_name = 'test.html'
write_utf8_html_file(file_name, content)
with codecs.open(file_name, 'r', 'utf-8') as f:
actual_content = f.read()
eq_(content, actual_content)
| mit | Python |
|
9c52dae7f5de64865fff51a24680c43e041376ea | Add random_subtree script | kdmurray91/kwip-experiments,kdmurray91/kwip-experiments,kdmurray91/kwip-experiments | random_subtree.py | random_subtree.py | #!/usr/bin/env python2
# Use either ete2 or ete3
try:
import ete3 as ete
except ImportError:
import ete2 as ete
import numpy as np
CLI = """
USAGE:
random_subtree <tree> <n>
Subsamples <n> taxa from the Newick tree in <tree>, preserving the branch
lengths of subsampled taxa.
"""
def main(treefile, n):
n = int(n)
tree = ete.Tree(treefile)
leaves = tree.get_leaf_names()
    # Sample n distinct leaves to keep.
    subsample = [leaves[i] for i in np.random.choice(len(leaves), size=n, replace=False)]
tree.prune(subsample, preserve_branch_length=True)
print(tree.write())
if __name__ == "__main__":
import docopt
opts = docopt.docopt(CLI)
main(opts['<tree>'], int(opts['<n>']))
| mit | Python |
|
3c1e61b4b47ec244e4cadd4bf34e0a21cf1ff7e1 | Create w3_1.py | s40523222/2016fallcp_hw,s40523222/2016fallcp_hw,s40523222/2016fallcp_hw | w3_1.py | w3_1.py | print("第三週")
| agpl-3.0 | Python |
|
8bb9d6cbe161654126bb3aa3adecdb99ee0d9987 | Create sct4.py | PythonProgramming/MPI4Py-Parallel-Computing-tutorial | sct4.py | sct4.py | from mpi4py import MPI
comm = MPI.COMM_WORLD
rank=comm.rank
size=comm.size
print 'Rank:',rank
print 'Node Count:',size
print 9**(rank+3)
| mit | Python |
|
aafd823069176075b4810496ee98cea3203b5652 | Make a command to make subsets. Subsets are useful for testing during development. | googlei18n/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,moyogo/tachyfont,bstell/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,bstell/TachyFont,googlei18n/TachyFont,googlei18n/TachyFont,bstell/TachyFont,googlefonts/TachyFont,bstell/TachyFont,bstell/TachyFont,googlefonts/TachyFont,googlefonts/TachyFont | build_time/src/make_subset.py | build_time/src/make_subset.py | """
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
"""Subset a font (useful for making small test fonts).
Arguments:
font-file
--hinting=(False|True) ,default is false
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting',default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
  # Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
unicodes = [ int(c,16) for c in unicodes_str.split(',') if c ]
glyphs = [ int(c) for c in cmd_args.glyphs.split(',') if c ]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
dir = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
  # os.path.join keeps the path relative when fontfile has no directory part.
  output_file = os.path.join(dir, filename + '_subset' + extension)
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
| apache-2.0 | Python |
|
01f4aedac1df6f2e55c76d60c52d1e0c5ccfd9f2 | Revert "Delete test file" | adamtheturtle/vws-python,adamtheturtle/vws-python | tests/mock_vws/test_query.py | tests/mock_vws/test_query.py | """
Tests for the mock of the query endpoint.
https://library.vuforia.com/articles/Solution/How-To-Perform-an-Image-Recognition-Query.
"""
import io
from typing import Any, Dict
from urllib.parse import urljoin
import pytest
import requests
from requests import codes
from requests_mock import POST
from tests.utils import VuforiaDatabaseKeys
from vws._request_utils import authorization_header, rfc_1123_date
@pytest.mark.usefixtures('verify_mock_vuforia')
class TestQuery:
"""
Tests for the query endpoint.
"""
def test_no_results(
self,
vuforia_database_keys: VuforiaDatabaseKeys,
high_quality_image: io.BytesIO,
) -> None:
"""
With no results
"""
image_content = high_quality_image.read()
content_type = 'multipart/form-data'
query: Dict[str, Any] = {}
date = rfc_1123_date()
request_path = '/v1/query'
url = urljoin('https://cloudreco.vuforia.com', request_path)
files = {'image': ('image.jpeg', image_content, 'image/jpeg')}
request = requests.Request(
method=POST,
url=url,
headers={},
data=query,
files=files,
)
prepared_request = request.prepare() # type: ignore
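        # Sign the prepared request body, so the Authorization header matches
        # the digest Vuforia computes over the multipart payload.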
authorization_string = authorization_header(
access_key=vuforia_database_keys.client_access_key,
secret_key=vuforia_database_keys.client_secret_key,
method=POST,
content=prepared_request.body,
content_type=content_type,
date=date,
request_path=request_path,
)
headers = {
**prepared_request.headers,
'Authorization': authorization_string,
'Date': date,
}
prepared_request.prepare_headers(headers=headers)
session = requests.Session()
response = session.send(request=prepared_request) # type: ignore
assert response.status_code == codes.OK
assert response.json()['result_code'] == 'Success'
assert response.json()['results'] == []
assert 'query_id' in response.json()
| mit | Python |
|
793344ae359f028db950a364d48578ae97cb7028 | Add tests for jenkins_job_linter.test_jjb_subcommand | OddBloke/jenkins-job-linter | tests/test_jjb_subcommand.py | tests/test_jjb_subcommand.py | from jenkins_job_linter.jjb_subcommand import LintSubCommand
class TestParseArgs(object):
def test_parser_named_lint(self, mocker):
subcommand = LintSubCommand()
subparser_mock = mocker.Mock()
subcommand.parse_args(subparser_mock)
assert 1 == subparser_mock.add_parser.call_count
assert mocker.call('lint') == subparser_mock.add_parser.call_args
def test_args_added_to_parser(self, mocker):
expected_methods = [
'parse_arg_names', 'parse_arg_path',
'parse_option_recursive_exclude']
subcommand = LintSubCommand()
mocks = []
for expected_method in expected_methods:
mock = mocker.Mock()
setattr(subcommand, expected_method, mock)
mocks.append(mock)
subparser_mock = mocker.Mock()
subcommand.parse_args(subparser_mock)
for mock in mocks:
assert 1 == mock.call_count
assert mocker.call(
subparser_mock.add_parser.return_value) == mock.call_args
class TestExecute(object):
def test_arguments_passed_through(self, mocker):
super_execute_mock = mocker.patch(
'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
options, jjb_config = mocker.Mock(), mocker.Mock()
subcommand = LintSubCommand()
subcommand.execute(options, jjb_config)
assert 1 == super_execute_mock.call_count
assert mocker.call(options, jjb_config) == super_execute_mock.call_args
def test_config_xml_set_to_false(self, mocker):
super_execute_mock = mocker.patch(
'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
options = mocker.Mock()
subcommand = LintSubCommand()
subcommand.execute(options, mocker.Mock())
assert super_execute_mock.call_args[0][0].config_xml is False
def _get_tmpdir_mock(self, mocker):
temporary_directory_mock = mocker.patch(
'jenkins_job_linter.jjb_subcommand.tempfile.TemporaryDirectory')
return temporary_directory_mock.return_value.__enter__.return_value
def test_tmpdir_used_as_output_dir(self, mocker):
mocker.patch(
'jenkins_job_linter.jjb_subcommand.lint_jobs_from_directory')
super_execute_mock = mocker.patch(
'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
tmpdir_mock = self._get_tmpdir_mock(mocker)
options = mocker.Mock()
subcommand = LintSubCommand()
subcommand.execute(options, mocker.Mock())
assert super_execute_mock.call_args[0][0].output_dir == tmpdir_mock
def test_lint_jobs_from_directory_called_with_tmpdir(self, mocker):
lint_jobs_mock = mocker.patch(
'jenkins_job_linter.jjb_subcommand.lint_jobs_from_directory')
mocker.patch(
'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
tmpdir_mock = self._get_tmpdir_mock(mocker)
subcommand = LintSubCommand()
subcommand.execute(mocker.Mock, mocker.Mock())
assert 1 == lint_jobs_mock.call_count
assert lint_jobs_mock.call_args[0][0] == tmpdir_mock
def test_lint_jobs_from_directory_called_with_jjb_config_config_parser(
self, mocker):
lint_jobs_mock = mocker.patch(
'jenkins_job_linter.jjb_subcommand.lint_jobs_from_directory')
mocker.patch(
'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
jjb_config = mocker.Mock()
subcommand = LintSubCommand()
subcommand.execute(mocker.Mock, jjb_config)
assert 1 == lint_jobs_mock.call_count
assert lint_jobs_mock.call_args[0][1] == jjb_config.config_parser
| apache-2.0 | Python |
|
2e330d5cd2ad033c675d5888a2f43e0f846a4df1 | Add CodeDeploy | craigbruce/troposphere,7digital/troposphere,ikben/troposphere,WeAreCloudar/troposphere,horacio3/troposphere,cloudtools/troposphere,ikben/troposphere,horacio3/troposphere,johnctitus/troposphere,alonsodomin/troposphere,7digital/troposphere,amosshapira/troposphere,pas256/troposphere,johnctitus/troposphere,Yipit/troposphere,dmm92/troposphere,dmm92/troposphere,pas256/troposphere,alonsodomin/troposphere,cloudtools/troposphere | troposphere/codedeploy.py | troposphere/codedeploy.py | # Copyright (c) 2015, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import positive_integer
KEY_ONLY = "KEY_ONLY"
VALUE_ONLY = "VALUE_ONLY"
KEY_AND_VALUE = "KEY_AND_VALUE"
class GitHubLocation(AWSProperty):
props = {
'CommitId': (basestring, True),
'Repository': (basestring, True),
}
class S3Location(AWSProperty):
props = {
'Bucket': (basestring, True),
'BundleType': (basestring, True),
'ETag': (basestring, False),
'Key': (basestring, True),
'Version': (basestring, True),
}
class Revision(AWSProperty):
props = {
'GitHubLocation': (GitHubLocation, False),
'RevisionType': (basestring, False),
'S3Location': (S3Location, False),
}
class Deployment(AWSProperty):
props = {
'Description': (basestring, False),
'IgnoreApplicationStopFailures': (bool, False),
'Revision': (Revision, True),
}
class Ec2TagFilters(AWSProperty):
props = {
'Key': (basestring, False),
'Type': (basestring, False),
'Value': (basestring, False),
}
class OnPremisesInstanceTagFilters(AWSProperty):
props = {
'Key': (basestring, False),
'Type': (basestring, False),
'Value': (basestring, False),
}
class MinimumHealthyHosts(AWSProperty):
props = {
'Type': (basestring, False),
'Value': (positive_integer, False),
}
class Application(AWSObject):
resource_type = "AWS::CodeDeploy::Application"
props = {
}
class DeploymentConfig(AWSObject):
resource_type = "AWS::CodeDeploy::DeploymentConfig"
props = {
'MinimumHealthyHosts': (MinimumHealthyHosts, False),
}
class DeploymentGroup(AWSObject):
resource_type = "AWS::DirectoryService::DeploymentGroup"
props = {
'ApplicationName': (basestring, True),
'AutoScalingGroups': ([basestring], False),
'Deployment': (Deployment, False),
'DeploymentConfigName': (basestring, False),
'Ec2TagFilters': (Ec2TagFilters, False),
'OnPremisesInstanceTagFilters': (OnPremisesInstanceTagFilters, False),
'ServiceRoleArn': (basestring, True),
}
| bsd-2-clause | Python |
|
0091af78bd191e34ecb621b20e79d6dd3d32ebb6 | Add unit tests for VocabularySet | clemente-lab/metasane | tests/test_core.py | tests/test_core.py | #!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
def setUp(self):
"""Initialize data used in the tests."""
self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
self.multi_vocab = {
'vocab_1': VOCAB_1.split('\n'),
'vocab_2': VOCAB_2.split('\n')
}
self.multi_vocab_inst = VocabularySet(self.multi_vocab)
def test_init_empty(self):
"""Test constructing an instance with no vocabs."""
obs = VocabularySet({})
self.assertEqual(len(obs), 0)
def test_init_single(self):
"""Test constructing an instance with a single vocab."""
obs = VocabularySet(self.single_vocab)
self.assertEqual(len(obs), 1)
self.assertTrue('vocab_1' in obs)
def test_init_multi(self):
"""Test constructing an instance with multiple vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
def test_contains(self):
"""Test membership based on ID."""
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
self.assertFalse('vocab_3' in self.multi_vocab_inst)
def test_getitem(self):
"""Test retrieving vocab based on ID."""
obs = self.multi_vocab_inst['vocab_1']
self.assertEqual(obs, set(['foo', 'bar', 'baz']))
obs = self.multi_vocab_inst['vocab_2']
self.assertEqual(obs, set(['xyz', '123', 'abc']))
def test_getitem_nonexistent(self):
"""Test retrieving vocab based on nonexistent ID."""
with self.assertRaises(KeyError):
_ = self.multi_vocab_inst['vocab_3']
def test_len(self):
"""Test retrieving the number of vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
|
1f9240f0b954afa9f587f468872c3e1e215f2eaa | Implement channel mode +s (or what's left of it) | Heufneutje/txircd,DesertBus/txircd,ElementalAlchemist/txircd | txircd/modules/cmode_s.py | txircd/modules/cmode_s.py | from txircd.modbase import Mode
class SecretMode(Mode):
def listOutput(self, command, data):
if command != "LIST":
return data
cdata = data["cdata"]
if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
data["cdata"] = {}
# other +s stuff is hiding in other modules.
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.mode_s = None
def spawn(self):
self.mode_s = SecretMode()
return {
"modes": {
"cns": self.mode_s
},
"actions": {
"commandextra": [self.mode_s.listOutput]
            }
        }
def cleanup(self):
self.ircd.removeMode("cns")
self.ircd.actions["commandextra"].remove(self.mode_s.listOutput) | bsd-3-clause | Python |
|
2a26fc7f0ac6223ebcb20eb1de550e899e5728db | add beginnings of script for ball identification | Edeleon4/PoolShark,Edeleon4/PoolShark,Edeleon4/PoolShark,Edeleon4/PoolShark,Edeleon4/PoolShark,Edeleon4/PoolShark | scripts/hist.py | scripts/hist.py | import cv2
import numpy as np
frame = cv2.imread('/mnt/c/Users/T-HUNTEL/Desktop/hackathon/table3.jpg')
h,w,c = frame.shape
print frame.shape
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
BORDER_COLOR = 0
def flood_fill(image, x, y, value):
    "Flood fill on a region of non-BORDER_COLOR pixels."
    count = 1
    points = [(x, y)]
    # The caller passes (row, col), so x indexes rows (shape[0]) and y
    # indexes columns (shape[1]).
    if x >= image.shape[0] or y >= image.shape[1] or image[x, y] == BORDER_COLOR:
        return None, None
edge = [(x, y)]
image[x, y] = value
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
if s <= image.shape[1] and y <= image.shape[0] and \
image[s, t] not in (BORDER_COLOR, value):
image[s, t] = value
points.append((s, t))
count += 1
newedge.append((s, t))
edge = newedge
return count, points
# thresholds for different balls / background
low_bkg = np.array([15, 40, 50], dtype=np.uint8)
high_bkg = np.array([40, 190, 200], dtype=np.uint8)
lower_blue = np.array([110,50,50], dtype=np.uint8)
upper_blue = np.array([130,255,255], dtype=np.uint8)
low_yellow = np.array([20, 30, 30], dtype=np.uint8)
high_yellow = np.array([30, 255, 255], dtype=np.uint8)
# mask out the background
mask = cv2.inRange(hsv, low_bkg, high_bkg)
mask = np.invert(mask)
# Bitwise-AND mask and original image
objects = cv2.bitwise_and(frame,frame, mask= mask)
hsv = cv2.cvtColor(objects, cv2.COLOR_BGR2HSV)
# mask the yellow balls
mask = cv2.inRange(hsv, low_yellow, high_yellow)
yellows = cv2.bitwise_and(objects, objects, mask=mask)
# find the biggest cloud of 1's in the yellow mask
biggest_cloud = []
biggest_count = 0
image = mask / 255.
while len(np.where(image == 1)[0]) > 0:
loc = np.where(image == 1)
y = loc[0][0]
x = loc[1][0]
count, cloud = flood_fill(image, y, x, 2)
if count > biggest_count:
print count
biggest_count = count
biggest_cloud = cloud
print biggest_cloud
print biggest_count
cv2.imwrite('mask.jpg', mask)
cv2.imwrite('yellows.jpg', yellows)
cv2.imwrite('frame.jpg', frame)
| mit | Python |
|
6a686a800a3579970a15fa9552b2eb4e1b6b3ed9 | add some tools for ml scoring | log0ymxm/corgi | corgi/ml.py | corgi/ml.py | import numpy as np
import pandas as pd
from scipy.stats import kendalltau, spearmanr
from sklearn.metrics import (accuracy_score, f1_score, log_loss,
mean_squared_error, precision_score, recall_score)
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
classifier_scoring = {
'accuracy': accuracy_score,
'log_loss': log_loss,
'f1_score': lambda x, y: f1_score(x, y, average='weighted'),
'precision': lambda x, y: precision_score(x, y, average='weighted'),
'recall': lambda x, y: recall_score(x, y, average='weighted'),
}
regression_scoring = {
'mean_squared_error': mean_squared_error,
'kendalltau': lambda x, y: kendalltau(x, y).correlation,
'spearmanr': lambda x, y: spearmanr(x, y)[0],
}
def scores(y, y_pred, scoring=None):
if scoring is None:
raise Exception("cross_val_scores requires a dict of measures.")
scores = {}
for k, metric in scoring.items():
scores[k] = metric(y, y_pred)
return scores
def cross_val_scores(clf, X, y, cv=3, scoring=None):
if scoring is None:
raise Exception("cross_val_scores requires a dict of measures.")
X, y = np.array(X), np.array(y)
skf = StratifiedKFold(n_splits=cv)
scores = []
for train, test in tqdm(skf.split(X, y)):
clf.fit(X[train], y[train])
y_pred = clf.predict(X[test])
score = {}
for k, metric in scoring.items():
try:
score[k] = metric(y[test], y_pred)
except:
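                # Some metrics can fail on a fold (e.g. a class missing from
                # the test split); skip that metric for this fold.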
pass
scores.append(score)
return pd.DataFrame(scores)
| mit | Python |
|
2d7d4987eb06372496ce4a5b7b961a12deba9574 | add windows-specific tests for shell_{quote,split} | googlefonts/nanoemoji,googlefonts/nanoemoji | tests/util_test.py | tests/util_test.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from nanoemoji.util import shell_quote, shell_split
import pytest
# Source:
# https://github.com/python/cpython/blob/653e563/Lib/test/test_subprocess.py#L1198-L1214
LIST2CMDLINE_TEST_DATA = [
(["a b c", "d", "e"], '"a b c" d e'),
(['ab"c', "\\", "d"], 'ab\\"c \\ d'),
(['ab"c', " \\", "d"], 'ab\\"c " \\\\" d'),
(["a\\\\\\b", "de fg", "h"], 'a\\\\\\b "de fg" h'),
(['a\\"b', "c", "d"], 'a\\\\\\"b c d'),
(["a\\\\b c", "d", "e"], '"a\\\\b c" d e'),
(["a\\\\b\\ c", "d", "e"], '"a\\\\b\\ c" d e'),
(["ab", ""], 'ab ""'),
]
CMDLINE2LIST_TEST_DATA = [(cmdline, args) for args, cmdline in LIST2CMDLINE_TEST_DATA]
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows only")
@pytest.mark.parametrize(
"args, expected_cmdline",
LIST2CMDLINE_TEST_DATA,
ids=[s for _, s in LIST2CMDLINE_TEST_DATA],
)
def test_windows_shell_quote(args, expected_cmdline):
assert " ".join(shell_quote(s) for s in args) == expected_cmdline
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows only")
@pytest.mark.parametrize(
"cmdline, expected_args",
CMDLINE2LIST_TEST_DATA,
ids=[s for s, _ in CMDLINE2LIST_TEST_DATA],
)
def test_windows_shell_split(cmdline, expected_args):
assert shell_split(cmdline) == expected_args
| apache-2.0 | Python |
|
a6ff8a5838f82be3d5b0b4196c03fbf7c15aff7a | Test dat.info | karissa/datpy,akubera/datpy | test.py | test.py | import unittest
import requests
port = 'http://localhost:6461'
def info():
call = port + '/api'
req = requests.get(call, stream=True)
print(req.content)
return req.status_code
class DatTest(unittest.TestCase):
def test_info(self):
self.assertEqual(info(), 200)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python |
|
26c49015b0c3be8045423306abb74eb7ea080f0b | Create test.py | Semen52/GIBDD | test.py | test.py | # -*- coding: utf-8 -*-
import sys
import time
import feedparser
import nltk
import coding
import numpy as np
import nltk
import string
from nltk.corpus import stopwords
#coding.setup_console("utf8")
if __name__ == "__main__":
start_time = time.time()
if len(sys.argv) >= 1:
print "Старт " + str(start_time)
#app = locomotive.app.Application()
        # ... additional logic ...
#print feedparser.parse("http://feeds.nytimes.com/nyt/rss/Technology")
#print nltk.corpus.stopwords.words('russian')
#print nltk.download()
def read_data_file(file_name="data.csv"):
        # Load the file, which is stored as utf8
text = open(file_name,'r').read()
        # Decode from utf8 to unicode - from the external encoding to the working one
text = text.decode('cp1251')
        # Work with the text
return text
def save_result_file(file_name="data.csv", text=""):
        # Encode the text from unicode to utf8 - from the working encoding to the external one
text = text.encode('utf8')
        # Save to a file encoded as utf8
open("new_" + file_name,'w').write(text)
def tokenize_me(file_text):
#firstly let's apply nltk tokenization
tokens = nltk.word_tokenize(file_text)
#let's delete punctuation symbols
tokens = [i for i in tokens if ( i not in string.punctuation )]
#deleting stop_words
stop_words = stopwords.words('russian')
stop_words.extend([u'что', u'это', u'так', u'вот', u'быть', u'как', u'в', u'—', u'к', u'на'])
tokens = [i for i in tokens if ( i not in stop_words )]
#cleaning words
tokens = [i.replace(u"«", "").replace(u"»", "") for i in tokens]
return tokens
text = read_data_file("data1.csv")
#s = text.rstrip(";")
print text
#d = np.array(text)
#d = ['Тест','списка']
tokens = tokenize_me(text)
#print ','.join(d)
print ','.join(tokens)
save_result_file("data1.csv",'\n'.join(tokens))
| apache-2.0 | Python |
|
18df3284fd6dc176b71c41599d02a24dc021f8db | add file that is useful for testing; but will be much more useful when I figure out how to turn of debugging output in Flask. | DrXyzzy/smc,DrXyzzy/smc,sagemathinc/smc,tscholl2/smc,tscholl2/smc,tscholl2/smc,sagemathinc/smc,tscholl2/smc,tscholl2/smc,DrXyzzy/smc,sagemathinc/smc,sagemathinc/smc,DrXyzzy/smc | test.py | test.py | #!/usr/bin/env python
import os
from doctest import testmod, NORMALIZE_WHITESPACE, ELLIPSIS
import backend, client, frontend, misc, model, session
def tm(module):
testmod(module, optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)
def run_doctests():
tm(backend)
tm(client)
tm(frontend)
tm(misc)
tm(model)
tm(session)
if __name__ == '__main__':
run_doctests()
| agpl-3.0 | Python |
|
ae6184a023f9a14c54663270d4a4294b8c3832f4 | Create test.py | gakrakow/rastcats | test.py | test.py | import os
print("hello there")
| mit | Python |
|
9309f7190314abdd8b56368147862453d17d97b5 | Create test.py | sliwhu/UWHousingTeam,sliwhu/UWHousingTeam | test.py | test.py | mit | Python |
||
d527bc83d44b91bb827c02907faf8cd7e7d49544 | Add dateutil gist | chenjianlong/my-gist,chenjianlong/my-gist | dateutil.py | dateutil.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8:et
"""Date and Time util
"""
__author__ = ["Jianlong Chen <[email protected]>"]
__date__ = "2013-07-17"
import datetime
def year():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y')
def date_time():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
def date():
return datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
def hour():
return datetime.datetime.strftime(datetime.datetime.now(), '%H')
| unlicense | Python |
|
75a61dfe788102d04e1cc3b151e839fa9add724f | Fix review requests | YarivCol/mbed-os,mikaleppanen/mbed-os,YarivCol/mbed-os,netzimme/mbed-os,kl-cruz/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,infinnovation/mbed-os,arostm/mbed-os,nRFMesh/mbed-os,YarivCol/mbed-os,screamerbg/mbed,cvtsi2sd/mbed-os,svogl/mbed-os,ryankurte/mbed-os,screamerbg/mbed,RonEld/mbed,cvtsi2sd/mbed-os,pradeep-gr/mbed-os5-onsemi,kl-cruz/mbed-os,mmorenobarm/mbed-os,nvlsianpu/mbed,radhika-raghavendran/mbed-os5.1-onsemi,monkiineko/mbed-os,c1728p9/mbed-os,infinnovation/mbed-os,betzw/mbed-os,j-greffe/mbed-os,kjbracey-arm/mbed,catiedev/mbed-os,Archcady/mbed-os,bcostm/mbed-os,mazimkhan/mbed-os,catiedev/mbed-os,fahhem/mbed-os,mikaleppanen/mbed-os,c1728p9/mbed-os,CalSol/mbed,infinnovation/mbed-os,pradeep-gr/mbed-os5-onsemi,bulislaw/mbed-os,fanghuaqi/mbed,mazimkhan/mbed-os,catiedev/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,adamgreen/mbed,radhika-raghavendran/mbed-os5.1-onsemi,pradeep-gr/mbed-os5-onsemi,adustm/mbed,j-greffe/mbed-os,mmorenobarm/mbed-os,arostm/mbed-os,RonEld/mbed,bulislaw/mbed-os,netzimme/mbed-os,adustm/mbed,mikaleppanen/mbed-os,karsev/mbed-os,arostm/mbed-os,j-greffe/mbed-os,ryankurte/mbed-os,NXPmicro/mbed,mmorenobarm/mbed-os,andcor02/mbed-os,fahhem/mbed-os,c1728p9/mbed-os,betzw/mbed-os,NXPmicro/mbed,RonEld/mbed,pradeep-gr/mbed-os5-onsemi,HeadsUpDisplayInc/mbed,arostm/mbed-os,theotherjimmy/mbed,nRFMesh/mbed-os,mbedmicro/mbed,maximmbed/mbed,bcostm/mbed-os,catiedev/mbed-os,karsev/mbed-os,mazimkhan/mbed-os,monkiineko/mbed-os,kl-cruz/mbed-os,betzw/mbed-os,betzw/mbed-os,bcostm/mbed-os,svogl/mbed-os,netzimme/mbed-os,bcostm/mbed-os,mbedmicro/mbed,kjbracey-arm/mbed,nvlsianpu/mbed,RonEld/mbed,YarivCol/mbed-os,theotherjimmy/mbed,karsev/mbed-os,j-greffe/mbed-os,j-greffe/mbed-os,arostm/mbed-os,mmorenobarm/mbed-os,bcostm/mbed-os,NXPmicro/mbed,CalSol/mbed,screamerbg/mbed,Archcady/mbed-os,NXPmicro/mbed,monkiineko/mbed-os,mbedmicro/mbed,maximmbed/mbed,adamgreen/mbed,mbedmicro/mbed,ryankurte/mbed-os,ryankurte/mbed-os,nvlsianpu/mbed,bcostm/mbed-os,HeadsUpDisplayInc/mbed,YarivCol/mbed-os,svogl/mbed-os,svogl/mbed-os,adamgreen/mbed,CalSol/mbed,nRFMesh/mbed-os,monkiineko/mbed-os,mmorenobarm/mbed-os,catiedev/mbed-os,netzimme/mbed-os,netzimme/mbed-os,bulislaw/mbed-os,monkiineko/mbed-os,betzw/mbed-os,fahhem/mbed-os,fahhem/mbed-os,monkiineko/mbed-os,karsev/mbed-os,cvtsi2sd/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,netzimme/mbed-os,adamgreen/mbed,infinnovation/mbed-os,theotherjimmy/mbed,bulislaw/mbed-os,nRFMesh/mbed-os,CalSol/mbed,CalSol/mbed,bulislaw/mbed-os,NXPmicro/mbed,NXPmicro/mbed,andcor02/mbed-os,cvtsi2sd/mbed-os,karsev/mbed-os,ryankurte/mbed-os,pradeep-gr/mbed-os5-onsemi,kl-cruz/mbed-os,CalSol/mbed,fahhem/mbed-os,kjbracey-arm/mbed,radhika-raghavendran/mbed-os5.1-onsemi,andcor02/mbed-os,mikaleppanen/mbed-os,adamgreen/mbed,mazimkhan/mbed-os,cvtsi2sd/mbed-os,HeadsUpDisplayInc/mbed,infinnovation/mbed-os,j-greffe/mbed-os,fanghuaqi/mbed,HeadsUpDisplayInc/mbed,adamgreen/mbed,adustm/mbed,fahhem/mbed-os,YarivCol/mbed-os,catiedev/mbed-os,Archcady/mbed-os,kl-cruz/mbed-os,fanghuaqi/mbed,ryankurte/mbed-os,maximmbed/mbed,nvlsianpu/mbed,theotherjimmy/mbed,nRFMesh/mbed-os,maximmbed/mbed,mbedmicro/mbed,c1728p9/mbed-os,RonEld/mbed,cvtsi2sd/mbed-os,c1728p9/mbed-os,theotherjimmy/mbed,kjbracey-arm/mbed,adustm/mbed,maximmbed/mbed,HeadsUpDisplayInc/mbed,screamerbg/mbed,Archcady/mbed-os,HeadsUpDisplayInc/mbed,bulislaw/mbed-os,mikaleppanen/mbed-os,fanghuaqi/mbed,svogl/mbed-os,c1728p9/mbed-os,arostm/mbed-os,mazimkhan/mbed-os,mazimkhan/mbed-os,betzw/mbed-
os,fanghuaqi/mbed,screamerbg/mbed,pradeep-gr/mbed-os5-onsemi,mikaleppanen/mbed-os,theotherjimmy/mbed,nRFMesh/mbed-os,andcor02/mbed-os,Archcady/mbed-os,adustm/mbed,RonEld/mbed,mmorenobarm/mbed-os,maximmbed/mbed,infinnovation/mbed-os,andcor02/mbed-os,Archcady/mbed-os,nvlsianpu/mbed,screamerbg/mbed,svogl/mbed-os,adustm/mbed,karsev/mbed-os,nvlsianpu/mbed,kl-cruz/mbed-os,andcor02/mbed-os | tools/export/cdt/__init__.py | tools/export/cdt/__init__.py | import re
from os.path import join, exists, realpath, relpath, basename
from os import makedirs
from tools.export.makefile import Makefile, GccArm, Armc5, IAR
class Eclipse(Makefile):
"""Generic Eclipse project. Intended to be subclassed by classes that
specify a type of Makefile.
"""
def generate(self):
"""Generate Makefile, .cproject & .project Eclipse project file,
py_ocd_settings launch file, and software link .p2f file
"""
super(Eclipse, self).generate()
starting_dot = re.compile(r'(^[.]/|^[.]$)')
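        # Paths starting with './' (or equal to '.') are rewritten so includes
        # resolve relative to the generated Eclipse project directory.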
ctx = {
'name': self.project_name,
'elf_location': join('BUILD',self.project_name)+'.elf',
'c_symbols': self.toolchain.get_symbols(),
'asm_symbols': self.toolchain.get_symbols(True),
'target': self.target,
'include_paths': [starting_dot.sub('%s/' % self.project_name, inc) for inc in self.resources.inc_dirs],
'load_exe': str(self.LOAD_EXE).lower()
}
if not exists(join(self.export_dir,'eclipse-extras')):
makedirs(join(self.export_dir,'eclipse-extras'))
self.gen_file('cdt/pyocd_settings.tmpl', ctx,
join('eclipse-extras',self.target+'_pyocd_settings.launch'))
self.gen_file('cdt/necessary_software.tmpl', ctx,
join('eclipse-extras','necessary_software.p2f'))
self.gen_file('cdt/.cproject.tmpl', ctx, '.cproject')
self.gen_file('cdt/.project.tmpl', ctx, '.project')
class EclipseGcc(Eclipse, GccArm):
LOAD_EXE = True
NAME = "Eclipse-GCC-ARM"
class EclipseArmc5(Eclipse, Armc5):
LOAD_EXE = False
NAME = "Eclipse-Armc5"
class EclipseIAR(Eclipse, IAR):
LOAD_EXE = True
NAME = "Eclipse-IAR"
| import re
from os.path import join, exists, realpath, relpath, basename
from os import makedirs
from tools.export.makefile import Makefile, GccArm, Armc5, IAR
class Eclipse(Makefile):
"""Generic Eclipse project. Intended to be subclassed by classes that
specify a type of Makefile.
"""
def generate(self):
"""Generate Makefile, .cproject & .project Eclipse project file,
py_ocd_settings launch file, and software link .p2f file
"""
super(Eclipse, self).generate()
include_paths_replace_re= re.compile(r'(^[.]/|^[.]$)')
ctx = {
'name': self.project_name,
'elf_location': join('BUILD',self.project_name)+'.elf',
'c_symbols': self.toolchain.get_symbols(),
'asm_symbols': self.toolchain.get_symbols(True),
'target': self.target,
'include_paths': map(lambda s: include_paths_replace_re.sub('%s/' % self.project_name, s), self.resources.inc_dirs),
'load_exe': str(self.LOAD_EXE).lower()
}
if not exists(join(self.export_dir,'eclipse-extras')):
makedirs(join(self.export_dir,'eclipse-extras'))
self.gen_file('cdt/pyocd_settings.tmpl', ctx,
join('eclipse-extras',self.target+'_pyocd_settings.launch'))
self.gen_file('cdt/necessary_software.tmpl', ctx,
join('eclipse-extras','necessary_software.p2f'))
self.gen_file('cdt/.cproject.tmpl', ctx, '.cproject')
self.gen_file('cdt/.project.tmpl', ctx, '.project')
class EclipseGcc(Eclipse, GccArm):
LOAD_EXE = True
NAME = "Eclipse-GCC-ARM"
class EclipseArmc5(Eclipse, Armc5):
LOAD_EXE = False
NAME = "Eclipse-Armc5"
class EclipseIAR(Eclipse, IAR):
LOAD_EXE = True
NAME = "Eclipse-IAR"
| apache-2.0 | Python |
5186f3e4bfcaf033c4012e72c8cb766a0b903296 | Add file for updating strains | AndersenLab/concordance-nf,AndersenLab/concordance-nf | set_isotypes.py | set_isotypes.py | from gcloud import datastore
import requests
import time
ds = datastore.Client(project='andersen-lab')
url = "https://docs.google.com/spreadsheets/d/1V6YHzblaDph01sFDI8YK_fP0H7sVebHQTXypGdiQIjI/pub?gid=0&single=true&output=tsv"
gs = requests.get(url).text.encode("utf-8").splitlines()
gs = [str(x, 'utf-8').strip().split("\t") for x in gs]
gs = [x for x in gs if x[2]]
gs = [(x[0], x[2], x[3]) for x in gs]
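# Map every strain name -- including previous names -- to its isotype and to
# its canonical strain name.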
WI_ISOTYPE = {}
WI_STRAIN = {}
for strain, isotype, prev_names in gs:
if prev_names != "NA":
prev_names = prev_names.split("|")
for p in prev_names:
if p:
WI_ISOTYPE[p] = isotype
WI_STRAIN[p] = strain
if strain and isotype:
WI_ISOTYPE[strain] = isotype
WI_STRAIN[strain] = strain
if isotype:
WI_ISOTYPE[isotype] = isotype
exclude_indices = ['most_abundant_sequence',
'fastqc_per_base_sequence_quality_data',
'fastqc_per_tile_sequence_quality_data',
'fastqc_per_sequence_quality_scores_data',
'fastqc_per_base_sequence_content_data',
'fastqc_per_sequence_gc_content_data',
'fastqc_per_base_n_content_data',
'fastqc_sequence_length_distribution_data',
'fastqc_sequence_duplication_levels_data',
'fastqc_overrepresented_sequences_data',
'fastqc_adapter_content_data',
'fastqc_kmer_content_data',
'fastqc_error']
def query_item(kind, filters=None, projection=()):
# filters:
# [("var_name", "=", 1)]
query = ds.query(kind=kind, projection = projection)
if filters:
for var, op, val in filters:
query.add_filter(var, op, val)
return query.fetch()
def get_item(kind, name):
return ds.get(ds.key(kind, name))
def update_item(kind, name, **kwargs):
item = get_item(kind, name)
if item is None:
m = datastore.Entity(key=ds.key(kind, name),
exclude_from_indexes=exclude_indices)
else:
m = datastore.Entity(key=ds.key(kind, name),
exclude_from_indexes=exclude_indices)
m.update(dict(item))
for key, value in kwargs.items():
if type(value) == str:
m[key] = value
elif type(value) == list:
if key in m:
m[key] += value
else:
m[key] = value
m[key] = list(set(m[key]))
# If date created of file is earlier
elif key == 'date_created' and item:
vtimestamp = time.mktime(value.timetuple())
dstimestamp = time.mktime(m['date_created'].timetuple())
if vtimestamp < dstimestamp:
m[key] = value
else:
m[key] = value
if 'fq_profile_count' in m:
m['fq_profile_count'] += 1
else:
m['fq_profile_count'] = 1
ds.put(m)
fastqs = query_item('fastq', filters = [['strain_type', '=', 'WI'], ['use', '=', True]])
for fq in fastqs:
if 'original_strain' in fq.keys():
if fq['original_strain'] in WI_STRAIN.keys():
fq['strain'] = WI_STRAIN[fq['original_strain']]
if fq['original_strain'] in WI_ISOTYPE.keys():
fq['isotype'] = WI_ISOTYPE[fq['original_strain']]
print([fq.key.name, fq['isotype'], fq['strain'], fq['original_strain']])
if 'seq_folder' in fq.keys():
if fq['seq_folder'] != "original_wi_seq":
if fq['library'] != fq['barcode'].replace("+", ""):
print(fq['library'] + "=>" + fq['barcode'].replace("+", ""))
fq['library'] = fq['barcode'].replace("+", "")
update_item('fastq', fq.key.name, **fq)
| mit | Python |
|
39fa13cf9b12f3828d4776d10532405c0ea43603 | Add an example | messente/verigator-python | examples/example.py | examples/example.py | """
Flow as follows:
Create Service -> Create User -> Initiate Authentication -> Verify Pin
"""
from messente.verigator.api import Api
api = Api("username", "password")
service = api.services.create("http://example.com", "service_name")
user = api.users.create(service.id, "+xxxxxxxxxxx", "username")
auth_id = api.auth.initiate(service.id, user.id, api.auth.METHOD_SMS)
while True:
try:
input = raw_input # Python 2 compatibility
except NameError:
pass
token = input("Enter Sms Pin: ")
auth_res, error = api.auth.verify(service.id, user.id, api.auth.METHOD_SMS, token, auth_id)
if auth_res:
break
print("Not Verified... Reason: {}".format(error['result']))
print("Verified Successfully!")
| apache-2.0 | Python |
|
265f8c48f4b257287dd004ba783a8aa6f94bb870 | Add Latin params file | diyclassics/cltk,TylerKirby/cltk,TylerKirby/cltk,kylepjohnson/cltk,cltk/cltk,D-K-E/cltk | cltk/tokenize/latin/params.py | cltk/tokenize/latin/params.py | """ Params: Latin
"""
__author__ = ['Patrick J. Burns <[email protected]>']
__license__ = 'MIT License.'
PRAENOMINA = ['a', 'agr', 'ap', 'c', 'cn', 'd', 'f', 'k', 'l', "m'", 'm', 'mam', 'n', 'oct', 'opet', 'p', 'post', 'pro', 'q', 's', 'ser', 'sert', 'sex', 'st', 't', 'ti', 'v', 'vol', 'vop', 'a', 'ap', 'c', 'cn', 'd', 'f', 'k', 'l', 'm', "m'", 'mam', 'n', 'oct', 'opet', 'p', 'paul', 'post', 'pro', 'q', 'ser', 'sert', 'sex', 'sp', 'st', 'sta', 't', 'ti', 'v', 'vol', 'vop']
CALENDAR = ['ian', 'febr', 'mart', 'apr', 'mai', 'iun', 'iul', 'aug', 'sept', 'oct', 'nov', 'dec'] \
+ ['kal', 'non', 'id', 'a.d']
MISC = ['coll', 'cos', 'ord', 'pl.', 's.c', 'suff', 'trib']
ABBREVIATIONS = set(
PRAENOMINA +
CALENDAR +
MISC
)
| mit | Python |
|
8f6e10a6fe5d76a27369801ae998e3d7e30b667e | Implement Zhai_Luo support. | colour-science/colour | colour/adaptation/zhai2018.py | colour/adaptation/zhai2018.py | import numpy as np
def chromatic_adaptation_forward_Zhai2018(XYZb,
XYZwb,
Db,
XYZws,
Ds,
XYZwo,
CAT="CAT02"):
Ywo = XYZwo[1]
Ywb = XYZwb[1]
Yws = XYZws[1]
if CAT == "CAT02":
Mt = np.array([
[0.7328, 0.4296, -0.1624],
[-0.7036, 1.6975, 0.0061],
[0.0030, 0.0136, 0.9834],
])
if CAT == "CAT16":
Mt = np.array([
[0.401288, 0.650173, -0.051461],
[-0.250268, 1.204414, 0.045854],
[-0.002079, 0.048952, 0.953127],
])
RGBb = Mt @ XYZb
RGBwb = Mt @ XYZwb
RGBws = Mt @ XYZws
RGBwo = Mt @ XYZwo
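    # Per-channel degrees of adaptation for the two illuminants (subscripts b
    # and s) relative to the reference white o; their ratio maps the cone-like
    # responses from condition b to condition s.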
Drgbb = Db * (Ywb / Ywo) * (RGBwo / RGBwb) + 1 - Db
Drgbs = Ds * (Yws / Ywo) * (RGBwo / RGBws) + 1 - Ds
Drgb = (Drgbb / Drgbs)
RGBs = Drgb * RGBb
XYZs = np.linalg.inv(Mt) @ RGBs
return XYZs
"""
XYZb = np.array([48.900,43.620,6.250])
XYZwb = np.array([109.850,100,35.585])
Db = 0.9407
XYZws = np.array([95.047,100,108.883])
Ds = 0.9800
XYZwo = np.array([100,100,100])
chromatic_adaptation_forward_Zhai2018(XYZb, XYZwb, Db, XYZws, Ds, XYZwo, 'CAT16')
"""
def chromatic_adaptation_inverse_Zhai2018(XYZs,
XYZwb,
Db,
XYZws,
Ds,
XYZwo,
CAT="CAT02"):
Ywo = XYZwo[1]
Ywb = XYZwb[1]
Yws = XYZws[1]
if CAT == "CAT02":
Mt = np.array([
[0.7328, 0.4296, -0.1624],
[-0.7036, 1.6975, 0.0061],
[0.0030, 0.0136, 0.9834],
])
if CAT == "CAT16":
Mt = np.array([
[0.401288, 0.650173, -0.051461],
[-0.250268, 1.204414, 0.045854],
[-0.002079, 0.048952, 0.953127],
])
RGBwb = Mt @ XYZwb
RGBws = Mt @ XYZws
RGBwo = Mt @ XYZwo
Drgbb = Db * (Ywb / Ywo) * (RGBwo / RGBwb) + 1 - Db
Drgbs = Ds * (Yws / Ywo) * (RGBwo / RGBws) + 1 - Ds
Drgb = (Drgbb / Drgbs)
RGBs = Mt @ XYZs
RGBb = RGBs / Drgb
XYZb = np.linalg.inv(Mt) @ RGBb
return XYZb
"""
XYZs = np.array([40.374,43.694,20.517])
XYZwb = np.array([109.850,100,35.585])
Db = 0.9407
XYZws = np.array([95.047,100,108.883])
Ds = 0.9800
XYZwo = np.array([100,100,100])
chromatic_adaptation_inverse_Zhai2018(XYZs, XYZwb, Db, XYZws, Ds, XYZwo, 'CAT16')
"""
| bsd-3-clause | Python |
|
0a9efede94c64d114cf536533b94a47210a90604 | Add viper.common.constants.py | MeteorAdminz/viper,MeteorAdminz/viper,postfix/viper-1,S2R2/viper,Beercow/viper,postfix/viper-1,cwtaylor/viper,jack51706/viper,Beercow/viper,jack51706/viper,kevthehermit/viper,Beercow/viper,jahrome/viper,S2R2/viper,cwtaylor/viper,jahrome/viper,kevthehermit/viper | viper/common/constants.py | viper/common/constants.py | # This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
_current_dir = os.path.abspath(os.path.dirname(__file__))
VIPER_ROOT = os.path.normpath(os.path.join(_current_dir, "..", ".."))
| bsd-3-clause | Python |
|
2d7ea21c2d9171a79298866bf02abf64b849be0e | add a simple info cog | slice/dogbot,slice/dogbot,sliceofcode/dogbot,slice/dogbot,sliceofcode/dogbot | dog/ext/info.py | dog/ext/info.py | from textwrap import dedent
import discord
from discord.ext.commands import guild_only
from lifesaver.bot import Cog, group, Context
from lifesaver.utils import human_delta
class Info(Cog):
"""A cog that provides information about various entities like guilds or members."""
@group(aliases=['guild', 'guild_info', 'server_info'], invoke_without_command=True)
@guild_only()
async def server(self, ctx: Context):
"""Views information about this server."""
embed = discord.Embed(title=ctx.guild.name)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(text=f'Owned by {ctx.guild.owner}', icon_url=ctx.guild.owner.avatar_url)
g: discord.Guild = ctx.guild
n_humans = sum(1 for m in g.members if not m.bot)
n_bots = len(g.members) - n_humans
embed.description = dedent(f"""\
{n_humans} humans, {n_bots} bots ({n_humans + n_bots} members)
Created {g.created_at}
{human_delta(g.created_at)} ago
""")
embed.add_field(name='Entities', value=dedent(f"""\
{len(g.text_channels)} text channels, {len(g.voice_channels)} voice channels, {len(g.categories)} categories
{len(g.roles)} roles
"""))
await ctx.send(embed=embed)
@server.command(aliases=['icon_url'])
@guild_only()
async def icon(self, ctx: Context):
"""Sends this server's icon."""
if not ctx.guild.icon_url:
await ctx.send('No server icon.')
return
await ctx.send(ctx.guild.icon_url_as(format='png'))
def setup(bot):
bot.add_cog(Info(bot))
| mit | Python |
|
eff85f039674ca9fe69294ca2e81644dc4ff4cb6 | add celery for all notification mail | gnowledge/gstudio,AvadootNachankar/gstudio,gnowledge/gstudio,olympian94/gstudio,makfire/gstudio,makfire/gstudio,olympian94/gstudio,gnowledge/gstudio,gnowledge/gstudio,Dhiru/gstudio,makfire/gstudio,Dhiru/gstudio,Dhiru/gstudio,olympian94/gstudio,AvadootNachankar/gstudio,AvadootNachankar/gstudio,makfire/gstudio,gnowledge/gstudio,olympian94/gstudio,olympian94/gstudio,olympian94/gstudio,AvadootNachankar/gstudio,Dhiru/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/views/tasks.py | gnowsys-ndf/gnowsys_ndf/ndf/views/tasks.py | from celery import task
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from gnowsys_ndf.notification import models as notification
from gnowsys_ndf.ndf.models import Node
from gnowsys_ndf.ndf.models import node_collection, triple_collection
import json
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
sitename = Site.objects.all()[0]
@task
def task_set_notify_val(request_user_id, group_id, msg, activ, to_user):
'''
Attach notification mail to celery task
'''
request_user = User.objects.get(id=request_user_id)
to_send_user = User.objects.get(id=to_user)
try:
group_obj = node_collection.one({'_id': ObjectId(group_id)})
site = sitename.name.__str__()
objurl = "http://test"
render = render_to_string(
"notification/label.html",
{
'sender': request_user.username,
'activity': activ,
'conjunction': '-',
'object': group_obj,
'site': site,
'link': objurl
}
)
notification.create_notice_type(render, msg, "notification")
notification.send([to_send_user], render, {"from_user": request_user})
return True
except Exception as e:
print "Error in sending notification- "+str(e)
return False
| agpl-3.0 | Python |
|
f956af85b27d104e84754b4d93a761b82ae39831 | add external_iterate.py | uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco,uwescience/raco | external_iterate.py | external_iterate.py | #!/usr/bin/env python
"""Compile a Myrial program into logical relational algebra."""
import raco.myrial.interpreter as interpreter
import raco.myrial.parser as parser
import raco.scheme
from raco import algebra
from raco import myrialang
from raco.compile import optimize
from raco.language import MyriaAlgebra
import argparse
import json
import os
import sys
def evaluate(plan):
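    # Walk compound plans recursively; only leaf operators get optimized and
    # compiled to Myria JSON.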
if isinstance(plan, algebra.DoWhile):
evaluate(plan.left)
evaluate(plan.right)
elif isinstance(plan, algebra.Sequence):
for child in plan.children():
evaluate(child)
else:
logical = str(plan)
physical = optimize([('', plan)], target=MyriaAlgebra, source=algebra.LogicalAlgebra)
phys = myrialang.compile_to_json(logical, logical, physical)
print phys
json.dumps(phys)
def print_pretty_plan(plan, indent=0):
if isinstance(plan, algebra.DoWhile):
print '%s%s' % (' ' * indent, plan.shortStr())
print_pretty_plan(plan.left, indent + 4)
print_pretty_plan(plan.right, indent + 4)
elif isinstance(plan, algebra.Sequence):
print '%s%s' % (' ' * indent, plan.shortStr())
for child in plan.children():
print_pretty_plan(child, indent + 4)
else:
print '%s%s' % (' ' * indent, plan)
def parse_options(args):
parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='parse_only',
help="Parse only", action='store_true')
parser.add_argument('file', help='File containing Myrial source program')
ns = parser.parse_args(args)
return ns
class FakeCatalog(object):
def __init__(self, catalog):
self.catalog = catalog
def get_scheme(self, relation_key):
return raco.Scheme(self.catalog[relation_key])
@classmethod
def load_from_file(cls, path):
with open(path) as fh:
return cls(eval(fh.read()))
def main(args):
opt = parse_options(args)
# Search for a catalog definition file
catalog_path = os.path.join(os.path.dirname(opt.file), 'catalog.py')
catalog = None
if os.path.exists(catalog_path):
catalog = FakeCatalog.load_from_file(catalog_path)
_parser = parser.Parser()
processor = interpreter.StatementProcessor(catalog)
with open(opt.file) as fh:
statement_list = _parser.parse(fh.read())
if opt.parse_only:
print statement_list
else:
processor.evaluate(statement_list)
plan = processor.get_physical_plan()
evaluate(plan)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | Python |
|
40146a54f9857aaaf252f3e1e5de7dc73c6cd181 | add pl.proportions | theislab/scvelo | scvelo/plotting/proportions.py | scvelo/plotting/proportions.py | from ..preprocessing.utils import sum_var
import matplotlib.pyplot as pl
import numpy as np
def proportions(adata, groupby='clusters', layers=['spliced', 'unspliced', 'ambiguous'], highlight='unspliced',
add_labels_pie=True, add_labels_bar=True, fontsize=8, figsize=(10, 2), dpi=100, show=True):
"""
Parameters
----------
"""
# get counts per cell for each layer
layers_keys = [key for key in layers if key in adata.layers.keys()]
counts_layers = [sum_var(adata.layers[key]) for key in layers_keys]
counts_total = np.sum(counts_layers, 0)
counts_total += counts_total == 0
counts_layers = np.array([counts / counts_total for counts in counts_layers])
gspec = pl.GridSpec(1, 2, pl.figure(None, figsize, dpi=dpi))
colors = pl.get_cmap('tab20b')(np.linspace(0.10, 0.65, len(layers_keys)))
# pie chart of total abundances
ax = pl.subplot(gspec[0])
mean_abundances = np.mean(counts_layers, axis=1)
if highlight is None: highlight = 'none'
explode = [.1 if (l == highlight or l in highlight) else 0 for l in layers_keys]
autopct = '%1.0f%%' if add_labels_pie else None
pie = ax.pie(np.mean(counts_layers, axis=1), colors=colors, explode=explode,
autopct=autopct, shadow=True, startangle=45)
if autopct is not None:
for pct, color in zip(pie[-1], colors):
r, g, b, _ = color
pct.set_color('white' if r * g * b < 0.5 else 'darkgrey')
pct.set_fontweight('bold')
pct.set_fontsize(fontsize)
ax.legend(layers_keys, ncol=len(layers_keys), bbox_to_anchor=(0, 1), loc='lower left', fontsize=fontsize)
# bar chart of abundances per category
if groupby is not None and groupby in adata.obs.keys():
counts_groups = dict()
for cluster in adata.obs[groupby].cat.categories:
counts_groups[cluster] = np.mean(counts_layers[:, adata.obs[groupby] == cluster], axis=1)
labels = list(counts_groups.keys())
data = np.array(list(counts_groups.values()))
data_cum = data.cumsum(axis=1)
ax2 = pl.subplot(gspec[1])
for i, (colname, color) in enumerate(zip(layers_keys, colors)):
starts, widths = data_cum[:, i] - data[:, i], data[:, i]
xpos = starts + widths / 2
curr_xpos = xpos[0]
            for j, (x, w) in enumerate(zip(xpos, widths)):  # j avoids shadowing the outer loop index
                curr_xpos = curr_xpos if x - w / 2 + .05 < curr_xpos < x + w / 2 - .05 else x
                xpos[j] = curr_xpos
ax2.barh(labels, widths, left=starts, height=0.9, label=colname, color=color)
if add_labels_bar:
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
for y, (x, c) in enumerate(zip(xpos, widths)):
ax2.text(x, y, '{:.0f}%'.format(c * 100), ha='center', va='center',
color=text_color, fontsize=fontsize, fontweight='bold')
ax2.legend(ncol=len(layers_keys), bbox_to_anchor=(0, 1), loc='lower left', fontsize=fontsize)
ax2.invert_yaxis()
ax2.set_xlim(0, np.nansum(data, axis=1).max())
ax2.margins(0)
ax2.set_xlabel('proportions', fontweight='bold', fontsize=fontsize * 1.2)
ax2.set_ylabel(groupby, fontweight='bold', fontsize=fontsize * 1.2)
ax2.tick_params(axis='both', which='major', labelsize=fontsize)
ax = [ax, ax2]
if show:
pl.show()
else:
return ax
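# Example call (illustrative, not part of the original commit; assumes an
# AnnData object with 'spliced'/'unspliced' layers and a 'clusters' column):
#   proportions(adata, groupby='clusters', highlight='unspliced')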
| bsd-3-clause | Python |
|
7f78d4ea286d9827aaa47022077de286195c2cd9 | Add a Fast(er) DS1820 reader | scudderfish/SFL525R,scudderfish/SFL525R | wipy/flash/lib/FDS1820.py | wipy/flash/lib/FDS1820.py | from onewire import *
import machine
import time
class FDS1820(object):
def __init__(self, onewire):
self.ow = onewire
self.roms = [rom for rom in self.ow.scan() if rom[0] == 0x10 or rom[0] == 0x28]
def read_temp(self, rom=None):
"""
Read and return the temperature of one DS18x20 device.
Pass the 8-byte bytes object with the ROM of the specific device you want to read.
If only one DS18x20 device is attached to the bus you may omit the rom parameter.
"""
rom = rom or self.roms[0]
ow = self.ow
ow.reset()
ow.select_rom(rom)
ow.write_byte(0x44) # Convert Temp
while True:
if ow.read_bit():
break
ow.reset()
ow.select_rom(rom)
ow.write_byte(0xbe) # Read scratch
data = ow.read_bytes(9)
return self.convert_temp(rom[0], data)
def read_temps(self):
"""
Read and return the temperatures of all attached DS18x20 devices.
"""
temps = []
ow=self.ow
ow.reset()
for rom in self.roms:
ow.select_rom(rom)
ow.write_byte(0x44)
while True:
if ow.read_bit():
break
ow.reset()
for rom in self.roms:
ow.select_rom(rom)
ow.write_byte(0xbe) # Read scratch
data = ow.read_bytes(9)
temps.append(self.convert_temp(rom[0], data))
return temps
def slow_read_temps(self):
temps=[];
for rom in self.roms:
temps.append(self.read_temp(rom))
return temps;
def convert_temp(self, rom0, data):
"""
Convert the raw temperature data into degrees celsius and return as a fixed point with 2 decimal places.
"""
temp_lsb = data[0]
temp_msb = data[1]
if rom0 == 0x10:
if temp_msb != 0:
# convert negative number
temp_read = temp_lsb >> 1 | 0x80 # truncate bit 0 by shifting, fill high bit with 1.
temp_read = -((~temp_read + 1) & 0xff) # now convert from two's complement
else:
temp_read = temp_lsb >> 1 # truncate bit 0 by shifting
count_remain = data[6]
count_per_c = data[7]
temp = 100 * temp_read - 25 + (count_per_c - count_remain) // count_per_c
return temp
elif rom0 == 0x28:
return (temp_msb << 8 | temp_lsb) * 100 // 16
else:
assert False
def tst():
dat = machine.Pin('GP30')
ow = OneWire(dat)
ds = FDS1820(ow)
print('devices:', ds.roms)
start=time.ticks_ms()
for x in range(0,3):
print('temperatures:', ds.slow_read_temps())
print(time.ticks_diff(start,time.ticks_ms()))
start=time.ticks_ms()
for x in range(0,3):
print('temperatures:', ds.read_temps())
print(time.ticks_diff(start,time.ticks_ms()))
| mit | Python |
|
9184d4cebf95ee31836970bedffaddc3bfaa2c2d | Prepare v2.20.8.dev | crawln45/Flexget,ianstalk/Flexget,gazpachoking/Flexget,ianstalk/Flexget,malkavi/Flexget,tobinjt/Flexget,tobinjt/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,malkavi/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,malkavi/Flexget,tobinjt/Flexget,crawln45/Flexget,Danfocus/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,malkavi/Flexget,ianstalk/Flexget,tobinjt/Flexget,Danfocus/Flexget,Flexget/Flexget,Danfocus/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.8.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.7'
| mit | Python |
1bf65c4b18b1d803b9515f80056c4be5790e3bde | Prepare v1.2.276.dev | poulpito/Flexget,jacobmetrick/Flexget,oxc/Flexget,cvium/Flexget,Pretagonist/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,OmgOhnoes/Flexget,ZefQ/Flexget,offbyone/Flexget,ZefQ/Flexget,antivirtel/Flexget,poulpito/Flexget,v17al/Flexget,ianstalk/Flexget,ibrahimkarahan/Flexget,qvazzler/Flexget,LynxyssCZ/Flexget,JorisDeRieck/Flexget,ibrahimkarahan/Flexget,lildadou/Flexget,lildadou/Flexget,Danfocus/Flexget,patsissons/Flexget,dsemi/Flexget,antivirtel/Flexget,malkavi/Flexget,tarzasai/Flexget,tarzasai/Flexget,tobinjt/Flexget,jawilson/Flexget,Flexget/Flexget,ianstalk/Flexget,cvium/Flexget,qk4l/Flexget,crawln45/Flexget,vfrc2/Flexget,patsissons/Flexget,antivirtel/Flexget,xfouloux/Flexget,Danfocus/Flexget,tsnoam/Flexget,LynxyssCZ/Flexget,ratoaq2/Flexget,poulpito/Flexget,xfouloux/Flexget,crawln45/Flexget,drwyrm/Flexget,Pretagonist/Flexget,Danfocus/Flexget,Pretagonist/Flexget,oxc/Flexget,cvium/Flexget,OmgOhnoes/Flexget,vfrc2/Flexget,Flexget/Flexget,malkavi/Flexget,jacobmetrick/Flexget,ibrahimkarahan/Flexget,jawilson/Flexget,Danfocus/Flexget,spencerjanssen/Flexget,tsnoam/Flexget,qvazzler/Flexget,malkavi/Flexget,gazpachoking/Flexget,Flexget/Flexget,tobinjt/Flexget,jawilson/Flexget,lildadou/Flexget,v17al/Flexget,gazpachoking/Flexget,OmgOhnoes/Flexget,jacobmetrick/Flexget,xfouloux/Flexget,ratoaq2/Flexget,spencerjanssen/Flexget,grrr2/Flexget,tobinjt/Flexget,qk4l/Flexget,malkavi/Flexget,sean797/Flexget,grrr2/Flexget,patsissons/Flexget,qk4l/Flexget,drwyrm/Flexget,dsemi/Flexget,spencerjanssen/Flexget,grrr2/Flexget,jawilson/Flexget,qvazzler/Flexget,tobinjt/Flexget,v17al/Flexget,thalamus/Flexget,offbyone/Flexget,drwyrm/Flexget,JorisDeRieck/Flexget,vfrc2/Flexget,ZefQ/Flexget,ianstalk/Flexget,tsnoam/Flexget,dsemi/Flexget,offbyone/Flexget,sean797/Flexget,oxc/Flexget,LynxyssCZ/Flexget,ratoaq2/Flexget,Flexget/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,sean797/Flexget,tarzasai/Flexget,thalamus/Flexget,thalamus/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.276.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.275'
| mit | Python |
f4a4b733445abba45a0a168dde9b7c10248688a6 | Prepare v1.2.318.dev | patsissons/Flexget,tobinjt/Flexget,gazpachoking/Flexget,oxc/Flexget,ratoaq2/Flexget,cvium/Flexget,ibrahimkarahan/Flexget,oxc/Flexget,grrr2/Flexget,sean797/Flexget,drwyrm/Flexget,JorisDeRieck/Flexget,jawilson/Flexget,tarzasai/Flexget,qk4l/Flexget,JorisDeRieck/Flexget,Pretagonist/Flexget,tsnoam/Flexget,Danfocus/Flexget,crawln45/Flexget,ianstalk/Flexget,LynxyssCZ/Flexget,patsissons/Flexget,antivirtel/Flexget,qvazzler/Flexget,crawln45/Flexget,ianstalk/Flexget,ZefQ/Flexget,drwyrm/Flexget,antivirtel/Flexget,JorisDeRieck/Flexget,Pretagonist/Flexget,jacobmetrick/Flexget,lildadou/Flexget,qvazzler/Flexget,thalamus/Flexget,gazpachoking/Flexget,grrr2/Flexget,malkavi/Flexget,jawilson/Flexget,qk4l/Flexget,oxc/Flexget,xfouloux/Flexget,ZefQ/Flexget,drwyrm/Flexget,cvium/Flexget,Flexget/Flexget,spencerjanssen/Flexget,spencerjanssen/Flexget,sean797/Flexget,grrr2/Flexget,qk4l/Flexget,Danfocus/Flexget,thalamus/Flexget,spencerjanssen/Flexget,thalamus/Flexget,JorisDeRieck/Flexget,ZefQ/Flexget,ibrahimkarahan/Flexget,OmgOhnoes/Flexget,jacobmetrick/Flexget,dsemi/Flexget,jawilson/Flexget,Pretagonist/Flexget,patsissons/Flexget,Danfocus/Flexget,tsnoam/Flexget,tarzasai/Flexget,qvazzler/Flexget,poulpito/Flexget,crawln45/Flexget,malkavi/Flexget,tarzasai/Flexget,offbyone/Flexget,offbyone/Flexget,ratoaq2/Flexget,tobinjt/Flexget,Flexget/Flexget,malkavi/Flexget,antivirtel/Flexget,xfouloux/Flexget,ratoaq2/Flexget,malkavi/Flexget,sean797/Flexget,tobinjt/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,OmgOhnoes/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,lildadou/Flexget,cvium/Flexget,Flexget/Flexget,LynxyssCZ/Flexget,lildadou/Flexget,tobinjt/Flexget,jawilson/Flexget,tsnoam/Flexget,dsemi/Flexget,OmgOhnoes/Flexget,jacobmetrick/Flexget,LynxyssCZ/Flexget,crawln45/Flexget,poulpito/Flexget,xfouloux/Flexget,poulpito/Flexget,ianstalk/Flexget,offbyone/Flexget,Flexget/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.318.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.317'
| mit | Python |
860cf7b9743744c9d21796b227cf21d684fb5519 | Add test_modulepickling_change_cache_dir | tjwei/jedi,flurischt/jedi,dwillmer/jedi,WoLpH/jedi,flurischt/jedi,mfussenegger/jedi,mfussenegger/jedi,WoLpH/jedi,dwillmer/jedi,jonashaag/jedi,jonashaag/jedi,tjwei/jedi | test/test_cache.py | test/test_cache.py | from jedi import settings
from jedi.cache import ParserCacheItem, _ModulePickling
ModulePickling = _ModulePickling()
def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
"""
ModulePickling should not save old cache when cache_directory is changed.
See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_
"""
dir_1 = str(tmpdir.mkdir('first'))
dir_2 = str(tmpdir.mkdir('second'))
item_1 = ParserCacheItem('fake parser 1')
item_2 = ParserCacheItem('fake parser 2')
path_1 = 'fake path 1'
path_2 = 'fake path 2'
monkeypatch.setattr(settings, 'cache_directory', dir_1)
ModulePickling.save_module(path_1, item_1)
cached = ModulePickling.load_module(path_1, item_1.change_time - 1)
assert cached == item_1.parser
monkeypatch.setattr(settings, 'cache_directory', dir_2)
ModulePickling.save_module(path_2, item_2)
cached = ModulePickling.load_module(path_1, item_1.change_time - 1)
assert cached is None
| mit | Python |
|
d082eb41c2ccef7178d228896a7658fe52bcbdec | Create directory for useless symbols remove | PatrikValkovic/grammpy | tests/UselessSymbolsRemove/__init__.py | tests/UselessSymbolsRemove/__init__.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
""" | mit | Python |
|
3c82f0228095b2616b35a2881f51c93999fdd79b | Test models/FieldMapper | WSULib/combine,WSULib/combine,WSULib/combine,WSULib/combine | tests/test_models/test_field_mapper.py | tests/test_models/test_field_mapper.py | import json
import jsonschema
from django.test import TestCase
from core.models import FieldMapper
from tests.utils import json_string
class FieldMapperTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.attributes = {
'name': 'Test Field Mapper',
'config_json': json_string({"add_literals":{"foo":"bar"}}),
'field_mapper_type': 'xml2kvp'
}
cls.field_mapper = FieldMapper(**cls.attributes)
def test_str(self):
self.assertEqual('Test Field Mapper, FieldMapper: #{}'.format(FieldMapperTestCase.field_mapper.id),
format(FieldMapperTestCase.field_mapper))
def test_as_dict(self):
as_dict = FieldMapperTestCase.field_mapper.as_dict()
for k, v in FieldMapperTestCase.attributes.items():
self.assertEqual(as_dict[k], v)
def test_config(self):
self.assertEqual(json.loads(FieldMapperTestCase.attributes['config_json']),
FieldMapperTestCase.field_mapper.config)
def test_config_none(self):
no_config_mapper = FieldMapper(name='new field mapper')
self.assertIsNone(no_config_mapper.config)
def test_validate_config_json(self):
self.assertIsNone(FieldMapperTestCase.field_mapper.validate_config_json())
def test_validate_config_json_invalid(self):
invalid_config_mapper = FieldMapper(config_json=json_string({"add_literals": "invalid value"}))
self.assertRaises(jsonschema.exceptions.ValidationError,
invalid_config_mapper.validate_config_json)
def test_validate_config_json_provided(self):
invalid_config_mapper = FieldMapper(config_json=json_string({"add_literals": "invalid value"}))
self.assertIsNone(invalid_config_mapper.validate_config_json(json_string({"add_literals":{"foo":"bar"}})))
| mit | Python |
|
ebe10d39064410fc49ac90e38339a54d0ed47c80 | update hooks for sqlalchemy | ODM2/ODM2StreamingDataLoader,ODM2/ODM2StreamingDataLoader | setup/hooks/hook-sqlalchemy.py | setup/hooks/hook-sqlalchemy.py | __author__ = 'stephanie'
# Copyright (C) 2009, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Contributed by Greg Copeland
from PyInstaller.hooks.hookutils import exec_statement
# include most common database bindings
# some database bindings are detected and included automatically, some
# are not. We should explicitly include database backends.
hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'pyodbc', 'pymysql']
print "in custom sql alchemy hook "
# sqlalchemy.databases package from pre 0.6 sqlalchemy versions
databases = exec_statement("import sqlalchemy.databases;print sqlalchemy.databases.__all__")
databases = eval(databases.strip())
for n in databases:
hiddenimports.append("sqlalchemy.databases." + n)
# sqlalchemy.orm package from pre 0.6 sqlalchemy versions
orm = exec_statement("import sqlalchemy.orm;print sqlalchemy.orm.__all__")
orm = eval(orm.strip())
for n in orm:
hiddenimports.append("sqlalchemy.orm." + n)
# sqlalchemy.dialects package from 0.6 and newer sqlalchemy versions
version = exec_statement('import sqlalchemy; print sqlalchemy.__version__')
is_alch06 = version >= '0.6'
if is_alch06:
dialects = exec_statement("import sqlalchemy.dialects;print sqlalchemy.dialects.__all__")
dialects = eval(dialects.strip())
    for n in dialects:
hiddenimports.append("sqlalchemy.dialects." + n)
| bsd-3-clause | Python |
|
7f8f5e14f88304b272423ab12728d5329a2ba808 | use raw strings for urls | khchine5/django-shop,chriscauley/django-shop,rfleschenberg/django-shop,febsn/django-shop,rfleschenberg/django-shop,pjdelport/django-shop,atheiste/django-shop,febsn/django-shop,jrief/django-shop,febsn/django-shop,awesto/django-shop,schacki/django-shop,katomaso/django-shop,jrutila/django-shop,rfleschenberg/django-shop,dwx9/test,jrutila/django-shop,fusionbox/django-shop,nimbis/django-shop,schacki/django-shop,nimbis/django-shop,creimers/django-shop,khchine5/django-shop,jrief/django-shop,nimbis/django-shop,jrutila/django-shop,awesto/django-shop,awesto/django-shop,divio/django-shop,jrief/django-shop,DavideyLee/django-shop,chriscauley/django-shop,katomaso/django-shop,schacki/django-shop,dwx9/test,DavideyLee/django-shop,nimbis/django-shop,khchine5/django-shop,khchine5/django-shop,katomaso/django-shop,creimers/django-shop,rfleschenberg/django-shop,atheiste/django-shop,pjdelport/django-shop,creimers/django-shop,bmihelac/django-shop,pjdelport/django-shop,dwx9/test,divio/django-shop,atheiste/django-shop,fusionbox/django-shop,bmihelac/django-shop,schacki/django-shop,divio/django-shop,jrief/django-shop,chriscauley/django-shop | shop/urls/cart.py | shop/urls/cart.py | from django.conf.urls.defaults import url, patterns
from shop.views.cart import CartDetails, CartItemDetail
urlpatterns = patterns('',
url(r'^delete/$', CartDetails.as_view(action='delete'), # DELETE
name='cart_delete'),
url(r'^item/$', CartDetails.as_view(action='post'), # POST
name='cart_item_add'),
url(r'^$', CartDetails.as_view(), name='cart'), # GET
url(r'^update/$', CartDetails.as_view(action='put'),
name='cart_update'),
# CartItems
url(r'^item/(?P<id>[0-9]+)$', CartItemDetail.as_view(),
name='cart_item'),
url(r'^item/(?P<id>[0-9]+)/delete$',
CartItemDetail.as_view(action='delete'),
name='cart_item_delete'),
)
| from django.conf.urls.defaults import url, patterns
from shop.views.cart import CartDetails, CartItemDetail
urlpatterns = patterns('',
url(r'^delete/$', CartDetails.as_view(action='delete'), # DELETE
name='cart_delete'),
url('^item/$', CartDetails.as_view(action='post'), # POST
name='cart_item_add'),
url(r'^$', CartDetails.as_view(), name='cart'), # GET
url(r'^update/$', CartDetails.as_view(action='put'),
name='cart_update'),
# CartItems
url('^item/(?P<id>[0-9]+)$', CartItemDetail.as_view(),
name='cart_item'),
url('^item/(?P<id>[0-9]+)/delete$',
CartItemDetail.as_view(action='delete'),
name='cart_item_delete'),
)
| bsd-3-clause | Python |
45869cdf6087cd625db385ef52475d98c9842efa | add migen_local_install script | litex-hub/fpga_101,litex-hub/fpga_101 | migen_local_install.py | migen_local_install.py | import os
os.system("git clone http://github.com/m-labs/migen")
os.system("mv migen migen_tmp")
os.system("mv migen_tmp/migen migen")
os.system("rm -rf migen_tmp") | bsd-2-clause | Python |
|
5f31e729ce6752c2f0a6b7f19f76c2a7e95636b9 | Create friends-of-appropriate-ages.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/friends-of-appropriate-ages.py | Python/friends-of-appropriate-ages.py | # Time: O(a^2 + n), a is the number of ages,
# n is the number of people
# Space: O(a)
# Some people will make friend requests.
# The list of their ages is given and ages[i] is the age of the ith person.
#
# Person A will NOT friend request person B (B != A)
# if any of the following conditions are true:
#
# age[B] <= 0.5 * age[A] + 7
# age[B] > age[A]
# age[B] > 100 && age[A] < 100
# Otherwise, A will friend request B.
#
# Note that if A requests B, B does not necessarily request A.
# Also, people will not friend request themselves.
#
# How many total friend requests are made?
#
# Example 1:
#
# Input: [16,16]
# Output: 2
# Explanation: 2 people friend request each other.
# Example 2:
#
# Input: [16,17,18]
# Output: 2
# Explanation: Friend requests are made 17 -> 16, 18 -> 17.
# Example 3:
#
# Input: [20,30,100,110,120]
# Output: 3
# Explanation: Friend requests are made 110 -> 100, 120 -> 110, 120 -> 100.
#
# Notes:
# - 1 <= ages.length <= 20000.
# - 1 <= ages[i] <= 120.
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
import collections
class Solution(object):
def numFriendRequests(self, ages):
"""
:type ages: List[int]
:rtype: int
"""
def request(a, b):
return 0.5*a+7 < b <= a
c = collections.Counter(ages)
return sum(int(request(a, b)) * c[a]*(c[b]-int(a == b))
for a in c
for b in c)
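# Quick checks against the examples above (illustrative, not part of the
# original solution):
#   print(Solution().numFriendRequests([16, 16]))                  # 2
#   print(Solution().numFriendRequests([16, 17, 18]))              # 2
#   print(Solution().numFriendRequests([20, 30, 100, 110, 120]))   # 3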
| mit | Python |
|
9caf9d3bfaaff9d7721f611d9c351dd14f67daa6 | add log progress | imminfo/hex | log_progress.py | log_progress.py | def log_progress(sequence, every=None, size=None):
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = size / 200 # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{index} / ?'.format(index=index)
else:
progress.value = index
label.value = u'{index} / {size}'.format(
index=index,
size=size
)
yield record
except:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = unicode(index or '?') | apache-2.0 | Python |
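# Example usage inside a Jupyter notebook (illustrative, not part of the
# original commit):
#   for record in log_progress(range(1000), every=10):
#       pass  # process each record here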
|
d9c7ce7f2b47bee3b2e657157fe4df8f9a00973a | Create smiles_preview.py | yklsorok/SmilesPreview | smiles_preview.py | smiles_preview.py | import sublime
import sublime_plugin
import base64
import os
import re
class SmilesPreview(sublime_plugin.EventListener):
def on_hover(self, view, point, hover_zone):
if (hover_zone == sublime.HOVER_TEXT):
# locate smiles in the string. smiles string should be at the beginning and followed by tab (cxsmiles)
hovered_line_text = view.substr(view.line(point)).strip()
smiles_regex = re.compile(r'^([^J][A-Za-z0-9@+\-\[\]\(\)\\\/%=#$]+)\t', re.IGNORECASE)
if (smiles_regex.match(hovered_line_text)):
                # group(1) is the SMILES itself, without the trailing tab
                smiles_string = smiles_regex.match(hovered_line_text).group(1)
                file_name = "1.png"
                os.system("obabel -ismi -:" + smiles_string + " -opng -O " + file_name)
# Check that file exists
if (file_name and os.path.isfile(file_name)):
encoded = str(base64.b64encode(
open(file_name, "rb").read()
), "utf-8")
view.show_popup('<img src="data:image/png;base64,' +
encoded +
'">',
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point)
return
return
return
| mit | Python |
|
fbeb3d04b16afa0b2daf49597a07c32b0d72630c | Add missing mica.report __init__ to project | sot/mica,sot/mica | mica/report/__init__.py | mica/report/__init__.py | from .report import main, update
| bsd-3-clause | Python |
|
dca9931e894c1e5cae9f5229b04cc72c31eef5f5 | Create a.py | francolinofrancolino123/example,francolinofrancolino123/example,francolinofrancolino123/example | a.py | a.py | # this code is written in python
a = 3
print a
| mit | Python |
|
2c0a06a8e460de06dd9a929baa02e2d369fbe0a6 | Prepare v2.17.4.dev | Flexget/Flexget,Flexget/Flexget,tobinjt/Flexget,crawln45/Flexget,ianstalk/Flexget,ianstalk/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,Danfocus/Flexget,malkavi/Flexget,Flexget/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,ianstalk/Flexget,crawln45/Flexget,malkavi/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,Danfocus/Flexget,tobinjt/Flexget,gazpachoking/Flexget,gazpachoking/Flexget,crawln45/Flexget,Flexget/Flexget,malkavi/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.17.4.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.17.3'
| mit | Python |
b815b2e94814e86ba2e4713d15aa2143594344bc | Prepare v2.13.13.dev | malkavi/Flexget,gazpachoking/Flexget,ianstalk/Flexget,malkavi/Flexget,Flexget/Flexget,crawln45/Flexget,Flexget/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,Flexget/Flexget,tobinjt/Flexget,malkavi/Flexget,tobinjt/Flexget,jawilson/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,jawilson/Flexget,malkavi/Flexget,crawln45/Flexget,ianstalk/Flexget,crawln45/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,Danfocus/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,Danfocus/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,JorisDeRieck/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.13.13.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.13.12'
| mit | Python |
a30a6104554fb39d068fd8aadbb128dff1d482fb | Create dl.py | donkeybot5000/chanrip | dl.py | dl.py | #!/usr/bin/env python
import requests, urllib2, os, shutil, sys, futures
from time import sleep
download_board = sys.argv[1]
def download(**kwargs):
with open('./'+download_board+'/'+kwargs['filename'], 'wb') as handle:
request = requests.get(kwargs['url'], stream=True)
for block in request.iter_content(1024):
if not block:
break
handle.write(block)
if os.path.exists("stopcron.txt"):
print "stopcron.txt exists, downloader is aborting"
exit()
if not os.path.exists(download_board+"-modified.txt"):
shutil.copy(".backup_modified.txt", download_board+"-modified.txt")
if os.path.getsize(download_board+"-modified.txt") == 0:
shutil.copy(".backup_modified.txt", download_board+"-modified.txt")
pages = []
with open(download_board+"-modified.txt", 'r') as f:
modified = [s.strip("\n") for s in f.readlines()]
realch = 0
for a in xrange(15):
p = requests.get("http://a.4cdn.org/"+download_board+"/%s.json" % str(a), headers={'If-Modified-Since': str(modified[a])})
if p.status_code == 200 or len(modified[a]) == 0:
pages.append(p.json())
modified[a] = p.headers['Last-Modified']
sleep(1.0)
a = a + 1
with open(download_board+"-modified.txt", 'w') as f:
for a in modified:
f.write(a+"\n")
links = []
already = 0
links = []
filenames = []
for page in pages:
for thread in page['threads']:
for post in thread['posts']:
if u'filename' in post:
filename_clean = post[u'filename']
ext_clean = post[u'ext']
if 'filename' in post and not os.path.exists("./"+download_board+"/"+filename_clean+ext_clean):
links.append("http://i.4cdn.org/"+download_board+"/"+filename_clean+ext_clean)
filenames.append(filename_clean+ext_clean)
if not os.path.exists("./"+download_board+"/"):
os.makedirs("./"+download_board+"/")
with futures.ThreadPoolExecutor(max_workers=10) as e:
for i in xrange(len(links)):
e.submit(download, url=links[i], filename=filenames[i])
print "[chanrip] %s downloaded" % (str(len(links)))
| mit | Python |
|
0b90446471805276ed141800337e6044ce130b93 | Test for the bugfix of Project.last_update | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/tests/models/test_project.py | akvo/rsr/tests/models/test_project.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from unittest import TestCase
from django.contrib.auth import get_user_model
from akvo.rsr.models import Project
from akvo.rsr.models import ProjectUpdate, PayPalGateway, MollieGateway, PaymentGatewaySelector
class ProjectModelTestCase(TestCase):
"""Tests for the project model"""
def test_project_last_update(self):
""" Test Project.last_update. The field is a denormalization keeping track of the latest
update for a project, if any. When deletion of updates was introduced, a bug occurs when
deleting the latest update, as the Project.last_update field was set to None in that case.
The tests check that the fix for this bug works correctly
"""
# setup needed model instances
paypal = PayPalGateway.objects.create(name='paypal')
mollie = MollieGateway.objects.create(name='mollie')
project_1 = Project.objects.create(title="Test project 1")
user_1 = get_user_model().objects.create(email='[email protected]')
update_1 = ProjectUpdate.objects.create(title="Test update 1", project=project_1, user=user_1)
update_2 = ProjectUpdate.objects.create(title="Test update 2", project=project_1, user=user_1)
# check that update_2 is the latest
self.assertTrue(update_1.created_at < update_2.created_at)
# check that update_2 is Project.last_update
self.assertEqual(project_1.last_update, update_2)
update_2.delete()
# now update_1 should be last_update
self.assertEqual(project_1.last_update, update_1)
update_1.delete()
# now last_update is None
self.assertEqual(project_1.last_update, None)
| agpl-3.0 | Python |
|
30381ced0d7535428398b3df5f1caffd684b20d5 | Implement K means network. | prasanna08/MachineLearning | KMeansnet.py | KMeansnet.py | import numpy as np
class Kmeansnet(object):
def __init__(self, data, clusters, eta):
self.data = data
self.n_dim = data.shape[1]
self.num_clusters = clusters
self.weights = np.random.rand(self.num_clusters, self.n_dim)
self.eta = eta
def calc_dist(self, inp, weights):
return np.sum((weights * inp), axis=1)
def normalise_data(self, data):
normalisers = np.sqrt(np.sum(data ** 2, axis=1)).reshape(self.data.shape[0], 1)
return data / normalisers
def train(self, epochs):
self.data = self.normalise_data(self.data)
for i in range(epochs):
for d in range(self.data.shape[0]):
dist = self.calc_dist(self.data[d, :], self.weights)
cluster = np.argmax(dist)
                self.weights[cluster, :] += self.eta * (self.data[d, :] - self.weights[cluster, :])
def predict(self, inp):
dist = self.calc_dist(inp, self.weights)
best = np.argmax(dist)
return best
def predict_all(self, data):
best = np.zeros((data.shape[0], 1))
for i in range(data.shape[0]):
best[i] = self.predict(data[i, :])
return best
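# Illustrative usage with hypothetical 2-D data (not part of the original
# commit):
#   data = np.random.rand(100, 2)
#   net = Kmeansnet(data, clusters=3, eta=0.25)
#   net.train(epochs=10)
#   print(net.predict_all(net.data))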
| mit | Python |
|
b876332debd21edb3e3b84f01bb8aec5196bd8d8 | add enumerating partition | dragonwolverines/DataStructures,dragonwolverines/DataStructures,dragonwolverines/DataStructures | resource-4/combinatorics/integer-partitions/enumerating/partition.py | resource-4/combinatorics/integer-partitions/enumerating/partition.py | #zero
if n == 0:
yield []
return
#modify
for ig in partitions(n-1):
yield [1] + ig
if ig and (len(ig) < 2 or ig[1] > ig[0]):
yield [ig[0] + 1] + ig[1:]
| bsd-2-clause | Python |
|
39714efdbfb9620acb1bb43fa8a3dbf59bfbef85 | add shortnaturaltime template filter | arturtamborski/wypok,arturtamborski/wypok,arturtamborski/wypok,arturtamborski/wypok | wypok/templatetags/shortnaturaltime.py | wypok/templatetags/shortnaturaltime.py | # from: https://github.com/ollieglass/django-shortnaturaltime
from django import template
from django.utils.timezone import utc
import time
from datetime import datetime, timedelta, date
register = template.Library()
def _now():
return datetime.utcnow().replace(tzinfo=utc)
# return datetime.now()
def abs_timedelta(delta):
"""Returns an "absolute" value for a timedelta, always representing a
time distance."""
if delta.days < 0:
now = _now()
return now - (now + delta)
return delta
def date_and_delta(value):
"""Turn a value into a date and a timedelta which represents how long ago
it was. If that's not possible, return (None, value)."""
now = _now()
if isinstance(value, datetime):
date = value
delta = now - value
elif isinstance(value, timedelta):
date = now - value
delta = value
else:
try:
value = int(value)
delta = timedelta(seconds=value)
date = now - delta
except (ValueError, TypeError):
return (None, value)
return date, abs_timedelta(delta)
def shortnaturaldelta(value, months=True):
"""Given a timedelta or a number of seconds, return a natural
representation of the amount of time elapsed. This is similar to
``naturaltime``, but does not add tense to the result. If ``months``
is True, then a number of months (based on 30.5 days) will be used
for fuzziness between years."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
use_months = months
seconds = abs(delta.seconds)
days = abs(delta.days)
years = days // 365
days = days % 365
months = int(days // 30.5)
if not years and days < 1:
if seconds == 0:
return "1s"
elif seconds == 1:
return "1s"
elif seconds < 60:
return "%ds" % (seconds)
elif 60 <= seconds < 120:
return "1m"
elif 120 <= seconds < 3600:
return "%dm" % (seconds // 60)
elif 3600 <= seconds < 3600*2:
return "1h"
elif 3600 < seconds:
return "%dh" % (seconds // 3600)
elif years == 0:
if days == 1:
return "1d"
if not use_months:
return "%dd" % days
else:
if not months:
return "%dd" % days
elif months == 1:
return "1m"
else:
return "%dm" % months
elif years == 1:
if not months and not days:
return "1y"
elif not months:
return "1y %dd" % days
elif use_months:
if months == 1:
return "1y, 1m"
else:
return "1y %dm" % months
else:
return "1y %dd" % days
else:
return "%dy" % years
@register.filter
def shortnaturaltime(value, future=False, months=True):
"""Given a datetime or a number of seconds, return a natural representation
of that time in a resolution that makes sense. This is more or less
compatible with Django's ``naturaltime`` filter. ``future`` is ignored for
datetimes, where the tense is always figured out based on the current time.
If an integer is passed, the return value will be past tense by default,
unless ``future`` is set to True."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
# determine tense by value only if datetime/timedelta were passed
if isinstance(value, (datetime, timedelta)):
future = date > now
delta = shortnaturaldelta(delta)
if delta == "a moment":
return "now"
return delta
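# Template usage (illustrative, not part of the original commit):
#   {% load shortnaturaltime %}
#   {{ post.created|shortnaturaltime }}   e.g. renders as "5m" or "2h"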
| mit | Python |
|
d9c95fcf89f0e72c3504a4988e6d4fb6ef2ae6cd | Add the timeseries neural network | malkoto1/tribe,malkoto1/tribe,malkoto1/tribe,malkoto1/tribe | src/backend/timeseries_nnet.py | src/backend/timeseries_nnet.py | # Modified code from https://github.com/hawk31/nnet-ts
import logging
import numpy as np
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from sklearn.preprocessing import StandardScaler
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class TimeSeriesNnet(object):
def __init__(self, timeseries, hidden_layers=[20, 15, 5],
activation_functions=['relu', 'relu', 'relu'],
optimizer=SGD(), loss='mean_absolute_error',
lag=11):
self._hidden_layers = hidden_layers
self._activation_functions = activation_functions
self._optimizer = optimizer
self._loss = loss
self._lag = lag
self._timeseries = self._prepare_data(timeseries)
self._scaler = StandardScaler()
self._nn = Sequential()
if len(self._hidden_layers) != len(self._activation_functions):
raise Exception('hidden_layers size must match'
'activation_functions size')
def _prepare_data(self, timeseries):
return np.array(timeseries, dtype='float64')
def fit(self, epochs=10000, verbose=0):
timeseries_len = len(self._timeseries)
if self._lag >= timeseries_len:
raise ValueError('Lag is higher than length of the timeseries')
X = np.zeros((timeseries_len - self._lag, self._lag), dtype='float64')
y = np.log(self._timeseries[self._lag:])
        # Build the X (regressor) matrix
logging.info('Building regressor matrix')
for i in range(0, timeseries_len - self._lag):
X[i, :] = self._timeseries[range(i, i + self._lag)]
logging.info('Scaling data')
self._scaler.fit(X)
X = self._scaler.transform(X)
# Neural net architecture
logging.info('Checking network consistency')
self._nn.add(Dense(self._hidden_layers[0], input_shape=(X.shape[1],)))
self._nn.add(Activation(self._activation_functions[0]))
for layer_size, activation_function in zip(
self._hidden_layers[1:], self._activation_functions[1:]):
self._nn.add(Dense(layer_size))
self._nn.add(Activation(activation_function))
# Add final node
self._nn.add(Dense(1))
self._nn.add(Activation('linear'))
self._nn.compile(loss=self._loss, optimizer=self._optimizer)
# Train neural net
logging.info('Training neural net')
self._nn.fit(X, y, nb_epoch=epochs, verbose=verbose)
def predict_ahead(self, n_ahead=1):
# Store predictions and predict iteratively
predictions = np.zeros(n_ahead)
timeseries = self._timeseries
for i in range(n_ahead):
current_x = self._scaler.transform(
timeseries[-self._lag:].reshape((1, self._lag)))
next_pred = self._nn.predict(current_x)
predictions[i] = np.exp(next_pred[0, 0])
timeseries = np.concatenate((
timeseries, np.exp(next_pred[0, :])), axis=0)
return predictions
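# Illustrative usage with a hypothetical series (not part of the original
# commit):
#   nn = TimeSeriesNnet(range(1, 100), hidden_layers=[16, 8],
#                       activation_functions=['relu', 'relu'])
#   nn.fit(epochs=100)
#   print(nn.predict_ahead(n_ahead=5))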
| mit | Python |
|
fefb9a9fa5a7c6080bc52896e2d1517828b01a3d | Add all PLs to db | Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2 | migrations/versions/299e1d15a55f_populate_provincial_legislatures.py | migrations/versions/299e1d15a55f_populate_provincial_legislatures.py | """populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
| apache-2.0 | Python |
|
253ff8bc8f848effea6ad7602b6424cf997c926c | rename celeba_multitask_acc to celeba_multilabel_acc | last-one/tools,last-one/tools | caffe/result/celeba_multilabel_acc.py | caffe/result/celeba_multilabel_acc.py | import os
import numpy as np
import sys
label_file = open('/home/hypan/data/celebA/test.txt', 'r')
lines = label_file.readlines()
label_file.close()
acc = np.zeros(40)
cou = 0
for line in lines:
info = line.strip('\r\n').split()
name = info[0].split('.')[0]
gt_labels = info[1: ]
feat_path = '/home/hypan/data/celebA/result/' + sys.argv[1] + '/test_feature/' + name + '.npy'
    if not os.path.exists(feat_path):
        print '{} has no predicted feature.'.format(name)
        continue
    pd_labels = np.load(feat_path)
cnt = len(pd_labels)
for i in range(cnt):
gt_label = int(gt_labels[i])
pd_label = pd_labels[i]
if pd_label > 0:
pd_label = 1
else:
pd_label = -1
if gt_label == pd_label:
acc[i] += 1
cou += 1
for i in range(40):
print i, acc[i] * 1.0 / cou
| bsd-2-clause | Python |
|
d2e165ace4fc26b51e18494c4878f95ebcefa20a | add api | gobuild-old/gobuild3,gobuild-old/gobuild3,gobuild/gobuild3,gobuild-old/gobuild3,gobuild/gobuild3,gobuild-old/gobuild3,gobuild/gobuild3,gobuild-old/gobuild3,gobuild/gobuild3,gobuild/gobuild3 | web/routers/api.py | web/routers/api.py | # coding: utf-8
import os
import json
import time
import datetime
import humanize
import flask
from flask import request, flash, redirect, url_for, render_template
import models
import gcfg
bp = flask.Blueprint('api', __name__)
@bp.route('/')
def home():
return flask.render_template('api.html')
@bp.route('/v1/repolist')
@models.db_session
def repolist():
goos='windows'
goarch='amd64'
data = []
for r in models.select(r for r in models.Recommend)[:]:
item = dict(
reponame=r.repo.name,
alias=r.name,
author=r.repo.author,
description=r.repo.description,
offical=r.repo.offcial,
category=r.category.name if r.category else None,
stars=r.repo.stars,
osarch=goos+'-'+goarch,
)
files = []
for b in r.repo.builds:
if not b.downloadable:
continue
# actually only one loop
file = {'label':b.tag, 'updated':b.updated}
for f in models.select(f for f in models.File \
if f.build==b and f.os == goos and f.arch == goarch)[:1]:
file.update({'binfiles': [os.path.basename(f.reponame)], # FIXME: need to parse from gobuildrc
'size': f.size, 'url': f.outlink, 'sha1': f.sha})
files.append(file)
if files:
item['files'] = files
data.append(item)
data.append(dict(
reponame = 'github.com/codeskyblue/cgotest',
description='this is is just a test program',
alias='cgotest', # this could be null
author='unknown,lunny',
offical=True,
category='music',
stars=18,
files=[
{'label': 'branch:master', 'url': 'http://gobuild3.qiniudn.com/github.com/gogits/gogs/branch-v-master/gogs-linux-386.tar.gz', 'binfiles': ['gogs'], 'sha1': '408eebced1c2cdbd363df2fe843831bf337d4273', 'size': 7000000},
{'label': 'tag:v0.5.2', 'url': 'http://gobuild3.qiniudn.com/github.com/gogits/gogs/tag-v-v0.5.2/gogs-linux-386.tar.gz', 'binfiles': ['gogs'], 'sha1': '960e329d46ec7a79745cf3438eaf3c3151d38d97', 'size': 7100000}],
))
return flask.jsonify({'status': 0, 'message': 'success', 'osarch': 'linux-386', 'data': data})
| mit | Python |
|
edbdf0d955eb387d74a73997cd11a2d05550e05a | add new action plugin junos_config | thaim/ansible,thaim/ansible | lib/ansible/plugins/action/junos_config.py | lib/ansible/plugins/action/junos_config.py | #
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
import time
import glob
import urlparse
from ansible.plugins.action import ActionBase
from ansible.utils.unicode import to_unicode
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['changed'] = False
src = self._task.args.get('src')
if src:
if src.endswith('.xml'):
fmt = 'xml'
elif src.endswith('.set'):
fmt = 'set'
else:
fmt = 'text'
if self._task.args.get('format') is None:
self._task.args['format'] = fmt
try:
self._handle_source()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
if self._task.args.get('comment') is None:
self._task.args['comment'] = self._task.name
result.update(self._execute_module(module_name=self._task.action,
module_args=self._task.args, task_vars=task_vars))
if self._task.args.get('backup') and result.get('_backup'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
self._write_backup(task_vars['inventory_hostname'], result['_backup'])
if '_backup' in result:
del result['_backup']
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
def _handle_source(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlparse.urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'files', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('Unable to load source file')
try:
with open(source, 'r') as f:
template_data = to_unicode(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
self._task.args['src'] = self._templar.template(template_data)
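# Illustrative task using this action plugin (hypothetical, not part of the
# original commit):
#   - junos_config:
#       src: candidate_config.set
#       backup: yes
#       comment: "applied by ansible"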
| mit | Python |
|
6c9760b328716d6b2e099698293c93cba9361932 | Add script for testing error reporting. | Cue/greplin-nagios-utils,Cue/greplin-nagios-utils | checkserver/testchecks/check_error.py | checkserver/testchecks/check_error.py | #!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
| apache-2.0 | Python |
|
bc651b5ca15cf41eece321b77142c2973bd41ede | Add a sqlite config | 1844144/django-blog-zinnia,ghachey/django-blog-zinnia,extertioner/django-blog-zinnia,Fantomas42/django-blog-zinnia,ZuluPro/django-blog-zinnia,petecummings/django-blog-zinnia,dapeng0802/django-blog-zinnia,Maplecroft/django-blog-zinnia,Fantomas42/django-blog-zinnia,ZuluPro/django-blog-zinnia,dapeng0802/django-blog-zinnia,Maplecroft/django-blog-zinnia,ghachey/django-blog-zinnia,aorzh/django-blog-zinnia,Fantomas42/django-blog-zinnia,aorzh/django-blog-zinnia,extertioner/django-blog-zinnia,1844144/django-blog-zinnia,marctc/django-blog-zinnia,petecummings/django-blog-zinnia,extertioner/django-blog-zinnia,Zopieux/django-blog-zinnia,marctc/django-blog-zinnia,Maplecroft/django-blog-zinnia,ZuluPro/django-blog-zinnia,bywbilly/django-blog-zinnia,bywbilly/django-blog-zinnia,petecummings/django-blog-zinnia,dapeng0802/django-blog-zinnia,1844144/django-blog-zinnia,aorzh/django-blog-zinnia,marctc/django-blog-zinnia,bywbilly/django-blog-zinnia,Zopieux/django-blog-zinnia,ghachey/django-blog-zinnia,Zopieux/django-blog-zinnia | zinnia/tests/implementations/sqlite.py | zinnia/tests/implementations/sqlite.py | """Settings for testing zinnia on SQLite"""
from zinnia.tests.implementations.settings import * # noqa
DATABASES = {
'default': {
'NAME': 'zinnia.db',
'ENGINE': 'django.db.backends.sqlite3'
}
}
| bsd-3-clause | Python |
|
b546ac87cd3e3821619a5ac7ed7806c1f569a3cd | Create PySMS.py | CodyTXR0KR/PySMS | PySMS.py | PySMS.py | # -*- coding: utf-8 -*-
import smtplib
from time import strftime
# User account credentials -- (gmail username and password)
USERNAME = ''
PASSWORD = ''
# Routing -- (FROMADDR can be null iirc)
FROMADDR = ''
TOADDRS = ''
# Message Body
MESSAGE = ''
def SendMessage(MESSAGE):
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(USERNAME, PASSWORD)
server.sendmail(FROMADDR, TOADDRS, MESSAGE)
server.quit()
def TimeStamp():
return strftime('%-I:%M %p - %b %d %Y')
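# Illustrative usage (assumes USERNAME/PASSWORD/TOADDRS above are filled in,
# e.g. with a carrier's email-to-SMS gateway address; not part of the
# original commit):
#   SendMessage('[%s] server is up' % TimeStamp())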
| mit | Python |
|
36a8a2f52f1b85d70cda0bf399a371a4c04d0ccd | add utility script to easily launch the bottle development server | ianthetechie/dmon,ianthetechie/dmon | util/dev_runner.py | util/dev_runner.py | import os, dmon, bottle
os.chdir(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
bottle.run(host='localhost', port=8001) | mit | Python |
|
2fdace2e358ede8da1a6f569b063548f8969d825 | Add supervisor config generator | rzeka/QLDS-Manager | util/supervisor.py | util/supervisor.py | from util.config import Configuration
from configparser import ConfigParser
import platform
import os
class Supervisor:
__config = Configuration()
def __init__(self):
self.__config_file = self.__config.get_config_dir() + '/supervisor.conf'
def generate_config(self, servers):
parser = ConfigParser()
config_dir = self.__config.get_config_dir()
parser.add_section('unix_http_server')
parser.set('unix_http_server', 'file', config_dir + '/supervisor.sock')
parser.set('unix_http_server', 'chmod', '0700')
parser.add_section('supervisord')
parser.set('supervisord', 'logfile', config_dir + '/supervisor_error.log')
parser.set('supervisord', 'pidfile', config_dir + '/supervisor.pid')
parser.add_section('rpcinterface:supervisor')
parser.set('rpcinterface:supervisor', 'supervisor.rpcinterface_factory', 'supervisor.rpcinterface:make_main_rpcinterface')
parser.add_section('supervisorctl')
parser.set('supervisorctl', 'serverurl', 'unix://' + config_dir + '/supervisor.sock')
ql_executable = self.get_ql_executable()
for sid,data in servers.items():
name = 'qlds_' + sid
section = 'program:' + name
parser.add_section(section)
parser.set(section, 'command', self.build_command_line(data, ql_executable))
parser.set(section, 'process_name', name)
parser.set(section, 'autorestart', 'true')
if os.path.isfile(self.__config_file) and not os.access(self.__config_file, os.W_OK):
raise IOError('Cannot write to file ' + self.__config_file)
with (open(self.__config_file, 'w+')) as config_fp:
parser.write(config_fp)
def build_command_line(self, server, executable):
command_line = [executable]
for k,v in server.items():
command_line.append('+set %s %s' % (k, v))
return ' '.join(command_line)
def get_ql_executable(self):
if platform.architecture()[0] == '64bit':
executable = 'run_server_x64.sh'
else:
executable = 'run_server_x86.sh'
return os.path.expanduser(self.__config.get('dir', 'ql')) + '/' + executable
def get_config_location(self):
return self.__config_file
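# Illustrative usage (hypothetical server settings, not part of the original
# commit):
#   sv = Supervisor()
#   sv.generate_config({'1': {'net_port': 27960, 'sv_hostname': 'MyQLServer'}})
#   print(sv.get_config_location())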
| mit | Python |
|
9c731cd17ccc853207b715b778622274b28e9efd | Create clientsocket.py | vanbelle/CMPUT404-Lab2 | clientsocket.py | clientsocket.py |
#!/usr/bin/env python
import socket
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect(("www.google.com", 80))
request = "GET / HTTP/1.0\n\n"
clientSocket.sendall(request)
response = bytearray()
while True:
part = clientSocket.recv(1024)
if (part):
response.extend(part)
else:
break
print response
| apache-2.0 | Python |
|
885ff9c8886abd30518d2cd149f37f0ba507bb71 | add 6 | ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler | 006.py | 006.py | def sum_squares(l):
    # reduce without an initializer uses the first element unsquared; that is
    # fine here only because the range starts at 1 (1**2 == 1)
    return reduce(lambda x, y: x + y**2, l)
def square_sums(l):
return reduce(lambda x, y: x + y, l) ** 2
r = range(1, 101)
ssum = sum_squares(r)
ssquare = square_sums(r)
delta = ssquare - ssum
print ssum, ssquare, delta
| bsd-3-clause | Python |