commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
8410b027987f088b86989898b4fade5b0960886a | Solve problem 2 | mazayus/ProjectEuler | problem002.py | problem002.py | #!/usr/bin/env python3
def fibs(maxnumber):
fib1, fib2 = 1, 2
while fib1 < maxnumber:
yield fib1
fib1, fib2 = fib2, fib1 + fib2
print(sum(f for f in fibs(4000000) if f % 2 == 0))
| mit | Python |
|
278920272efd7ab959d7cad5b5f7d6c17935c7e6 | Add problem 35, circular primes | dimkarakostas/project-euler | problem_35.py | problem_35.py | from math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
| mit | Python |
|
dad430fd56b8be22bd1a3b9773f9948c3e305883 | Add unit tests for lazy strings | CovenantEyes/py_stringlike | stringlike/test/lazy_tests.py | stringlike/test/lazy_tests.py | import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
| mit | Python |
|
458d2e55de4db6c9f72758b745245301ebd02f48 | Add solution 100 | byung-u/ProjectEuler | 100_to_199/euler_100.py | 100_to_199/euler_100.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 100
If a box contains twenty-one coloured discs, composed of fifteen blue discs and six red discs, and two discs were taken at random, it can be seen that the probability of taking two blue discs, P(BB) = (15/21)Γ(14/20) = 1/2.
The next such arrangement, for which there is exactly 50% chance of taking two blue discs at random, is a box containing eighty-five blue discs and thirty-five red discs.
By finding the first arrangement to contain over 1012 = 1,000,000,000,000 discs in total, determine the number of blue discs that the box would contain.
'''
from itertools import count
from math import sqrt, ceil
# https://oeis.org/A001542
def get_nominator(n):
a = ceil((((3 + 2 * sqrt(2)) ** n) - ((3 - 2 * sqrt(2)) ** n)) / (2 * sqrt(2)))
return a
# Actually Diophantine pairs.. https://oeis.org/A011900
def p100(): # Answer: 756872327473, 0.01s
L = 10 ** 12
n = 1
for i in count(1):
np = get_nominator(i // 2) # pattern is repeated
res = n * (n+np)
n = n + np
if res * 1.414 > L: # 15/21, 85/120 is around 1.414xxxx
print(res)
break
return
p100()
| mit | Python |
|
c421024bfd1660685bb6ec6cb84a0369244627c5 | add celery module | theirc/ServiceInfo,theirc/ServiceInfo-ircdeploy,theirc/ServiceInfo,theirc/ServiceInfo,theirc/ServiceInfo | service_mapper/celery.py | service_mapper/celery.py | from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'service_mapper.settings')
app = Celery('service_mapper')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| bsd-3-clause | Python |
|
2eb05eb7d42f1b14191cccba2563c2105fabaed1 | Add processing module | petebachant/waveFlapper-OpenFOAM,petebachant/waveFlapper-OpenFOAM,petebachant/waveFlapper-OpenFOAM | processing.py | processing.py | #!/usr/bin/env python
"""
Processing routines for the waveFlapper case.
"""
import foampy
import numpy as np
import matplotlib.pyplot as plt
width_2d = 0.1
width_3d = 3.66
def plot_force():
"""Plots the streamwise force on the paddle over time."""
def plot_moment():
data = foampy.load_forces_moments()
i = 10
t = data["time"][i:]
m = data["moment"]["pressure"]["z"] + data["moment"]["viscous"]["z"]
m = m[i:]*width_3d/width_2d
plt.figure()
plt.plot(t, m)
plt.xlabel("t (s)")
plt.ylabel("Flapper moment (Nm)")
print("Max moment from CFD =", m.max(), "Nm")
print("Theoretical max moment (including inertia) =", 5500*3.3, "Nm")
plt.show()
if __name__ == "__main__":
plot_moment()
| cc0-1.0 | Python |
|
df0e285b6f8465eb273af50c242299c5601fa09f | Add a new example | channelcat/sanic,yunstanford/sanic,channelcat/sanic,ashleysommer/sanic,channelcat/sanic,yunstanford/sanic,Tim-Erwin/sanic,channelcat/sanic,yunstanford/sanic,lixxu/sanic,Tim-Erwin/sanic,ai0/sanic,jrocketfingers/sanic,lixxu/sanic,r0fls/sanic,jrocketfingers/sanic,ai0/sanic,ashleysommer/sanic,yunstanford/sanic,lixxu/sanic,lixxu/sanic,r0fls/sanic,ashleysommer/sanic | examples/sanic_aiomysql_with_global_pool.py | examples/sanic_aiomysql_with_global_pool.py | # encoding: utf-8
"""
You need the aiomysql
"""
import asyncio
import os
import aiomysql
import uvloop
from sanic import Sanic
from sanic.response import json
database_name = os.environ['DATABASE_NAME']
database_host = os.environ['DATABASE_HOST']
database_user = os.environ['DATABASE_USER']
database_password = os.environ['DATABASE_PASSWORD']
app = Sanic()
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
async def get_pool(*args, **kwargs):
"""
the first param in *args is the global instance ,
so we can store our connection pool in it .
and it can be used by different request
:param args:
:param kwargs:
:return:
"""
args[0].pool = {
"aiomysql": await aiomysql.create_pool(host=database_host, user=database_user, password=database_password,
db=database_name,
maxsize=5)}
async with args[0].pool['aiomysql'].acquire() as conn:
async with conn.cursor() as cur:
await cur.execute('DROP TABLE IF EXISTS sanic_polls')
await cur.execute("""CREATE TABLE sanic_polls (
id serial primary key,
question varchar(50),
pub_date timestamp
);""")
for i in range(0, 100):
await cur.execute("""INSERT INTO sanic_polls
(id, question, pub_date) VALUES ({}, {}, now())
""".format(i, i))
@app.route("/")
async def test():
result = []
data = {}
async with app.pool['aiomysql'].acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT question, pub_date FROM sanic_polls")
async for row in cur:
result.append({"question": row[0], "pub_date": row[1]})
if result or len(result) > 0:
data['data'] = res
return json(data)
if __name__ == '__main__':
app.run(host="127.0.0.1", workers=4, port=12000, before_start=get_pool)
| mit | Python |
|
e7b6aef4db85c777463d2335107145b60b678ae2 | Create a new tour example | seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase | examples/tour_examples/maps_introjs_tour.py | examples/tour_examples/maps_introjs_tour.py | from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="β
SeleniumBase Tours π")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="π End of Guided Tour π")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
| mit | Python |
|
8ddc9333513a2e900ff61b6d2904db3e58635bb9 | add initial self_publish version | NoRedInk/elm-ops-tooling,NoRedInk/elm-ops-tooling | elm_self_publish.py | elm_self_publish.py | #! /usr/bin/env python
from __future__ import print_function
import sys
import json
import shutil
import argparse
def copy_package(location, destination):
shutil.copytree(location, destination)
def package_name(url):
""" get the package name from a github url """
project = url.split('/')[-1].split('.')[0]
user = url.split('/')[-2]
return {
"project": project,
"user": user
}
def self_publish(package_location, destination=".", quiet=False):
""" package_location should be the local package to install
"""
elm_package_file = "{location}/elm-package.json".format(location=package_location)
exact_deps_file = "{destination}/elm-stuff/exact-dependencies.json".format(
destination=destination,
location=package_location
)
with open(elm_package_file) as f:
elm_package = json.load(f)
package_details = package_name(elm_package['repository'])
version = elm_package['version']
place = package_details['user'] + '/' + package_details['project']
copy_package(package_location, '{destination}/elm-stuff/packages/{place}/{version}'.format(
place=place,
version=version,
destination=destination
))
with open(exact_deps_file) as f:
data = f.read()
package_info = {}
if data:
package_info = json.loads(data)
with open(exact_deps_file, 'w') as f:
package_info[place] = version
json.dump(package_info, f, sort_keys=False, indent=4)
with open(elm_package_file, 'w') as f:
elm_package['dependencies'][place] = version
json.dump(elm_package, f, sort_keys=False, indent=4)
def main():
parser = argparse.ArgumentParser(description='Publish a local package into your project')
parser.add_argument('--quiet', '-q', action='store_true', help='don\'t print anything', default=False)
parser.add_argument('package_location')
parser.add_argument('destination')
args = parser.parse_args()
self_publish(args.package_location, args.destination, quiet=args.quiet)
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
|
a004611ceb3402c95675a749eb9a3db764c97e51 | Move cython_build_ext command to utils.distutils and put it to setup.cfg | edgedb/edgedb,edgedb/edgedb,edgedb/edgedb | edgedb/lang/common/distutils.py | edgedb/lang/common/distutils.py | ##
# Copyright (c) 2014 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from distutils.command import build_ext as _build_ext
class cython_build_ext(_build_ext.build_ext):
def __init__(self, *args, **kwargs):
self._ctor_args = args
self._ctor_kwargs = kwargs
self._cython = None
def __getattribute__(self, name):
cython = object.__getattribute__(self, '_cython')
if cython is None:
from Cython.Distutils import build_ext
_ctor_args = object.__getattribute__(self, '_ctor_args')
_ctor_kwargs = object.__getattribute__(self, '_ctor_kwargs')
cython = build_ext(*_ctor_args, **_ctor_kwargs)
object.__setattr__(self, '_cython', cython)
return getattr(cython, name)
| apache-2.0 | Python |
|
bc235b15bbeacf7fee7e1d23a5d94b6271e33e41 | Add initial code | adamjforster/rpsls | rpsls.py | rpsls.py | #!/usr/bin/python
from collections import OrderedDict
from random import choice, seed
from sys import exit
WEAPONS = OrderedDict([
('rock', 1),
('paper', 2),
('scissors', 3),
('lizard', 5),
('spock', 4)
])
EXPLANATIONS = {
'lizardlizard': 'Lizard equals lizard',
'lizardpaper': 'Lizard eats paper',
'lizardrock': 'Rock crushes lizard',
'lizardscissors': 'Scissors decapitate lizard',
'lizardspock': 'Lizard poisons spock',
'paperpaper': 'Paper equals paper',
'paperrock': 'Paper wraps rock',
'paperscissors': 'Scissors cut paper',
'paperspock': 'Paper disproves Spock',
'rockrock': 'Rock equals rock',
'rockscissors': 'Rock breaks scissors',
'rockspock': 'Spock vapourises rock',
'scissorsscissors': 'Scissors equal scissors',
'scissorsspock': 'Spock breaks scissors',
'spockspock': 'Spock equals Spock'
}
def do_battle(player_weapon, cpu_weapon):
explanation = EXPLANATIONS[''.join(sorted([player_weapon, cpu_weapon]))]
result = (WEAPONS[player_weapon] - WEAPONS[cpu_weapon]) % 5
if result == 0:
message = 'It\'s a draw.'
elif result % 2 == 0:
message = 'CPU wins!'
else:
message = 'Player wins!'
return '{}. {}'.format(explanation, message)
def is_valid_weapon(weapon):
return weapon in WEAPONS.keys()
def get_random_weapon():
seed()
return choice(WEAPONS.keys())
def run():
print 'Choose your weapon ({}), or quit:'.format(', '.join(WEAPONS.keys()))
player_weapon = raw_input('> ').lower()
if player_weapon == 'quit':
print 'Thanks for playing.'
exit()
if not is_valid_weapon(player_weapon):
print '\'{}\' is not a valid weapon, try again.\n'.format(player_weapon)
run()
cpu_weapon = get_random_weapon()
print '(Player) {} - vs - {} (CPU)'.format(player_weapon, cpu_weapon)
print '{}\n'.format(do_battle(player_weapon, cpu_weapon))
run()
if __name__ == '__main__':
run()
| bsd-3-clause | Python |
|
43c74dc2dbe82a30f7a9b6c0403db39eb159fc96 | add control panel test for fetch | andela-sjames/paystack-python | paystackapi/tests/test_cpanel.py | paystackapi/tests/test_cpanel.py | import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.cpanel import ControlPanel
class TestPage(BaseTestCase):
@httpretty.activate
def test_fetch_payment_session_timeout(self):
"""Method defined to test fetch payment session timeout."""
httpretty.register_uri(
httpretty.get,
self.endpoint_url("/integration/payment_session_timeout"),
content_type='text/json',
body='{"status": true, "message": "Payment session timeout retrieved"}',
status=201,
)
response = ControlPanel.fetch_payment_session_timeout()
self.assertTrue(response['status'])
| mit | Python |
|
233db6d2decad39c98bf5cbe8b974f93308bea16 | Create re.py | GuardianRG/Learn,GuardianRG/Learn,XlogicX/Learn,XlogicX/Learn,XlogicX/Learn,GuardianRG/Learn,GuardianRG/Learn,GuardianRG/Learn,XlogicX/Learn,GuardianRG/Learn,XlogicX/Learn,XlogicX/Learn,GuardianRG/Learn,XlogicX/Learn | python2.7/re.py | python2.7/re.py | #/usr/bin/python
import re
#Shows how to test if a string matches a regular expression (yes/no) and uses more than one modifier
expression = re.compile(r"^\w+.+string", re.I | re.S) #compile the expression
if expression.match("A Simple String To Test"): #See if a string matches it
print "Matched"
else:
print "Did Not Match"
#Splitting with a regular expression
scalar_list = "item 1, item 2, item 3" #A text string delimitted by comma and variable whitespace
items = re.split(",\s+", scalar_list) #Splitting this up into an array called items
print items[1] + ":" + items[0] #printing a couple of the elements
#Extraction/parsing
parse_this = "Text with some digits: 1234 and some hexidecimal deadbeef1337"
extractions = re.compile(r"[^\d]+(\d+).+\s([0-9a-f]+)$") #Our regex; groups we want in ()'s
peices = extractions.match(parse_this) #exec our re and result in peices
print "Number: " + peices.group(1) + " Hex:" + peices.group(2) #display both extracted groups
| mit | Python |
|
d93916b1927f0ae099cee3cf93619d3113db147b | Add small example of basic anomaly detection w/peewee. | coleifer/peewee,coleifer/peewee,coleifer/peewee | examples/anomaly_detection.py | examples/anomaly_detection.py | import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
| mit | Python |
|
12b334983be4caf0ba97534b52f928180e31e564 | add quick script to release lock | texastribune/salesforce-stripe,MinnPost/salesforce-stripe,texastribune/salesforce-stripe,texastribune/salesforce-stripe,MinnPost/salesforce-stripe,MinnPost/salesforce-stripe | release-lock.py | release-lock.py | from batch import Lock
lock = Lock(key="charge-cards-lock")
lock.release()
| mit | Python |
|
687a186bd29eb1bef7a134fa5499c9b4c56abaa6 | Create setup.py | sontung/pick_a_number | setup.py | setup.py | from distutils.core import setup
import py2exe, os, pygame
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
if os.path.basename(pathname).lower() in ["sdl_ttf.dll"]:
return 0
return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
pygamedir = os.path.split(pygame.base.__file__)[0]
os.path.join(pygamedir, pygame.font.get_default_font()),
os.path.join(pygamedir, 'SDL.dll'),
os.path.join(pygamedir, 'SDL_ttf.dll')
setup(
console=["pick_a_number.py"],
options={
"py2exe":{
"packages": ["pygame"]
}
}
)
| mit | Python |
|
8dfdcfa0f1d13e810a6e56e0a031f15dbaba3656 | Use environment metadata for conditional dependencies | skirsdeda/djangocms-blog,kriwil/djangocms-blog,jedie/djangocms-blog,ImaginaryLandscape/djangocms-blog,skirsdeda/djangocms-blog,motleytech/djangocms-blog,sephii/djangocms-blog,mistalaba/djangocms-blog,nephila/djangocms-blog,vnavascues/djangocms-blog,britny/djangocms-blog,britny/djangocms-blog,skirsdeda/djangocms-blog,DjangoBeer/djangocms-blog,DjangoBeer/djangocms-blog,jedie/djangocms-blog,nephila/djangocms-blog,marty3d/djangocms-blog,EnglishConnection/djangocms-blog,creimers/djangocms-blog,creimers/djangocms-blog,DjangoBeer/djangocms-blog,dapeng0802/djangocms-blog,ImaginaryLandscape/djangocms-blog,marty3d/djangocms-blog,mistalaba/djangocms-blog,nephila/djangocms-blog,EnglishConnection/djangocms-blog,vnavascues/djangocms-blog,sephii/djangocms-blog,dapeng0802/djangocms-blog,motleytech/djangocms-blog,kriwil/djangocms-blog | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import djangocms_blog
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = djangocms_blog.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='djangocms-blog',
version=version,
description='A djangoCMS 3 blog application',
long_description=readme + '\n\n' + history,
author='Iacopo Spalletti',
author_email='[email protected]',
url='https://github.com/nephila/djangocms-blog',
packages=[
'djangocms_blog',
],
include_package_data=True,
install_requires=[
'django-parler>=1.2',
'django-cms>=3.0',
'django-taggit',
'django-filer',
'pytz',
'django-taggit-templatetags',
'django-taggit-autosuggest',
'django-admin-enhancer',
'djangocms-text-ckeditor',
'cmsplugin-filer',
'django-meta>=0.2',
'django-meta-mixin>=0.1.1',
'south>=1.0.1',
],
extras_require={
":python_version=='3.3'": ['django-select2-py3'],
":python_version=='3.4'": ['django-select2-py3'],
":python_version=='2.6'": ['django-select2'],
":python_version=='2.7'": ['django-select2'],
},
license="BSD",
zip_safe=False,
keywords='djangocms-blog, blog, django, wordpress, multilingual',
test_suite='cms_helper.run',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import djangocms_blog
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = djangocms_blog.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='djangocms-blog',
version=version,
description='A djangoCMS 3 blog application',
long_description=readme + '\n\n' + history,
author='Iacopo Spalletti',
author_email='[email protected]',
url='https://github.com/nephila/djangocms-blog',
packages=[
'djangocms_blog',
],
include_package_data=True,
install_requires=[
'django-parler>=1.2',
'django-cms>=3.0',
'django-taggit',
'django-filer',
'django-select2' if sys.version_info[0]==2 else 'django-select2-py3',
'pytz',
'django-taggit-templatetags',
'django-taggit-autosuggest',
'django-admin-enhancer',
'djangocms-text-ckeditor',
'cmsplugin-filer',
'django-meta>=0.2',
'django-meta-mixin>=0.1.1',
'south>=1.0.1',
],
license="BSD",
zip_safe=False,
keywords='djangocms-blog, blog, django, wordpress, multilingual',
test_suite='cms_helper.run',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| bsd-3-clause | Python |
2e57e929db19ebd864680d4616eb1bba595f1e57 | Create setup.py | LawtonSoft/Fram3w0rk-Python | setup.py | setup.py | from distutils.core import setup
setup(
name = 'fram3w0rk-python',
packages = ['fram3w0rk-python'],
version = '0.5',
description = '"Class" effort to unify functions across 30 languages.',
author = 'Jonathan Lawton',
author_email = '[email protected]',
url = 'https://github.com/LawtonSoft/Fram3w0rk-Python',
download_url = 'https://github.com/LawtonSoft/Fram3work-Python/tarball/0.1',
keywords = ['fram3w0rk', 'mvc', 'web'],
classifiers = [],
)
| mit | Python |
|
b0184d74d0f186662df8596f511f95e1130bcf20 | Add libffi package | BreakawayConsulting/xyz | rules/libffi.py | rules/libffi.py | import xyz
import os
import shutil
class Libffi(xyz.BuildProtocol):
pkg_name = 'libffi'
def configure(self, builder, config):
builder.host_lib_configure(config=config)
rules = Libffi()
| mit | Python |
|
e846a9c77f98e61287a37953fdbee570208dd2d5 | add setup.py for python packaging | astromme/pinyinflix | setup.py | setup.py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pinyinflix',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='Converts mandarin chinese .srt files to pinyin-annotated .dfxp files that can be used with Netflix.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/astromme/pinyinflix',
# Author details
author='Andrew Stromme',
author_email='[email protected]',
# Choose your license
license='APACHE2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache 2.0 License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='netflix subtitle subtitles subs chinese mandarin pinyin hanzi srt dfxp',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
py_modules=["pinyinflix"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['jieba', 'pinyin'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'pinyinflix=pinyinflix:main',
],
},
)
| apache-2.0 | Python |
|
b1d08df29b02c107bbb2f2edc9add0c6f486c530 | Add app | JokerQyou/bot | app.py | app.py | # coding: utf-8
import json
import flask
from flask import request
import telegram
__name__ = u'eth0_bot'
__author__ = u'Joker_Qyou'
__config__ = u'config.json'
app = flask.Flask(__name__)
app.debug = False
with open(__config__, 'r') as cfr:
config = json.loads(cfr.read())
bot = telegram.Bot(token=token_info)
bot.setWebhook(u'%(server)s/%(token)s' % config)
@app.route(u'/%s' % config.get('token').split(':')[-1])
def webhook():
''' WebHook API func '''
print request.POST
| bsd-2-clause | Python |
|
69f787a69e400b69fa4aef2e49f6f03781304dae | Update setup.py. | LogicalKnight/python-astm,123412345/python-astm,andrexmd/python-astm,MarcosHaenisch/python-astm,Alwnikrotikz/python-astm,tinoshot/python-astm,asingla87/python-astm,kxepal/python-astm,kxepal/python-astm,pombreda/python-astm,tectronics/python-astm,Iskander1b/python-astm,eddiep1101/python-astm,briankip/python-astm,mhaulo/python-astm,AlanZatarain/python-astm | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm.version import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
import os
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path='.', base=""):
""" Find all packages in path """
packages = {}
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package(dir):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = dir
packages.update(find_packages(dir, module_name))
return packages
setup(
name = 'astm',
version = __version__,
description = 'Python implementation of ASTM E1381/1394 protocol.',
long_description = open('README').read(),
author = 'Alexander Shorin',
author_email = '[email protected]',
license = 'BSD',
url = 'http://code.google.com/p/python-astm',
install_requires = [],
test_suite = 'astm.tests',
zip_safe = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Medical Science Apps.'
],
packages = find_packages(),
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from astm.version import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
import os
def is_package(path):
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path='.', base=""):
""" Find all packages in path """
packages = {}
for item in os.listdir(path):
dir = os.path.join(path, item)
if is_package( dir ):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = dir
packages.update(find_packages(dir, module_name))
return packages
setup(
name = 'astm',
version = __version__,
description = 'Python implementation of ASTM E1381/1394 protocol.',
author = 'Alexander Shorin',
author_email = '[email protected]',
license = 'BSD',
url = 'http://code.google.com/p/python-astm',
install_requires = [],
test_suite = 'astm.tests',
zip_safe = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Medical Science Apps.'
],
packages = find_packages(),
)
| bsd-3-clause | Python |
19b6d71e17f616bed3566d5615b5938bbfe3a497 | Add setup.py | HTTP-APIs/hydrus,xadahiya/hydrus | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='hydrus',
version='0.0.1',
description='A space-based application for W3C HYDRA Draft',
author='W3C HYDRA development group',
author_email='[email protected]',
url='https://github.com/HTTP-APIs/hydrus',
packages=['flask==0.11'],
)
| mit | Python |
|
e2ae0798424d4aa0577e22d563646856866fbd1f | add setup.py file for pypi | byteweaver/django-versioncheck | setup.py | setup.py | import os
from setuptools import setup, find_packages
import versioncheck
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-versioncheck',
version=versioncheck.__version__,
description='A small django app which tries to be annoying if your django version is outdated.',
long_description=read('README.md'),
license='MIT License',
author='Richard Stromer',
author_email='[email protected]',
url='https://github.com/noxan/django-versioncheck',
packages=find_packages(),
install_requires=[
'django',
],
)
| bsd-3-clause | Python |
|
d43bcc978b1d79a20820ab1df73bd69d5d3c100d | Add setup.py | JPO1/BigQuery-Python,hagino3000/BigQuery-Python,tylertreat/BigQuery-Python,fusioneng/BigQuery-Python,blarghmatey/BigQuery-Python | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
VERSION = '0.0.1'
setup_args = dict(
name='BigQuery-Python',
description='Simple Python client for interacting with Google BigQuery.',
url='https://github.com/tylertreat/BigQuery-Python',
version=VERSION,
license='Apache',
packages=find_packages(),
include_package_data=True,
install_requires=['google-api-python-client', 'pyopenssl'],
author='Tyler Treat',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
if __name__ == '__main__':
setup(**setup_args)
| apache-2.0 | Python |
|
840e178a85da246d8357481a8e6ea5a8d87deef7 | Create setup.py | JoeVirtual/KonFoo | setup.py | setup.py | """
KonF'00'
~~~~~~~~
KonFoo is a Python Package for creating byte stream mappers in a declarative
way with as little code as necessary to help fighting the confusion with the
foo of the all too well-known memory dumps or binary data.
Setup
-----
.. code:: bash
$ pip install KonFoo
Links
-----
* `website <http://github.com/JoeVirtual/KonFoo/>`_
* `documentation <http://github.com/JoeVirtual/KonFoo/master/docs/>`_
* `development version
<http://github.com/JoeVirtual/KonFoo/master>`_
"""
import re
import ast
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('konfoo/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='KonFoo',
version=version,
license='BSD',
author='Jochen Gerhaeusser',
author_email='[email protected]',
url='http://github.com/JoeVirtual/KonFoo',
description='A declarative byte stream mapping engine.',
long_description=__doc__,
packages=['konfoo'],
install_requires=[],
classifiers=[
'License :: BSD License',
'Programming Language :: Python :: 3',
]
)
| bsd-3-clause | Python |
|
10ccc510deab5c97ce8a6c5ee57232c5e399986e | Add decision tree classifier attempt. | andretadeu/jhu-immuno,andretadeu/jhu-immuno | decision_tree.py | decision_tree.py | import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
f = tree.export_graphviz(clf, out_file = 'decision_tree') | mit | Python |
|
efe596e3f935fe31af5bcbd8ef1afbb6750be123 | add a setup.py | jalanb/kd,jalanb/kd | setup.py | setup.py | """Set up the kd project"""
from setuptools import setup
import kd
setup(
name='kd',
version=kd.__version__,
url='https://github.com/jalanb/kd',
license='MIT License',
author='J Alan Brogan',
author_email='[email protected]',
description='kd is a smarter cd',
platforms='any',
classifiers=[
'Programming Language :: Python :: 2.7',
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Topic :: System :: Shells',
],
test_suite='nose.collector',
tests_require=['nose'],
extras_require={
'docs': ['Sphinx'],
'testing': ['nose'],
}
)
| mit | Python |
|
9220523e6bcac6b80410a099b2f2fd30d7cbb7d3 | Add first draft of setup.py | weinshec/pyAPT | setup.py | setup.py | from setuptools import setup
setup(
name = 'pyAPT',
version = '0.1.0',
author = 'Christoph Weinsheimer',
author_email = '[email protected]',
packages = ['pyAPT'],
scripts = [],
description = 'Controller module for Thorlabs motorized stages',
install_requires = [],
)
| mit | Python |
|
49a7fdc78cd71b75b1fbcc0023e428479ce38f41 | Implement a cryptographic hash function | ElliotPenson/cryptography | sha_1.py | sha_1.py | #!/usr/local/bin/python
"""
sha_1.py
@author Elliot and Erica
"""
from cryptography_utilities import (wrap_bits_left, decimal_to_binary,
binary_to_decimal, pad_plaintext, block_split, bitwise_and,
bitwise_or, bitwise_xor, bitwise_not, hex_to_binary)
BLOCKSIZE = 512
SUB_BLOCKSIZE = 32
SHA_1_INTERVALS = 80
def add(*binaries):
"""Execute modular arithmetic mod 2^32. Input may consist of any
number of binary strings.
"""
total = 0
for binary in binaries:
total += binary_to_decimal(binary) % 2**32
return decimal_to_binary(total % 2**32)
def mixing_operation(interval, b, c, d):
"""Perform one of four operations, based on the interval. The b, c, and
d arguments are SHA-1 sub-registers.
"""
if 0 <= interval <= 19:
return bitwise_or(bitwise_and(b, c),
bitwise_and(bitwise_not(b), d))
elif interval <= 39:
return bitwise_xor(b, c, d)
elif interval <= 59:
return bitwise_or(bitwise_and(b, c),
bitwise_and(b, d),
bitwise_and(c, d))
elif interval <= 79:
return bitwise_xor(b, c, d)
else:
raise Exception('Interval out of bounds')
def round_constant(interval):
"""Return one of four binary string constants, based on the interval."""
if 0 <= interval <= 19:
return hex_to_binary('5A827999')
elif interval <= 39:
return hex_to_binary('6ED9EBA1')
elif interval <= 59:
return hex_to_binary('8F1BBCDC')
elif interval <= 79:
return hex_to_binary('CA62C1D6')
else:
raise Exception('Interval out of bounds')
def sha_1_expansion(block):
"""Take a 512 bit binary message and convert it into a series of
32 bit blocks.
"""
sub_blocks = block_split(block, SUB_BLOCKSIZE)
for interval in xrange(len(sub_blocks), SHA_1_INTERVALS):
new_sub_block = bitwise_xor(sub_blocks[interval - 3],
sub_blocks[interval - 8],
sub_blocks[interval - 14],
sub_blocks[interval - 16])
sub_blocks.append(wrap_bits_left(new_sub_block, 1))
return sub_blocks
def sha_1_compression(sub_registers, sub_blocks):
"""Combines a series of sub_blocks into a single 160-bit binary
string. The sub-registers and sub_blocks parameters should be
collections of 32-bit binary strings.
"""
a, b, c, d, e = sub_registers
for interval in xrange(SHA_1_INTERVALS):
new_a = add(wrap_bits_left(a, 5),
mixing_operation(interval, b, c, d),
e,
sub_blocks[interval],
round_constant(interval))
e = d
d = c
c = wrap_bits_left(b, 30)
b = a
a = new_a
return map(add, sub_registers, [a, b, c, d, e])
def sha_1(binary_message):
"""SHA-1 cryptographic hash function. Take a binary string of any
length and output an obfuscated 160-bit binary hash."""
padded_message = pad_plaintext(binary_message, BLOCKSIZE)
sub_registers = [hex_to_binary(initial_register)
for initial_register
in ['67452301', 'EFCDAB89', '98BADCFE',
'10325476', 'C3D2E1F0']]
for block in block_split(padded_message, BLOCKSIZE):
sub_blocks = sha_1_expansion(block)
sub_registers = sha_1_compression(sub_registers, sub_blocks)
return ''.join(sub_registers)
| mit | Python |
|
d923548321961bad8dcbe15a31ceaeda79aae934 | Create xr.py | memfiz/telnet | xr.py | xr.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
'''Element Manager xr class'''
__author__ = "Arnis Civciss ([email protected])"
__copyright__ = "Copyright (c) 2012 Arnis Civciss"
#__version__ = "$Revision: 0.1 $"
#__date__ = "$Date: 2012/01/08 $"
#__license__ = ""
import re
from lib.telnet import Telnet
class CliError(Exception):
'''iSAM command line error exception class.'''
def __init__(self, command, output):
'''Initialize cli exception. Join output in one string if it's a list.'''
self.command = command
if isinstance(output, list):
self.output = ''.join([`num` for num in output])
else:
self.output = output
def __str__(self):
'''Returns friendly cli error. Command and error ouput.'''
return "cli error in command: %s\nOutput: %s." % (self.command, self.output)
class EmXr(Telnet):
'''XR Element Manager Class'''
def __init__(self, **kwargs):
'''Initialize the node. Mandatory parameter - host - node IP address.
Default parameters:
user = 'script2'
passwd = 'xxxx'
login_wait = 'name:'
password_wait = 'assword:'
prompt='#'
timeout= 15
port = 23
enable_string=''
enable_prompt=''
enable_passwd = ''
enable_wait=''
init_command='terminal length 0'
debuglevel=None #100 is debug on
'''
host = kwargs['host']
debuglevel = kwargs.get('debuglevel', 100)
user = 'user'
passwd = 'password'
login_wait = 'name:'
password_wait = 'assword:'
prompt='#'
timeout= 15
port = 23
enable_string=''
enable_prompt='#'
enable_wait = 'assword'
init_command = 'terminal length 0'
enable_passwd = ''
self.cli_err = re.compile('% Invalid input|% Bad IP|% Access denied|% No such configuration|%|Namespace is locked by another agent|Do you wish to proceed with this commit anyway', re.DOTALL)
Telnet.__init__(self, host, user, passwd, login_wait, password_wait, port,
prompt, timeout, enable_string, enable_prompt, enable_passwd,
enable_wait, init_command, debuglevel)
#def write_raw_sequence(self, seq):
def open(self):
out = Telnet.open(self)
reh = re.compile('\n([^#]+)', re.DOTALL)
out = Telnet.run_command(self, 'terminal exec prompt no-timestamp')
out = Telnet.run_command(self, 'terminal monitor disable')
if reh.search(out).group(1):
part = (reh.search(out).group(1)).rsplit(':')[1]
self.prompt = part
self.hostname = part
def run_command(self, command):
'''Runs any command on the node. Raises CliError in case of syntax errors.
Returns output as a list.'''
out = Telnet.run_command(self, command)
out = out.translate(None, '\b')
if self.cli_err.search(out):
raise CliError(command, out)
return out
if __name__ == "__main__":
pass
try:
rtr = EmXr(host='192.168.140.1', debuglevel=0)
rtr.open()
print rtr.hostname
print rtr.prompt
out = rtr.run_command('show arp vrf ngn vlan 4005')
for key, value in arp.items():
print "Key %s, Value %s" % (key, value)
except CliError as e:
print 'Cli Error %s ' % e
# except CliError as e:
# print 'Cli Error %s ' % e
| bsd-2-clause | Python |
|
31d018181c5183acadbe309a250aed17cbae5a28 | Create Add_Binary.py | UmassJin/Leetcode | Array/Add_Binary.py | Array/Add_Binary.py | Given two binary strings, return their sum (also a binary string).
For example,
a = "11"
b = "1"
Return "100".
class Solution:
# @param a, a string
# @param b, a string
# @return a string
def addBinary(self, a, b):
A = len(a)
B = len(b)
i = 1
result = []
carry = 0
while i <= max(A,B):
sum = carry
if i <= A:
sum += int(a[-i])
if i <= B:
sum += int(b[-i])
bit = sum % 2
carry = sum / 2
i += 1
result.insert(0,str(bit))
if carry > 0 :
result.insert(0,'1')
return ''.join(result)
| mit | Python |
|
12192eca146dc1974417bd4fd2cf3722e0049910 | add arduino example | sourceperl/pyRRD_Redis,sourceperl/pyRRD_Redis,sourceperl/pyRRD_Redis | example/ard2rrd.py | example/ard2rrd.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Arduino UNO A0 value to RRD db
# - read an integer from a serial port and store it on RRD redis database
import serial
from pyRRD_Redis import RRD_redis, StepAddFunc
# some const
TAG_NAME = 'arduino_a0'
# init serial port and RRD db
ser = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=1)
rrd = RRD_redis('rrd:' + TAG_NAME, size=2048, step=1.0, add_func=StepAddFunc.avg)
# fill database
while True:
# read A0 on serial
try:
a0 = int(ser.readline())
if not 0 <= a0 <= 1023:
raise ValueError
except ValueError:
a0 = None
# store value
if a0 is not None:
# store with scale to 0/100 %
rrd.add_step(float(a0) * 100 / 1023)
| mit | Python |
|
013ee19808dc86d29cb3aa86b38dc35fe98a5580 | add to and remove from /etc/hosts some agent node info so condor can recognise its workers | ema/conpaas,ema/conpaas,ema/conpaas,ema/conpaas,ema/conpaas | conpaas-services/src/conpaas/services/htcondor/manager/node_info.py | conpaas-services/src/conpaas/services/htcondor/manager/node_info.py | """
Copyright (c) 2010-2013, Contrail consortium.
All rights reserved.
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the
above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce
the above copyright notice, this list of
conditions and the following disclaimer in the
documentation and/or other materials provided
with the distribution.
3. Neither the name of the Contrail consortium nor the
names of its contributors may be used to endorse
or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os
import re
from functools import wraps
def test_rw_permissions(f):
"""
Checks the read/write permissions of the specified file
"""
@wraps(f)
def rw_check(thefile, *args, **kwargs):
if not os.access(thefile, os.R_OK | os.W_OK):
raise Exception("Cannot read/write file %s " % thefile)
else:
return f(thefile, *args, **kwargs)
return rw_check
@test_rw_permissions
def add_node_info(hostsfile, ip, vmid):
"""
Add the newly created agent-IP and VM-id to the hostsfile
"""
targetfile = open(hostsfile,'a')
targetfile.write("%s worker-%s.htc\n" % (ip, vmid))
targetfile.close()
def remove_node_info(hostsfile, ip):
"""
Remove the agent-IP and VM-id from the hostsfile
"""
contentlines = open(hostsfile).readlines()
targetfile = open(hostsfile, 'w')
for line in contentlines:
if not re.search('^' + ip, line):
targetfile.write(line)
| bsd-3-clause | Python |
|
2f47284b44ceef3c12990a4f9621062040fe6fcb | Add day 4 solution | jesserobertson/advent | day4.py | day4.py | #!/usr/bin/env python
from hashlib import md5
tests = ['abcdef', 'pqrstuv']
string = 'iwrupvqb'
for idx in range(10000000):
hash = md5((string + str(idx)).encode('ascii'))
if hash.hexdigest().startswith('000000'):
print(idx)
break
| mit | Python |
|
e5008fdf481a80db3b5583d35e6fd369a28cd7ce | drop session_details for sessions | opencivicdata/pupa,datamade/pupa,opencivicdata/pupa,rshorey/pupa,influence-usa/pupa,datamade/pupa,mileswwatkins/pupa,mileswwatkins/pupa,rshorey/pupa,influence-usa/pupa | example/__init__.py | example/__init__.py | from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'
name = 'Example Legislature'
url = 'http://example.com'
provides = ['people']
parties = [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
]
sessions = [
{'name': '2013', '_scraped_name': '2013'}
]
def get_scraper(self, session, scraper_type):
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'
name = 'Example Legislature'
url = 'http://example.com'
provides = ['people']
parties = [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
]
session_details = {
'2013': {'_scraped_name': '2013'}
}
def get_scraper(self, session, scraper_type):
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| bsd-3-clause | Python |
863fbee6edc89b68412831677391bc51e41a1e03 | add combine program | wclark3/machine-learning,wclark3/machine-learning,wclark3/machine-learning | final-project/code/combine.py | final-project/code/combine.py | #!/usr/bin/env python
import argparse
import os
import re
import time
import pandas as pd
import numpy as np
COORD_COLUMNS = [
"left_eye_center_x", "left_eye_center_y",
"right_eye_center_x", "right_eye_center_y",
"left_eye_inner_corner_x", "left_eye_inner_corner_y",
"left_eye_outer_corner_x", "left_eye_outer_corner_y",
"right_eye_inner_corner_x", "right_eye_inner_corner_y",
"right_eye_outer_corner_x", "right_eye_outer_corner_y",
"left_eyebrow_inner_end_x", "left_eyebrow_inner_end_y",
"left_eyebrow_outer_end_x", "left_eyebrow_outer_end_y",
"right_eyebrow_inner_end_x", "right_eyebrow_inner_end_y",
"right_eyebrow_outer_end_x", "right_eyebrow_outer_end_y",
"nose_tip_x", "nose_tip_y",
"mouth_left_corner_x", "mouth_left_corner_y",
"mouth_right_corner_x", "mouth_right_corner_y",
"mouth_center_top_lip_x", "mouth_center_top_lip_y",
"mouth_center_bottom_lip_x", "mouth_center_bottom_lip_y"]
def missing_cols_names():
ordered_cols = [re.sub(r'_[xy]$', '', f) for f in COORD_COLUMNS]
selected_cols = ([c for (i, c) in enumerate(ordered_cols) if i
in range(0, len(ordered_cols), 2)])
assert set(selected_cols) == set(ordered_cols)
return ['missing_' + c for c in selected_cols]
def process(in_dir, in_filename, out_filepath):
candidate_sources = (
[d for d in os.listdir(in_dir)
if os.path.isdir(os.path.join(in_dir, d))])
sources = (
[d for d in candidate_sources if
os.path.exists(os.path.join(in_dir, d, in_filename))])
def process_file(source):
y_hat_path = os.path.join(in_dir, source, in_filename)
return pd.read_csv(y_hat_path, engine='c', index_col=0)
start_time = time.time()
print "Reading files"
frames = [process_file(s) for s in sources]
print [df.shape for df in frames]
print " took {:.3f}s".format(time.time() - start_time)
start_time = time.time()
print "Concatenating Dataframes"
result = pd.concat(frames, axis=1)
all_column_names = np.concatenate((COORD_COLUMNS, missing_cols_names()))
result.sort_index(inplace=True)
result = result[all_column_names]
print " took {:.3f}s".format(time.time() - start_time)
start_time = time.time()
print "Writing output to %s" % out_filepath
result.to_csv(out_filepath)
print " took {:.3f}s".format(time.time() - start_time)
def real_main(options):
datasources = {
"valid": {
"pred": "last_layer_val.csv",
"actual": "y_validate.csv"
},
"train": {
"pred": "last_layer_train.csv",
"actual": "y_train.csv"
}
}
for source_name, source_dict in datasources.items():
for type_name, filename in source_dict.items():
out_file = (
"combined_" + "_".join([source_name, type_name]) + '.csv')
process(options.in_dir,
filename, os.path.join(options.in_dir, out_file))
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-d', '--dir', dest='in_dir', help="Input Directory", required=True)
options = parser.parse_args()
real_main(options)
if __name__ == "__main__":
# missing_cols_names()
main()
| mit | Python |
|
04bc7c9bfe017f981a73a55b51587343725a2159 | edit 2 | ArtezGDA/text-IO,ArtezGDA/text-IO,ArtezGDA/text-IO,ArtezGDA/text-IO,ArtezGDA/text-IO | Floris/dexter.py | Floris/dexter.py | serie = {
'seasons': [
{
'name': 'S01',
'year': 2006,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': "Pilot"},
{'name': 's01e01', 'title': "Crocodile"},
{'name': 's01e01', 'title': "Popping Cherry"},
{'name': 's01e01', 'title': "Let's give the boy a hand"}
]
},
{
'name': 'S02',
'year': 2007,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S03',
'year': 2008,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S04',
'year': 2009,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S05',
'year': 2010,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S06',
'year': 2011,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S07',
'year': 2012,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
},
{
'name': 'S08',
'year': 2013,
'director': "James Manos Jr.",
'episodes': [
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""},
{'name': 's01e01', 'title': ""}
]
}
],
'main character': 'Dexter Morgan',
'title': 'Dexter'
} | mit | Python |
|
cd9c9080a00cc7e05b5ae4574dd39ddfc86fef3b | Create enc.py | flipmarley/encrypt-and-wrap | enc.py | enc.py | #!/usr/bin/python
"""
Generate encrypted messages wrapped in a self-decrypting python script
usage: python enc.py password > out.py
where password is the encryption password and out.py is the message/script file
to decrypt use: python out.py password
this will print the message to stdout.
"""
import sys, random
def encrypt(key, msg):
encrypted = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])-32
msg_c = ord(c)-32
encrypted.append(chr(((msg_c + key_c) % 95)+32))
return ''.join(encrypted)
def decrypt(key, enc):
msg=[]
for i, c in enumerate(enc):
key_c = ord(key[i % len(key)])-32
enc_c = ord(c)-32
msg.append(chr(((enc_c - key_c) % 95)+32))
return ''.join(msg)
def check(enc):
is_good=True
for i, c in enumerate(enc):
is_good = is_good and (32 <= ord(c) <= 126)
return is_good
def make_randstr(msg_len):
sl = []
r = random.SystemRandom()
for i in range(msg_len):
sl.append(chr(r.randint(32,126)))
return ''.join(sl)
if __name__ == '__main__':
msg = sys.stdin.read().replace("\n","\\n").replace("\t","\\t")
randstr = make_randstr(len(msg))
key = encrypt(sys.argv[1], randstr)
encrypted = encrypt(key, msg)
decrypted = decrypt(key, encrypted)
if not msg == decrypted:
print msg
print decrypted
raise Exception("Encryption Fail")
print """
#!/usr/bin/python
import sys
def encrypt(key, msg):
encrypted = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])-32
msg_c = ord(c)-32
encrypted.append(chr(((msg_c + key_c) % 95)+32))
return ''.join(encrypted)
def decrypt(key, enc):
msg=[]
for i, c in enumerate(enc):
key_c = ord(key[i % len(key)])-32
enc_c = ord(c)-32
msg.append(chr(((enc_c - key_c) % 95)+32))
return ''.join(msg)
if __name__ == '__main__':"""
print "\trandstr = ", repr(randstr)
print "\tenc = ", repr(encrypted)
print "\tkey = encrypt(sys.argv[1], randstr)"
print "\tdecrypted = decrypt(key, enc).replace(\"\\\\n\",\"\\n\").replace(\"\\\\t\",\"\\t\")"
print "\tprint decrypted"
| mit | Python |
|
8adfedd0c30fab796fccac6ec58c09e644a91b2f | Add script to shuffle paired fastq sequences. | roryk/junkdrawer,roryk/junkdrawer | shuffle_fastq.py | shuffle_fastq.py | # shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
| mit | Python |
|
f9e11b0e9eb5a69adaa2021499acf329023aca09 | Add Python bindings | pcercuei/libini,pcercuei/libini | ini.py | ini.py | from ctypes import POINTER, Structure, cdll, c_char_p, c_int, c_uint, byref
from sys import argv
def _checkOpen(result, func, arguments):
if result:
return result
else:
raise IOError("Failed to open INI file: '%s'" % arguments[0])
def _checkRead(result, func, arguments):
if result == -1:
raise SyntaxError("Error occured while parsing INI file")
return result
def _init():
class _INI(Structure):
pass
IniPtr = POINTER(_INI)
lib = cdll.LoadLibrary('libini.so.0')
ini_open = lib.ini_open
ini_open.restype = IniPtr
ini_open.archtypes = (c_char_p, )
ini_open.errcheck = _checkOpen
global _ini_open
_ini_open = ini_open
ini_close = lib.ini_close
ini_close.restype = None
ini_close.archtypes = (IniPtr, )
global _ini_close
_ini_close = ini_close
ini_next_section = lib.ini_next_section
ini_next_section.restype = c_int
ini_next_section.archtypes = (IniPtr, c_char_p)
ini_next_section.errcheck = _checkRead
global _ini_next_section
_ini_next_section = ini_next_section
ini_read_pair = lib.ini_read_pair
ini_read_pair.restype = c_int
ini_read_pair.archtypes = (IniPtr, c_char_p, c_char_p)
ini_read_pair.errcheck = _checkRead
global _ini_read_pair
_ini_read_pair = ini_read_pair
_init()
class INI(object):
def __init__(self, path):
self._ini = _ini_open(path)
def __del__(self):
_ini_close(self._ini)
def next_section(self):
s = c_char_p()
res = _ini_next_section(self._ini, byref(s))
if res == 1:
return s.value
def read_pair(self):
key = c_char_p()
val = c_char_p()
res = _ini_read_pair(self._ini, byref(key), byref(val))
if res == 1:
return (key.value, val.value)
return ((),())
def main():
if len(argv) != 2:
print "Usage: ini.py [INI_FILE]..."
return
ini = INI(argv[1])
while True:
name = ini.next_section()
if not name:
print 'End.'
break
print 'In section: ' + name
while True:
key, value = ini.read_pair()
if not key:
print 'End of section.'
break
print 'Reading key: ' + key + ' value: ' + value
if __name__ == '__main__':
main()
| lgpl-2.1 | Python |
|
b7f9e5555481ba4e34bcc12beecf540d3204a15f | Fix pep8 issue | dbravender/raven-python,jmp0xf/raven-python,dirtycoder/opbeat_python,Photonomie/raven-python,daikeren/opbeat_python,dirtycoder/opbeat_python,recht/raven-python,nikolas/raven-python,ewdurbin/raven-python,ronaldevers/raven-python,tarkatronic/opbeat_python,patrys/opbeat_python,arthurlogilab/raven-python,lopter/raven-python-old,ewdurbin/raven-python,icereval/raven-python,getsentry/raven-python,daikeren/opbeat_python,Photonomie/raven-python,johansteffner/raven-python,johansteffner/raven-python,smarkets/raven-python,recht/raven-python,collective/mr.poe,akalipetis/raven-python,icereval/raven-python,beniwohli/apm-agent-python,danriti/raven-python,jbarbuto/raven-python,beniwohli/apm-agent-python,ewdurbin/raven-python,someonehan/raven-python,getsentry/raven-python,ronaldevers/raven-python,akheron/raven-python,smarkets/raven-python,tarkatronic/opbeat_python,jmp0xf/raven-python,openlabs/raven,lepture/raven-python,ronaldevers/raven-python,ticosax/opbeat_python,smarkets/raven-python,danriti/raven-python,nikolas/raven-python,recht/raven-python,Photonomie/raven-python,jmagnusson/raven-python,johansteffner/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,patrys/opbeat_python,dbravender/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,percipient/raven-python,nikolas/raven-python,tarkatronic/opbeat_python,smarkets/raven-python,lepture/raven-python,jbarbuto/raven-python,someonehan/raven-python,akalipetis/raven-python,percipient/raven-python,ticosax/opbeat_python,beniwohli/apm-agent-python,hzy/raven-python,arthurlogilab/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,akalipetis/raven-python,akheron/raven-python,inspirehep/raven-python,akheron/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,jbarbuto/raven-python,arthurlogilab/raven-python,someonehan/raven-python,lepture/raven-python,dirtycoder/opbeat_python,danriti/raven-python,jmp0xf/raven-python,icereval/raven-python,jbarbuto/raven-python,getsentry/raven-python,dbravender/raven-python,hzy/raven-python,jmagnusson/raven-python,beniwohli/apm-agent-python,icereval/raven-python,patrys/opbeat_python,inspirehep/raven-python,arthurlogilab/raven-python,inspirehep/raven-python,inspirehep/raven-python,alex/raven,percipient/raven-python,hzy/raven-python,daikeren/opbeat_python,jmagnusson/raven-python,nikolas/raven-python,patrys/opbeat_python,ticosax/opbeat_python | raven/contrib/celery/__init__.py | raven/contrib/celery/__init__.py | """
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from celery.task import task
except ImportError:
from celery.decorators import task
from celery.signals import task_failure
from raven.base import Client
class CeleryMixin(object):
def send_encoded(self, message):
"Errors through celery"
self.send_raw.delay(message)
@task(routing_key='sentry')
def send_raw(self, message):
return super(CeleryMixin, self).send_encoded(message)
class CeleryClient(CeleryMixin, Client):
pass
def register_signal(client):
@task_failure.connect(weak=False)
def process_failure_signal(sender, task_id, exception, args, kwargs,
traceback, einfo, **kw):
client.captureException(
exc_info=einfo.exc_info,
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
| """
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from celery.task import task
except ImportError:
from celery.decorators import task
from celery.signals import task_failure
from raven.base import Client
class CeleryMixin(object):
def send_encoded(self, message):
"Errors through celery"
self.send_raw.delay(message)
@task(routing_key='sentry')
def send_raw(self, message):
return super(CeleryMixin, self).send_encoded(message)
class CeleryClient(CeleryMixin, Client):
pass
def register_signal(client):
@task_failure.connect(weak=False)
def process_failure_signal(sender, task_id, exception, args, kwargs,
traceback, einfo, **kw):
client.captureException(
exc_info=einfo.exc_info,
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
| bsd-3-clause | Python |
dba14e6dfbaacf79d88f1be0b831488f45fc1bfc | Create coroutine.py | Python-IoT/Smart-IoT-Planting-System,Python-IoT/Smart-IoT-Planting-System | gateway/src/test/coroutine.py | gateway/src/test/coroutine.py | #!/usr/bin/python3.5
import asyncio
import time
now = lambda: time.time()
async def func(x):
print('Waiting for %d s' % x)
await asyncio.sleep(x)
return 'Done after {}s'.format(x)
start = now()
coro1 = func(1)
coro2 = func(2)
coro3 = func(4)
tasks = [
asyncio.ensure_future(coro1),
asyncio.ensure_future(coro2),
asyncio.ensure_future(coro3)
]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(tasks))
for task in tasks:
print('Task return: ', task.result())
print('Program consumes: %f s' % (now() - start))
| mit | Python |
|
b9d47f54b76345f0c8f7d486282fc416ba540aee | Add specs for ArgumentParser | codeclimate/python-test-reporter,codeclimate/python-test-reporter | tests/test_argument_parser.py | tests/test_argument_parser.py | import pytest
from codeclimate_test_reporter.components.argument_parser import ArgumentParser
def test_parse_args_default():
parsed_args = ArgumentParser().parse_args([])
assert(parsed_args.file == "./.coverage")
assert(parsed_args.token is None)
assert(parsed_args.stdout is False)
assert(parsed_args.debug is False)
assert(parsed_args.version is False)
def test_parse_args_with_options():
args = ["--version", "--debug", "--stdout", "--file", "file", "--token", "token"]
parsed_args = ArgumentParser().parse_args(args)
assert(parsed_args.debug)
assert(parsed_args.file == "file")
assert(parsed_args.token == "token")
assert(parsed_args.stdout)
assert(parsed_args.version)
| mit | Python |
|
614579c38bea10798d285ec2608650d36369020a | add test demonstrating duplicate stream handling | malwarefrank/dnfile | tests/test_invalid_streams.py | tests/test_invalid_streams.py | import fixtures
import dnfile
def test_duplicate_stream():
path = fixtures.DATA / "invalid-streams" / "duplicate-stream.exe"
dn = dnfile.dnPE(path)
assert "#US" in dn.net.metadata.streams
assert dn.net.user_strings.get_us(1).value == "BBBBBBBB" | mit | Python |
|
ffb5caf83055e734baf711366b6779ecb24a013c | Add script to generate other adobe themes | Geequlim/godot-themes | addons/adobe/clone.py | addons/adobe/clone.py | #!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
result = []
for root, subdirs, files in os.walk(path):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
result.append(os.path.join(root, filename))
return result
def inverse(inpng, outpng):
image = Image.open(inpng)
if image.mode == 'RGBA':
r, g, b, a = image.split()
rgb_image = Image.merge('RGB', (r, g, b))
inverted_image = PIL.ImageOps.invert(rgb_image)
r2, g2, b2 = inverted_image.split()
final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
final_transparent_image.save(outpng)
else:
inverted_image = PIL.ImageOps.invert(image)
inverted_image.save(outpng)
def darken(inpng, outpng, darkness):
im1 = Image.open(inpng)
im2 = im1.point(lambda p: p * darkness)
im2.save(outpng)
def bright(inpng, outpng, brightness):
peak = Image.open(inpng)
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness)
bright.save(outpng)
def makeClone(name, brightness):
outdir = os.path.join("..", name)
if not os.path.isdir(outdir):
os.makedirs(outdir)
for p in globPath('.', "**"):
outfile = os.path.join(outdir, p)
curdir = os.path.dirname(outfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
if p.endswith(".png"):
bright(p, outfile, brightness)
elif p.endswith(".tres"):
content = open(p).read()
content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
of = open(outfile, 'w')
of.write(content)
of.close()
else:
shutil.copy(p, outfile)
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
| mit | Python |
|
c5ecaef62d788b69446181c6ba495cb273bf98ef | Add rolling mean scatter plot example | altair-viz/altair,jakevdp/altair | altair/examples/scatter_with_rolling_mean.py | altair/examples/scatter_with_rolling_mean.py | """
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)
points + line
| bsd-3-clause | Python |
|
5ec793ffb8c260a02ab7da655b5f56ff3c3f5da7 | add find_anagrams.py | gsathya/dsalgo,gsathya/dsalgo | algo/find_anagrams.py | algo/find_anagrams.py | words = "oolf folo oolf lfoo fool oofl fool loof oofl folo abr bra bar rab rba abr arb bar abr abr"
words = [word.strip() for word in words.split(" ")]
anagrams = {}
for word in words:
sorted_word = ''.join(sorted(word))
anagrams[sorted_word] = anagrams.get(sorted_word, []) + [word]
print anagrams
| mit | Python |
|
f830c778fd06e1548da0b87aafa778834005c64e | Add fls simprocedures | iamahuman/angr,chubbymaggie/angr,axt/angr,schieb/angr,axt/angr,schieb/angr,f-prettyland/angr,angr/angr,iamahuman/angr,tyb0807/angr,iamahuman/angr,tyb0807/angr,chubbymaggie/angr,angr/angr,f-prettyland/angr,axt/angr,chubbymaggie/angr,f-prettyland/angr,schieb/angr,tyb0807/angr,angr/angr | angr/procedures/win32/fiber_local_storage.py | angr/procedures/win32/fiber_local_storage.py | import angr
KEY = 'win32_fls'
def mutate_dict(state):
d = dict(state.globals.get(KEY, {}))
state.globals[KEY] = d
return d
def has_index(state, idx):
if KEY not in state.globals:
return False
return idx in state.globals[KEY]
class FlsAlloc(angr.SimProcedure):
def run(self, callback):
if not self.state.solver.is_true(callback == 0):
raise angr.errors.SimValueError("Can't handle callback function in FlsAlloc")
d = mutate_dict(self.state)
new_key = len(d) + 1
d[new_key] = self.state.se.BVV(0, self.state.arch.bits)
return new_key
class FlsFree(angr.SimProcedure):
def run(self, index):
set_val = self.inline_call(FlsSetValue, (index, self.state.se.BVV(0, self.state.arch.bits)))
return set_val.ret_expr
class FlsSetValue(angr.SimProcedure):
def run(self, index, value):
conc_indexs = self.state.se.any_n_int(index, 2)
if len(conc_indexs) != 1:
raise angr.errors.SimValueError("Can't handle symbolic index in FlsSetValue")
conc_index = conc_indexs[0]
if not has_index(self.state, conc_index):
return 0
mutate_dict(self.state)[conc_index] = value
return 1
class FlsGetValue(angr.SimProcedure):
def run(self, index):
conc_indexs = self.state.se.any_n_int(index, 2)
if len(conc_indexs) != 1:
raise angr.errors.SimValueError("Can't handle symbolic index in FlsGetValue")
conc_index = conc_indexs[0]
if not has_index(self.state, conc_index):
return 0
return self.globals[KEY][conc_index]
| bsd-2-clause | Python |
|
1bda23c9e6fee7815617a8ad7f64c80a32e223c5 | Add script for jira story point report. | YzPaul3/h2o-3,mrgloom/h2o-3,PawarPawan/h2o-v3,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,tarasane/h2o-3,madmax983/h2o-3,spennihana/h2o-3,pchmieli/h2o-3,PawarPawan/h2o-v3,spennihana/h2o-3,junwucs/h2o-3,mathemage/h2o-3,printedheart/h2o-3,brightchen/h2o-3,tarasane/h2o-3,junwucs/h2o-3,junwucs/h2o-3,mathemage/h2o-3,pchmieli/h2o-3,ChristosChristofidis/h2o-3,datachand/h2o-3,jangorecki/h2o-3,ChristosChristofidis/h2o-3,printedheart/h2o-3,weaver-viii/h2o-3,pchmieli/h2o-3,bospetersen/h2o-3,ChristosChristofidis/h2o-3,kyoren/https-github.com-h2oai-h2o-3,printedheart/h2o-3,brightchen/h2o-3,weaver-viii/h2o-3,pchmieli/h2o-3,bikash/h2o-dev,datachand/h2o-3,bospetersen/h2o-3,YzPaul3/h2o-3,weaver-viii/h2o-3,pchmieli/h2o-3,kyoren/https-github.com-h2oai-h2o-3,jangorecki/h2o-3,h2oai/h2o-3,h2oai/h2o-3,nilbody/h2o-3,kyoren/https-github.com-h2oai-h2o-3,michalkurka/h2o-3,datachand/h2o-3,tarasane/h2o-3,nilbody/h2o-3,pchmieli/h2o-3,bospetersen/h2o-3,h2oai/h2o-3,datachand/h2o-3,mrgloom/h2o-3,jangorecki/h2o-3,printedheart/h2o-3,YzPaul3/h2o-3,YzPaul3/h2o-3,ChristosChristofidis/h2o-3,bikash/h2o-dev,weaver-viii/h2o-3,bikash/h2o-dev,junwucs/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,junwucs/h2o-3,madmax983/h2o-3,junwucs/h2o-3,brightchen/h2o-3,michalkurka/h2o-3,printedheart/h2o-3,michalkurka/h2o-3,mrgloom/h2o-3,tarasane/h2o-3,spennihana/h2o-3,PawarPawan/h2o-v3,kyoren/https-github.com-h2oai-h2o-3,h2oai/h2o-dev,brightchen/h2o-3,h2oai/h2o-3,brightchen/h2o-3,mrgloom/h2o-3,kyoren/https-github.com-h2oai-h2o-3,mathemage/h2o-3,mrgloom/h2o-3,madmax983/h2o-3,h2oai/h2o-3,madmax983/h2o-3,bikash/h2o-dev,weaver-viii/h2o-3,pchmieli/h2o-3,michalkurka/h2o-3,mrgloom/h2o-3,mathemage/h2o-3,h2oai/h2o-3,nilbody/h2o-3,jangorecki/h2o-3,nilbody/h2o-3,datachand/h2o-3,weaver-viii/h2o-3,YzPaul3/h2o-3,weaver-viii/h2o-3,printedheart/h2o-3,datachand/h2o-3,bospetersen/h2o-3,h2oai/h2o-dev,PawarPawan/h2o-v3,YzPaul3/h2o-3,mathemage/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,bospetersen/h2o-3,bospetersen/h2o-3,bikash/h2o-dev,brightchen/h2o-3,michalkurka/h2o-3,kyoren/https-github.com-h2oai-h2o-3,jangorecki/h2o-3,junwucs/h2o-3,ChristosChristofidis/h2o-3,brightchen/h2o-3,madmax983/h2o-3,tarasane/h2o-3,mrgloom/h2o-3,printedheart/h2o-3,h2oai/h2o-dev,madmax983/h2o-3,nilbody/h2o-3,spennihana/h2o-3,bospetersen/h2o-3,bikash/h2o-dev,PawarPawan/h2o-v3,mathemage/h2o-3,spennihana/h2o-3,madmax983/h2o-3,PawarPawan/h2o-v3,datachand/h2o-3,h2oai/h2o-dev,ChristosChristofidis/h2o-3,ChristosChristofidis/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,PawarPawan/h2o-v3,tarasane/h2o-3,nilbody/h2o-3,kyoren/https-github.com-h2oai-h2o-3,tarasane/h2o-3,nilbody/h2o-3 | scripts/jira.py | scripts/jira.py | #!/usr/bin/python
import sys
import os
import requests
import urllib
g_user = None
g_pass = None
g_sprint = None
def usage():
print("")
print("usage: " + g_script_name + " --user username --pass password --sprint sprintname")
print("")
sys.exit(1)
def unknown_arg(s):
print("")
print("ERROR: Unknown argument: " + s)
print("")
usage()
def parse_args(argv):
global g_user
global g_pass
global g_sprint
i = 1
while (i < len(argv)):
s = argv[i]
if (s == "--user"):
i += 1
if (i > len(argv)):
usage()
g_user = argv[i]
elif (s == "--pass"):
i += 1
if (i > len(argv)):
usage()
g_pass = argv[i]
elif (s == "--sprint"):
i += 1
if (i > len(argv)):
usage()
g_sprint = argv[i]
elif (s == "-h" or s == "--h" or s == "-help" or s == "--help"):
usage()
else:
unknown_arg(s)
i += 1
if (g_user is None):
usage()
if (g_pass is None):
usage()
if (g_sprint is None):
usage()
def main(argv):
"""
Main program.
@return: none
"""
global g_script_name
g_script_name = os.path.basename(argv[0])
parse_args(argv)
url = 'https://0xdata.atlassian.net/rest/api/2/search?jql=sprint="' + urllib.quote(g_sprint) + '"&maxResults=1000'
r = requests.get(url, auth=(g_user, g_pass))
if (r.status_code != 200):
print("ERROR: status code is " + str(r.status_code))
sys.exit(1)
j = r.json()
issues = j[u'issues']
story_points_map = {}
for issue in issues:
name = issue[u'fields'][u'assignee'][u'name']
story_points = issue[u'fields'][u'customfield_10004']
if story_points is None:
story_points = 0
else:
story_points = float(story_points)
if name in story_points_map:
n = story_points_map[name]
story_points_map[name] = n + story_points
else:
story_points_map[name] = story_points
for key in sorted(story_points_map.keys()):
value = story_points_map[key]
print("{}: {}").format(key, value)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | Python |
|
a24095964e32da33ea946b3c28bdc829a505585d | Add lidar example | trikset/trik-models,trikset/trik-models,trikset/trik-models,trikset/trik-models | lidar.py | lidar.py | """ Copyright 2021 CyberTech Labs Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. """
import math
w = 240
h = 280
scale = 0.5
waitTimer = 500
moveControl = 0
tickPerSecond = 1000 // waitTimer
while not brick.keys().wasPressed(KeysEnum.Up):
moveControl = (moveControl + 1) % (10 * tickPerSecond)
power = 100
if math.sin(moveControl / tickPerSecond) < 0:
power = -100
brick.motor('M3').setPower(power)
brick.motor('M4').setPower(power)
pic = [0x008800] * (h * w)
for j in range(w // 2, w):
pic[h // 2 * w + j] = 0x888888
data = brick.lidar().read()
for i in range(360):
distance = data[i]
if distance == 0:
continue
theta = i * math.pi / 180
x = distance * math.cos(theta)
y = distance * math.sin(theta)
x_px = min(w - 1, max(0, math.floor(x * scale + w / 2)))
y_px = min(h - 1, max(0, math.floor(y * scale + h / 2)))
pic[y_px * w + x_px] = 0
brick.display().show(pic, w, h, 'rgb32')
script.wait(waitTimer)
brick.stop()
| apache-2.0 | Python |
|
ccf1fb5d5ef1e2b12bc49afd260b1d2d0a166a43 | Prepare v2.20.7.dev | ianstalk/Flexget,JorisDeRieck/Flexget,ianstalk/Flexget,crawln45/Flexget,Danfocus/Flexget,Flexget/Flexget,tobinjt/Flexget,gazpachoking/Flexget,ianstalk/Flexget,crawln45/Flexget,crawln45/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,Danfocus/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,Flexget/Flexget,Danfocus/Flexget,tobinjt/Flexget,malkavi/Flexget,malkavi/Flexget,malkavi/Flexget,Flexget/Flexget,Danfocus/Flexget,crawln45/Flexget,gazpachoking/Flexget,malkavi/Flexget,JorisDeRieck/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.7.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.6'
| mit | Python |
950e6b975323293ed8b73a5ffe8448072e0dac27 | Fix downloader | mojoBrendan/fmt,wangshijin/cppformat,alabuzhev/fmt,cppformat/cppformat,seungrye/cppformat,wangshijin/cppformat,blaquee/cppformat,wangshijin/cppformat,dean0x7d/cppformat,lightslife/cppformat,nelson4722/cppformat,seungrye/cppformat,seungrye/cppformat,lightslife/cppformat,dean0x7d/cppformat,lightslife/cppformat,nelson4722/cppformat,mojoBrendan/fmt,blaquee/cppformat,Jopie64/cppformat,alabuzhev/fmt,dean0x7d/cppformat,Jopie64/cppformat,blaquee/cppformat,Jopie64/cppformat,alabuzhev/fmt,cppformat/cppformat,cppformat/cppformat,nelson4722/cppformat,mojoBrendan/fmt | support/download.py | support/download.py | # A file downloader.
import contextlib, os, tempfile, timer, urllib2, urlparse
class Downloader:
def __init__(self, dir=None):
self.dir = dir
# Downloads a file and removes it when exiting a block.
# Usage:
# d = Downloader()
# with d.download(url) as f:
# use_file(f)
def download(self, url, cookie=None):
suffix = os.path.splitext(urlparse.urlsplit(url)[2])[1]
fd, filename = tempfile.mkstemp(suffix=suffix, dir=self.dir)
os.close(fd)
with timer.print_time('Downloading', url, 'to', filename):
opener = urllib2.build_opener()
if cookie:
opener.addheaders.append(('Cookie', cookie))
num_tries = 2
for i in range(num_tries):
try:
f = opener.open(url)
except urllib2.URLError, e:
print('Failed to open url', url)
continue
length = f.headers.get('content-length')
if not length:
print('Failed to get content-length')
continue
length = int(length)
with open(filename, 'wb') as out:
count = 0
while count < length:
data = f.read(1024 * 1024)
count += len(data)
out.write(data)
@contextlib.contextmanager
def remove(filename):
try:
yield filename
finally:
os.remove(filename)
return remove(filename)
| # A file downloader.
import contextlib, os, tempfile, timer, urllib2, urlparse
class Downloader:
def __init__(self, dir=None):
self.dir = dir
# Downloads a file and removes it when exiting a block.
# Usage:
# d = Downloader()
# with d.download(url) as f:
# use_file(f)
def download(self, url, cookie=None):
suffix = os.path.splitext(urlparse.urlsplit(url)[2])[1]
fd, filename = tempfile.mkstemp(suffix=suffix, dir=self.dir)
os.close(fd)
with timer.print_time('Downloading', url, 'to', filename):
opener = urllib2.build_opener()
if cookie:
opener.addheaders.append(('Cookie', cookie))
num_tries = 2
for i in range(num_tries):
try:
f = opener.open(url)
except urllib2.URLError, e:
print('Failed to open url', url)
continue
length = f.headers.get('content-length')
if not length:
print('Failed to get content-length')
continue
length = int(length)
with open(filename, 'wb') as out:
count = 0
while count < length:
data = f.read(1024 * 1024)
count += len(data)
out.write(data)
@contextlib.contextmanager
def remove(filename):
try:
yield filename
finally:
pass #os.remove(filename)
return remove(filename)
| bsd-2-clause | Python |
7c84bfb5a37705cc824489b0c1c5aba415ccff6b | Split out of SWDCommon.py | kcuzner/PySWD,pfalcon/PySWD,heartscrytech/PySWD,heartscrytech/PySWD,pfalcon/PySWD,kcuzner/PySWD,pfalcon/PySWD | DebugPort.py | DebugPort.py | class DebugPort:
ID_CODES = (
0x1BA01477, # EFM32
0x2BA01477, # STM32
0x0BB11477, # NUC1xx
)
def __init__ (self, swd):
self.swd = swd
# read the IDCODE
# Hugo: according to ARM DDI 0316D we should have 0x2B.. not 0x1B.., but
# 0x1B.. is what upstream used, so leave it in here...
idcode = self.idcode()
if idcode not in DebugPort.ID_CODES:
print "warning: unexpected idcode: ", idcode
# power shit up
self.swd.writeSWD(False, 1, 0x54000000)
if (self.status() >> 24) != 0xF4:
print "error powering up system"
sys.exit(1)
# get the SELECT register to a known state
self.select(0,0)
self.curAP = 0
self.curBank = 0
def idcode (self):
return self.swd.readSWD(False, 0)
def abort (self, orunerr, wdataerr, stickyerr, stickycmp, dap):
value = 0x00000000
value = value | (0x10 if orunerr else 0x00)
value = value | (0x08 if wdataerr else 0x00)
value = value | (0x04 if stickyerr else 0x00)
value = value | (0x02 if stickycmp else 0x00)
value = value | (0x01 if dap else 0x00)
self.swd.writeSWD(False, 0, value)
def status (self):
return self.swd.readSWD(False, 1)
def control (self, trnCount = 0, trnMode = 0, maskLane = 0, orunDetect = 0):
value = 0x54000000
value = value | ((trnCount & 0xFFF) << 12)
value = value | ((maskLane & 0x00F) << 8)
value = value | ((trnMode & 0x003) << 2)
value = value | (0x1 if orunDetect else 0x0)
self.swd.writeSWD(False, 1, value)
def select (self, apsel, apbank):
value = 0x00000000
value = value | ((apsel & 0xFF) << 24)
value = value | ((apbank & 0x0F) << 4)
self.swd.writeSWD(False, 2, value)
def readRB (self):
return self.swd.readSWD(False, 3)
def readAP (self, apsel, address):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
return self.swd.readSWD(True, adrReg)
def writeAP (self, apsel, address, data, ignore = False):
adrBank = (address >> 4) & 0xF
adrReg = (address >> 2) & 0x3
if apsel != self.curAP or adrBank != self.curBank:
self.select(apsel, adrBank)
self.curAP = apsel
self.curBank = adrBank
self.swd.writeSWD(True, adrReg, data, ignore)
| bsd-3-clause | Python |
|
570aaad3da93f9252efb787a58bbe5151eff93d4 | Create run_ToolKit.py | Pharaoh00/Pharaoh-Toolkit | 0.0.5/run_ToolKit.py | 0.0.5/run_ToolKit.py | # run_ToolKit.py
from modulos import main
if __name__ == "__main__":
main.main()
| mit | Python |
|
857ccf7f6cfed4e8663d635c119f8683c9ee09e0 | Add random choice plugin (with_random_choice) | thaim/ansible,thaim/ansible | lib/ansible/runner/lookup_plugins/random_choice.py | lib/ansible/runner/lookup_plugins/random_choice.py | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from ansible import utils, errors
import random
# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
#
# tasks:
# - debug: msg=$item
# with_random_choice:
# - one
# - two
# - three
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
return [ random.choice(terms) ]
| mit | Python |
|
b3f91806b525ddef50d541f937bed539f9bae20a | Use cache backend for sessions in deployed settings. | Kniyl/mezzanine,webounty/mezzanine,spookylukey/mezzanine,theclanks/mezzanine,batpad/mezzanine,sjdines/mezzanine,dovydas/mezzanine,readevalprint/mezzanine,eino-makitalo/mezzanine,industrydive/mezzanine,joshcartme/mezzanine,Cajoline/mezzanine,frankier/mezzanine,PegasusWang/mezzanine,biomassives/mezzanine,Skytorn86/mezzanine,adrian-the-git/mezzanine,agepoly/mezzanine,saintbird/mezzanine,damnfine/mezzanine,stbarnabas/mezzanine,dsanders11/mezzanine,biomassives/mezzanine,gradel/mezzanine,joshcartme/mezzanine,vladir/mezzanine,geodesign/mezzanine,molokov/mezzanine,geodesign/mezzanine,geodesign/mezzanine,sjuxax/mezzanine,orlenko/sfpirg,SoLoHiC/mezzanine,orlenko/sfpirg,wyzex/mezzanine,vladir/mezzanine,wyzex/mezzanine,douglaskastle/mezzanine,Cicero-Zhao/mezzanine,nikolas/mezzanine,theclanks/mezzanine,scarcry/snm-mezzanine,wyzex/mezzanine,frankchin/mezzanine,dekomote/mezzanine-modeltranslation-backport,dekomote/mezzanine-modeltranslation-backport,readevalprint/mezzanine,dsanders11/mezzanine,gbosh/mezzanine,saintbird/mezzanine,damnfine/mezzanine,molokov/mezzanine,scarcry/snm-mezzanine,SoLoHiC/mezzanine,christianwgd/mezzanine,sjuxax/mezzanine,stephenmcd/mezzanine,ZeroXn/mezzanine,vladir/mezzanine,batpad/mezzanine,nikolas/mezzanine,Kniyl/mezzanine,wrwrwr/mezzanine,biomassives/mezzanine,promil23/mezzanine,dekomote/mezzanine-modeltranslation-backport,Skytorn86/mezzanine,jerivas/mezzanine,cccs-web/mezzanine,AlexHill/mezzanine,Cajoline/mezzanine,mush42/mezzanine,fusionbox/mezzanine,agepoly/mezzanine,orlenko/sfpirg,dsanders11/mezzanine,wbtuomela/mezzanine,guibernardino/mezzanine,wbtuomela/mezzanine,viaregio/mezzanine,orlenko/plei,emile2016/mezzanine,dustinrb/mezzanine,webounty/mezzanine,douglaskastle/mezzanine,orlenko/plei,promil23/mezzanine,gradel/mezzanine,frankier/mezzanine,emile2016/mezzanine,Skytorn86/mezzanine,mush42/mezzanine,cccs-web/mezzanine,SoLoHiC/mezzanine,damnfine/mezzanine,douglaskastle/mezzanine,nikolas/mezzanine,PegasusWang/mezzanine,industrydive/mezzanine,spookylukey/mezzanine,Cicero-Zhao/mezzanine,PegasusWang/mezzanine,adrian-the-git/mezzanine,viaregio/mezzanine,fusionbox/mezzanine,eino-makitalo/mezzanine,jerivas/mezzanine,ryneeverett/mezzanine,dovydas/mezzanine,gbosh/mezzanine,emile2016/mezzanine,frankchin/mezzanine,dovydas/mezzanine,saintbird/mezzanine,ZeroXn/mezzanine,webounty/mezzanine,ryneeverett/mezzanine,jerivas/mezzanine,agepoly/mezzanine,stephenmcd/mezzanine,readevalprint/mezzanine,wrwrwr/mezzanine,gradel/mezzanine,theclanks/mezzanine,joshcartme/mezzanine,dustinrb/mezzanine,frankchin/mezzanine,Kniyl/mezzanine,tuxinhang1989/mezzanine,christianwgd/mezzanine,molokov/mezzanine,ryneeverett/mezzanine,stbarnabas/mezzanine,tuxinhang1989/mezzanine,sjdines/mezzanine,ZeroXn/mezzanine,viaregio/mezzanine,jjz/mezzanine,jjz/mezzanine,guibernardino/mezzanine,Cajoline/mezzanine,industrydive/mezzanine,sjuxax/mezzanine,tuxinhang1989/mezzanine,eino-makitalo/mezzanine,orlenko/plei,jjz/mezzanine,sjdines/mezzanine,gbosh/mezzanine,mush42/mezzanine,dustinrb/mezzanine,scarcry/snm-mezzanine,christianwgd/mezzanine,adrian-the-git/mezzanine,stephenmcd/mezzanine,promil23/mezzanine,spookylukey/mezzanine,wbtuomela/mezzanine,frankier/mezzanine,AlexHill/mezzanine | mezzanine/project_template/deploy/live_settings.py | mezzanine/project_template/deploy/live_settings.py |
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
| bsd-2-clause | Python |
62545500553443863d61d9e5ecc80307c745a227 | Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary | CivicVision/datahub,openspending/spendb,johnjohndoe/spendb,USStateDept/FPA_Core,spendb/spendb,johnjohndoe/spendb,openspending/spendb,nathanhilbert/FPA_Core,pudo/spendb,openspending/spendb,nathanhilbert/FPA_Core,spendb/spendb,CivicVision/datahub,pudo/spendb,USStateDept/FPA_Core,pudo/spendb,nathanhilbert/FPA_Core,USStateDept/FPA_Core,CivicVision/datahub,johnjohndoe/spendb,spendb/spendb | migrate/20110917T143029-remove-value-dimensions.py | migrate/20110917T143029-remove-value-dimensions.py | import logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!") | agpl-3.0 | Python |
|
87cbdd44ee17ecc5951b6f062a160c9fad465053 | add BaiduMap | PKU-Dragon-Team/Datalab-Utilities | BaiduMap/__init__.py | BaiduMap/__init__.py | import png, numpy
import matplotlib.pyplot as plt
import json, urllib.request, collections.abc, os, sys
from urllib.parse import quote_plus
from collections import OrderedDict
AK = None
SERVER_URL = None
__location__ = os.path.join(os.getcwd(), os.path.dirname(os.path.realpath(__file__)))
with open(os.path.join(__location__, 'config.json'), 'r') as config:
x = json.load(config)
AK = x['ak']
SERVER_URL = x['server']
BASE_URL = "%s?ak=%s" % (SERVER_URL, AK)
class URLBuilder:
def __init__(self, base_url):
if '?' not in base_url:
base_url += '?'
self.__base_url = base_url
self.__url = base_url
self.__attr = OrderedDict()
def __addParam(self, name, value):
if not self.__url.endswith('&'):
self.__url += '&'
self.__url += "%s=%s" % (name, value)
def __resetURL(self):
self.__url = self.__base_url
def addParam(self, name, value):
self.__attr[str(name)] = str(value)
def removeParam(self, name):
try:
del self.__attr[str(name)]
except KeyError:
pass
def generateURL(self):
self.__resetURL()
for item in self.__attr.items():
self.__addParam(item[0], item[1])
return self.__url
def buildURL(width=None, height=None, certer=[], zoom=None, copyright=1, scale=2, bbox=[], markers=[], markerStyles=[], labels=[], labelStyles=[], paths=[], pathStyles=[]):
url = URLBuilder(BASE_URL)
if width:
url.addParam('width', quote_plus(str(width)))
if height:
url.addParam('height', quote_plus(str(height)))
if certer:
if isinstance(center, str):
url.addParam('center', quote_plus(center))
elif isinstance(center, collections.abc.Sequence):
url.addParam('center', quote_plus('%f,%f' % center))
if zoom:
url.addParam('zoom', quote_plus(str(zoom)))
if copyright:
url.addParam('copyright', quote_plus(str(copyright)))
if scale:
url.addParam('scale', quote_plus(str(scale)))
if bbox:
url.addParam('bbox', quote_plus('%f,%f,%f,%f' % bbox))
if markers:
pass
# not implemented
if markerStyles:
pass
# not implemented
if labels:
pass
# not implemented
if labelStyles:
pass
# not implemented
if paths:
pass
# not implemented
if pathStyles:
pass
# not implemented
return url.generateURL()
def fetchImage(url):
r = png.Reader(file=urllib.request.urlopen(url))
data = r.asFloat()
column_count = data[0]
row_count = data[1]
pngdata = data[2]
plane_count = data[3]['planes']
image_2d = numpy.vstack(map(numpy.float_, pngdata))
image_3d = numpy.reshape(image_2d, (row_count, column_count, plane_count))
return image_3d
def plotMap(image):
# TODO: ιζγε―Ήι½
plt.imshow(image, alpha=0.5)
plt.show() | mit | Python |
|
c599b5d470cf80b964af1b261a11540516e120df | Add Dehnen smoothing as a wrapper | jobovy/galpy,jobovy/galpy,jobovy/galpy,jobovy/galpy | galpy/potential_src/DehnenSmoothWrapperPotential.py | galpy/potential_src/DehnenSmoothWrapperPotential.py | ###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DehnenSmoothWrapper Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
tform - start of growth
tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
self._tform= tform
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady
self.hasC= False
self.hasC_dxdv= False
def _smooth(self,t):
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
def _wrap(self,attribute,R,Z,phi=0.,t=0.):
return self._smooth(t)\
*self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
| bsd-3-clause | Python |
|
ddc61e8158fb1dfb33b30a19f7e9cd3be8eaf3a2 | add app.py | xianjunzhengbackup/Cloud-Native-Python,xianjunzhengbackup/Cloud-Native-Python,xianjunzhengbackup/Cloud-Native-Python | app.py | app.py | from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| mit | Python |
|
cab4b903b986a7f8bfe4955bf80190bb7f33b012 | Create bot.py | tenkisi/markovtweet | bot.py | bot.py | # -*- coding: utf-8 -*-
import twitter_key
import tweepy
import markovtweet
def auth():
auth = tweepy.OAuthHandler(twitter_key.CONSUMER_KEY, twitter_key.CONSUMER_SECRET)
auth.set_access_token(twitter_key.ACCESS_TOKEN, twitter_key.ACCESS_SECRET)
return tweepy.API(auth)
if __name__ == "__main__":
api = auth()
markovtweet.markovtweet(api)
| mit | Python |
|
f1b11d2b111ef0b70f0babe6e025056ff1a68acc | Create InMoov.LeapMotionHandTracking.py | MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab | home/Alessandruino/InMoov.LeapMotionHandTracking.py | home/Alessandruino/InMoov.LeapMotionHandTracking.py | i01 = Runtime.createAndStart("i01","InMoov")
#Set here the port of your InMoov Left Hand Arduino , in this case COM5
leftHand = i01.startLeftHand("COM5")
#==============================
#Set the min/max values for fingers
i01.leftHand.thumb.setMinMax( 0, 61)
i01.leftHand.index.map(0 , 89)
i01.leftHand.majeure.map(0 , 89)
i01.leftHand.ringFinger.map(0 , 104)
i01.leftHand.pinky.map(0 , 91)
#===============================
#Start the Leap Tracking
i01.leftHand.starLeapTracking()
#stop leap tracking
#i01.leftHand.stopLeapTracking()
| apache-2.0 | Python |
|
ddf940dc932c04ebd287085ec7d035a93ac5598f | add findmyiphone flask api | pirate/nicksweeting.com,pirate/nicksweeting.com | ios.py | ios.py | from pyicloud import PyiCloudService
from flask import Flask, jsonify, request, abort
api = PyiCloudService('[email protected]')
app = Flask(__name__)
@app.route('/devices', methods=['GET'])
def device_list():
devices = []
for id, device in api.devices.items():
location_info = device.location()
device_json = {
'id': id,
'name': device.data['name'],
'model': device.data['deviceDisplayName'],
'is_desktop': device.data['isMac'],
'location': {
'lat': location_info['latitude'],
'lng': location_info['longitude'],
'source': location_info['positionType'],
'accuracy': location_info['horizontalAccuracy'],
'is_old': location_info['isOld'],
'is_accurate': not location_info['isInaccurate'],
'timestamp': location_info['timeStamp'],
} if location_info else None,
}
devices.append(device_json)
return jsonify({'devices': devices})
@app.route('/alert', methods=['POST'])
def alert():
device_id = request.form['id']
subject = request.form.get('subject', '').strip()
message = request.form.get('message', '').strip()
sounds = request.form.get('sounds')
device = api.devices.get(device_id)
if not device:
abort(404)
if not message:
device.play_sound(subject=subject)
else:
device.display_message(subject=subject, message=message, sounds=bool(sounds))
return jsonify({'success': True, 'errors': []})
if __name__ == '__main__':
app.run()
| mit | Python |
|
81791b79fca6b23436518cf94b79175bd6ec06e7 | Create lcd.py | ric96/joypi3 | lcd.py | lcd.py | #!/usr/bin/python
#--------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# lcd_i2c.py
# LCD test script using I2C backpack.
# Supports 16x2 and 20x4 screens.
#
# Author : Matt Hawkins
# Date : 20/09/2015
#
# http://www.raspberrypi-spy.co.uk/
#
# Copyright 2015 Matt Hawkins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#--------------------------------------
import smbus
import time
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
# Define some device parameters
I2C_ADDR = 0x27 # I2C device address
LCD_WIDTH = 16 # Maximum characters per line
# Define some device constants
LCD_CHR = 1 # Mode - Sending data
LCD_CMD = 0 # Mode - Sending command
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
LCD_BACKLIGHT = 0x08 # On
#LCD_BACKLIGHT = 0x00 # Off
ENABLE = 0b00000100 # Enable bit
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
#Open I2C interface
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
def lcd_init():
# Initialise display
lcd_byte(0x33,LCD_CMD) # 110011 Initialise
lcd_byte(0x32,LCD_CMD) # 110010 Initialise
lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
lcd_byte(0x01,LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_byte(bits, mode):
# Send byte to data pins
# bits = the data
# mode = 1 for data
# 0 for command
bits_high = mode | (bits & 0xF0) | LCD_BACKLIGHT
bits_low = mode | ((bits<<4) & 0xF0) | LCD_BACKLIGHT
# High bits
bus.write_byte(I2C_ADDR, bits_high)
lcd_toggle_enable(bits_high)
# Low bits
bus.write_byte(I2C_ADDR, bits_low)
lcd_toggle_enable(bits_low)
def lcd_toggle_enable(bits):
# Toggle enable
time.sleep(E_DELAY)
bus.write_byte(I2C_ADDR, (bits | ENABLE))
time.sleep(E_PULSE)
bus.write_byte(I2C_ADDR,(bits & ~ENABLE))
time.sleep(E_DELAY)
def lcd_string(message,line):
# Send string to display
message = message.ljust(LCD_WIDTH," ")
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]),LCD_CHR)
def main():
# Main program block
# Initialise display
lcd_init()
while True:
fl = mc.get("d1")
fc = mc.get("d2")
fr = mc.get("d3")
bl = mc.get("d6")
bc = mc.get("d5")
br = mc.get("d4")
f = "FL:",round(fl)," C:",round(fc)," R:",round(fr)
b = "BL:",round(bl)," C:",round(bc)," R:",round(br)
# Send some test
lcd_string(f,LCD_LINE_1)
lcd_string(b,LCD_LINE_2)
time.sleep(0.5)
# Send some more text
lcd_string("> RPiSpy",LCD_LINE_1)
lcd_string("> I2C LCD",LCD_LINE_2)
time.sleep(3)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
lcd_byte(0x01, LCD_CMD)
| mit | Python |
|
bb7bb2e12d3ccbb55f0b0e6db5d0cb79c3ea8079 | Add missing migration for profile items. | knowmetools/km-api,knowmetools/km-api,knowmetools/km-api,knowmetools/km-api | km_api/know_me/migrations/0013_remove_profileitem_media_resource.py | km_api/know_me/migrations/0013_remove_profileitem_media_resource.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('know_me', '0012_emergencyitem'),
]
operations = [
migrations.RemoveField(
model_name='profileitem',
name='media_resource',
),
]
| apache-2.0 | Python |
|
a0b9d1977b2aa2366a334231b4dd5dbe047d7122 | Add testcase for Category.can_create_events | indico/indico,OmeGak/indico,OmeGak/indico,mvidalgarcia/indico,ThiefMaster/indico,indico/indico,ThiefMaster/indico,pferreir/indico,mvidalgarcia/indico,indico/indico,mic4ael/indico,pferreir/indico,DirkHoffmann/indico,DirkHoffmann/indico,pferreir/indico,ThiefMaster/indico,indico/indico,mvidalgarcia/indico,ThiefMaster/indico,OmeGak/indico,mic4ael/indico,OmeGak/indico,DirkHoffmann/indico,mic4ael/indico,mic4ael/indico,DirkHoffmann/indico,pferreir/indico,mvidalgarcia/indico | indico/modules/categories/models/categories_test.py | indico/modules/categories/models/categories_test.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import pytest
from indico.core.db.sqlalchemy.protection import ProtectionMode
@pytest.mark.parametrize(('protection_mode', 'creation_restricted', 'acl', 'allowed'), (
# not restricted
(ProtectionMode.public, False, None, True),
(ProtectionMode.protected, False, None, False),
(ProtectionMode.protected, False, {'read_access': True}, True),
# restricted - authorized
(ProtectionMode.protected, True, {'full_access': True}, True),
(ProtectionMode.protected, True, {'roles': {'create'}}, True),
# restricted - not authorized
(ProtectionMode.public, True, None, False),
(ProtectionMode.protected, True, None, False),
(ProtectionMode.protected, True, {'read_access': True}, False)
))
def test_can_create_events(dummy_category, dummy_user, protection_mode, creation_restricted, acl, allowed):
dummy_category.protection_mode = protection_mode
dummy_category.event_creation_restricted = creation_restricted
if acl:
dummy_category.update_principal(dummy_user, **acl)
assert dummy_category.can_create_events(dummy_user) == allowed
def test_can_create_events_no_user(dummy_category):
assert not dummy_category.can_create_events(None)
| mit | Python |
|
eb54c75c0f5b7e909177777ce935358b7ac25def | Add zip and unzip to zip_file | interhui/py-sys | py_sys/file/zip_file.py | py_sys/file/zip_file.py | # coding=utf-8
import os
import zipfile
class ZipFile(object):
def __init__(self):
pass
def zip(self, dir_path, zip_file):
file_list = []
def walk_dir(sub_dir):
for root, dirs, files in os.walk(sub_dir):
for _file in files:
file_list.append(os.path.join(root, _file))
for _dir in dirs:
walk_dir(_dir)
if os.path.isfile(dir_path):
file_list.append(dir_path)
else :
walk_dir(dir_path)
zf = zipfile.ZipFile(zip_file, "w", zipfile.zlib.DEFLATED)
for tar in file_list:
arcname = tar[len(dir_path):]
zf.write(tar, arcname)
zf.close()
def unzip(self, zip_file, dir_path):
if not os.path.exists(dir_path): os.mkdir(dir_path, 0777)
zf_obj = zipfile.ZipFile(zip_file)
for zf_name in zf_obj.namelist():
zf_name = zf_name.replace('\\','/')
if zf_name.endswith('/'):
os.mkdir(os.path.join(dir_path, zf_name))
else:
ext_file = os.path.join(dir_path, zf_name)
ext_dir= os.path.dirname(ext_file)
if not os.path.exists(ext_dir):
os.mkdir(ext_dir,0777)
out_file = open(ext_file, 'wb')
out_file.write(zf_obj.read(zf_name))
out_file.close()
| apache-2.0 | Python |
|
e27b005e5dc797e2326ab175ef947021c5a85cb7 | Add ptt.py | StanleyDing/PyTT | ptt.py | ptt.py | import telnetlib
import re
RN = '\r\n'
C_L = '\x0C'
C_Z = '\x1A'
ESC = '\x1B'
class PTT():
def __init__(self):
self.ptt = telnetlib.Telnet('ptt.cc')
self.where = 'login'
def login(self, username, password, dup=False):
self.__wait_til('註ε: ', encoding='big5')
self.__send(username, ',', RN)
self.__wait_til('ε―η’Ό: ', encoding='big5')
self.__send(password, RN)
index = self.__expect('ζ‘θΏζ¨εεΊ¦ζθ¨ͺ', 'ιθ€η»ε
₯', 'θ«εΏι »ηΉη»ε
₯')[0]
if index == 2:
self.__send(RN)
index = self.__expect('ζ‘θΏζ¨εεΊ¦ζθ¨ͺ', 'ιθ€η»ε
₯')[0]
if index == 1:
self.__send('n' if dup else 'y', RN)
index = self.__expect('ζ‘θΏζ¨εεΊ¦ζθ¨ͺ')[0]
if index == -1:
print("Login failed")
self.close()
self.__send(RN)
index = self.__expect('γδΈ»εθ½θ‘¨γ', 'ι―θͺ€ε試')[0]
if index == 1:
self.__send('y', RN)
# in menu now
self.where = 'menu'
def close(self):
self.ptt.close()
print('Connection closed')
def __wait_til(self, exp, encoding='utf-8', timeout=None):
return self.ptt.read_until(exp.encode(encoding), timeout)
def __send(self, *args):
s = ''.join(args)
self.ptt.write(s.encode())
def __expect(self, *args, encoding='utf-8', timeout=5):
exp_list = [exp.encode(encoding) for exp in args]
expect = self.ptt.expect(exp_list, timeout)
if expect[0] == -1:
raise TimeoutError(expect[2])
return expect
class TimeoutError(Exception):
pass
if __name__ == '__main__':
pass
| mit | Python |
|
eef2dff2855ef310dbdb6b864a92306cae724ed7 | add missing the missing file exceptions.py | chenjiandongx/pyecharts,chenjiandongx/pyecharts,chenjiandongx/pyecharts | pyecharts/exceptions.py | pyecharts/exceptions.py | class NoJsExtension(Exception):
pass
| mit | Python |
|
0d3255f8a69fe5192cb36ee42a731293cfd09715 | Add VmCorTaxonPhenology Class | PnEcrins/GeoNature,PnEcrins/GeoNature,PnEcrins/GeoNature,PnEcrins/GeoNature | backend/geonature/core/gn_profiles/models.py | backend/geonature/core/gn_profiles/models.py | from geonature.utils.env import DB
from utils_flask_sqla.serializers import serializable
@serializable
class VmCorTaxonPhenology(DB.Model):
__tablename__ = "vm_cor_taxon_phenology"
__table_args__ = {"schema": "gn_profiles"}
cd_ref = DB.Column(DB.Integer)
period = DB.Column(DB.Integer)
id_nomenclature_life_stage = DB.Column(DB.Integer)
id_altitude_range = DB.Column(DB.Integer)
count_valid_data = DB.Column(DB.Integer)
| bsd-2-clause | Python |
|
18d40200224d68b0ce93c2710516ed63566b1ad3 | Add merge migration | mattclark/osf.io,pattisdr/osf.io,adlius/osf.io,adlius/osf.io,saradbowman/osf.io,aaxelb/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,erinspace/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,cslzchen/osf.io,cslzchen/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,mattclark/osf.io,pattisdr/osf.io,pattisdr/osf.io,mfraezz/osf.io,baylee-d/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,adlius/osf.io,HalcyonChimera/osf.io,felliott/osf.io,aaxelb/osf.io,mattclark/osf.io,adlius/osf.io,baylee-d/osf.io,felliott/osf.io,baylee-d/osf.io,felliott/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,felliott/osf.io,cslzchen/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,mfraezz/osf.io | osf/migrations/0127_merge_20180822_1927.py | osf/migrations/0127_merge_20180822_1927.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-22 19:27
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0124_merge_20180816_1229'),
('osf', '0126_update_review_group_names'),
]
operations = [
]
| apache-2.0 | Python |
|
a55fee4515c9e6187198a8fc27ec15e7786d5782 | Create utils.py | Jake0720/XChat-Scripts | utils.py | utils.py | #!/usr/bin/env python
'''Python script that must be kept with all of these plugins'''
def color(color, message):
'''color forground/background encoding IRC messages'''
colors = {'white': '00', 'black': '01', 'blue': '02', 'navy': '02',
'green': '03', 'red': '04', 'brown': '05', 'maroon': '05',
'purple': '06', 'orange': '07', 'olive': '07', 'gold': '07',
'yellow': '08', 'lightgreen': '09', 'lime': '09', 'teal': '10',
'cyan': '11', 'lightblue': '12', 'royal': '12', 'lightpurple': '13',
'pink': '13', 'fuchsia': '13', 'grey': '14', 'lightgrey': '0', 'silver': '0'}
color = str(color).lower()
message = str(message)
if '/' in color:
color = color.split('/')
message = '\x03' + colors[color[0]] + ',' + colors[color[1]] + message + '\x03'
else:
message = '\x03' + colors[color] + message + '\x03'
return message
def bold(message):
'''bold encoding IRC messages'''
return ('\x02' + str(message) + '\x02')
def italic(message):
'''italicize encoding IRC messages'''
return ('\x16' + str(message) + '\x16')
def underline(message):
'''underlined encoding IRC messages'''
return ('\x1f' + str(message) + '\x1f')
| mit | Python |
|
36e6ff93b270672e0918e5ac0d7f9698834ad6ae | add Pathfinder skeleton | nyrocron/pathdemo | game/pathfinding.py | game/pathfinding.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""pathfinding.py: """
class Pathfinder(object):
def __init__(self, size_x, size_y):
self._size_x = size_x
self._size_y = size_y
def find_path(self, from_coords, to_coords):
pass | mpl-2.0 | Python |
|
ca956d335ad6bf6e190869d98c7abb3b554dfa3d | Create TS3IdleBot.py | rmgr/TS3IdleBot | TS3IdleBot.py | TS3IdleBot.py | import telnetlib
import time
from config import config
def getClients():
print "Getting a list of clients."
telnet.write("clientlist -times\n")
clients = telnet.read_until("msg=ok")
clients = clients.replace(" ", "\n")
clients = clients.replace("\r", "")
clients = clients.split("|")
cLen = len(clients)
print clients
for i in range(0, cLen):
try:
if config["botname"] in clients[i]:
clients.remove(clients[i])
else:
clients[i] = clients[i].split("\n")
clients[i] = filter(None,clients[i])
cLen -= 1
except IndexError:
print "Somehow we've escaped the bounds of the loop. :O Skip it and we should be fine."
return clients
def moveIdlers(clients):
print "Checking for idlers."
for i in range(0, len(clients)):
if float(clients[i][5].strip("client_idle_time=")) > float(config["idle"])*60000:
print "Moving user " + clients[i][3].replace("client_nickname=", "") + " to idle channel."
telnet.write("clientmove clid="+clients[i][0].strip("clid=")+ " cid=13\n")
telnet.read_until("msg=ok")
print "Done checking for idlers."
print "TS3IdleBot"
print "http://www.github.com/rmgr\n"
print "Exit TS3IdleBot with CTRL + C."
print "Connecting to server " + config["host"]+ ":" + config["port"]
telnet = telnetlib.Telnet(config["host"],config["port"])
telnet.open(telnet.host, telnet.port)
telnet.write("login "+config["user"]+" "+config["pass"]+"\n")
telnet.read_until("msg=ok")
print "Connected successfully."
print "Using virtual server "+config["serverid"]
telnet.write("use sid="+config["serverid"] + "\n")
telnet.read_until("msg=ok")
print "Server selection successful."
print "Setting bot nickname as " + config["botname"] + "."
telnet.write("clientupdate client_nickname="+config["botname"]+"\n")
telnet.read_until("msg=ok")
print "Set successfully."
while True:
try:
clients = getClients()
moveIdlers(clients)
print "Sleeping for 5 minutes."
time.sleep(300)
except KeyboardInterrupt:
print "Exiting TS3IdleBot"
exit()
telnet.write("logout\n")
telnet.read_until("msg=ok")
telnet.close()
| mit | Python |
|
ae0ebdccfffffbad259842365712bd4b6e52fc8e | add test files for HDF5 class and read_feats function | k2kobayashi/sprocket | sprocket/util/tests/test_hdf5.py | sprocket/util/tests/test_hdf5.py | from __future__ import division, print_function, absolute_import
import os
import unittest
import numpy as np
from sprocket.util.hdf5 import HDF5, read_feats
dirpath = os.path.dirname(os.path.realpath(__file__))
listf = os.path.join(dirpath, '/data/test.h5')
class hdf5FunctionsTest(unittest.TestCase):
def test_HDF5(self):
data1d = np.random.rand(100)
data2d = np.random.rand(100).reshape(50, 2)
# write test
path = os.path.join(dirpath, 'data/test.h5')
h5 = HDF5(path, 'w')
h5.save(data1d, '1d')
h5.save(data2d, '2d')
h5.close()
# open test
tmph5 = HDF5(path, 'r')
tmp1d = tmph5.read(ext='1d')
tmp2d = tmph5.read(ext='2d')
tmph5.close()
assert np.allclose(tmp1d, data1d)
assert np.allclose(tmp2d, data2d)
# tset read_feats function
listpath = os.path.join(dirpath, 'data/test.list')
with open(listpath, 'w') as fp:
fp.write('data/test')
list1d = read_feats(listpath, dirpath, ext='1d')
assert np.allclose(list1d[0], data1d)
# remove files
os.remove(path)
os.remove(listpath)
| mit | Python |
|
26fcbefee171f8d56504a7eba121027f0c5be8b5 | Add migration for new overrides table | Lektorium-LLC/edx-platform,CredoReference/edx-platform,arbrandes/edx-platform,msegado/edx-platform,pabloborrego93/edx-platform,edx-solutions/edx-platform,TeachAtTUM/edx-platform,gsehub/edx-platform,TeachAtTUM/edx-platform,proversity-org/edx-platform,Lektorium-LLC/edx-platform,stvstnfrd/edx-platform,proversity-org/edx-platform,gymnasium/edx-platform,lduarte1991/edx-platform,msegado/edx-platform,angelapper/edx-platform,eduNEXT/edx-platform,ahmedaljazzar/edx-platform,arbrandes/edx-platform,mitocw/edx-platform,lduarte1991/edx-platform,Stanford-Online/edx-platform,jolyonb/edx-platform,eduNEXT/edx-platform,philanthropy-u/edx-platform,procangroup/edx-platform,pabloborrego93/edx-platform,Edraak/edraak-platform,Edraak/edraak-platform,EDUlib/edx-platform,a-parhom/edx-platform,appsembler/edx-platform,arbrandes/edx-platform,teltek/edx-platform,appsembler/edx-platform,teltek/edx-platform,philanthropy-u/edx-platform,gsehub/edx-platform,eduNEXT/edx-platform,eduNEXT/edunext-platform,gymnasium/edx-platform,ahmedaljazzar/edx-platform,teltek/edx-platform,edx/edx-platform,TeachAtTUM/edx-platform,edx/edx-platform,cpennington/edx-platform,TeachAtTUM/edx-platform,edx-solutions/edx-platform,ahmedaljazzar/edx-platform,eduNEXT/edx-platform,edx/edx-platform,ESOedX/edx-platform,gymnasium/edx-platform,ESOedX/edx-platform,gsehub/edx-platform,jolyonb/edx-platform,eduNEXT/edunext-platform,BehavioralInsightsTeam/edx-platform,teltek/edx-platform,cpennington/edx-platform,edx-solutions/edx-platform,ESOedX/edx-platform,msegado/edx-platform,ahmedaljazzar/edx-platform,eduNEXT/edunext-platform,hastexo/edx-platform,philanthropy-u/edx-platform,Stanford-Online/edx-platform,hastexo/edx-platform,hastexo/edx-platform,pabloborrego93/edx-platform,angelapper/edx-platform,mitocw/edx-platform,hastexo/edx-platform,lduarte1991/edx-platform,cpennington/edx-platform,BehavioralInsightsTeam/edx-platform,Lektorium-LLC/edx-platform,jolyonb/edx-platform,Edraak/edraak-platform,lduarte1991/edx-platform,BehavioralInsightsTeam/edx-platform,philanthropy-u/edx-platform,pabloborrego93/edx-platform,a-parhom/edx-platform,Stanford-Online/edx-platform,ESOedX/edx-platform,a-parhom/edx-platform,mitocw/edx-platform,stvstnfrd/edx-platform,jolyonb/edx-platform,appsembler/edx-platform,cpennington/edx-platform,eduNEXT/edunext-platform,EDUlib/edx-platform,kmoocdev2/edx-platform,a-parhom/edx-platform,mitocw/edx-platform,Edraak/edraak-platform,msegado/edx-platform,appsembler/edx-platform,angelapper/edx-platform,msegado/edx-platform,gymnasium/edx-platform,CredoReference/edx-platform,proversity-org/edx-platform,edx-solutions/edx-platform,stvstnfrd/edx-platform,proversity-org/edx-platform,edx/edx-platform,procangroup/edx-platform,Lektorium-LLC/edx-platform,stvstnfrd/edx-platform,angelapper/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,BehavioralInsightsTeam/edx-platform,kmoocdev2/edx-platform,kmoocdev2/edx-platform,EDUlib/edx-platform,CredoReference/edx-platform,CredoReference/edx-platform,procangroup/edx-platform,procangroup/edx-platform,Stanford-Online/edx-platform,kmoocdev2/edx-platform,gsehub/edx-platform,kmoocdev2/edx-platform | lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py | lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0012_computegradessetting'),
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGradeOverride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('earned_all_override', models.FloatField(null=True, blank=True)),
('possible_all_override', models.FloatField(null=True, blank=True)),
('earned_graded_override', models.FloatField(null=True, blank=True)),
('possible_graded_override', models.FloatField(null=True, blank=True)),
('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
],
),
]
| agpl-3.0 | Python |
|
e5d3fea99d58a1b02ebe84148d63330ea8d5c3a0 | Create WordLadder.py | jenniferwx/Programming_Practice,jenniferwx/Programming_Practice,jenniferwx/Programming_Practice | WordLadder.py | WordLadder.py | '''
Given a source word, target word and an English dictionary, transform the source word to target by
changing/adding/removing 1 character at a time, while all intermediate words being valid English words.
Return the transformation chain which has the smallest number of intermediate words.
'''
| bsd-3-clause | Python |
|
4ba2f92a9712530d084823dae52f54167f2f3afb | fix test source to work with empty msgs | tj93/pymtl,cornell-brg/pymtl,Glyfina-Fernando/pymtl,tj93/pymtl,jjffryan/pymtl,cornell-brg/pymtl,12yujim/pymtl,12yujim/pymtl,cornell-brg/pymtl,tj93/pymtl,cfelton/pymtl,jck/pymtl,jjffryan/pymtl,jck/pymtl,Glyfina-Fernando/pymtl,12yujim/pymtl,jck/pymtl,cfelton/pymtl,jjffryan/pymtl,cfelton/pymtl,Glyfina-Fernando/pymtl | new_pmlib/TestSimpleSource.py | new_pmlib/TestSimpleSource.py | #=========================================================================
# TestSimpleSource
#=========================================================================
# This class will output messages on a val/rdy interface from a
# predefined list.
#
from new_pymtl import *
from ValRdyBundle import OutValRdyBundle
class TestSimpleSource( Model ):
#-----------------------------------------------------------------------
# Constructor
#-----------------------------------------------------------------------
def __init__( s, nbits, msgs ):
s.out = OutValRdyBundle( nbits )
s.done = OutPort ( 1 )
s.msgs = msgs
s.idx = 0
#-----------------------------------------------------------------------
# Tick
#-----------------------------------------------------------------------
def elaborate_logic( s ):
@s.tick
def tick():
# Handle reset
if s.reset:
if s.msgs:
s.out.msg.next = s.msgs[0]
s.out.val.next = False
s.done.next = False
return
# Check if we have more messages to send.
if ( s.idx == len(s.msgs) ):
if s.msgs:
s.out.msg.next = s.msgs[0]
s.out.val.next = False
s.done.next = True
return
# At the end of the cycle, we AND together the val/rdy bits to
# determine if the output message transaction occured
out_go = s.out.val and s.out.rdy
# If the output transaction occured, then increment the index.
if out_go:
s.idx = s.idx + 1
# The output message is always the indexed message in the list, or if
# we are done then it is the first message again.
if ( s.idx < len(s.msgs) ):
s.out.msg.next = s.msgs[s.idx]
s.out.val.next = True
s.done.next = False
else:
s.out.msg.next = s.msgs[0]
s.out.val.next = False
s.done.next = True
#-----------------------------------------------------------------------
# Line tracing
#-----------------------------------------------------------------------
def line_trace( s ):
return "({:2}) {}".format( s.idx, s.out )
| #=========================================================================
# TestSimpleSource
#=========================================================================
# This class will output messages on a val/rdy interface from a
# predefined list.
#
from new_pymtl import *
from ValRdyBundle import OutValRdyBundle
class TestSimpleSource( Model ):
#-----------------------------------------------------------------------
# Constructor
#-----------------------------------------------------------------------
def __init__( s, nbits, msgs ):
s.out = OutValRdyBundle( nbits )
s.done = OutPort ( 1 )
s.msgs = msgs
s.idx = 0
#-----------------------------------------------------------------------
# Tick
#-----------------------------------------------------------------------
def elaborate_logic( s ):
@s.tick
def tick():
# Handle reset
if s.reset:
s.out.msg.next = s.msgs[0]
s.out.val.next = False
s.done.next = False
return
# Check if we have more messages to send.
if ( s.idx == len(s.msgs) ):
s.out.msg.next = s.msgs[0]
s.out.val.next = False
s.done.next = True
return
# At the end of the cycle, we AND together the val/rdy bits to
# determine if the output message transaction occured
out_go = s.out.val and s.out.rdy
# If the output transaction occured, then increment the index.
if out_go:
s.idx = s.idx + 1
# The output message is always the indexed message in the list, or if
# we are done then it is the first message again.
if ( s.idx < len(s.msgs) ):
s.out.msg.next = s.msgs[s.idx]
s.out.val.next = True
s.done.next = False
else:
s.out.msg.next = s.msgs[0]
s.out.val.next = False
s.done.next = True
#-----------------------------------------------------------------------
# Line tracing
#-----------------------------------------------------------------------
def line_trace( s ):
return "({:2}) {}".format( s.idx, s.out )
| bsd-3-clause | Python |
75aabd425bd32a9467d7a06b250a0a5b1f5ba852 | Add more comments | CharlesJonah/bucket_list_api,CharlesJonah/bucket_list_api | application/serializer.py | application/serializer.py | '''
This module maps the data that will be used by the marshall when returning the
data to the user
'''
from flask_restful import fields
bucket_list_item_serializer = {
'item_id': fields.Integer,
'name': fields.String,
'date_created': fields.DateTime,
'date_modified': fields.DateTime,
'done': fields.Boolean
}
bucket_list_serializer = {
'id': fields.Integer,
'name': fields.String,
'items':fields.Nested(bucket_list_item_serializer),
'created_by': fields.String,
'date_created': fields.DateTime,
'date_modified': fields.DateTime
} | mit | Python |
|
da0f31d6ca5aa8f425c86b9c0caf965f062e1dba | test buying max clicks and gen clicks in the same test | Victory/clicker-me-bliss,Victory/clicker-me-bliss,Victory/clicker-me-bliss,Victory/clicker-me-bliss | functional-tests/suite6.py | functional-tests/suite6.py | from clickerft.cft import Cft
from time import sleep
class Suite4(Cft):
def test_buy_target_max_and_gen(self):
"""
buy clicks until we have 50 max clicks of 50
and 10 clicks/sec
"""
targetGen = 4
while int(self.clicksPerGeneration.text) < targetGen:
clicksOwned = int(self.clicksOwned.text)
priceGen = int(self.pincreaseClicksPerGeneration.text)
for ii in xrange(min(clicksOwned, priceGen)):
self.increaseClicksPerGeneration.click()
assert int(self.clicksPerGeneration.text) == targetGen
targetMax = 12
while int(self.maxClicks.text) < targetMax:
clicksOwned = int(self.clicksOwned.text)
priceMax = int(self.pincreaseMaxClicks.text)
for ii in xrange(min(clicksOwned, priceMax)):
self.increaseMaxClicks.click()
assert int(self.maxClicks.text) == targetMax
if __name__ == '__main__':
Suite4()
| mit | Python |
|
158f04702b6c1dcda9981d8da05fe059e84c3f90 | Add example with churches. | OnroerendErfgoed/skosprovider_getty | examples/churches.py | examples/churches.py | # -*- coding: utf-8 -*-
'''
This script demonstrates using the AATProvider to get the concept of
Churches.
'''
from skosprovider_getty.providers import AATProvider
aat = AATProvider(metadata={'id': 'AAT'})
churches = aat.get_by_id(300007466)
lang = ['en', 'nl', 'es', 'de']
print('Labels')
print('------')
for l in churches.labels:
print(l.language + ': ' + l.label.decode('utf-8') + ' [' + l.type + ']')
print('Notes')
print('-----')
for n in churches.notes:
print(n.language + ': ' + n.note.decode('utf-8') + ' [' + n.type + ']')
| mit | Python |
|
7c82a2a8887d25ef86e5d0004cf0a0e0bc4b23ac | Create CodingContestTorontoParkingTickets2013.py | flygeneticist/misc-scripts,flygeneticist/misc-scripts,flygeneticist/misc-scripts,flygeneticist/misc-scripts | CodingContestTorontoParkingTickets2013.py | CodingContestTorontoParkingTickets2013.py | import re
from collections import defaultdict
processed_data = defaultdict(int) # dict to capture reduced dataset info, default value == 0
only_chars = re.compile('\D+').search # pre-compiled reg-exp, for fast run time, to get street name, ignoring numbers
# import raw data file with parking information
with open('Parking_data.csv', 'r') as raw_data:
# skip the first line of header data
next
# iterate over the remaining file line by line
for line in raw_data:
# split line by ',' into an array
worked_line = line.split(',')
# get and clean up street name for dict use and, if valid name found, collect fine amount in dict
try:
processed_data[only_chars(worked_line[7]).group(0).lstrip()] += int(worked_line[4])
except:
next
# find street with greatest total fines processed_data
highest_street = max(processed_data, key=processed_data.get)
highest_fine = processed_data[highest_street]
# print out the results
print('Highest revenue street: {0} with ${1}.'.format(highest_street, highest_fine))
| mit | Python |
|
f8ee383cc3b3f1f9166627e81a64af4939e4de10 | add amqp style routing for virtual channels, allows memory backend to behave like amqp | romank0/kombu,ZoranPavlovic/kombu,tkanemoto/kombu,daevaorn/kombu,pantheon-systems/kombu,urbn/kombu,mathom/kombu,WoLpH/kombu,cce/kombu,depop/kombu,iris-edu-int/kombu,disqus/kombu,andresriancho/kombu,disqus/kombu,Elastica/kombu,alex/kombu,celery/kombu,mathom/kombu,Elastica/kombu,xujun10110/kombu,jindongh/kombu,numb3r3/kombu,WoLpH/kombu,ZoranPavlovic/kombu,iris-edu-int/kombu,depop/kombu,jindongh/kombu,tkanemoto/kombu,romank0/kombu,alex/kombu,xujun10110/kombu,cce/kombu,pantheon-systems/kombu,numb3r3/kombu,daevaorn/kombu,bmbouter/kombu,mverrilli/kombu,andresriancho/kombu,bmbouter/kombu,mverrilli/kombu | example/topic.py | example/topic.py | from kombu.connection import BrokerConnection
from kombu.messaging import Exchange, Queue, Consumer, Producer
# configuration, normally in an ini file
exchange_name = "test.shane"
exchange_type = "topic"
exchange_durable = True
message_serializer = "json"
queue_name = "test.q"
# 1. setup the connection to the exchange
# hostname,userid,password,virtual_host not used with memory backend
cons_conn = BrokerConnection(hostname="localhost",
userid="guest",
password="guest",
virtual_host="/",
transport="memory")
cons_chan = cons_conn.channel()
cons_exch = Exchange(exchange_name, type=exchange_type, durable=exchange_durable)
pub_conn = BrokerConnection(hostname="localhost",
userid="guest",
password="guest",
virtual_host="/",
transport="memory")
pub_chan = pub_conn.channel()
pub_exch = Exchange(exchange_name, type=exchange_type, durable=exchange_durable)
# 2. setup the consumer, the consumer declares/creates the queue, if you
# publish to a queue before there is a consumer it will fail unless the queue
# was first created and is durable
class AConsumer:
def __init__(self, queue_name, key):
self.queue = Queue(queue_name, exchange=cons_exch, routing_key=key)
self.consumer = Consumer(cons_chan, [self.queue])
self.consumer.consume()
def mq_callback(message_data, message):
print("%s: %r: %r" % (key, message.delivery_info, message_data,))
#message.ack()
self.consumer.register_callback(mq_callback)
c1 = AConsumer("test_1","test.1")
c2 = AConsumer("testing","test.ing")
# consumers can use simple pattern matching when defining a queue
c3 = AConsumer("test_all","test.*")
# 3. publish something to consume
# publishers always send to a specific route, the mq will route to the queues
producer = Producer(pub_chan, exchange=pub_exch, serializer=message_serializer)
producer.publish({"name": "Shane Caraveo", "username": "mixedpuppy"}, routing_key="test.1")
producer.publish({"name": "Micky Mouse", "username": "donaldduck"}, routing_key="test.ing")
producer.publish({"name": "Anonymous", "username": "whoami"}, routing_key="test.foobar")
def have_messages():
return sum([q.qsize() for q in cons_chan.queues.values()])
# 5. run the event loop
while have_messages():
try:
cons_conn.drain_events()
except KeyboardInterrupt:
print
print "quitting"
break
except Exception, e:
import traceback
print traceback.format_exc()
break
| bsd-3-clause | Python |
|
aff827e9cc02bcee6cf8687e1dff65f39daaf6c6 | Add a failing test to the landing page to check for upcoming events. | shapiromatron/amy,pbanaszkiewicz/amy,swcarpentry/amy,vahtras/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy,wking/swc-amy,vahtras/amy,shapiromatron/amy,swcarpentry/amy,swcarpentry/amy,wking/swc-amy,wking/swc-amy,shapiromatron/amy,vahtras/amy,wking/swc-amy | workshops/test/test_landing_page.py | workshops/test/test_landing_page.py | from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
"A fake replacement for date that can be mocked for testing."
pass
@classmethod
def today(cls):
return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
"Tests for the workshop landing page"
fixtures = ['event_test']
def test_has_upcoming_events(self):
"""Test that the landing page is passed some
upcoming_events in the context.
"""
response = self.client.get(reverse('index'))
# This will fail if the context variable doesn't exist
upcoming_events = response.context['upcoming_events']
# There are 2 upcoming events
assert len(upcoming_events) == 2
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
| mit | Python |
|
91918be596c83f468c6c940df7326896aa6082e7 | Fix stringify on multichoice forms | kb2ma/adagios,opinkerfi/adagios,zengzhaozheng/adagios,zengzhaozheng/adagios,kb2ma/adagios,kb2ma/adagios,kaji-project/adagios,kaji-project/adagios,opinkerfi/adagios,kaji-project/adagios,zengzhaozheng/adagios,kaji-project/adagios,opinkerfi/adagios,zengzhaozheng/adagios,opinkerfi/adagios,kb2ma/adagios | adagios/forms.py | adagios/forms.py | # -*- coding: utf-8 -*-
from django.utils.encoding import smart_str
from django import forms
class AdagiosForm(forms.Form):
""" Base class for all forms in this module. Forms that use pynag in any way should inherit from this one.
"""
def clean(self):
cleaned_data = {}
tmp = super(AdagiosForm, self).clean()
for k,v in tmp.items():
if isinstance(k, (unicode)):
k = smart_str(k)
if isinstance(v, (unicode)):
v = smart_str(v)
cleaned_data[k] = v
return cleaned_data
| # -*- coding: utf-8 -*-
from django.utils.encoding import smart_str
from django import forms
class AdagiosForm(forms.Form):
""" Base class for all forms in this module. Forms that use pynag in any way should inherit from this one.
"""
def clean(self):
cleaned_data = {}
tmp = super(AdagiosForm, self).clean()
for k,v in tmp.items():
if isinstance(k, (unicode)):
k = smart_str(k)
if isinstance(v, (unicode)):
v = smart_str(v)
cleaned_data[k] = smart_str(v)
return cleaned_data
| agpl-3.0 | Python |
cb7bb1d9f24706f3cce2e9841595ee80ce7e2c7f | Implement GetKeyboardType | angr/angr,iamahuman/angr,f-prettyland/angr,schieb/angr,iamahuman/angr,iamahuman/angr,tyb0807/angr,tyb0807/angr,f-prettyland/angr,schieb/angr,angr/angr,angr/angr,f-prettyland/angr,schieb/angr,tyb0807/angr | angr/procedures/win_user32/keyboard.py | angr/procedures/win_user32/keyboard.py | import angr
class GetKeyboardType(angr.SimProcedure):
def run(self, param):
# return the values present at time of author's testing
if self.state.solver.is_true(param == 0):
return 4
if self.state.solver.is_true(param == 1):
return 0
if self.state.solver.is_true(param == 2):
return 12
return 0
| bsd-2-clause | Python |
|
8692557a3389403b7a3450065d99e3750d91b2ed | Create views.py | staticdev/django-pagination-bootstrap,staticdev/django-pagination-bootstrap,sheepeatingtaz/django-pagination-bootstrap,sheepeatingtaz/django-pagination-bootstrap | pagination_bootstrap/views.py | pagination_bootstrap/views.py | mit | Python |
||
060c6d2eeea2235cda955c873b50e0aa2a4accd0 | use 20 | zws0932/farmer,huoxy/farmer,zws0932/farmer | farmer/models.py | farmer/models.py | #coding=utf8
import os
import time
import json
from datetime import datetime
from commands import getstatusoutput
from django.db import models
class Job(models.Model):
# hosts, like web_servers:host1 .
inventories = models.TextField(null = False, blank = False)
# 0, do not use sudo; 1, use sudo .
sudo = models.BooleanField(default = True)
# for example: ansible web_servers -m shell -a 'du -sh /tmp'
# the 'du -sh /tmp' is cmd here
cmd = models.TextField(null = False, blank = False)
# return code of this job
rc = models.IntegerField(null = True)
result = models.TextField(null = True)
start = models.DateTimeField(null = True)
end = models.DateTimeField(null = True)
@property
def cmd_shell(self):
option = self.sudo and '--sudo -f 20 -m shell -a' or '-f 20 -m shell -a'
return 'ansible %s %s "%s"' % (self.inventories, option, self.cmd)
def run(self):
if os.fork() == 0:
tmpdir = '/tmp/ansible_%s' % time.time()
os.mkdir(tmpdir)
self.start = datetime.now()
self.save()
cmd_shell = self.cmd_shell + ' -t ' + tmpdir
status, output = getstatusoutput(cmd_shell)
self.end = datetime.now()
result = {}
for f in os.listdir(tmpdir):
result[f] = json.loads(open(tmpdir + '/' + f).read())
self.rc = status
self.result = json.dumps(result)
self.save()
os.system('rm -rf ' + tmpdir)
def __unicode__(self):
return self.cmd_shell
| #coding=utf8
import os
import time
import json
from datetime import datetime
from commands import getstatusoutput
from django.db import models
class Job(models.Model):
# hosts, like web_servers:host1 .
inventories = models.TextField(null = False, blank = False)
# 0, do not use sudo; 1, use sudo .
sudo = models.BooleanField(default = True)
# for example: ansible web_servers -m shell -a 'du -sh /tmp'
# the 'du -sh /tmp' is cmd here
cmd = models.TextField(null = False, blank = False)
# return code of this job
rc = models.IntegerField(null = True)
result = models.TextField(null = True)
start = models.DateTimeField(null = True)
end = models.DateTimeField(null = True)
@property
def cmd_shell(self):
option = self.sudo and '--sudo -m shell -a' or '-m shell -a'
return 'ansible %s %s "%s"' % (self.inventories, option, self.cmd)
def run(self):
if os.fork() == 0:
tmpdir = '/tmp/ansible_%s' % time.time()
os.mkdir(tmpdir)
self.start = datetime.now()
self.save()
cmd_shell = self.cmd_shell + ' -t ' + tmpdir
status, output = getstatusoutput(cmd_shell)
self.end = datetime.now()
result = {}
for f in os.listdir(tmpdir):
result[f] = json.loads(open(tmpdir + '/' + f).read())
self.rc = status
self.result = json.dumps(result)
self.save()
os.system('rm -rf ' + tmpdir)
def __unicode__(self):
return self.cmd_shell
| mit | Python |
799109759114d141d71bed777b9a1ac2ec26a264 | add Red object detection | maximest-pierre/opencv_example,maximest-pierre/opencv_example | python/ObjectDetection/RedExtractObject.py | python/ObjectDetection/RedExtractObject.py | import cv2
import numpy as np
video = cv2.VideoCapture(0)
while (1):
# Take each frame
_, frame = video.read()
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
lower_red = np.array([150, 50, 50])
upper_red = np.array([255, 255, 150])
# Threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_red, upper_red) # Bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows() | mit | Python |
|
21490bd6cd03d159a440b2c13a6b4641c789c954 | Add example | michaelhelmick/python-tumblpy | examples/example.py | examples/example.py | import sys
from tumblpy import Tumblpy
key = raw_input('App Consumer Key: ')
secret = raw_input('App Consumer Secret: ')
if not 'skip-auth' in sys.argv:
t = Tumblpy(key, secret)
callback_url = raw_input('Callback URL: ')
auth_props = t.get_authentication_tokens(callback_url=callback_url)
auth_url = auth_props['auth_url']
OAUTH_TOKEN_SECRET = auth_props['oauth_token_secret']
print('Connect with Tumblr via: {}'.format(auth_url))
oauth_token = raw_input('OAuth Token (from callback url): ')
oauth_verifier = raw_input('OAuth Verifier (from callback url): ')
t = Tumblpy(key, secret, oauth_token, OAUTH_TOKEN_SECRET)
authorized_tokens = t.get_authorized_tokens(oauth_verifier)
final_oauth_token = authorized_tokens['oauth_token']
final_oauth_token_secret = authorized_tokens['oauth_token_secret']
print('OAuth Token: {}'.format(final_oauth_token))
print('OAuth Token Secret: {}'.format(final_oauth_token_secret))
else:
final_oauth_token = raw_input('OAuth Token: ')
final_oauth_token_secret = raw_input('OAuth Token Secret: ')
t = Tumblpy(key, secret, final_oauth_token, final_oauth_token_secret)
blog_url = t.post('user/info')
blog_url = blog_url['user']['blogs'][0]['url']
print('Your blog url is: {}'.format(blog_url))
posts = t.posts(blog_url)
print('Here are some posts this blog has made:', posts)
# print t.post('post', blog_url=blog_url, params={'type':'text', 'title': 'Test', 'body': 'Lorem ipsum.'})
| bsd-2-clause | Python |
|
ece6fb4561e338e32e8527a068cd386f00886a67 | Add example with reuters dataset. | keras-team/autokeras,keras-team/autokeras,keras-team/autokeras | examples/reuters.py | examples/reuters.py | """shell
!pip install -q -U pip
!pip install -q -U autokeras==1.0.8
!pip install -q git+https://github.com/keras-team/[email protected]
"""
"""
Search for a good model for the
[Reuters](https://keras.io/ja/datasets/#_5) dataset.
"""
import tensorflow as tf
from tf.keras.datasets import reuters
import numpy as np
import autokeras as ak
# Prepare the dataset.
def reuters_raw(max_features=20000):
index_offset = 3 # word index offset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.reuters.load_data(
num_words=max_features,
index_from=index_offset)
x_train = x_train
y_train = y_train.reshape(-1, 1)
x_test = x_test
y_test = y_test.reshape(-1, 1)
word_to_id = tf.keras.datasets.reuters.get_word_index()
word_to_id = {k: (v + index_offset) for k, v in word_to_id.items()}
word_to_id["<PAD>"] = 0
word_to_id["<START>"] = 1
word_to_id["<UNK>"] = 2
id_to_word = {value: key for key, value in word_to_id.items()}
x_train = list(map(lambda sentence: ' '.join(
id_to_word[i] for i in sentence), x_train))
x_test = list(map(lambda sentence: ' '.join(
id_to_word[i] for i in sentence), x_test))
x_train = np.array(x_train, dtype=np.str)
x_test = np.array(x_test, dtype=np.str)
return (x_train, y_train), (x_test, y_test)
# Prepare the data.
(x_train, y_train), (x_test, y_test) = reuters_raw()
print(x_train.shape) # (8982,)
print(y_train.shape) # (8982, 1)
print(x_train[0][:50]) # <START> <UNK> <UNK> said as a result of its decemb
# Initialize the TextClassifier
clf = ak.TextClassifier(
max_trials=5,
overwrite=True,
)
# Callback to avoid overfitting with the EarlyStopping.
cbs = [
tf.keras.callbacks.EarlyStopping(patience=3),
]
# Search for the best model.
clf.fit(
x_train,
y_train,
epochs=10,
callback=cbs
)
# Evaluate on the testing data.
print('Accuracy: {accuracy}'.format(accuracy=clf.evaluate(x_test, y_test)))
| apache-2.0 | Python |
|
315914bbec88e11bf5ed3bcab29218592549eccf | Create Kmeans.py | DamiPayne/Feature-Agglomeration-Clustering | Kmeans.py | Kmeans.py | import collections
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from pprint import pprint
import csv
import pandas
def word_tokenizer(text):
#tokenizes and stems the text
tokens = word_tokenize(text)
stemmer = PorterStemmer()
tokens = [stemmer.stem(t) for t in tokens if t not in stopwords.words('english')]
return tokens
def cluster_sentences(sentences, nb_of_clusters=5):
tfidf_vectorizer = TfidfVectorizer(tokenizer=word_tokenizer,
stop_words=stopwords.words('english'),
max_df=0.99,
min_df=0.01,
lowercase=True)
#builds a tf-idf matrix for the sentences
tfidf_matrix = tfidf_vectorizer.fit_transform(sentences)
kmeans = KMeans(n_clusters=nb_of_clusters)
kmeans.fit(tfidf_matrix)
clusters = collections.defaultdict(list)
for i, label in enumerate(kmeans.labels_):
clusters[label].append(i)
return dict(clusters)
import csv
with open(r'C:\Sales\SP.csv') as f:
reader = csv.reader(f)
Pre_sentence = list(reader)
flatten = lambda l: [item for sublist in l for item in sublist]
sentences = flatten(Pre_sentence)
with open(r'C:\Sales\Cat.csv') as g:
reader_cat = csv.reader(g)
Pre_Cat = list(reader_cat)
Cats = flatten(Pre_Cat)
if __name__ == "__main__":
# sentences = ["Nature is beautiful","I like green apples",
# "We should protect the trees","Fruit trees provide fruits",
# "Green apples are tasty","My name is Dami"]
nclusters= 100
clusters = cluster_sentences(sentences, nclusters)
for cluster in range(nclusters):
print ("Grouped Engagements ",cluster,":")
for i,sentence in enumerate(clusters[cluster]):
print ("\tEngagement ", Cats[sentence],": ", sentences[sentence])
| mit | Python |
|
b0377568c9b927db588b006b7312cbe8ed9d48b7 | Add tremelo example | martinmcbride/pysound | examples/tremelo.py | examples/tremelo.py | # Author: Martin McBride
# Created: 2016-01-08
# Copyright (C) 2016, Martin McBride
# License: MIT
# Website sympl.org/pysound
#
# Square wave example
try:
import pysound
except ImportError:
# if pysound is not installed append parent dir of __file__ to sys.path
import sys, os
sys.path.insert(0, os.path.abspath(os.path.split(os.path.abspath(__file__))[0]+'/..'))
from pysound.components.soundfile import write_wav
from pysound.components.wavetable import square_wave
from pysound.components.wavetable import sine_wave
#
# Create a tremelo effect
#
amp = sine_wave(frequency=10, amplitude=0.1, offset = 0.8)
wave = square_wave(frequency=400, amplitude=amp)
write_wav(source=wave, filename='tremelo.wav') | mit | Python |
|
ea26478495d5aec6925e32c9a87245bf2e1e4bc8 | Add script demonstrating raising and catching Exceptions. | kubkon/ee106-additional-material | rps/errors.py | rps/errors.py | gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
if player_move not in gestures:
raise Exception("Wrong input!")
return player_move
# let's catch an exception
try:
player_move = verify_move(input("[rock,paper,scissors]: "))
print("The move was correct.")
except Exception:
print("The move was incorrect and Exception was raised.")
| mit | Python |
|
fb95c75b7b43bcb1fa640e4de3181fd0431c5837 | Add the unittest test_plot.py | joekasp/ionic_liquids | ionic_liquids/test/test_plot.py | ionic_liquids/test/test_plot.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
FIG_SIZE = (4, 4)
def test_parity_plot():
"""
Test the parity plot
Input
-----
y_pred : predicted values from the model
y_act : 'true' (actual) values
Output
------
fig : matplotlib figure
Check:
1. The x,y vector has the same datatype
2. The x,y vector has the same dimension
"""
y_pred=np.arange(0,1)
y_act=np.arange(0,1)
assert isinstance(y_pred,type(y_act)), "The two column in the parity plot should have same datatype"
assert len(y_pred)==len(y_act), "The two column in the parity plot should have same length"
fig = plt.figure(figsize=FIG_SIZE)
plt.scatter(y_act, y_pred)
plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],
lw=4, color='r')
plt.xlabel('Actual')
plt.ylabel('Predicted')
return fig
def test_train_test_error():
"""
Test the plot of training vs. test error
Input
-----
e_train : numpy array of training errors
e_test : numpy array of test errors
model_params : independent parameters of model (eg. alpha in LASSO)
Returns
-------
fig : matplotlib figure
Check:
1. The e_train, e_test and model_params has the same dimension
"""
e_train = np.arange(0,1)
e_test = np.arange(0,1)
model_params = np.arange(0,1)
assert len(e_train)==len(model_params), "The training error and model parameters should have the same dimension"
assert len(e_test)==len(model_params), "The test error and model parameters should have the same dimension"
fig = plt.figure(figsize=FIG_SIZE)
plt.plot(model_params, e_train, label='Training Set')
plt.plot(model_params, e_train, label='Test Set')
plt.xlabel('Model Parameter')
plt.ylabel('MSE of model')
plt.legend()
return fig
def test_scatter_plot():
"""
Test plot of predicted electric conductivity as a
function of the mole fractions.
Input
-----
x_vals : numpy vector x-axis (mole fractions)
y_vals : numpy vector y-axis (predicted conductivities)
x_variable : string for labeling the x-axis
Returns
------
fig : matplotlib figure
Check:
1. The x_variable is a string
2. The x,y vector has the same dimension
"""
x_variable = 'm'
x_vals = np.arange(0,1)
y_vals = np.arange(0,1)
assert isinstance(x_variable,str), "x_variable should be a string variable"
assert len(x_vals)==len(y_vals), "The x and y vector should have the same dimension"
if (x_variable == 'm'):
x_variable = 'Mole Fraction A'
elif (x_variable == 'p'):
x_variable = 'Pressure (kPa)'
elif (x_variable == 't'):
x_variable = 'Temperature (K)'
fig = plt.figure(figsize=FIG_SIZE)
plt.scatter(x_vals, y_vals)
plt.xlabel(x_variable)
plt.ylabel('Electrical Conductivity')
return fig
| mit | Python |
|
ecb3bd6fd9b6496a751a2145909648ba1be8f908 | add linear interpolation tests | timothydmorton/isochrones,timothydmorton/isochrones | isochrones/tests/test_interp.py | isochrones/tests/test_interp.py | import itertools
import logging
import numpy as np
import pandas as pd
from scipy.interpolate import RegularGridInterpolator
from isochrones.interp import DFInterpolator
def test_interp():
xx, yy, zz = [np.arange(10 + np.log10(n))*n for n in [1, 10, 100]]
def func(x, y, z):
return x**2*np.cos(y/10) + z
df = pd.DataFrame([(x, y, z, func(x, y, z)) for x, y, z in itertools.product(xx, yy, zz)],
columns=['x', 'y', 'z', 'val']).set_index(['x','y', 'z'])
grid = np.reshape(df.val.values, (10, 11, 12))
interp = RegularGridInterpolator([xx, yy, zz], grid)
df_interp = DFInterpolator(df)
grid_pars = [6, 50, 200]
pars = [3.1, 44, 503]
# Make sure grid point returns correct exact value
assert df_interp(grid_pars, 'val') == func(*grid_pars)
# Check linear interpolation vis-a-vis scipy
try:
assert np.isclose(df_interp(pars, 'val'), interp(pars)[0], rtol=1e-10, atol=1e-11)
except AssertionError:
logging.debug('mine: {}, scipy: {}'.format(df_interp(pars, 'val'), interp(pars)[0]))
raise
| mit | Python |
|
947c9ef100686fa1ec0acaa10bc49bf6c785665b | Use unified class for json output | spookey/ffflash,spookey/ffflash | ffflash/container.py | ffflash/container.py | from os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
def __init__(self, spec, filename):
self._spec = spec
self._location = path.abspath(filename)
self.data = read_json_file(self._location, fallback={})
self._info()
def _info(self, info={}):
self.data['_info'] = self.data.get('_info', {})
self.data['_info']['generator'] = RELEASE
self.data['_info']['access'] = self.data['_info'].get('access', {})
if not self.data['_info']['access'].get('first', False):
self.data['_info']['access']['first'] = now
self.data['_info']['access']['last'] = now
self.data['_info']['access']['overall'] = epoch_repr(
abs(now - self.data['_info']['access']['first']),
ms=True
)
self.data['_info']['access']['timeout'] = timeout
if info:
self.data['_info'] = merge_dicts(self.data['_info'], info)
def save(self, info={}):
self._info(info)
if write_json_file(self._location, self.data):
log.info('{} saved {}'.format(self._spec, self._location))
| bsd-3-clause | Python |
|
6c5dad5d617892a3ea5cdd20cbaef89189307195 | add simple content-based model for coldstart | Evfro/polara | polara/recommender/coldstart/models.py | polara/recommender/coldstart/models.py | import numpy as np
from polara.recommender.models import RecommenderModel
class ContentBasedColdStart(RecommenderModel):
def __init__(self, *args, **kwargs):
super(ContentBasedColdStart, self).__init__(*args, **kwargs)
self.method = 'CB'
self._key = '{}_cold'.format(self.data.fields.itemid)
self._target = self.data.fields.userid
def build(self):
pass
def get_recommendations(self):
item_similarity_scores = self.data.cold_items_similarity
user_item_matrix = self.get_training_matrix()
user_item_matrix.data = np.ones_like(user_item_matrix.data)
scores = item_similarity_scores.dot(user_item_matrix.T).tocsr()
top_similar_users = self.get_topk_elements(scores).astype(np.intp)
return top_similar_users
| mit | Python |
|
2ca6b22e645cbbe63737d4ac3929cb23700a2e06 | Prepare v1.2.342.dev | tsnoam/Flexget,tsnoam/Flexget,jacobmetrick/Flexget,antivirtel/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,sean797/Flexget,xfouloux/Flexget,grrr2/Flexget,poulpito/Flexget,ianstalk/Flexget,gazpachoking/Flexget,jawilson/Flexget,qvazzler/Flexget,lildadou/Flexget,Pretagonist/Flexget,malkavi/Flexget,Danfocus/Flexget,Pretagonist/Flexget,crawln45/Flexget,antivirtel/Flexget,lildadou/Flexget,Danfocus/Flexget,spencerjanssen/Flexget,jawilson/Flexget,thalamus/Flexget,Danfocus/Flexget,poulpito/Flexget,offbyone/Flexget,tobinjt/Flexget,drwyrm/Flexget,antivirtel/Flexget,Danfocus/Flexget,tarzasai/Flexget,JorisDeRieck/Flexget,cvium/Flexget,offbyone/Flexget,ZefQ/Flexget,ibrahimkarahan/Flexget,Pretagonist/Flexget,tobinjt/Flexget,lildadou/Flexget,ratoaq2/Flexget,malkavi/Flexget,OmgOhnoes/Flexget,oxc/Flexget,LynxyssCZ/Flexget,qvazzler/Flexget,tobinjt/Flexget,ianstalk/Flexget,Flexget/Flexget,sean797/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,jacobmetrick/Flexget,ibrahimkarahan/Flexget,cvium/Flexget,thalamus/Flexget,malkavi/Flexget,xfouloux/Flexget,malkavi/Flexget,qk4l/Flexget,ibrahimkarahan/Flexget,tsnoam/Flexget,grrr2/Flexget,offbyone/Flexget,dsemi/Flexget,grrr2/Flexget,qk4l/Flexget,drwyrm/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,ZefQ/Flexget,xfouloux/Flexget,ianstalk/Flexget,ratoaq2/Flexget,oxc/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,qk4l/Flexget,thalamus/Flexget,tarzasai/Flexget,crawln45/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,jawilson/Flexget,drwyrm/Flexget,oxc/Flexget,cvium/Flexget,qvazzler/Flexget,ZefQ/Flexget,jacobmetrick/Flexget,ratoaq2/Flexget,tarzasai/Flexget,crawln45/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,poulpito/Flexget,spencerjanssen/Flexget,LynxyssCZ/Flexget,dsemi/Flexget,sean797/Flexget,spencerjanssen/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.342.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.341'
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.