commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang |
---|---|---|---|---|---|---|---|---|
81cd0b74e611532f8421d0dfb22266cd789a5a6a | add in oauth_keys to dev (bug 858813) | muffinresearch/solitude,muffinresearch/solitude | solitude/settings/sites/dev/db.py | solitude/settings/sites/dev/db.py | """private_base will be populated from puppet and placed in this directory"""
import logging
import dj_database_url
import private_base as private
from solitude.settings import base
from django_sha2 import get_password_hashers
ADMINS = ()
ALLOWED_HOSTS = ['payments-dev.allizom.org', 'localhost']
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
HMAC_KEYS = private.HMAC_KEYS
PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
LOG_LEVEL = logging.DEBUG
SECRET_KEY = private.SECRET_KEY
SENTRY_DSN = private.SENTRY_DSN
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
SYSLOG_TAG = 'http_app_payments_dev'
TEMPLATE_DEBUG = DEBUG
# Solitude specific settings.
AES_KEYS = private.AES_KEYS
CLEANSED_SETTINGS_ACCESS = True
CLIENT_OAUTH_KEYS = private.CLIENT_OAUTH_KEYS
PAYPAL_PROXY = private.PAYPAL_PROXY
PAYPAL_URL_WHITELIST = ('https://marketplace-dev.allizom.org',)
BANGO_PROXY = private.BANGO_PROXY
SITE_URL = 'https://payments-dev.allizom.org'
| """private_base will be populated from puppet and placed in this directory"""
import logging
import dj_database_url
import private_base as private
from solitude.settings import base
from django_sha2 import get_password_hashers
ADMINS = ()
ALLOWED_HOSTS = ['payments-dev.allizom.org', 'localhost']
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
HMAC_KEYS = private.HMAC_KEYS
PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
LOG_LEVEL = logging.DEBUG
SECRET_KEY = private.SECRET_KEY
SENTRY_DSN = private.SENTRY_DSN
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
SYSLOG_TAG = 'http_app_payments_dev'
TEMPLATE_DEBUG = DEBUG
# Solitude specific settings.
AES_KEYS = private.AES_KEYS
CLEANSED_SETTINGS_ACCESS = True
CLIENT_JWT_KEYS = private.CLIENT_JWT_KEYS
PAYPAL_PROXY = private.PAYPAL_PROXY
PAYPAL_URL_WHITELIST = ('https://marketplace-dev.allizom.org',)
BANGO_PROXY = private.BANGO_PROXY
SITE_URL = 'https://payments-dev.allizom.org'
| bsd-3-clause | Python |
3422c60553c6cd1a746e7c6e39a3e2ac707b0cf7 | return only mean predictions for cost and omit variance | numairmansur/RoBO,automl/RoBO,aaronkl/RoBO,aaronkl/RoBO,aaronkl/RoBO,numairmansur/RoBO,automl/RoBO | robo/acquisition/EnvEntropySearch.py | robo/acquisition/EnvEntropySearch.py | '''
Created on Jun 8, 2015
@author: Aaron Klein
'''
import emcee
import numpy as np
from robo.acquisition.LogEI import LogEI
from robo.acquisition.EntropyMC import EntropyMC
from scipy import stats
class EnvEntropySearch(EntropyMC):
'''
classdocs
'''
def __init__(self, model, cost_model, X_lower, X_upper, compute_incumbent, is_env_variable, n_representer=10, n_hals_vals=100, n_func_samples=100, **kwargs):
self.cost_model = cost_model
self.n_dims = X_lower.shape[0]
self.is_env_variable = is_env_variable
super(EnvEntropySearch, self).__init__(model, X_lower, X_upper, compute_incumbent, Nb=n_representer, Nf=n_func_samples, Np=n_hals_vals)
def update(self, model, cost_model):
self.cost_model = cost_model
super(EnvEntropySearch, self).update(model)
def __call__(self, X, derivative=False):
# Predict the costs for this configuration
cost = self.cost_model.predict(X)[0]
# Compute fantasized pmin
new_pmin = self.change_pmin_by_innovation(X, self.f)
# Compute acquisition value
H_old = np.sum(np.multiply(self.pmin, (self.logP + self.lmb)))
H_new = np.sum(np.multiply(new_pmin, (np.log(new_pmin) + self.lmb)))
loss = np.array([[-H_new + H_old]])
acquisition_value = loss / cost
return acquisition_value
def update_representer_points(self):
#TODO: We might want to start the sampling of the representer points from the incumbent here? Or maybe from a sobel grid?
super(EnvEntropySearch, self).update_representer_points()
# Project representer points to subspace
self.zb[:, self.is_env_variable == 1] = self.X_upper[self.is_env_variable == 1]
| '''
Created on Jun 8, 2015
@author: Aaron Klein
'''
import emcee
import numpy as np
from robo.acquisition.LogEI import LogEI
from robo.acquisition.EntropyMC import EntropyMC
from scipy import stats
class EnvEntropySearch(EntropyMC):
'''
classdocs
'''
def __init__(self, model, cost_model, X_lower, X_upper, compute_incumbent, is_env_variable, n_representer=10, n_hals_vals=100, n_func_samples=100, **kwargs):
self.cost_model = cost_model
self.n_dims = X_lower.shape[0]
self.is_env_variable = is_env_variable
super(EnvEntropySearch, self).__init__(model, X_lower, X_upper, compute_incumbent, Nb=n_representer, Nf=n_func_samples, Np=n_hals_vals)
def update(self, model, cost_model):
self.cost_model = cost_model
super(EnvEntropySearch, self).update(model)
def compute(self, X, derivative=False):
# Predict the costs for this configuration
cost = self.cost_model.predict(X)
# Compute fantasized pmin
new_pmin = self.change_pmin_by_innovation(X, self.f)
# Compute acquisition value
H_old = np.sum(np.multiply(self.pmin, (self.logP + self.lmb)))
H_new = np.sum(np.multiply(new_pmin, (np.log(new_pmin) + self.lmb)))
loss = np.array([[-H_new + H_old]])
acquisition_value = loss / cost
return acquisition_value
def update_representer_points(self):
#TODO: We might want to start the sampling of the representer points from the incumbent here? Or maybe from a sobel grid?
super(EnvEntropySearch, self).update_representer_points()
# Project representer points to subspace
self.zb[:, self.is_env_variable == 1] = self.X_upper[self.is_env_variable == 1]
| bsd-3-clause | Python |
2d8c8cce8885b24ac1766912ee7bd1897900ae0c | fix up Comment model | lionleaf/dwitter,lionleaf/dwitter,lionleaf/dwitter | dwitter/models.py | dwitter/models.py | from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import pre_delete
def get_sentinel_user():
users = get_user_model().objects
return users.get_or_create(username='[deleted]', is_active=False)[0]
@receiver(pre_delete, sender=User)
def soft_delete_user_dweets(instance, **kwargs):
for dweet in Dweet.objects.filter(_author=instance):
dweet.delete()
class NotDeletedDweetManager(models.Manager):
def get_queryset(self):
base_queryset = super(NotDeletedDweetManager, self).get_queryset()
return base_queryset.filter(deleted=False)
class Dweet(models.Model):
code = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey("self", on_delete=models.DO_NOTHING,
null=True, blank=True)
likes = models.ManyToManyField(User, related_name="liked")
hotness = models.FloatField(default=1.0)
deleted = models.BooleanField(default=False)
_author = models.ForeignKey(User, on_delete=models.SET_NULL,
null=True, blank=True)
@property
def author(self):
return self._author or get_sentinel_user()
@author.setter
def author(self, value):
self._author = value
objects = NotDeletedDweetManager()
with_deleted = models.Manager()
def delete(self):
self.deleted = True
self.save()
def __unicode__(self):
return 'd/' + str(self.id) + ' (' + self.author.username + ')'
class Meta:
ordering = ('-posted',)
class Comment(models.Model):
text = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey(Dweet, on_delete=models.CASCADE,
related_name="comments")
_author = models.ForeignKey(User, on_delete=models.CASCADE)
@property
def author(self):
return self._author
@author.setter
def author(self, value):
self._author = value
def __unicode__(self):
return ('c/' +
str(self.id) +
' (' +
self.author.username +
') to ' +
str(self.reply_to))
class Meta:
ordering = ('-posted',)
| from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import pre_delete
def get_sentinel_user():
users = get_user_model().objects
return users.get_or_create(username='[deleted]', is_active=False)[0]
@receiver(pre_delete, sender=User)
def soft_delete_user_dweets(instance, **kwargs):
for dweet in Dweet.objects.filter(_author=instance):
dweet.delete()
class NotDeletedDweetManager(models.Manager):
def get_queryset(self):
base_queryset = super(NotDeletedDweetManager, self).get_queryset()
return base_queryset.filter(deleted=False)
class Dweet(models.Model):
code = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey("self", on_delete=models.DO_NOTHING,
null=True, blank=True)
likes = models.ManyToManyField(User, related_name="liked")
hotness = models.FloatField(default=1.0)
deleted = models.BooleanField(default=False)
_author = models.ForeignKey(User, on_delete=models.SET_NULL,
null=True, blank=True)
@property
def author(self):
return self._author or get_sentinel_user()
@author.setter
def author(self, value):
self._author = value
objects = NotDeletedDweetManager()
with_deleted = models.Manager()
def delete(self):
self.deleted = True
self.save()
def __unicode__(self):
return 'd/' + str(self.id) + ' (' + self.author.username + ')'
class Meta:
ordering = ('-posted',)
class Comment(models.Model):
text = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey(Dweet, on_delete=models.CASCADE,
related_name="comments")
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __unicode__(self):
return ('c/' +
str(self.id) +
' (' +
self.author.username +
') to ' +
str(self.reply_to))
class Meta:
ordering = ('-posted',)
| apache-2.0 | Python |
c98662bf577afa1dcf1b847193dd2e856a90e864 | Fix flopped windows comment | glumpy/glumpy,glumpy/glumpy | examples/app-two-programs.py | examples/app-two-programs.py | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import gl, app, gloo
vertex = """
attribute vec2 a_position;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
gl_PointSize = 30.0;
}
"""
fragment1 = """
void main() {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
}
"""
fragment2 = """
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"""
program1 = gloo.Program(vertex, fragment1) # blue on the right
program1['a_position'] = np.zeros((1,2),dtype=np.float32) + 0.5
program2 = gloo.Program(vertex, fragment2) # red on the left
program2['a_position'] = np.zeros((1,2),dtype=np.float32) - 0.5
window = app.Window()
@window.event
def on_draw(dt):
window.clear()
program1.draw(gl.GL_POINTS)
program2.draw(gl.GL_POINTS)
app.run()
| # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import gl, app, gloo
vertex = """
attribute vec2 a_position;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
gl_PointSize = 30.0;
}
"""
fragment1 = """
void main() {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
}
"""
fragment2 = """
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"""
program1 = gloo.Program(vertex, fragment1) # blue on the left
program1['a_position'] = np.zeros((1,2),dtype=np.float32) + 0.5
program2 = gloo.Program(vertex, fragment2) # red on the right
program2['a_position'] = np.zeros((1,2),dtype=np.float32) - 0.5
window = app.Window()
@window.event
def on_draw(dt):
window.clear()
program1.draw(gl.GL_POINTS)
program2.draw(gl.GL_POINTS)
app.run()
| bsd-3-clause | Python |
818f1431fe67120967f385ee090d06c1038e48c4 | Add project level imports so users don't have to worry about the module names. | caseywstark/dimensionful | dimensionful/__init__.py | dimensionful/__init__.py | from units import Unit
from quantity import Quantity
from common_units import *
from constants import *
| | bsd-2-clause | Python |
c713aa4953063cb6e64ecaaaed464f2a441482bb | Add testing scaffolding. | pgorla/lil-kv | lilkv/testsuite/test_basic.py | lilkv/testsuite/test_basic.py | # -*- coding: utf-8 -*-
"""
lilkv.testsuite.basic
Test lilkv basic functionality.
"""
from lilkv.testsuite import LilKVTestCase
from lilkv.keyspace import Keyspace
from lilkv.columnfamily import ColumnFamily
from lilkv.column import Column
class BasicTests(LilKVTestCase):
"""Baseclass for testing out the application.
"""
def test_keyspace_creation(self):
ks = Keyspace("Test Keyspace")
self.assert_in("Test Keyspace", Keyspace.KEYSPACES)
def test_columnfamily_creation(self):
ks = Keyspace("Test Keyspace")
ks.create_columnfamily("daily_visitors")
self.assert_in("daily_visitors", ks.columnfamilies)
def test_adding_data(self):
pass
def test_reading_data(self):
pass
def test_deleting_data(self):
pass
| # -*- coding: utf-8 -*-
"""
lilkv.testsuite.basic
Test lilkv basic functionality.
"""
from lilkv.testsuite import LilKVTestCase
from lilkv.keyspace import Keyspace
from lilkv.columnfamily import ColumnFamily
from lilkv.column import Column
class BasicTests(LilKVTestCase):
"""Baseclass for testing out the application.
"""
def test_keyspace_creation(self):
ks = Keyspace("Test Keyspace")
self.assert_in("Test Keyspace", Keyspace.KEYSPACES)
def test_columnfamily_creation(self):
ks = Keyspace("Test Keyspace")
ks.create_columnfamily("daily_visitors")
self.assert_in("daily_visitors", ks.columnfamilies)
| mit | Python |
573d1e2498467da357a79bb865683e162e16eb14 | increment version to 0.13.1 | Farama-Foundation/Gymnasium,Farama-Foundation/Gymnasium | gym/version.py | gym/version.py | VERSION = '0.13.1'
| VERSION = '0.13.0'
| mit | Python |
d83e8f8702755766c1c15a35297b40d25051d55e | Bump version | Farama-Foundation/Gymnasium,dianchen96/gym,Farama-Foundation/Gymnasium,dianchen96/gym | gym/version.py | gym/version.py | VERSION = '0.4.9'
| VERSION = '0.4.8'
| mit | Python |
bb9aafe090d71c2a25eb3f3a6d591a205dbb7e5e | bump to 0.9.5 | dmpetrov/dataversioncontrol,dmpetrov/dataversioncontrol,efiop/dvc,efiop/dvc,dataversioncontrol/dvc,dataversioncontrol/dvc | dvc/__init__.py | dvc/__init__.py | """
DVC
----
Make your data science projects reproducible and shareable.
"""
import os
VERSION = '0.9.5'
if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' and os.getenv('TRAVIS_TAG', '') == '':
# Dynamically update version
try:
import git
repo = git.Repo(os.curdir, search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=6)
dirty = '.mod' if repo.is_dirty() else ''
VERSION = '{}+{}{}'.format(VERSION, short_sha, dirty)
except:
pass
__version__ = VERSION
| """
DVC
----
Make your data science projects reproducible and shareable.
"""
import os
VERSION = '0.9.4'
if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' and os.getenv('TRAVIS_TAG', '') == '':
# Dynamically update version
try:
import git
repo = git.Repo(os.curdir, search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=6)
dirty = '.mod' if repo.is_dirty() else ''
VERSION = '{}+{}{}'.format(VERSION, short_sha, dirty)
except:
pass
__version__ = VERSION
| apache-2.0 | Python |
3aac2716972c49eb3b1b688cb1fad89ce690ca58 | fix incorrect empty list condition | cielpy/build_ipa | filter_log.py | filter_log.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 ciel <ciel@cieldeMBP>
#
# Distributed under terms of the MIT license.
"""
filter git log
"""
import codecs
from call_cmd import call
import config
def filter_log(last_commit):
commit_valid = call('git -C {} cat-file -e '.format(config.config_dic['project_path']) + last_commit)[0]
if commit_valid != 0:
return '无'
git_logs_cmd = '''git -C {} log --pretty=\"%s\" {}..HEAD'''.format(config.config_dic['project_path'], last_commit)
logs = call(git_logs_cmd)
log_has_prefix = []
prefix = config.config_dic['filter_log']['prefix']
if not prefix:
prefix = '['
for line in logs[1].split("\n"):
if line.startswith(prefix):
log_has_prefix.append(line)
if not log_has_prefix:
return '无'
log_file = '{}log.txt'.format(config.config_dic['builds_path'])
with codecs.open(log_file, 'w', 'UTF-8') as f:
for line in log_has_prefix:
f.write('{}\n'.format(line))
with codecs.open(log_file, 'r+', 'UTF-8') as f:
flip_cmd = "sed '1!G;h;$!d' " + log_file
res = call(flip_cmd)
f.write(res[1])
with codecs.open(log_file, 'r+', 'UTF-8') as f:
add_num_cmd = """awk '{printf NR"."" "}1' """ + log_file
res = call(add_num_cmd)
f.write(res[1])
with codecs.open(log_file, 'r', 'UTF-8') as f:
return f.read()
def msg_with_intall_info(last_commit, build):
build_info = config.config_dic['build'][build]
log = filter_log(last_commit)
msg = '更新日志:' + '\n\n' + log + '\n\n' + '安装地址:' + build_info['download_url']
return msg | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 ciel <ciel@cieldeMBP>
#
# Distributed under terms of the MIT license.
"""
filter git log
"""
import codecs
from call_cmd import call
import config
def filter_log(last_commit):
commit_valid = call('git -C {} cat-file -e '.format(config.config_dic['project_path']) + last_commit)[0]
if commit_valid != 0:
return '无'
git_logs_cmd = '''git -C {} log --pretty=\"%s\" {}..HEAD'''.format(config.config_dic['project_path'], last_commit)
logs = call(git_logs_cmd)
log_has_prefix = []
prefix = config.config_dic['filter_log']['prefix']
if not prefix:
prefix = '['
for line in logs[1].split("\n"):
if line.startswith(prefix):
log_has_prefix.append(line)
if log_has_prefix.count:
return '无'
log_file = '{}log.txt'.format(config.config_dic['builds_path'])
with codecs.open(log_file, 'w', 'UTF-8') as f:
for line in log_has_prefix:
f.write('{}\n'.format(line))
with codecs.open(log_file, 'r+', 'UTF-8') as f:
flip_cmd = "sed '1!G;h;$!d' " + log_file
res = call(flip_cmd)
f.write(res[1])
with codecs.open(log_file, 'r+', 'UTF-8') as f:
add_num_cmd = """awk '{printf NR"."" "}1' """ + log_file
res = call(add_num_cmd)
f.write(res[1])
with codecs.open(log_file, 'r', 'UTF-8') as f:
return f.read()
def msg_with_intall_info(last_commit, build):
build_info = config.config_dic['build'][build]
log = filter_log(last_commit)
msg = '更新日志:' + '\n\n' + log + '\n\n' + '安装地址:' + build_info['download_url']
return msg | mit | Python |
ee795da3215374f30005c9daa42de6f9d581580f | Make finglonger output a bit easier to read | nibalizer/finglonger | finglonger.py | finglonger.py | #!/usr/bin/python
import os
import sys
import subprocess
import tempfile
import yaml
def validate_config(config):
environment = config.get('environment')
if environment is None:
print "No environment set, set one in config.yaml "
sys.exit(1)
def validate_environment(config):
if os.path.isfile("envs/" + config['environment'] + "/tasks.yaml"):
pass
else:
print "Tasks file not found, are you in the right directory?"
sys.exit(1)
def git_cmd(command):
p = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
def process_task(task):
print "Finglongering..."
print task['name']
temp, temp_name = tempfile.mkstemp()
print temp_name
f = os.fdopen(temp, 'w')
f.write(task['shell'])
f.close()
os.chmod(temp_name, 0755)
p = subprocess.Popen(["/bin/bash", temp_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
print out
print err
if __name__ == "__main__":
config_file = os.environ['HOME'] + "/.config/finglonger/config.yaml"
if os.path.isfile(config_file):
with open(config_file) as f:
config = yaml.load(f.read())
else:
print "Config file not found: {0}".format(config_file)
sys.exit(1)
validate_config(config)
validate_environment(config)
git_cmd('git checkout master')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
master_tasks = yaml.load(f.read())
git_cmd('git checkout done')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
done_tasks = yaml.load(f.read())
git_cmd('git checkout master')
print "Tasks on master", len(master_tasks)
print "Tasks on done", len(done_tasks)
print "Tasks to do", len(master_tasks) - len(done_tasks)
for i in done_tasks:
master_tasks.remove(i)
for task in master_tasks:
process_task(task['task'])
git_cmd('git checkout done')
git_cmd('git merge master')
git_cmd('git push origin done')
git_cmd('git checkout master')
| #!/usr/bin/python
import os
import sys
import subprocess
import tempfile
import yaml
def validate_config(config):
environment = config.get('environment')
if environment is None:
print "No environment set, set one in config.yaml "
sys.exit(1)
def validate_environment(config):
if os.path.isfile("envs/" + config['environment'] + "/tasks.yaml"):
pass
else:
print "Tasks file not found, are you in the right directory?"
sys.exit(1)
def git_cmd(command):
p = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
def process_task(task):
print "Finglongering..."
print task['name']
temp, temp_name = tempfile.mkstemp()
print temp_name
f = os.fdopen(temp, 'w')
f.write(task['shell'])
f.close()
os.chmod(temp_name, 0755)
p = subprocess.Popen(["/bin/bash", temp_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
print out
print err
if __name__ == "__main__":
config_file = os.environ['HOME'] + "/.config/finglonger/config.yaml"
if os.path.isfile(config_file):
with open(config_file) as f:
config = yaml.load(f.read())
else:
print "Config file not found: {0}".format(config_file)
sys.exit(1)
validate_config(config)
validate_environment(config)
git_cmd('git checkout master')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
master_tasks = yaml.load(f.read())
git_cmd('git checkout done')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
done_tasks = yaml.load(f.read())
git_cmd('git checkout master')
print len(master_tasks)
print len(done_tasks)
for i in done_tasks:
master_tasks.remove(i)
for task in master_tasks:
process_task(task['task'])
git_cmd('git checkout done')
git_cmd('git merge master')
git_cmd('git push origin done')
git_cmd('git checkout master')
| apache-2.0 | Python |
cb4f022fb1fe0780eb2e37c8fdc8ff6a4409115c | Test and implementation for !ptr+offset loading | benstreb/os-test-harness,benstreb/os-test-harness | ostester/yamlreader.py | ostester/yamlreader.py | import collections.abc
from io import StringIO
import yaml
import ast
def parse(file):
return yaml.safe_load(file)
def parse_from_string(string):
return parse(StringIO(string))
class Zeros(collections.abc.Sequence):
"""
Represents a zeroed region of memory in C
>>> yaml.load("!zeros 5")
Zeros(5)
>>> yaml.dump(Zeros(3))
"!zeros '3'\\n"
>>> list(Zeros(7))
[0, 0, 0, 0, 0, 0, 0]
>>> Zeros(3)[-3]
0
>>> Zeros(3)[-2]
0
>>> Zeros(4)[1:3]
[0, 0]
"""
yaml_tag='!zeros'
def __init__(self, len):
self.len = len
@staticmethod
def from_yaml_loader(loader, node):
return Zeros(int(node.value))
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Zeros.yaml_tag, str(data.len))
def __getitem__(self, key):
if isinstance(key, slice):
return [0 for key in range(*key.indices(self.len))]
elif key > self.len-1 or key < -self.len:
raise IndexError('Zeros index out of range')
return 0
def __len__(self):
return self.len
def __repr__(self):
return 'Zeros({})'.format(repr(self.len))
yaml.add_representer(Zeros, Zeros.yaml_representer)
yaml.add_constructor(Zeros.yaml_tag, Zeros.from_yaml_loader)
class Pointer():
"""
Represents a pointer into an array.
>>> yaml.load('!ptr value')
Pointer('value')
>>> yaml.load('!ptr array+3')
Pointer('array', offset=3)
>>> yaml.dump(Pointer("value"))
"!ptr 'value'\\n"
"""
yaml_tag = '!ptr'
def __init__(self, data, offset=0):
self.data = data
self.offset = int(offset)
@staticmethod
def from_yaml_loader(loader, node):
args = map(str.strip, node.value.split('+'))
return Pointer(*args)
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Pointer.yaml_tag, data.data)
def __repr__(self):
if not self.offset:
format_str = 'Pointer({})'
else:
format_str = 'Pointer({}, offset={})'
return format_str.format(repr(self.data), self.offset)
yaml.add_representer(Pointer, Pointer.yaml_representer)
yaml.add_constructor(Pointer.yaml_tag, Pointer.from_yaml_loader)
def transform(yaml):
pass
| import collections.abc
from io import StringIO
import yaml
import ast
def parse(file):
return yaml.safe_load(file)
def parse_from_string(string):
return parse(StringIO(string))
class Zeros(collections.abc.Sequence):
"""
Represents a zeroed region of memory in C
>>> yaml.load("!zeros 5")
Zeros(5)
>>> yaml.dump(Zeros(3))
"!zeros '3'\\n"
>>> list(Zeros(7))
[0, 0, 0, 0, 0, 0, 0]
>>> Zeros(3)[-3]
0
>>> Zeros(3)[-2]
0
>>> Zeros(4)[1:3]
[0, 0]
"""
yaml_tag='!zeros'
def __init__(self, len):
self.len = len
@staticmethod
def from_yaml_loader(loader, node):
return Zeros(int(node.value))
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Zeros.yaml_tag, str(data.len))
def __getitem__(self, key):
if isinstance(key, slice):
return [0 for key in range(*key.indices(self.len))]
elif key > self.len-1 or key < -self.len:
raise IndexError('Zeros index out of range')
return 0
def __len__(self):
return self.len
def __repr__(self):
return 'Zeros({})'.format(repr(self.len))
yaml.add_representer(Zeros, Zeros.yaml_representer)
yaml.add_constructor(Zeros.yaml_tag, Zeros.from_yaml_loader)
class Pointer():
"""
Represents a pointer into an array.
>>> yaml.load('!ptr value')
Pointer('value')
>>> yaml.dump(Pointer("value"))
"!ptr 'value'\\n"
"""
yaml_tag = '!ptr'
def __init__(self, data, offset=0):
self.data = data
self.offset = offset
@staticmethod
def from_yaml_loader(loader, node):
return Pointer(node.value)
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Pointer.yaml_tag, data.data)
def __repr__(self):
return 'Pointer({})'.format(repr(self.data))
yaml.add_representer(Pointer, Pointer.yaml_representer)
yaml.add_constructor(Pointer.yaml_tag, Pointer.from_yaml_loader)
def transform(yaml):
pass
| isc | Python |
3329b260fbea858dcfe3f6f6a9ff365467352d1f | optimize sum_lines to reduce time consumption | edonyM/toolkitem,edonyM/toolkitem,edonyM/toolkitem | fileprocess/filesline.py | fileprocess/filesline.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2017-09-15 15:33
#
# Filename: filesline.py
#
# Description: All Rights Are Reserved
#
"""
import sys
import os
from dirlist import DirList
class FilesLine(DirList):
"""generate the line number of files located in directory
"""
def __init__(self, directory):
super(FilesLine, self).__init__(directory)
self.filesline = 0
def sum_lines(self):
# TODO(edony): optimize algorithm of sum_lines method
filesname = []
for item_dir in self.dirlist.keys():
for item_file in self.dirlist[item_dir][1]:
filesname.append(item_dir + '/' + item_file)
for filename in filesname:
with open(filename, 'rb') as filebuf:
self.filesline += len(filebuf.readlines())
return self.filesline
if __name__ == "__main__":
import time
tmp = DirList('/Users/edony/coding/toolkitem')
#print(tmp.dirlist)
#print(sys.path)
#print(os.path.split(os.path.realpath(__file__)))
tmp1 = FilesLine('/Users/edony/coding/toolkitem')
print(tmp1.dirlist)
print(time.time())
tmp1.sum_lines()
print(time.time())
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2017-09-15 15:33
#
# Filename: filesline.py
#
# Description: All Rights Are Reserved
#
"""
import sys
import os
from dirlist import DirList
class FilesLine(DirList):
"""generate the line number of files located in directory
"""
def __init__(self, directory):
super(FilesLine, self).__init__(directory)
self.filesline = 0
def sum_lines(self):
pass
if __name__ == "__main__":
tmp = DirList('/Users/edony/coding/toolkitem')
#print(tmp.dirlist)
#print(sys.path)
#print(os.path.split(os.path.realpath(__file__)))
tmp1 = FilesLine('/Users/edony/coding/toolkitem')
print(tmp1.dirlist)
| mit | Python |
e1f1f0ca797b639a730e8804dbd5595ad0f395e0 | Add docstring for module.py | scottclowe/python-continuous-integration,scottclowe/python-ci,scottclowe/python-continuous-integration,scottclowe/python-ci | package_name/module.py | package_name/module.py | """
Module provides a simple cubic_rectification function.
"""
import numpy as np
def cubic_rectification(x):
'''
Returns the rectified value of the cube of X.
If X is positive, this is the cube of X, if X is negative it is 0.
'''
return np.maximum(0, x**3)
| import numpy as np
def cubic_rectification(x):
'''
Returns the rectified value of the cube of X.
If X is positive, this is the cube of X, if X is negative it is 0.
'''
return np.maximum(0, x**3)
| mit | Python |
d71c0745a4032ce60dd506e91665e46c4c98271f | Update forwarder_ZMQ_Server.py | VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot | ProBot_Server/Midi_Device/forwarder_ZMQ_Server.py | ProBot_Server/Midi_Device/forwarder_ZMQ_Server.py | #!/usr/bin/python
import zmq
def main():
print "\nProBot's ZMQ Server is running..."
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| #!/usr/bin/python
import zmq
def main():
print "\nServer for ProBot is running..."
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| agpl-3.0 | Python |
788cc159e4d734b972e22ccf06dbcd8ed8f94885 | Update DictStack implementation from jaraco.collections 3.5.1 | pypa/setuptools,pypa/setuptools,pypa/setuptools | distutils/_collections.py | distutils/_collections.py | import collections
import itertools
# from jaraco.collections 3.5.1
class DictStack(list, collections.abc.Mapping):
"""
A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last.
>>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
>>> stack['a']
2
>>> stack['b']
2
>>> stack['c']
2
>>> len(stack)
3
>>> stack.push(dict(a=3))
>>> stack['a']
3
>>> set(stack.keys()) == set(['a', 'b', 'c'])
True
>>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
True
>>> d = stack.pop()
>>> stack['a']
2
>>> d = stack.pop()
>>> stack['a']
1
>>> stack.get('b', None)
>>> 'c' in stack
True
"""
def __iter__(self):
dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key):
for scope in reversed(tuple(list.__iter__(self))):
if key in scope:
return scope[key]
raise KeyError(key)
push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
def __len__(self):
return len(list(iter(self)))
| import collections
import itertools
# from jaraco.collections 3.5
class DictStack(list, collections.abc.Mapping):
"""
A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last.
>>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
>>> stack['a']
2
>>> stack['b']
2
>>> stack['c']
2
>>> stack.push(dict(a=3))
>>> stack['a']
3
>>> set(stack.keys()) == set(['a', 'b', 'c'])
True
>>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
True
>>> d = stack.pop()
>>> stack['a']
2
>>> d = stack.pop()
>>> stack['a']
1
>>> stack.get('b', None)
>>> 'c' in stack
True
"""
def __iter__(self):
dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key):
for scope in reversed(self):
if key in scope:
return scope[key]
raise KeyError(key)
push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
| mit | Python |
3ff2ecfd26097b37832a397a43db6121a0bc3627 | Remove superfluous comment. | maykinmedia/djadyen,maykinmedia/djadyen,maykinmedia/djadyen | djadyen/management/commands/adyen_maintenance.py | djadyen/management/commands/adyen_maintenance.py | from datetime import timedelta
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
from djadyen import settings
from djadyen.choices import Status
from djadyen.models import AdyenNotification
class Command(BaseCommand):
help = "Process the adyen notifications that are not processed yet."
def handle(self, *args, **options):
order_models = [apps.get_model(model) for model in settings.ADYEN_ORDER_MODELS]
# Process notifications which have been sent by Adyen.
for notification in AdyenNotification.objects.filter(is_processed=False):
notification_data = notification.get_notification_data()
reference = notification_data.get('merchantReference')
for order_model in order_models:
orders = order_model.objects.filter(reference=reference)
for order in orders:
order.process_notification(notification)
# After five days of an Order having status 'Pending', move them to 'Error'
five_days_ago = timezone.now() - timedelta(days=5)
for order_model in order_models:
for obj in order_model.objects.filter(
status=Status.Pending,
created_on__lte=five_days_ago
):
obj.status = Status.Error
obj.save()
| from datetime import timedelta
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
from djadyen import settings
from djadyen.choices import Status
from djadyen.models import AdyenNotification
class Command(BaseCommand):
help = "Process the adyen notifications that are not processed yet."
def handle(self, *args, **options):
order_models = [apps.get_model(model) for model in settings.ADYEN_ORDER_MODELS]
#
# N.B. In our implementations there use to be a limit at how far back in the past we
# would go to process notifications. I'm not sure why it existed, so i've removed it.
#
# Process notifications which have been sent by Adyen.
for notification in AdyenNotification.objects.filter(is_processed=False):
notification_data = notification.get_notification_data()
reference = notification_data.get('merchantReference')
for order_model in order_models:
orders = order_model.objects.filter(reference=reference)
for order in orders:
order.process_notification(notification)
# After five days of an Order having status 'Pending', move them to 'Error'
five_days_ago = timezone.now() - timedelta(days=5)
for order_model in order_models:
for obj in order_model.objects.filter(
status=Status.Pending,
created_on__lte=five_days_ago
):
obj.status = Status.Error
obj.save()
| bsd-3-clause | Python |
32eba84ec5527f1afc82998e98f5d15035e311c1 | Allow forced loading. Contemplating changing the default too. | dipakvwarade/pychef,jarosser06/pychef,dipakvwarade/pychef,cread/pychef,coderanger/pychef,coderanger/pychef,Scalr/pychef,jarosser06/pychef,Scalr/pychef,cread/pychef | chef/base.py | chef/base.py | from chef.api import ChefAPI
class DelayedAttribute(object):
"""Descriptor that calls ._populate() before access to implement lazy loading."""
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner):
if instance is None:
return self
if not getattr(instance, '_populated', False):
instance._populate()
instance._populated = True
return getattr(instance, '_'+self.attr)
class ChefObjectMeta(type):
"""Metaclass for ChefObject to implement lazy attributes."""
def __init__(cls, name, bases, d):
for attr in cls.attributes:
setattr(cls, attr, DelayedAttribute(attr))
class ChefObject(object):
"""A base class for Chef API objects."""
__metaclass__ = ChefObjectMeta
url = ''
attributes = []
def __init__(self, name, api=None, lazy=True):
self.name = name
self.api = api or ChefAPI.get_global()
self.url = self.__class__.url + '/' + self.name
if not lazy:
self._populate()
@classmethod
def list(cls, api=None):
api = api or ChefAPI.get_global()
for name, url in api[cls.url].iteritems():
yield cls(name, api=api)
def save(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('PUT', self.url, data=self)
def delete(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('DELETE', self.url)
def _populate(self):
data = self.api[self.url]
for attr in self.__class__.attributes:
setattr(self, '_'+attr, data[attr])
| from chef.api import ChefAPI
class DelayedAttribute(object):
"""Descriptor that calls ._populate() before access to implement lazy loading."""
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner):
if instance is None:
return self
if not getattr(instance, '_populated', False):
instance._populate()
instance._populated = True
return getattr(instance, '_'+self.attr)
class ChefObjectMeta(type):
"""Metaclass for ChefObject to implement lazy attributes."""
def __init__(cls, name, bases, d):
for attr in cls.attributes:
setattr(cls, attr, DelayedAttribute(attr))
class ChefObject(object):
"""A base class for Chef API objects."""
__metaclass__ = ChefObjectMeta
url = ''
attributes = []
def __init__(self, name, api=None):
self.name = name
self.api = api or ChefAPI.get_global()
self.url = self.__class__.url + '/' + self.name
@classmethod
def list(cls, api=None):
api = api or ChefAPI.get_global()
for name, url in api[cls.url].iteritems():
yield cls(name, api=api)
def save(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('PUT', self.url, data=self)
def delete(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('DELETE', self.url)
def _populate(self):
data = self.api[self.url]
for attr in self.__class__.attributes:
setattr(self, '_'+attr, data[attr])
| apache-2.0 | Python |
6ba2dc8cf06efd74cae941c370e75ccddcf1d25c | fix broken arg of DnnL2Pool2DNode | nsauder/treeano,jagill/treeano,jagill/treeano,jagill/treeano,nsauder/treeano,diogo149/treeano,nsauder/treeano,diogo149/treeano,diogo149/treeano | treeano/sandbox/nodes/l2_pool.py | treeano/sandbox/nodes/l2_pool.py | import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("l2_pool")
class L2PoolNode(treeano.Wrapper1NodeImpl):
"""
node that takes the L2 norm of the pooled over region
"""
hyperparameter_names = ("pool_size",)
def architecture_children(self):
nodes = [
tn.SqrNode(self.name + "_sqr"),
self._children.children,
# convert mean pool to sum pool by multiplying by pool size
tn.MultiplyConstantNode(self.name + "_mul"),
tn.SqrtNode(self.name + "_sqrt"),
]
return [tn.SequentialNode(self.name + "_sequential", nodes)]
def init_state(self, network):
super(L2PoolNode, self).init_state(network)
pool_size = network.find_hyperparameter(["pool_size"])
network.set_hyperparameter(self.name + "_mul",
"value",
# cast to float, to not trigger
# warn_float64
float(np.prod(pool_size)))
def L2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.MeanPool2DNode(name + "_pool", **kwargs),
**l2_kwargs)
def DnnL2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.DnnMeanPoolNode(name + "_pool", **kwargs),
**l2_kwargs)
| import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("l2_pool")
class L2PoolNode(treeano.Wrapper1NodeImpl):
"""
node that takes the L2 norm of the pooled over region
"""
hyperparameter_names = ("pool_size",)
def architecture_children(self):
nodes = [
tn.SqrNode(self.name + "_sqr"),
self._children.children,
# convert mean pool to sum pool by multiplying by pool size
tn.MultiplyConstantNode(self.name + "_mul"),
tn.SqrtNode(self.name + "_sqrt"),
]
return [tn.SequentialNode(self.name + "_sequential", nodes)]
def init_state(self, network):
super(L2PoolNode, self).init_state(network)
pool_size = network.find_hyperparameter(["pool_size"])
network.set_hyperparameter(self.name + "_mul",
"value",
# cast to float, to not trigger
# warn_float64
float(np.prod(pool_size)))
def L2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.MeanPool2DNode(name + "_pool", **kwargs),
**l2_kwargs)
def DnnL2Pool2DNode(name, pool_size, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.DnnMeanPoolNode(name + "_pool", **kwargs),
**l2_kwargs)
| apache-2.0 | Python |
372ce38d1ddcf2fd65d83df2499d97d4fc2128e6 | Fix issue in cbb.py | explosiveduck/ed2d,explosiveduck/ed2d | ed2d/physics/cbb.py | ed2d/physics/cbb.py | from ed2d.physics.collisiondata import*
from ed2d.glmath import vector
# Circle Bounding Box
class CBB(object):
def __init__(self, radius, center):
'''Creates a circle bounding box object to be used with the physics engine. Takes in a float for the radius and an array for the center.'''
self.radius = radius
self.center = vector.Vector(3, data=center)
def intersectCBB(self, oCBB):
tempDistance = self.center - oCBB.center
distanceCenters = tempDistance.magnitude()
distanceRadii = self.radius + oCBB.radius
# Collision happens when the distance between the two centers is less than the sum of the radii
state = distanceCenters < distanceRadii
# Calculate the depth penetration
depthPenetration = distanceCenters - (distanceRadii)
return CollisionData(state, tempDistance, depthPenetration)
def getCenter(self):
return self.center
def getRadius(self):
return self.radius
| from ed2d.physics.collisiondata import*
from ed2d.glmath import vector
# Circle Bounding Box
class CBB(object):
def __init__(self, radius, center):
'''Creates a circle bounding box object to be used with the physics engine. Takes in a float for the radius and an array for the center.'''
self.radius = radius
self.center = vector.Vector(3, data=center)
def intersectCBB(self, oCBB):
tempDistance = self.center - oCBB.center
distanceCenters = tempDistance.magnitude()
distanceRadii = self.radius + oCBB.radius
# Collision happens when the distance between the two centers is less than the sum of the radii
state = distanceCenters < distanceRadii
# Calculate the depth penetration
depthPenetration = distanceCenters - (distanceRadii)
return CollisionData(state, tempDistance, depthPenetration)
def getCenter(self):
return center
def getRadius(self):
return radius | bsd-2-clause | Python |
1451d199833b405929105f939f57b4d4faf50fa2 | Use new py.test to generate vector-vs-scalar tests | ozialien/python-skyfield,GuidoBR/python-skyfield,exoanalytic/python-skyfield,skyfielders/python-skyfield,GuidoBR/python-skyfield,ozialien/python-skyfield,skyfielders/python-skyfield,exoanalytic/python-skyfield | skyfield/tests/test_vectorization.py | skyfield/tests/test_vectorization.py | """Determine whether arrays work as well as individual inputs."""
from itertools import izip
from numpy import array
from ..constants import T0
from ..planets import earth, mars
from ..timescales import JulianDate, julian_date
dates = array([
julian_date(1969, 7, 20, 20. + 18. / 60.),
T0,
julian_date(2012, 12, 21),
julian_date(2027, 8, 2, 10. + 7. / 60. + 50. / 3600.),
])
deltas = array([39.707, 63.8285, 66.8779, 72.])
def compute_planetary_position(ut1, delta_t):
jd = JulianDate(ut1=ut1, delta_t=delta_t)
yield jd.ut1
yield jd.tt
yield jd.tdb
observer = earth(jd)
yield observer.position
yield observer.velocity
yield observer.jd.ut1
yield observer.jd.tt
yield observer.jd.tdb
astrometric = observer.observe(mars)
yield astrometric.position
yield astrometric.velocity
ra, dec, distance = astrometric.radec()
yield ra.hours()
yield dec.degrees()
yield distance
def generate_comparisons(computation):
"""Set up comparisons between vector and scalar outputs of `computation`.
The `computation` should be a generator that accepts both vector and
scalar input, and that yields a series of values whose shape
corresponds to its input's shape.
"""
vector_results = list(computation(dates, deltas))
for i, (date, delta_t) in enumerate(zip(dates, deltas)):
g = computation(date, delta_t)
for vector, scalar in izip(vector_results, g):
f = g.gi_frame
location = '{}:{}'.format(f.f_code.co_filename, f.f_lineno)
yield location, vector, i, scalar
def pytest_generate_tests(metafunc):
if 'vector_vs_scalar' in metafunc.fixturenames:
metafunc.parametrize('vector_vs_scalar',
list(generate_comparisons(compute_planetary_position))
)
def test_vector_vs_scalar(vector_vs_scalar):
location, vector, i, scalar = vector_vs_scalar
assert (vector.T[i] == scalar).all(), (
'{}:\n {}[{}] != {}'.format(location, vector.T, i, scalar))
| """Determine whether arrays work as well as individual inputs."""
import pytest
from numpy import array
from ..constants import T0
from ..planets import earth, mars
from ..timescales import JulianDate, julian_date
dates = array([
julian_date(1969, 7, 20, 20. + 18. / 60.),
T0,
julian_date(2012, 12, 21),
julian_date(2027, 8, 2, 10. + 7. / 60. + 50. / 3600.),
])
deltas = array([39.707, 63.8285, 66.8779, 72.])
def generate_planetary_position(ut1, delta_t):
jd = JulianDate(ut1=ut1, delta_t=delta_t)
yield jd.ut1
yield jd.tt
yield jd.tdb
observer = earth(jd)
yield observer.position
yield observer.velocity
yield observer.jd.ut1
yield observer.jd.tt
yield observer.jd.tdb
astrometric = observer.observe(mars)
yield astrometric.position
yield astrometric.velocity
ra, dec, distance = astrometric.radec()
yield ra.hours()
yield dec.degrees()
yield distance
@pytest.fixture(params=[generate_planetary_position])
def gradual_computation(request):
return request.param
def test_gradual_computations(gradual_computation):
vector_results = list(gradual_computation(dates, deltas))
correct_length = len(dates)
for vector_value in vector_results:
assert vector_value.shape[-1] == correct_length
for i, (date, delta) in enumerate(zip(dates, deltas)):
scalar_results = list(gradual_computation(date, delta))
for vector_value, scalar_value in zip(vector_results, scalar_results):
assert (vector_value.T[i] == scalar_value).all()
| mit | Python |
0ea687403b01dbc6268c15550f0caf45a54e9106 | Fix Joust picking with multiple minions in the deck | NightKev/fireplace,jleclanche/fireplace,amw2104/fireplace,Ragowit/fireplace,Ragowit/fireplace,smallnamespace/fireplace,beheh/fireplace,smallnamespace/fireplace,amw2104/fireplace | fireplace/cards/utils.py | fireplace/cards/utils.py | import random
from hearthstone.enums import CardClass, CardType, GameTag, Race, Rarity
from ..actions import *
from ..aura import Refresh
from ..dsl import *
from ..events import *
from ..utils import custom_card
# For buffs which are removed when the card is moved to play (eg. cost buffs)
# This needs to be Summon, because of Summon from the hand
REMOVED_IN_PLAY = Summon(PLAYER, OWNER).after(Destroy(SELF))
RandomCard = lambda **kw: RandomCardPicker(**kw)
RandomCollectible = lambda **kw: RandomCardPicker(collectible=True, **kw)
RandomMinion = lambda **kw: RandomCollectible(type=CardType.MINION, **kw)
RandomBeast = lambda **kw: RandomMinion(race=Race.BEAST)
RandomMurloc = lambda **kw: RandomMinion(race=Race.MURLOC)
RandomSpell = lambda **kw: RandomCollectible(type=CardType.SPELL, **kw)
RandomTotem = lambda **kw: RandomCardPicker(race=Race.TOTEM)
RandomWeapon = lambda **kw: RandomCollectible(type=CardType.WEAPON, **kw)
RandomSparePart = lambda **kw: RandomCardPicker(spare_part=True, **kw)
class RandomEntourage(RandomCardPicker):
def pick(self, source):
self._cards = source.entourage
return super().pick(source)
class RandomID(RandomCardPicker):
def pick(self, source):
self._cards = self.args
return super().pick(source)
Freeze = lambda target: SetTag(target, (GameTag.FROZEN, ))
Stealth = lambda target: SetTag(target, (GameTag.STEALTH, ))
Unstealth = lambda target: UnsetTag(target, (GameTag.STEALTH, ))
Taunt = lambda target: SetTag(target, (GameTag.TAUNT, ))
GiveCharge = lambda target: SetTag(target, (GameTag.CHARGE, ))
GiveDivineShield = lambda target: SetTag(target, (GameTag.DIVINE_SHIELD, ))
GiveWindfury = lambda target: SetTag(target, (GameTag.WINDFURY, ))
CLEAVE = Hit(TARGET_ADJACENT, Attr(SELF, GameTag.ATK))
COINFLIP = RandomNumber(0, 1) == 1
EMPTY_HAND = Count(FRIENDLY_HAND) == 0
HOLDING_DRAGON = Find(FRIENDLY_HAND + DRAGON)
JOUST = Joust(RANDOM(FRIENDLY_DECK + MINION), RANDOM(ENEMY_DECK + MINION))
def SET(amt):
return lambda self, i: amt
# Buff helper
def buff(atk=0, health=0, **kwargs):
buff_tags = {}
if atk:
buff_tags[GameTag.ATK] = atk
if health:
buff_tags[GameTag.HEALTH] = health
for tag in GameTag:
if tag.name.lower() in kwargs.copy():
buff_tags[tag] = kwargs.pop(tag.name.lower())
if "immune" in kwargs:
value = kwargs.pop("immune")
buff_tags[GameTag.CANT_BE_DAMAGED] = value
buff_tags[GameTag.CANT_BE_TARGETED_BY_OPPONENTS] = value
if kwargs:
raise NotImplementedError(kwargs)
class Buff:
tags = buff_tags
return Buff
| import random
from hearthstone.enums import CardClass, CardType, GameTag, Race, Rarity
from ..actions import *
from ..aura import Refresh
from ..dsl import *
from ..events import *
from ..utils import custom_card
# For buffs which are removed when the card is moved to play (eg. cost buffs)
# This needs to be Summon, because of Summon from the hand
REMOVED_IN_PLAY = Summon(PLAYER, OWNER).after(Destroy(SELF))
RandomCard = lambda **kw: RandomCardPicker(**kw)
RandomCollectible = lambda **kw: RandomCardPicker(collectible=True, **kw)
RandomMinion = lambda **kw: RandomCollectible(type=CardType.MINION, **kw)
RandomBeast = lambda **kw: RandomMinion(race=Race.BEAST)
RandomMurloc = lambda **kw: RandomMinion(race=Race.MURLOC)
RandomSpell = lambda **kw: RandomCollectible(type=CardType.SPELL, **kw)
RandomTotem = lambda **kw: RandomCardPicker(race=Race.TOTEM)
RandomWeapon = lambda **kw: RandomCollectible(type=CardType.WEAPON, **kw)
RandomSparePart = lambda **kw: RandomCardPicker(spare_part=True, **kw)
class RandomEntourage(RandomCardPicker):
def pick(self, source):
self._cards = source.entourage
return super().pick(source)
class RandomID(RandomCardPicker):
def pick(self, source):
self._cards = self.args
return super().pick(source)
Freeze = lambda target: SetTag(target, (GameTag.FROZEN, ))
Stealth = lambda target: SetTag(target, (GameTag.STEALTH, ))
Unstealth = lambda target: UnsetTag(target, (GameTag.STEALTH, ))
Taunt = lambda target: SetTag(target, (GameTag.TAUNT, ))
GiveCharge = lambda target: SetTag(target, (GameTag.CHARGE, ))
GiveDivineShield = lambda target: SetTag(target, (GameTag.DIVINE_SHIELD, ))
GiveWindfury = lambda target: SetTag(target, (GameTag.WINDFURY, ))
CLEAVE = Hit(TARGET_ADJACENT, Attr(SELF, GameTag.ATK))
COINFLIP = RandomNumber(0, 1) == 1
EMPTY_HAND = Count(FRIENDLY_HAND) == 0
HOLDING_DRAGON = Find(FRIENDLY_HAND + DRAGON)
JOUST = Joust(FRIENDLY_DECK + MINION, ENEMY_DECK + MINION)
def SET(amt):
return lambda self, i: amt
# Buff helper
def buff(atk=0, health=0, **kwargs):
buff_tags = {}
if atk:
buff_tags[GameTag.ATK] = atk
if health:
buff_tags[GameTag.HEALTH] = health
for tag in GameTag:
if tag.name.lower() in kwargs.copy():
buff_tags[tag] = kwargs.pop(tag.name.lower())
if "immune" in kwargs:
value = kwargs.pop("immune")
buff_tags[GameTag.CANT_BE_DAMAGED] = value
buff_tags[GameTag.CANT_BE_TARGETED_BY_OPPONENTS] = value
if kwargs:
raise NotImplementedError(kwargs)
class Buff:
tags = buff_tags
return Buff
| agpl-3.0 | Python |
42462135cec040d17f8ce4488c1ee6bb3b59f406 | Bump mono-basic to @mono/mono-basic/b8011b2f274606323da0927214ed98336465f467 | mono/bockbuild,mono/bockbuild | packages/mono-basic.py | packages/mono-basic.py | GitHubTarballPackage ('mono', 'mono-basic', '4.0.1', 'b8011b2f274606323da0927214ed98336465f467',
configure = './configure --prefix="%{prefix}"',
override_properties = { 'make': 'make' }
)
| GitHubTarballPackage ('mono', 'mono-basic', '3.0', '0d0440feccf648759f7316f93ad09b1e992ea13a',
configure = './configure --prefix="%{prefix}"',
override_properties = { 'make': 'make' }
)
| mit | Python |
3ddddbd24bb37c30df80233ec4c70c38b6c29e82 | Update leaflet request to be over https | EMSTrack/WebServerAndClient,EMSTrack/WebServerAndClient,EMSTrack/WebServerAndClient | emstrack/forms.py | emstrack/forms.py | from django.contrib.gis.forms import widgets
class LeafletPointWidget(widgets.BaseGeometryWidget):
template_name = 'leaflet/leaflet.html'
class Media:
css = {
'all': ('https://cdnjs.cloudflare.com/ajax/libs/leaflet/v0.7.7/leaflet.css',
'leaflet/css/location_form.css',
'leaflet/css/LeafletWidget.css')
}
js = (
'https://cdnjs.cloudflare.com/ajax/libs/leaflet/v0.7.7/leaflet.js',
'leaflet/js/LeafletWidget.js'
)
def render(self, name, value, attrs=None):
# add point
if value:
attrs.update({ 'point': { 'x': value.x,
'y': value.y,
'z': value.z,
'srid': value.srid }
})
return super().render(name, value, attrs)
| from django.contrib.gis.forms import widgets
class LeafletPointWidget(widgets.BaseGeometryWidget):
template_name = 'leaflet/leaflet.html'
class Media:
css = {
'all': ('https://cdn.leafletjs.com/leaflet/v0.7.7/leaflet.css',
'leaflet/css/location_form.css',
'leaflet/css/LeafletWidget.css')
}
js = (
'https://cdn.leafletjs.com/leaflet/v0.7.7/leaflet.js',
'leaflet/js/LeafletWidget.js'
)
def render(self, name, value, attrs=None):
# add point
if value:
attrs.update({ 'point': { 'x': value.x,
'y': value.y,
'z': value.z,
'srid': value.srid }
})
return super().render(name, value, attrs)
| bsd-3-clause | Python |
612698f37ab726fb77aa1f284c97d01d1d726abf | Bump version | ClemsonSoCUnix/django-anyvcs,ClemsonSoCUnix/django-anyvcs | django_anyvcs/__init__.py | django_anyvcs/__init__.py | # Copyright (c) 2014-2016, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = '2.5.0'
| # Copyright (c) 2014-2016, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = '2.4.0'
| bsd-3-clause | Python |
9674a0869c2a333f74178e305677259e7ac379c3 | Make the Websocket's connection header value case-insensitive | liorvh/mitmproxy,ccccccccccc/mitmproxy,dwfreed/mitmproxy,mhils/mitmproxy,ryoqun/mitmproxy,Kriechi/mitmproxy,azureplus/mitmproxy,dufferzafar/mitmproxy,ikoz/mitmproxy,jpic/mitmproxy,tfeagle/mitmproxy,rauburtin/mitmproxy,MatthewShao/mitmproxy,pombredanne/mitmproxy,laurmurclar/mitmproxy,StevenVanAcker/mitmproxy,fimad/mitmproxy,ADemonisis/mitmproxy,jvillacorta/mitmproxy,tdickers/mitmproxy,rauburtin/mitmproxy,Kriechi/mitmproxy,azureplus/mitmproxy,dufferzafar/mitmproxy,pombredanne/mitmproxy,noikiy/mitmproxy,jvillacorta/mitmproxy,onlywade/mitmproxy,sethp-jive/mitmproxy,cortesi/mitmproxy,dweinstein/mitmproxy,azureplus/mitmproxy,dufferzafar/mitmproxy,Fuzion24/mitmproxy,gzzhanghao/mitmproxy,byt3bl33d3r/mitmproxy,cortesi/mitmproxy,owers19856/mitmproxy,tekii/mitmproxy,macmantrl/mitmproxy,guiquanz/mitmproxy,gzzhanghao/mitmproxy,mitmproxy/mitmproxy,noikiy/mitmproxy,jvillacorta/mitmproxy,xbzbing/mitmproxy,ujjwal96/mitmproxy,ddworken/mitmproxy,vhaupert/mitmproxy,tfeagle/mitmproxy,jpic/mitmproxy,fimad/mitmproxy,Kriechi/mitmproxy,dxq-git/mitmproxy,xbzbing/mitmproxy,claimsmall/mitmproxy,dwfreed/mitmproxy,pombredanne/mitmproxy,Endika/mitmproxy,mosajjal/mitmproxy,owers19856/mitmproxy,tekii/mitmproxy,ADemonisis/mitmproxy,sethp-jive/mitmproxy,zbuc/mitmproxy,StevenVanAcker/mitmproxy,dweinstein/mitmproxy,mhils/mitmproxy,liorvh/mitmproxy,ParthGanatra/mitmproxy,mitmproxy/mitmproxy,liorvh/mitmproxy,ccccccccccc/mitmproxy,devasia1000/mitmproxy,ikoz/mitmproxy,mitmproxy/mitmproxy,jpic/mitmproxy,tdickers/mitmproxy,mosajjal/mitmproxy,mhils/mitmproxy,dweinstein/mitmproxy,fimad/mitmproxy,dxq-git/mitmproxy,mhils/mitmproxy,dxq-git/mitmproxy,mitmproxy/mitmproxy,tdickers/mitmproxy,legendtang/mitmproxy,dxq-git/mitmproxy,ddworken/mitmproxy,ADemonisis/mitmproxy,ZeYt/mitmproxy,elitest/mitmproxy,mhils/mitmproxy,dwfreed/mitmproxy,onlywade/mitmproxy,scriptmediala/mitmproxy,dwfreed/mitmproxy,zlorb/mitmproxy,mitmproxy/mitmproxy,ryoqun/mitmproxy,syjzwjj/mitmproxy,Fuzion24/mitmproxy,xbzbing/mitmproxy,syjzwjj/mitmproxy,Endika/mitmproxy,ccccccccccc/mitmproxy,xbzbing/mitmproxy,syjzwjj/mitmproxy,Endika/mitmproxy,onlywade/mitmproxy,sethp-jive/mitmproxy,xaxa89/mitmproxy,xaxa89/mitmproxy,jpic/mitmproxy,guiquanz/mitmproxy,rauburtin/mitmproxy,jvillacorta/mitmproxy,owers19856/mitmproxy,ZeYt/mitmproxy,ZeYt/mitmproxy,ryoqun/mitmproxy,devasia1000/mitmproxy,claimsmall/mitmproxy,laurmurclar/mitmproxy,MatthewShao/mitmproxy,noikiy/mitmproxy,onlywade/mitmproxy,macmantrl/mitmproxy,scriptmediala/mitmproxy,mitmproxy/mitmproxy,zlorb/mitmproxy,mhils/mitmproxy,sethp-jive/mitmproxy,dxq-git/mitmproxy,MatthewShao/mitmproxy,mitmproxy/mitmproxy,tdickers/mitmproxy,legendtang/mitmproxy,laurmurclar/mitmproxy,macmantrl/mitmproxy,tfeagle/mitmproxy,byt3bl33d3r/mitmproxy,ujjwal96/mitmproxy,Fuzion24/mitmproxy,owers19856/mitmproxy,ikoz/mitmproxy,mosajjal/mitmproxy,vhaupert/mitmproxy,zbuc/mitmproxy,onlywade/mitmproxy,0xwindows/InfoLeak,mitmproxy/mitmproxy,inscriptionweb/mitmproxy,ParthGanatra/mitmproxy,0xwindows/InfoLeak,guiquanz/mitmproxy,byt3bl33d3r/mitmproxy,meizhoubao/mitmproxy,ryoqun/mitmproxy,legendtang/mitmproxy,tdickers/mitmproxy,laurmurclar/mitmproxy,cortesi/mitmproxy,liorvh/mitmproxy,jvillacorta/mitmproxy,dwfreed/mitmproxy,gzzhanghao/mitmproxy,scriptmediala/mitmproxy,dweinstein/mitmproxy,meizhoubao/mitmproxy,rauburtin/mitmproxy,ccccccccccc/mitmproxy,tekii/mitmproxy,bazzinotti/mitmproxy,zbuc/mitmproxy,pombredanne/mitmproxy,claimsmall/mitmproxy,ddworken/mitmproxy,xaxa89/mitmproxy,fimad/mitmproxy,dufferzafar/mitmproxy,ADemonisis/mitmproxy,MatthewShao/mitmproxy | examples/ignore_websocket.py | examples/ignore_websocket.py | # This script makes mitmproxy switch to passthrough mode for all HTTP
# responses with "Connection: Upgrade" header. This is useful to make
# WebSockets work in untrusted environments.
#
# Note: Chrome (and possibly other browsers), when explicitly configured
# to use a proxy (i.e. mitmproxy's regular mode), send a CONNECT request
# to the proxy before they initiate the websocket connection.
# To make WebSockets work in these cases, supply
# `--ignore :80$` as an additional parameter.
# (see http://mitmproxy.org/doc/features/passthrough.html)
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
value = flow.response.headers.get_first("Connection", None)
if value and value.upper() == "UPGRADE":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL) | # This script makes mitmproxy switch to passthrough mode for all HTTP
# responses with "Connection: Upgrade" header. This is useful to make
# WebSockets work in untrusted environments.
#
# Note: Chrome (and possibly other browsers), when explicitly configured
# to use a proxy (i.e. mitmproxy's regular mode), send a CONNECT request
# to the proxy before they initiate the websocket connection.
# To make WebSockets work in these cases, supply
# `--ignore :80$` as an additional parameter.
# (see http://mitmproxy.org/doc/features/passthrough.html)
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
if flow.response.headers.get_first("Connection", None) == "Upgrade":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL) | mit | Python |
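
The record above makes mitmproxy's check of the Connection header case-insensitive by comparing against value.upper(). Reduced to its core, and with a plain dict standing in for mitmproxy's real headers object (an assumption for illustration only), the comparison is:

def connection_is_upgrade(headers):
    value = headers.get("Connection")
    # Accept "Upgrade", "upgrade", "UPGRADE", ...: HTTP tokens are case-insensitive.
    return value is not None and value.upper() == "UPGRADE"

assert connection_is_upgrade({"Connection": "upgrade"})
assert connection_is_upgrade({"Connection": "Upgrade"})
assert not connection_is_upgrade({"Connection": "keep-alive"})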
7c90e73d3ffa2a8209a751b01c7cd8bd3122b13b | Use actual feature values instead of binary for making pivot predictions | tmills/uda,tmills/uda | scripts/build_pivot_training_data.py | scripts/build_pivot_training_data.py | #!/usr/bin/env python
from os.path import join, dirname
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
import numpy as np
import scipy.sparse
import sys
from uda_common import read_feature_groups
def main(args):
if len(args) < 3:
sys.stderr.write("Three required arguments: <pivot file> <data file> <output directory>\n")
sys.exit(-1)
pivot_file = args[0]
model_dir = dirname(pivot_file)
group_name = join(model_dir, 'reduced-feature-groups.txt')
group_map = read_feature_groups(group_name)
domain_inds = group_map['Domain']
out_dir = args[2]
sys.stderr.write("Reading in data files\n")
all_X, all_y = load_svmlight_file(args[1])
all_X = all_X.tolil()
## Zero out domain-indicator variables (not needed for this step)
all_X[:,domain_inds[0]] = 0
all_X[:,domain_inds[1]] = 0
num_instances, num_feats = all_X.shape
sys.stderr.write("Reading in pivot files and creating pivot labels dictionary\n")
## Read pivots file into dictionary:
pivots = []
pivot_labels = {}
for line in open(pivot_file, 'r'):
pivot = int(line.strip())
pivots.append(pivot)
pivot_labels[pivot] = np.zeros((num_instances,1))
pivot_labels[pivot] += np.round(all_X[:,pivot] > 0).astype('int').toarray()
sys.stderr.write("Creating pivot matrices for each feature group\n")
#ind_groups = [None] * num_feats
for group_key,group_inds in group_map.items():
group_inds = np.array(group_inds)
group_X = scipy.sparse.lil_matrix(np.zeros((num_instances, num_feats)))
group_X += all_X
group_X[:, group_inds] = 0
group_X[:, pivots] = 0
for group_ind in group_inds:
if group_ind in pivots:
out_file = join(out_dir, 'pivot_%s-training.liblinear' % group_ind)
print('Writing file %s ' % out_file)
sys.stderr.write('.')
dump_svmlight_file(group_X, pivot_labels[group_ind][:,0], out_file)
sys.stderr.write('\n')
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| #!/usr/bin/env python
from os.path import join, dirname
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
import numpy as np
import scipy.sparse
import sys
from uda_common import read_feature_groups
def main(args):
if len(args) < 3:
sys.stderr.write("Three required arguments: <pivot file> <data file> <output directory>\n")
sys.exit(-1)
pivot_file = args[0]
model_dir = dirname(pivot_file)
group_name = join(model_dir, 'reduced-feature-groups.txt')
group_map = read_feature_groups(group_name)
domain_inds = group_map['Domain']
out_dir = args[2]
sys.stderr.write("Reading in data files\n")
all_X, all_y = load_svmlight_file(args[1])
all_X = all_X.tolil()
## Zero out domain-indicator variables (not needed for this step)
all_X[:,domain_inds[0]] = 0
all_X[:,domain_inds[1]] = 0
num_instances, num_feats = all_X.shape
sys.stderr.write("Reading in pivot files and creating pivot labels dictionary\n")
## Read pivots file into dictionary:
pivots = []
pivot_labels = {}
for line in open(pivot_file, 'r'):
pivot = int(line.strip())
pivots.append(pivot)
pivot_labels[pivot] = np.zeros((num_instances,1))
pivot_labels[pivot] += np.round(all_X[:,pivot] > 0).astype('int').toarray()
sys.stderr.write("Creating pivot matrices for each feature group\n")
#ind_groups = [None] * num_feats
for group_key,group_inds in group_map.items():
group_inds = np.array(group_inds)
group_X = scipy.sparse.lil_matrix(np.zeros((num_instances, num_feats)))
group_X += (all_X > 0).astype('int')
group_X[:, group_inds] = 0
group_X[:, pivots] = 0
for group_ind in group_inds:
if group_ind in pivots:
out_file = join(out_dir, 'pivot_%s-training.liblinear' % group_ind)
print('Writing file %s ' % out_file)
sys.stderr.write('.')
dump_svmlight_file(group_X, pivot_labels[group_ind][:,0], out_file)
sys.stderr.write('\n')
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| apache-2.0 | Python |
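
The change above drops the (all_X > 0).astype('int') binarization, so the pivot predictors see real feature values instead of presence/absence indicators. A minimal sketch of the difference, assuming numpy and scipy are available:

import numpy as np
import scipy.sparse

X = scipy.sparse.csr_matrix(np.array([[0.0, 2.5], [1.0, 0.0]]))
binary = (X > 0).astype('int')  # old behaviour: 1 wherever a feature fired
actual = X.copy()               # new behaviour: keep the actual values
print(binary.toarray())         # [[0 1] [1 0]]
print(actual.toarray())         # [[0.  2.5] [1.  0. ]]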
d981caff4b6710a3779f25fd8955fd111d9ea0cf | fix export error in dj 19 | joshourisman/django-tablib,joshourisman/django-tablib | django_tablib/datasets.py | django_tablib/datasets.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from .base import BaseDataset
class SimpleDataset(BaseDataset):
def __init__(self, queryset, headers=None, encoding='utf-8'):
self.queryset = queryset
self.encoding = encoding
if headers is None:
# We'll set the queryset to include all fields including calculated
# aggregates using the same names as a values() queryset:
v_qs = queryset.values()
headers = []
headers.extend(v_qs.query.extra_select)
try:
field_names = v_qs.query.values_select
except AttributeError:
# django < 1.9
field_names = v_qs.field_names
headers.extend(field_names)
headers.extend(v_qs.query.aggregate_select)
self.header_list = headers
self.attr_list = headers
elif isinstance(headers, dict):
self.header_dict = headers
self.header_list = self.header_dict.keys()
self.attr_list = self.header_dict.values()
elif isinstance(headers, (tuple, list)):
self.header_list = headers
self.attr_list = headers
super(SimpleDataset, self).__init__()
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from .base import BaseDataset
class SimpleDataset(BaseDataset):
def __init__(self, queryset, headers=None, encoding='utf-8'):
self.queryset = queryset
self.encoding = encoding
if headers is None:
# We'll set the queryset to include all fields including calculated
# aggregates using the same names as a values() queryset:
v_qs = queryset.values()
headers = []
headers.extend(v_qs.query.extra_select)
headers.extend(v_qs.field_names)
headers.extend(v_qs.query.aggregate_select)
self.header_list = headers
self.attr_list = headers
elif isinstance(headers, dict):
self.header_dict = headers
self.header_list = self.header_dict.keys()
self.attr_list = self.header_dict.values()
elif isinstance(headers, (tuple, list)):
self.header_list = headers
self.attr_list = headers
super(SimpleDataset, self).__init__()
| mit | Python |
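
The try/except added above is a standard version-compatibility shim: prefer the attribute that newer Django exposes, and fall back when it raises AttributeError on older releases. The same pattern in isolation (the wrapper function is illustrative, not part of the package):

def get_field_names(values_qs):
    try:
        return values_qs.query.values_select  # Django >= 1.9
    except AttributeError:
        return values_qs.field_names          # Django < 1.9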
4c500ce1995da97861e37647b61efaf14c6b08d0 | Load saved RDD | Nathx/parental_advisory_ml,Nathx/parental_advisory_ml,Nathx/subs_check,Nathx/subs_check | code/main.py | code/main.py | from spark_model import SparkModel
import socket
from document import Document
from pyspark import SparkContext, SparkConf
from boto.s3.connection import S3Connection
from pyspark import SparkConf, SparkContext
import json
import sys
from datetime import datetime
def log_results(saved, model_type, start_time, end_time, score, n_subs, clean_n_subs):
with open('../logs/log.txt', 'a') as f:
f.write('-'*40+'\n')
duration = str(end_time - start_time).split('.')[0]
f.write('Model: %s\n' % model_type)
f.write('Number of subs: %s\n' % n_subs)
f.write('Percentage subs parsed: %.1f%%\n' % (100*float(clean_n_subs) / n_subs))
f.write('Time to run: %s\n' % duration)
f.write('Accuracy: %.2f\n' % score)
f.write('Saved.'*saved)
if __name__ == '__main__':
with open('/root/.aws/credentials.json') as f:
CREDENTIALS = json.load(f)
# sc = SparkContext()
APP_NAME = 'spark_model'
conf = (SparkConf()
.setAppName(APP_NAME)
.set("spark.executor.cores", 4)
.setMaster('spark://ec2-54-173-173-223.compute-1.amazonaws.com:7077'))
sc = SparkContext(conf=conf, pyFiles=['document.py'])
conn = S3Connection(CREDENTIALS['ACCESS_KEY'], CREDENTIALS['SECRET_ACCESS_KEY'])
model_type = sys.argv[1] if len(sys.argv) > 1 else 'naive_bayes'
start_time = datetime.now()
sm = SparkModel(sc, conn, model_type=model_type)
sm.preprocess('rdd3.pkl')
subs, clean_subs = sm.n_subs, len(sm.labeled_paths)
sm.train()
score = sm.eval_score()
saved = True
try:
sm.labeled_points.saveAsPickleFile('labeled_points.pkl')
except:
saved = False
end_time = datetime.now()
log_results(saved, model_type, start_time, end_time, score, subs, clean_subs)
sc.stop()
| from spark_model import SparkModel
import socket
from document import Document
from pyspark import SparkContext, SparkConf
from boto.s3.connection import S3Connection
from pyspark import SparkConf, SparkContext
import json
import sys
from datetime import datetime
def log_results(model_type, start_time, end_time, score, n_subs, clean_n_subs):
with open('../logs/log.txt', 'a') as f:
f.write('-'*40+'\n')
duration = str(end_time - start_time).split('.')[0]
f.write('Model: %s\n' % model_type)
f.write('Number of subs: %s\n' % n_subs)
f.write('Percentage subs parsed: %.1f%%\n' % (100*float(clean_n_subs) / n_subs))
f.write('Time to run: %s\n' % duration)
f.write('Accuracy: %.2f\n' % score)
if __name__ == '__main__':
with open('/root/.aws/credentials.json') as f:
CREDENTIALS = json.load(f)
# sc = SparkContext()
APP_NAME = 'spark_model'
conf = (SparkConf()
.setAppName(APP_NAME)
.set("spark.executor.cores", 4)
.setMaster('spark://ec2-54-173-173-223.compute-1.amazonaws.com:7077'))
sc = SparkContext(conf=conf, pyFiles=['document.py'])
conn = S3Connection(CREDENTIALS['ACCESS_KEY'], CREDENTIALS['SECRET_ACCESS_KEY'])
model_type = sys.argv[1] if len(sys.argv) > 1 else 'naive_bayes'
start_time = datetime.now()
sm = SparkModel(sc, conn, model_type=model_type)
sm.preprocess()
subs, clean_subs = sm.n_subs, len(sm.labeled_paths)
sm.train()
score = sm.eval_score()
sm.RDD.saveAsPickleFile('rdd.pkl')
end_time = datetime.now()
log_results(model_type, start_time, end_time, score, subs, clean_subs)
sc.stop()
| mit | Python |
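
The new code above wraps the save in try/except and records the outcome in a flag, so the log line only says "Saved." when persisting the RDD actually worked. The pattern reduced to a sketch (the RDD object and path are placeholders):

def try_save(rdd, path):
    try:
        rdd.saveAsPickleFile(path)
        return True
    except Exception:
        return False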
9ec2382de5a3d5377fee03a6151e5afbf36f8e71 | add doc link | joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle,joshhartigan/semicircle | code/mode.py | code/mode.py | # an in-depth rundown of this program
# can be found at:
# https://github.com/joshhartigan/learn-programming/blob/master/Most%20Frequent%20Integer.md
def mode(array):
count = {}
for elem in array:
try:
count[elem] += 1
except (KeyError):
count[elem] = 1
# get max count
maximum = 0
modeKey = 0
for key in count.keys():
if count[key] > maximum:
maximum = count[key]
modeKey = key
return modeKey
| def mode(array):
count = {}
for elem in array:
try:
count[elem] += 1
except (KeyError):
count[elem] = 1
# get max count
maximum = 0
modeKey = 0
for key in count.keys():
if count[key] > maximum:
maximum = count[key]
modeKey = key
return modeKey
| bsd-2-clause | Python |
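
The counting loop in both versions of mode.py is the textbook dictionary histogram. For reference, the standard library expresses the same computation in one line; in CPython 3.7+ ties resolve to the first element seen, much like the loop above:

from collections import Counter

def mode(array):
    return Counter(array).most_common(1)[0][0]

assert mode([3, 1, 3, 2, 3]) == 3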
ae2284fa85e1ef7be43792b72480018729b1c2ba | Bump PEP version for __version__ comment | edoburu/django-fluent-blogs,edoburu/django-fluent-blogs | fluent_blogs/__init__.py | fluent_blogs/__init__.py | # following PEP 440
__version__ = "1.0"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a circular import:
# base_models -> models/managers.py -> invoking models/__init__.py -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
| # following PEP 386
__version__ = "1.0"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a circular import:
# base_models -> models/managers.py -> invoking models/__init__.py -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
| apache-2.0 | Python |
ef9cd0033ccfd314592be7987c262a61d0ec2fba | fix thing I apparently never tested; git add light.py | jeremybmerrill/bigappleserialbus,jeremybmerrill/bigappleserialbus,jeremybmerrill/bigappleserialbus,jeremybmerrill/bigappleserialbus | light.py | light.py | import RPi.GPIO as GPIO
class Light:
def __init__(self, pin):
self.pin = pin
self.status = False
GPIO.setup(pin, GPIO.OUT)
def toggle(self):
self.status = not self.status
self.do()
def on(self):
self.status = True
self.do()
def off(self):
self.status = False
self.do()
def do(self):
GPIO.output(self.pin, self.status)
if self.status:
logging.debug("illuminating pin #%(pinNum)d" % {'pinNum': self.pin})
| import RPi.GPIO as GPIO
class Light:
def __init__(self, pin):
self.pin = pin
self.status = False
GPIO.setup(pin, GPIO.OUT)
def toggle(self):
self.status = not self.status
self.do()
def on(self):
self.status = True
self.do()
def off(self):
self.status = False
self.do()
def do(self):
GPIO.output(light.pin, light.status)
if light.status:
logging.debug("illuminating pin #%(pinNum)d" % {'pinNum': light.pin})
| apache-2.0 | Python |
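
The diff above fixes a classic scoping bug: do() referenced the module-level name light instead of the instance, so it only worked while a global called light happened to exist. The corrected shape, in miniature:

class Light:
    def __init__(self, pin):
        self.pin = pin

    def do(self):
        # Must be self.pin; a bare light.pin would resolve to a global.
        return self.pin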
f0d4b430b627fb9e2b18ba3f82c936698fac6430 | Update to version 1.3 | xcgd/account_report_csv | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Account Report CSV, for OpenERP
# Copyright (C) 2013 XCG Consulting (http://odoo.consulting)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Account Report CSV",
"version": "1.3",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """
Export reports as CSV:
- General Ledger
- Trial Balance
Provides the usual filters (by account, period, currency, etc).
""",
"depends": [
'account_report_webkit',
'analytic_structure',
],
"data": [
'wizard/general_ledger_csv_wizard_view.xml',
'wizard/trial_balance_csv_wizard_view.xml',
'csv_menu.xml',
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Account Report CSV, for OpenERP
# Copyright (C) 2013 XCG Consulting (http://odoo.consulting)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Account Report CSV",
"version": "1.2",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """
Export reports as CSV:
- General Ledger
- Trial Balance
Provides the usual filters (by account, period, currency, etc).
""",
"depends": [
'account_report_webkit',
'analytic_structure',
],
"data": [
'wizard/general_ledger_csv_wizard_view.xml',
'wizard/trial_balance_csv_wizard_view.xml',
'csv_menu.xml',
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
| agpl-3.0 | Python |
7b176d1e775ddec384a76d6de9c121e114a8738e | load ACL | xcgd/analytic_structure | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Account Analytic Online, for OpenERP
# Copyright (C) 2013 XCG Consulting (www.xcg-consulting.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Analytic Structure",
"version" : "0.1",
"author" : "XCG Consulting",
"category": 'Dependency',
"description": """
This module allows to use several analytic dimensions through a structure related
to an object model.
==================================================================================
""",
'website': 'http://www.openerp-experts.com',
"depends" : ['base'],
"data": [
'security/ir.model.access.csv',
'analytic_dimension.xml',
],
#'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Account Analytic Online, for OpenERP
# Copyright (C) 2013 XCG Consulting (www.xcg-consulting.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Analytic Structure",
"version" : "0.1",
"author" : "XCG Consulting",
"category": 'Dependency',
"description": """
This module allows to use several analytic dimensions through a structure related
to an object model.
==================================================================================
""",
'website': 'http://www.openerp-experts.com',
"depends" : ['base'],
"data": [
'analytic_dimension.xml',
],
#'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
08fddbdc0ac70a549bac82131771218107186def | add discription | kholioeg/account_discount | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
{
'name': "Account Discount",
'summary': """
Use Tax model for discounts as well""",
'description': """
Odoo OpenERP Account Discount from Tax
This module adds new concept to use tax model as discount model and print both taxes and discounts separetly.
The steps to perform are very easy:
First you define new tax with negative amount (e.g Name: Discount 10%, Amount: -0.10).
Enable Is Discount Checkbox.
Then add this dicount from the Taxes/Discounts column per invoice line.
This way, you can separate and analyze discounts using different account/analytic account as well.
""",
'author': "Khaled Hamed",
'website': "http://www.grandtk.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Accounting',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'account_discount_view.xml'
],
'installable': True,
'price': 5,
'currency': 'EUR',
}
| # -*- coding: utf-8 -*-
{
'name': "Account Discount",
'summary': """
Apply Discount model to taxes""",
'description': """
The purpose is to apply discount record for the same tax model
""",
'author': "Khaled Hamed",
'website': "http://www.grandtk.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Accounting',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'account_discount_view.xml'
],
'installable': True,
'price': 5,
'currency': 'EUR',
}
| agpl-3.0 | Python |
6d83f2150f7c6177385b9f2d8abbe48cd2979130 | Add staleness to MonthCache Admin display | Kromey/fbxnano,Kromey/akwriters,Kromey/akwriters,Kromey/fbxnano,Kromey/fbxnano,Kromey/akwriters,Kromey/fbxnano,Kromey/akwriters | events/admin.py | events/admin.py | from django.contrib import admin
from .models import Calendar,MonthCache
# Register your models here.
@admin.register(Calendar)
class CalendarAdmin(admin.ModelAdmin):
list_display = ('name','remote_id','css_class')
@admin.register(MonthCache)
class MonthCacheAdmin(admin.ModelAdmin):
list_display = ('calendar','month','data_cached_on','is_cache_stale')
| from django.contrib import admin
from .models import Calendar,MonthCache
# Register your models here.
@admin.register(Calendar)
class CalendarAdmin(admin.ModelAdmin):
list_display = ('name','remote_id','css_class')
@admin.register(MonthCache)
class MonthCacheAdmin(admin.ModelAdmin):
list_display = ('calendar','month','data_cached_on')
| mit | Python |
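
Entries in list_display such as 'is_cache_stale' resolve to a model (or ModelAdmin) method. A hedged sketch of what such a method typically looks like; the field name matches the record, but the one-day threshold and the boolean flag are assumptions for illustration:

from datetime import timedelta
from django.db import models
from django.utils import timezone

class MonthCache(models.Model):
    data_cached_on = models.DateTimeField(auto_now=True)

    def is_cache_stale(self):
        return timezone.now() - self.data_cached_on > timedelta(days=1)
    is_cache_stale.boolean = True  # the admin renders True/False as an icon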
d308bbd0200e1b4783bf63cafda03650579b9351 | change help text | DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative | ynr/apps/official_documents/models.py | ynr/apps/official_documents/models.py | import os
from django.db import models
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
DOCUMENT_UPLOADERS_GROUP_NAME = "Document Uploaders"
def document_file_name(instance, filename):
return os.path.join(
"official_documents", str(instance.ballot.ballot_paper_id), filename
)
class OfficialDocument(TimeStampedModel):
NOMINATION_PAPER = "Nomination paper"
DOCUMENT_TYPES = (
(NOMINATION_PAPER, "Nomination paper", "Nomination papers"),
)
document_type = models.CharField(
blank=False,
choices=[(d[0], d[1]) for d in DOCUMENT_TYPES],
max_length=100,
)
uploaded_file = models.FileField(
upload_to=document_file_name, max_length=800
)
ballot = models.ForeignKey(
"candidates.Ballot", null=False, on_delete=models.CASCADE
)
source_url = models.URLField(
help_text="The URL of this document", max_length=1000
)
relevant_pages = models.CharField(
"The pages containing information about this ballot",
max_length=50,
default="",
)
class Meta:
get_latest_by = "modified"
def __str__(self):
return "{} ({})".format(self.ballot.ballot_paper_id, self.source_url)
def get_absolute_url(self):
return reverse(
"ballot_paper_sopn",
kwargs={"ballot_id": self.ballot.ballot_paper_id},
)
@property
def locked(self):
"""
Is this post election locked?
"""
return self.ballot.candidates_locked
@property
def lock_suggested(self):
"""
Is there a suggested lock for this document?
"""
return self.ballot.suggestedpostlock_set.exists()
def get_pages(self):
if self.relevant_pages and not self.relevant_pages == "all":
pages = self.relevant_pages.split(",")
return sorted(int(p) for p in pages)
@property
def first_page_number(self):
if self.get_pages():
return self.get_pages()[0]
@property
def last_page_number(self):
if self.get_pages():
return self.get_pages()[-1]
| import os
from django.db import models
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
DOCUMENT_UPLOADERS_GROUP_NAME = "Document Uploaders"
def document_file_name(instance, filename):
return os.path.join(
"official_documents", str(instance.ballot.ballot_paper_id), filename
)
class OfficialDocument(TimeStampedModel):
NOMINATION_PAPER = "Nomination paper"
DOCUMENT_TYPES = (
(NOMINATION_PAPER, "Nomination paper", "Nomination papers"),
)
document_type = models.CharField(
blank=False,
choices=[(d[0], d[1]) for d in DOCUMENT_TYPES],
max_length=100,
)
uploaded_file = models.FileField(
upload_to=document_file_name, max_length=800
)
ballot = models.ForeignKey(
"candidates.Ballot", null=False, on_delete=models.CASCADE
)
source_url = models.URLField(
help_text="The page that links to this document", max_length=1000
)
relevant_pages = models.CharField(
"The pages containing information about this ballot",
max_length=50,
default="",
)
class Meta:
get_latest_by = "modified"
def __str__(self):
return "{} ({})".format(self.ballot.ballot_paper_id, self.source_url)
def get_absolute_url(self):
return reverse(
"ballot_paper_sopn",
kwargs={"ballot_id": self.ballot.ballot_paper_id},
)
@property
def locked(self):
"""
Is this post election locked?
"""
return self.ballot.candidates_locked
@property
def lock_suggested(self):
"""
Is there a suggested lock for this document?
"""
return self.ballot.suggestedpostlock_set.exists()
def get_pages(self):
if self.relevant_pages and not self.relevant_pages == "all":
pages = self.relevant_pages.split(",")
return sorted(int(p) for p in pages)
@property
def first_page_number(self):
if self.get_pages():
return self.get_pages()[0]
@property
def last_page_number(self):
if self.get_pages():
return self.get_pages()[-1]
| agpl-3.0 | Python |
cedae39716587fcc0459a05e74acc43b190d7457 | split download | ecmwf/cdsapi | example-era5.py | example-era5.py | #!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import cdsapi
c = cdsapi.Client()
r = c.retrieve("reanalysis-era5-pressure-levels",
{
"variable": "temperature",
"pressure_level": "250",
"product_type": "reanalysis",
"date": "2017-12-01/2017-12-31",
"time": "12:00",
"format": "grib"
})
r.download("dowload.grib")
| #!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import cdsapi
c = cdsapi.Client()
r = c.retrieve("reanalysis-era5-pressure-levels",
{
"variable": "temperature",
"pressure_level": "250",
"product_type": "reanalysis",
"date": "2017-12-01/2017-12-31",
"time": "12:00",
"format": "grib"
})
r.download("dowload.grib")
print(r)
r.delete()
| apache-2.0 | Python |
3846907435da720c075ab89579b970da5019b49f | Add Tapastic/AmpleTime | webcomics/dosage,webcomics/dosage | dosagelib/plugins/tapastic.py | dosagelib/plugins/tapastic.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json
import re
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url
self.stripUrl = self.baseUrl + 'episode/%s'
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('AmpleTime', 'Ample-Time'),
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
| # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json
import re
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url
self.stripUrl = self.baseUrl + 'episode/%s'
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
| mit | Python |
62314491b148c51e7c27e13aded283a0622c47f4 | improve h5py config check | IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat | hpat/config.py | hpat/config.py | try:
from .io import _hdf5
import h5py
# TODO: make sure h5py/hdf5 supports parallel
except ImportError:
_has_h5py = False
else:
_has_h5py = True
try:
import pyarrow
except ImportError:
_has_pyarrow = False
else:
_has_pyarrow = True
try:
from . import ros_cpp
except ImportError:
_has_ros = False
else:
_has_ros = True
try:
from . import cv_wrapper
except ImportError:
_has_opencv = False
else:
_has_opencv = True
import hpat.cv_ext
try:
from . import hxe_ext
except ImportError:
_has_xenon = False
else:
_has_xenon = True
import hpat.io.xenon_ext
| try:
from .io import _hdf5
except ImportError:
_has_h5py = False
else:
_has_h5py = True
try:
import pyarrow
except ImportError:
_has_pyarrow = False
else:
_has_pyarrow = True
try:
from . import ros_cpp
except ImportError:
_has_ros = False
else:
_has_ros = True
try:
from . import cv_wrapper
except ImportError:
_has_opencv = False
else:
_has_opencv = True
import hpat.cv_ext
try:
from . import hxe_ext
except ImportError:
_has_xenon = False
else:
_has_xenon = True
import hpat.io.xenon_ext
| bsd-2-clause | Python |
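
Every block in the config above is the same optional-dependency probe: attempt the import, and set a _has_* flag in the else clause so that only an ImportError raised by the probe itself is caught. The core of the pattern:

try:
    import h5py  # noqa: F401, probing availability only
except ImportError:
    _has_h5py = False
else:
    _has_h5py = True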
ae7a5bef1e3ee0216651dc4aeef3abcbab3cf76e | update code | Murillo/Hackerrank-Algorithms | Strings/alternating-characters.py | Strings/alternating-characters.py | # Alternating Characters
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/alternating-characters/problem
# Time complexity: O(n)
def alternatingCharacters(s):
sumChars = 0
for i in range(len(s)):
if i == 0 or tempChar != s[i]:
tempChar = s[i]
continue
if tempChar == s[i]:
sumChars += 1
return sumChars
q = int(input().strip())
for a0 in range(q):
print(alternatingCharacters(input().strip())) | # Alternating Characters
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/alternating-characters/problem
def alternatingCharacters(s):
sumChars = 0
for i in range(len(s)):
if i == 0 or tempChar != s[i]:
tempChar = s[i]
continue
if tempChar == s[i]:
sumChars += 1
return sumChars
q = int(input().strip())
for a0 in range(q):
print(alternatingCharacters(input().strip())) | mit | Python |
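
The loop above counts adjacent equal characters, which is exactly the number of deletions needed to make the string alternate. The same O(n) computation written over character pairs, for comparison:

def alternating_characters(s):
    return sum(1 for a, b in zip(s, s[1:]) if a == b)

assert alternating_characters("AAABBB") == 4
assert alternating_characters("ABAB") == 0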
44fbc835354b7612d5d203250255a323c8759b64 | fix log %(levelname)-8s to align | ClericPy/torequests,ClericPy/torequests | torequests/logs.py | torequests/logs.py | #! coding:utf-8
import logging
dummy_logger = logging.getLogger('torequests.dummy')
main_logger = logging.getLogger('torequests.main')
def init_logger(name='', handler_path_levels=None,
level=logging.INFO, formatter=None,
formatter_str=None, datefmt="%Y-%m-%d %H:%M:%S"):
"""Args:
name = '' or logger obj.
handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]
level : the least level for the logger.
formatter = logging.Formatter(
'%(levelname)-8s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S")
formatter_str = '%(levelname)-8s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s'
custom formatter:
%(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s
"""
levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
if not formatter:
if formatter_str:
formatter_str = formatter_str
else:
formatter_str = '%(levelname)-8s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s'
formatter = logging.Formatter(formatter_str, datefmt=datefmt)
logger = name if isinstance(
name, logging.Logger) else logging.getLogger(str(name))
logger.setLevel(level)
handler_path_levels = handler_path_levels or [['', 'INFO']]
# ---------------------------------------
for each_handler in handler_path_levels:
path, handler_level = each_handler
handler = logging.FileHandler(
path) if path else logging.StreamHandler()
handler.setLevel(levels.get(handler_level.upper(), 1) if isinstance(
handler_level, str) else handler_level)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| #! coding:utf-8
import logging
dummy_logger = logging.getLogger('torequests.dummy')
main_logger = logging.getLogger('torequests.main')
def init_logger(name='', handler_path_levels=None,
level=logging.INFO, formatter=None,
formatter_str=None, datefmt="%Y-%m-%d %H:%M:%S"):
"""Args:
name = '' or logger obj.
handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]
level : the least level for the logger.
formatter = logging.Formatter(
'%(levelname)-6s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S")
formatter_str = '%(levelname)-6s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s'
custom formatter:
%(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s
"""
levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
if not formatter:
if formatter_str:
formatter_str = formatter_str
else:
formatter_str = '%(levelname)-6s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s'
formatter = logging.Formatter(formatter_str, datefmt=datefmt)
logger = name if isinstance(
name, logging.Logger) else logging.getLogger(str(name))
logger.setLevel(level)
handler_path_levels = handler_path_levels or [['', 'INFO']]
# ---------------------------------------
for each_handler in handler_path_levels:
path, handler_level = each_handler
handler = logging.FileHandler(
path) if path else logging.StreamHandler()
handler.setLevel(levels.get(handler_level.upper(), 1) if isinstance(
handler_level, str) else handler_level)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
| mit | Python |
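
The -8s in the format string left-justifies the level name in an eight-character field, the length of the longest standard level (CRITICAL), so messages line up in columns. A quick demonstration:

import logging

fmt = logging.Formatter('%(levelname)-8s %(message)s')
for level in (logging.DEBUG, logging.INFO, logging.CRITICAL):
    rec = logging.LogRecord('demo', level, __file__, 0, 'aligned', None, None)
    print(fmt.format(rec))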
db47a651e380709c33c54c86f9a3861187772406 | Add metrics to MNIST | israelg99/eva | eva/examples/mnist.py | eva/examples/mnist.py | #%% Setup.
from collections import namedtuple
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Nadam
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from keras.utils.visualize_util import plot
from keras import backend as K
from eva.models.pixelcnn import PixelCNN
Data = namedtuple('Data', 'x y')
nb_classes = 10
img_rows, img_cols = 28, 28
nb_filters = 128
blocks = 4
batch_size = 128
nb_epoch = 4
def clean_data(x, y, rows, cols):
if K.image_dim_ordering() == 'th':
x = x.reshape(x.shape[0], 1, rows, cols)
input_shape = (1, rows, cols)
else:
x = x.reshape(x.shape[0], rows, cols, 1)
input_shape = (rows, cols, 1)
x = x.astype('float32') / 255
y = np_utils.to_categorical(y, nb_classes)
# New way
x[np.where(x > 0)] = 1
print('X shape:', x.shape)
print(x.shape[0], 'samples')
return x, y
def get_data(rows, cols):
return [Data(*clean_data(*data, rows, cols)) for data in mnist.load_data()]
def get_input(rows, cols):
return (1, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, 1)
train, test = get_data(img_rows, img_cols)
input_shape = get_input(img_rows, img_cols)
input_dims = np.prod(input_shape)
model = PixelCNN(input_shape, nb_filters, blocks)
model.summary()
plot(model)
#%% Train.
model.fit(train.x, train.x, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(test.x, test.x))
score = model.evaluate(test.x, test.x, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#%% Save model.
model.save('pixelcnn.h5')
| #%% Setup.
from collections import namedtuple
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Nadam
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from keras.utils.visualize_util import plot
from keras import backend as K
from eva.models.pixelcnn import PixelCNN
Data = namedtuple('Data', 'x y')
nb_classes = 10
img_rows, img_cols = 28, 28
nb_filters = 128
blocks = 4
batch_size = 128
nb_epoch = 4
def clean_data(x, y, rows, cols):
if K.image_dim_ordering() == 'th':
x = x.reshape(x.shape[0], 1, rows, cols)
input_shape = (1, rows, cols)
else:
x = x.reshape(x.shape[0], rows, cols, 1)
input_shape = (rows, cols, 1)
x = x.astype('float32') / 255
y = np_utils.to_categorical(y, nb_classes)
# New way
x[np.where(x > 0)] = 1
print('X shape:', x.shape)
print(x.shape[0], 'samples')
return x, y
def get_data(rows, cols):
return [Data(*clean_data(*data, rows, cols)) for data in mnist.load_data()]
def get_input(rows, cols):
return (1, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, 1)
train, test = get_data(img_rows, img_cols)
input_shape = get_input(img_rows, img_cols)
input_dims = np.prod(input_shape)
model = PixelCNN(input_shape, nb_filters, blocks)
model.summary()
plot(model)
#%% Train.
model.fit(train.x, train.x, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(test.x, test.x))
score = model.evaluate(test.x, test.x, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#%% Save model.
model.save('pixelcnn.h5') | apache-2.0 | Python |
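
In Keras, the accuracy that evaluate() reports comes from the metrics list passed at compile time, which is why the script reads score[1]. A hedged sketch, with a toy model standing in for the record's PixelCNN (layer sizes are arbitrary):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=4, activation='sigmoid')])
model.compile(optimizer='nadam', loss='binary_crossentropy',
              metrics=['accuracy'])  # evaluate() then returns [loss, accuracy]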
f6672fd0074052ba71bc1266590f0ef0db8f14d0 | fix import. | soasme/blackgate | blackgate/cli.py | blackgate/cli.py | # -*- coding: utf-8 -*-
import click
from blackgate.core import component
from blackgate.server import run
@click.group()
def main():
# README CONFIG
component.install_from_config(config)
@main.command()
def start():
run(config.get('port', 9654))
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
import click
from blackgate.core import component
from blackgate.server importrun
@click.group()
def main():
# README CONFIG
component.install_from_config(config)
@main.command()
def start():
run(config.get('port', 9654))
if __name__ == '__main__':
main()
| mit | Python |
1c9a16a0896cd39aca2b44c0ef5c4eb155d1dab7 | Add a test for 2 fragments case. | kcaa/kcaa,kcaa/kcaa,kcaa/kcaa,kcaa/kcaa | server/kcaa/manipulator_util_test.py | server/kcaa/manipulator_util_test.py | #!/usr/bin/env python
import pytest
import manipulator_util
class TestManipulatorManager(object):
def pytest_funcarg__manager(self, request):
return manipulator_util.ManipulatorManager(None, {}, 0)
def test_in_schedule_fragment(self):
in_schedule_fragment = (
manipulator_util.ManipulatorManager.in_schedule_fragment)
assert in_schedule_fragment(0, [0, 3600])
assert in_schedule_fragment(1800, [0, 3600])
assert in_schedule_fragment(3599, [0, 3600])
assert not in_schedule_fragment(3600, [0, 3600])
assert not in_schedule_fragment(5400, [0, 3600])
def test_are_auto_manipulator_scheduled_disabled(self, manager):
manager.set_auto_manipulator_schedules(False, [[0, 3600]])
assert not manager.are_auto_manipulator_scheduled(0)
def test_are_auto_manipulator_scheduled_one_fragment(self, manager):
manager.set_auto_manipulator_schedules(True, [[0, 3600]])
assert manager.are_auto_manipulator_scheduled(0)
assert manager.are_auto_manipulator_scheduled(1800)
assert manager.are_auto_manipulator_scheduled(3599)
assert not manager.are_auto_manipulator_scheduled(3600)
assert not manager.are_auto_manipulator_scheduled(5400)
def test_are_auto_manipulator_scheduled_two_fragments(self, manager):
manager.set_auto_manipulator_schedules(True, [[0, 3600],
[7200, 10800]])
assert manager.are_auto_manipulator_scheduled(0)
assert not manager.are_auto_manipulator_scheduled(3600)
assert manager.are_auto_manipulator_scheduled(7200)
assert manager.are_auto_manipulator_scheduled(10799)
assert not manager.are_auto_manipulator_scheduled(10800)
assert manager.are_auto_manipulator_scheduled(0)
def main():
import doctest
doctest.testmod(manipulator_util)
pytest.main(args=[__file__.replace('.pyc', '.py')])
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import pytest
import manipulator_util
class TestManipulatorManager(object):
def pytest_funcarg__manager(self, request):
return manipulator_util.ManipulatorManager(None, {}, 0)
def test_in_schedule_fragment(self):
in_schedule_fragment = (
manipulator_util.ManipulatorManager.in_schedule_fragment)
assert in_schedule_fragment(0, [0, 3600])
assert in_schedule_fragment(1800, [0, 3600])
assert in_schedule_fragment(3599, [0, 3600])
assert not in_schedule_fragment(3600, [0, 3600])
assert not in_schedule_fragment(5400, [0, 3600])
def test_are_auto_manipulator_scheduled_disabled(self, manager):
manager.set_auto_manipulator_schedules(False, [[0, 3600]])
assert not manager.are_auto_manipulator_scheduled(0)
def test_are_auto_manipulator_scheduled_one_fragment(self, manager):
manager.set_auto_manipulator_schedules(True, [[0, 3600]])
assert manager.are_auto_manipulator_scheduled(0)
assert manager.are_auto_manipulator_scheduled(1800)
assert manager.are_auto_manipulator_scheduled(3599)
assert not manager.are_auto_manipulator_scheduled(3600)
assert not manager.are_auto_manipulator_scheduled(5400)
def main():
import doctest
doctest.testmod(manipulator_util)
pytest.main(args=[__file__.replace('.pyc', '.py')])
if __name__ == '__main__':
main()
| apache-2.0 | Python |
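
The tests above pin down half-open interval semantics: a schedule fragment includes its start and excludes its end, so back-to-back fragments never overlap. The predicate they exercise amounts to:

def in_schedule_fragment(t, fragment):
    start, end = fragment
    return start <= t < end

assert in_schedule_fragment(0, [0, 3600])
assert in_schedule_fragment(3599, [0, 3600])
assert not in_schedule_fragment(3600, [0, 3600])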
3bd383a15902d8367097a4348de64c929732767b | Fix Test | Lab-317/NewsParser | tests/NewsParser_Test.py | tests/NewsParser_Test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: balicanta
# @Date: 2014-10-25 09:57:26
# @Last Modified by: balicanta
# @Last Modified time: 2014-10-27 23:44:57
from NewsParser import NewsParser
from requests.utils import get_encodings_from_content
test_fixtures = [
{"url": "http://udn.com/NEWS/NATIONAL/NAT3/9017464.shtml",
"title": "聯合報直擊", "author": "呂思逸","content":"是由陳老闆批了棉花棒"},
{"url": "http://world.yam.com/post.php?id=2732",
"title": "海潮人潮兇", "author":"", "content": "這座遊人如織的水都"},
{"url": "http://news.ltn.com.tw/news/business/breakingnews/1142153",
"title": "魏家退出101", "author":"", "content": "財政部次長吳當傑今天傍晚表示"}
]
def test_parser():
for test_fixture in test_fixtures:
parser = NewsParser(test_fixture['url'])
title = parser.getTitle()
author = parser.getAuthor()
content = parser.getContent()
assert test_fixture['title'] in title.encode('utf-8')
assert test_fixture['author'] in author.encode('utf-8')
assert test_fixture['content'] in content.encode('utf-8')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: balicanta
# @Date: 2014-10-25 09:57:26
# @Last Modified by: bustta
# @Last Modified time: 2014-10-27 23:22:08
from NewsParser import NewsParser
from requests.utils import get_encodings_from_content
test_fixtures = [
{"url": "http://udn.com/NEWS/NATIONAL/NAT3/9017464.shtml",
"title": "聯合報直擊", "author": "呂思逸"},
{"url": "http://world.yam.com/post.php?id=2732",
"title": "海潮人潮兇", "content": "這座遊人如織的水都"},
{"url": "http://news.ltn.com.tw/news/business/breakingnews/1142153",
"title": "魏家退出101", "content": "財政部次長吳當傑今天傍晚表示"}
]
def test_parser():
for test_fixture in test_fixtures:
parser = NewsParser(test_fixture['url'])
title = parser.getTitle()
author = parser.getAuthor()
content = parser.getContent()
assert test_fixture['title'] in title.encode('utf-8')
assert test_fixture['author'] in author.encode('utf-8')
assert test_fixture['content'] in content.encode('utf-8')
| mit | Python |
97478d2bb38b94a5effbbc74db3ae1a0360f9a19 | remove vm.id usage in exeption message | 2gis/vmmaster,2gis/vmmaster,sh0ked/vmmaster,sh0ked/vmmaster,2gis/vmmaster | vmpool/endpoint.py | vmpool/endpoint.py | # coding: utf-8
from core.utils import generator_wait_for
from core.logger import log_pool
from core.config import config
from core.exceptions import PlatformException, NoSuchEndpoint, \
CreationException
from vmpool.virtual_machines_pool import pool
from vmpool.platforms import Platforms
from vmpool.vmqueue import q
def get_vm_from_pool(endpoint_name):
vm = pool.get_by_name(endpoint_name)
if vm:
log_pool.debug('Got vm with params: %s' % vm.info)
return vm
else:
raise NoSuchEndpoint('No such endpoint: %s' % endpoint_name)
def new_vm(desired_caps):
platform = desired_caps.get("platform", None)
if hasattr(config, "PLATFORM") and config.PLATFORM:
log_pool.info(
'Using %s. Desired platform %s has been ignored.' %
(config.PLATFORM, platform)
)
platform = config.PLATFORM
desired_caps["platform"] = platform
if isinstance(platform, unicode):
platform = platform.encode('utf-8')
if not platform:
raise CreationException(
'Platform parameter for new endpoint not found in dc'
)
if not Platforms.check_platform(platform):
raise PlatformException('No such platform %s' % platform)
delayed_vm = q.enqueue(desired_caps)
yield delayed_vm
for condition in generator_wait_for(
lambda: delayed_vm.vm, timeout=config.GET_VM_TIMEOUT
):
yield delayed_vm
if not delayed_vm.vm:
raise CreationException(
"Timeout while waiting for vm with platform %s" % platform
)
yield delayed_vm.vm
for condition in generator_wait_for(
lambda: delayed_vm.vm.ready, timeout=config.GET_VM_TIMEOUT
):
yield delayed_vm.vm
if not delayed_vm.vm.ready:
raise CreationException(
'Timeout while building vm %s (platform: %s)' %
(delayed_vm.vm.name, platform)
)
log_pool.info('Got vm for request with params: %s' % delayed_vm.vm.info)
yield delayed_vm.vm
def delete_vm(endpoint_name):
vm = pool.get_by_name(endpoint_name)
if vm:
if vm.is_preloaded():
vm.rebuild()
else:
vm.delete()
msg = "Vm %s has been deleted" % endpoint_name
log_pool.info(msg)
else:
msg = "Vm %s not found in pool or vm is busy" % endpoint_name
log_pool.info(msg)
| # coding: utf-8
from core.utils import generator_wait_for
from core.logger import log_pool
from core.config import config
from core.exceptions import PlatformException, NoSuchEndpoint, \
CreationException
from vmpool.virtual_machines_pool import pool
from vmpool.platforms import Platforms
from vmpool.vmqueue import q
def get_vm_from_pool(endpoint_name):
vm = pool.get_by_name(endpoint_name)
if vm:
log_pool.debug('Got vm with params: %s' % vm.info)
return vm
else:
raise NoSuchEndpoint('No such endpoint: %s' % endpoint_name)
def new_vm(desired_caps):
platform = desired_caps.get("platform", None)
if hasattr(config, "PLATFORM") and config.PLATFORM:
log_pool.info(
'Using %s. Desired platform %s has been ignored.' %
(config.PLATFORM, platform)
)
platform = config.PLATFORM
desired_caps["platform"] = platform
if isinstance(platform, unicode):
platform = platform.encode('utf-8')
if not platform:
raise CreationException(
'Platform parameter for new endpoint not found in dc'
)
if not Platforms.check_platform(platform):
raise PlatformException('No such platform %s' % platform)
delayed_vm = q.enqueue(desired_caps)
yield delayed_vm
for condition in generator_wait_for(
lambda: delayed_vm.vm, timeout=config.GET_VM_TIMEOUT
):
yield delayed_vm
if not delayed_vm.vm:
raise CreationException(
"Timeout while waiting for vm with platform %s" % platform
)
yield delayed_vm.vm
for condition in generator_wait_for(
lambda: delayed_vm.vm.ready, timeout=config.GET_VM_TIMEOUT
):
yield delayed_vm.vm
if not delayed_vm.vm.ready:
raise CreationException(
'Timeout while building vm %s (platform: %s)' %
(delayed_vm.vm.id, platform)
)
log_pool.info('Got vm for request with params: %s' % delayed_vm.vm.info)
yield delayed_vm.vm
def delete_vm(endpoint_name):
vm = pool.get_by_name(endpoint_name)
if vm:
if vm.is_preloaded():
vm.rebuild()
else:
vm.delete()
msg = "Vm %s has been deleted" % endpoint_name
log_pool.info(msg)
else:
msg = "Vm %s not found in pool or vm is busy" % endpoint_name
log_pool.info(msg)
| mit | Python |
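The generators above poll through core.utils.generator_wait_for, which this diff does not show. A minimal sketch of a compatible helper — the name is real, but this body, including the poll interval, is an assumption inferred from the call sites:

import time

def generator_wait_for(predicate, timeout, poll_interval=0.5):
    # Hypothetical stand-in: yield (so the caller can re-yield its own
    # progress value) until the predicate turns truthy or the timeout elapses.
    deadline = time.time() + timeout
    while not predicate() and time.time() < deadline:
        yield
        time.sleep(poll_interval)

After the loop the caller re-checks the predicate itself (e.g. `if not delayed_vm.vm:`), which is why the helper can simply stop yielding on timeout.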
08125322609e97e868c5c712df9e35e4c556434d | Use enumerate() instead of managing an index variable. | ting-yuan/web-page-replay,andrey-malets/web-page-replay,bpsinc-native/src_third_party_webpagereplay,colin-scott/web-page-replay,bpsinc-native/src_third_party_webpagereplay,colin-scott/web-page-replay,snorp/web-page-replay,bpsinc-native/src_third_party_webpagereplay,ting-yuan/web-page-replay,andrey-malets/web-page-replay,chromium/web-page-replay,snorp/web-page-replay,chromium/web-page-replay | httparchive.py | httparchive.py | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HttpArchive(dict):
"""Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values."""
pass
class ArchivedHttpRequest(object):
def __init__(self, command, host, path, request_body):
self.command = command
self.host = host
self.path = path
self.request_body = request_body
def __repr__(self):
return repr((self.command, self.host, self.path, self.request_body))
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return self.__repr__() == other.__repr__()
class ArchivedHttpResponse(object):
def __init__(self, status, reason, headers, response_data):
self.status = status
self.reason = reason
self.headers = headers
self.response_data = response_data
def get_header(self, key):
for k, v in self.headers:
if key == k:
return v
return None
def set_header(self, key, value):
for i, (k, v) in enumerate(self.headers):
if key == k:
self.headers[i] = (key, value)
return
self.headers.append((key, value))
def remove_header(self, key):
for i, (k, v) in enumerate(self.headers):
if key == k:
self.headers.pop(i)
return
| #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HttpArchive(dict):
"""Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values."""
pass
class ArchivedHttpRequest(object):
def __init__(self, command, host, path, request_body):
self.command = command
self.host = host
self.path = path
self.request_body = request_body
def __repr__(self):
return repr((self.command, self.host, self.path, self.request_body))
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return self.__repr__() == other.__repr__()
class ArchivedHttpResponse(object):
def __init__(self, status, reason, headers, response_data):
self.status = status
self.reason = reason
self.headers = headers
self.response_data = response_data
def get_header(self, key):
for k, v in self.headers:
if key == k:
return v
return None
def set_header(self, key, value):
i = 0
for k, v in self.headers:
if key == k:
self.headers[i] = (key, value)
return
i = i + 1
self.headers.append((key, value))
def remove_header(self, key):
i = 0
for k, v in self.headers:
if key == k:
self.headers.pop(i)
return
i = i + 1
| apache-2.0 | Python |
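The enumerate() idiom this commit adopts, shown standalone on an invented header list:

headers = [('Content-Type', 'text/html'), ('Content-Length', '42')]

def set_header(headers, key, value):
    # update a (key, value) pair in place, or append when the key is absent;
    # enumerate() supplies the index without manual bookkeeping
    for i, (k, _) in enumerate(headers):
        if k == key:
            headers[i] = (key, value)
            return
    headers.append((key, value))

set_header(headers, 'Content-Length', '128')      # replaces the pair at index 1
set_header(headers, 'Cache-Control', 'no-cache')  # appends a new pair
print(headers)
# [('Content-Type', 'text/html'), ('Content-Length', '128'), ('Cache-Control', 'no-cache')]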
d5482b10a712863c36a59d8ce82f3958ec41e78b | Add CORS on /swagger.json | l-vincent-l/APITaxi,openmaraude/APITaxi,l-vincent-l/APITaxi,openmaraude/APITaxi | APITaxi/api/__init__.py | APITaxi/api/__init__.py | # -*- coding: utf-8 -*-
from flask.ext.restplus import apidoc, Api
from flask import Blueprint, render_template
from flask_cors import cross_origin
api_blueprint = Blueprint('api', __name__)
api = Api(api_blueprint, doc=False, catch_all_404s=True,
title='API version 2.0')
ns_administrative = api.namespace('administrative',
description="Administrative APIs", path='/')
def init_app(app):
from . import hail, taxi, ads, drivers, zupc, profile, vehicle, documents
api.init_app(app, add_specs=False)
app.register_blueprint(api_blueprint)
app.register_blueprint(apidoc.apidoc)
@app.route('/swagger.json', endpoint='api.specs')
@cross_origin()
def swagger():
return render_template('swagger.json', host=app.config['SERVER_NAME']), 200,
{'Content-Type': 'application/json'}
| # -*- coding: utf-8 -*-
from flask.ext.restplus import apidoc, Api
from flask import Blueprint, render_template
api_blueprint = Blueprint('api', __name__)
api = Api(api_blueprint, doc=False, catch_all_404s=True,
title='API version 2.0')
ns_administrative = api.namespace('administrative',
description="Administrative APIs", path='/')
def init_app(app):
from . import hail, taxi, ads, drivers, zupc, profile, vehicle, documents
api.init_app(app, add_specs=False)
app.register_blueprint(api_blueprint)
app.register_blueprint(apidoc.apidoc)
@app.route('/swagger.json', endpoint='api.specs')
def swagger():
return render_template('swagger.json', host=app.config['SERVER_NAME']), 200,
{'Content-Type': 'application/json'}
| agpl-3.0 | Python |
6e525872537cd31a80cb791d6594a1f6800c61b4 | add inverse option, add args-parsing | lnitram/pi-playground,lnitram/pi-playground | i2c/PCF8574.py | i2c/PCF8574.py | #!/usr/bin/python
import sys
import smbus
import time
import argparse
# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber,address):
address = int(address,16)
busnumber = int(busnumber)
bus = smbus.SMBus(busnumber)
state = bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>7-i)
print str(port) + ': ' + str(value)
# Reads data from PCF8574 and prints the inverted state of each port
def readPCF8574_INV(busnumber,address):
address = int(address,16)
busnumber = int(busnumber)
bus = smbus.SMBus(busnumber)
state = 255 - bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>(7-i))
print str(port) + ': ' + str(value)
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-i",action='store_true', help="Invert the bit of in- and output")
parser.add_argument('i2c_bus', help='Number of active i2c-bus (0 or 1)')
parser.add_argument('i2c_address', help='address of PCF8574')
args = parser.parse_args()
# run commands
if args.i:
readPCF8574_INV(args.i2c_bus,args.i2c_address)
else:
readPCF8574(args.i2c_bus,args.i2c_address)
| #!/usr/bin/python
import sys
import smbus
import time
# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber,address):
address = int(address,16)
busnumber = int(1)
bus = smbus.SMBus(busnumber)
state = bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>7-i)
print str(port) + ': ' + str(value)
if len(sys.argv) != 3:
print "Usage: python PCF8574.py bus address"
exit(1)
bus = sys.argv[1]
address = sys.argv[2]
readPCF8574(bus,address)
| mit | Python |
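A worked, hardware-free example of readPCF8574's bit extraction; the state byte is invented, and port 0 corresponds to the most significant bit in this loop:

state = 0b10110010  # pretend bus.read_byte() returned this
for i in range(8):
    # shift the wanted bit down to position 0, then mask it out
    print("port %d: %d" % (i, 1 & (state >> (7 - i))))
# ports 0..7 print 1, 0, 1, 1, 0, 0, 1, 0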
c888e52788ec37641f97f761d2052902db20582a | Add missing dates | gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext | erpnext/accounts/dashboard.py | erpnext/accounts/dashboard.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from itertools import groupby
from operator import itemgetter
import frappe
from frappe.utils import add_to_date, date_diff, getdate, nowdate
from erpnext.accounts.report.general_ledger.general_ledger import execute
def get(filters=None):
filters = frappe._dict({
"company": "Gadget Technologies Pvt. Ltd.",
"from_date": get_from_date_from_timespan(filters.get("timespan")),
"to_date": "2020-12-12",
"account": "Cash - GTPL",
"group_by": "Group by Voucher (Consolidated)"
})
report_columns, report_results = execute(filters=filters)
interesting_fields = ["posting_date", "balance"]
columns = [column for column in report_columns if column["fieldname"] in interesting_fields]
_results = []
for row in report_results[1:-2]:
_results.append([row[key] for key in interesting_fields])
grouped_results = groupby(_results, key=itemgetter(0))
results = [list(values)[-1] for key, values in grouped_results]
	results = add_missing_dates(results, filters.from_date, filters.to_date)  # filters is a frappe._dict, so attribute access works
return {
"labels": [result[0] for result in results],
"datasets": [{
"name": "Cash - GTPL",
"values": [result[1] for result in results]
}]
}
def get_from_date_from_timespan(timespan):
days = months = years = 0
if "Last Week" == timespan:
days = -7
if "Last Month" == timespan:
months = -1
elif "Last Quarter" == timespan:
months = -3
elif "Last Year" == timespan:
years = -1
return add_to_date(None, years=years, months=months, days=days,
as_string=True, as_datetime=True)
def add_missing_dates(incomplete_results, from_date, to_date):
dates = [r[0] for r in incomplete_results]
day_count = date_diff(to_date, from_date)
results_dict = dict(incomplete_results)
last_date, last_balance = incomplete_results[0]
results = []
for date in (add_to_date(getdate(from_date), days=n) for n in range(day_count + 1)):
if date in results_dict:
last_date = date
last_balance = results_dict[date]
results.append([date, last_balance])
return results
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from itertools import groupby
from operator import itemgetter
import frappe
from frappe.utils import add_to_date
from erpnext.accounts.report.general_ledger.general_ledger import execute
def get(filters=None):
filters = frappe._dict({
"company": "Gadget Technologies Pvt. Ltd.",
"from_date": get_from_date_from_timespan(filters.get("timespan")),
"to_date": "2020-12-12",
"account": "Cash - GTPL",
"group_by": "Group by Voucher (Consolidated)"
})
report_columns, report_results = execute(filters=filters)
interesting_fields = ["posting_date", "balance"]
columns = [column for column in report_columns if column["fieldname"] in interesting_fields]
_results = []
for row in report_results[1:-2]:
_results.append([row[key] for key in interesting_fields])
grouped_results = groupby(_results, key=itemgetter(0))
results = [list(values)[-1] for key, values in grouped_results]
return {
"labels": [result[0] for result in results],
"datasets": [{
"name": "Cash - GTPL",
"values": [result[1] for result in results]
}]
}
def get_from_date_from_timespan(timespan):
days = months = years = 0
if "Last Week" == timespan:
days = -7
if "Last Month" == timespan:
months = -1
elif "Last Quarter" == timespan:
months = -3
elif "Last Year" == timespan:
years = -1
return add_to_date(None, years=years, months=months, days=days,
as_string=True, as_datetime=True)
| agpl-3.0 | Python |
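The forward-fill performed by add_missing_dates() can be sketched without Frappe; the balances below are invented:

from datetime import date, timedelta

def fill_dates(results, from_date, to_date):
    # carry the last known balance across days that have no ledger entry
    known = dict(results)
    last_balance = results[0][1]
    out, day = [], from_date
    while day <= to_date:
        last_balance = known.get(day, last_balance)
        out.append((day, last_balance))
        day += timedelta(days=1)
    return out

print(fill_dates([(date(2020, 1, 1), 100), (date(2020, 1, 3), 80)],
                 date(2020, 1, 1), date(2020, 1, 4)))
# 100 is carried into Jan 2; 80 is carried into Jan 4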
dca8dce24e0bea671b52d456909c35e43c4f5929 | move exchange endpoint into consumer urlspace | wanghe4096/website,wanghe4096/website,evonove/django-oauth-toolkit-example,evonove/django-oauth-toolkit-example,wanghe4096/website,wanghe4096/website | example/urls.py | example/urls.py | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from .views import ConsumerView, ConsumerExchangeView
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='example/home.html'), name='home'),
url(r'^consumer/exchange/', ConsumerExchangeView.as_view(), name='exchange'),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'example/login.html'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^o/', include('oauth2_provider.urls')),
url(r'^consumer/$', ConsumerView.as_view(), name="consumer"),
)
| from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from .views import ConsumerView, ConsumerExchangeView
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='example/home.html'), name='home'),
url(r'^exchange/', ConsumerExchangeView.as_view(), name='exchange'),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'example/login.html'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^o/', include('oauth2_provider.urls')),
url(r'^consumer/$', ConsumerView.as_view(), name="consumer"),
)
| bsd-2-clause | Python |
dcc472a6c8e15e7fc105277332681b38e40640df | Revert open_file_dialog example | r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview,shivaprsdv/pywebview,shivaprsdv/pywebview,shivaprsdv/pywebview,shivaprsdv/pywebview,r0x0r/pywebview,r0x0r/pywebview | examples/open_file_dialog.py | examples/open_file_dialog.py | import webview
import threading
"""
This example demonstrates creating an open file dialog.
"""
def open_file_dialog():
import time
time.sleep(5)
print(webview.create_file_dialog(webview.OPEN_DIALOG, allow_multiple=True))
if __name__ == '__main__':
t = threading.Thread(target=open_file_dialog)
t.start()
webview.create_window("Open file dialog example", "http://www.flowrl.com")
| import webview
import threading
"""
This example demonstrates creating an open file dialog.
"""
def open_file_dialog():
import time
time.sleep(5)
print(webview.create_file_dialog(webview.OPEN_DIALOG, allow_multiple=False))
if __name__ == '__main__':
t = threading.Thread(target=open_file_dialog)
t.start()
webview.create_window("Open file dialog example", "http://www.flowrl.com")
| bsd-3-clause | Python |
965236870ce5bf6dcbe9398b444b977c796b096e | set the right keyword to the close function | simphony/simphony-paraview,simphony/simphony-paraview | simphony_paraview/tests/test_show.py | simphony_paraview/tests/test_show.py | import unittest
from hypothesis import given
from paraview import servermanager
from paraview.simple import Disconnect
from simphony_paraview.show import show
from simphony_paraview.core.testing import cuds_containers
class TestShow(unittest.TestCase):
def setUp(self):
if servermanager.ActiveConnection is not None:
Disconnect()
self.closed = False
def tearDown(self):
if servermanager.ActiveConnection is not None:
raise RuntimeError('There is still an active connection')
@given(cuds_containers)
def test_valid_cuds_containers(self, setup):
# XXX This is a very basic test.
# given
cuds, kind = setup
def close(obj, event):
obj.TerminateApp()
show(cuds, testing=close)
def test_unknown_container(self):
container = object()
with self.assertRaises(TypeError):
show(container)
| import unittest
from hypothesis import given
from paraview import servermanager
from paraview.simple import Disconnect
from simphony_paraview.show import show
from simphony_paraview.core.testing import cuds_containers
class TestShow(unittest.TestCase):
def setUp(self):
if servermanager.ActiveConnection is not None:
Disconnect()
self.closed = False
def tearDown(self):
if servermanager.ActiveConnection is not None:
raise RuntimeError('There is still an active connection')
@given(cuds_containers)
def test_valid_cuds_containers(self, setup):
# XXX This is a very basic test.
# given
cuds, kind = setup
def close(obj, event):
obj.TerminateApp()
show(cuds, close)
def test_unknown_container(self):
container = object()
with self.assertRaises(TypeError):
show(container)
| bsd-2-clause | Python |
c0358584f2b5a05947ebb558c6d10293cc969a1a | Fix tests | Pawamoy/dependenpy,Pawamoy/dependenpy | tests/test_dependenpy.py | tests/test_dependenpy.py | # -*- coding: utf-8 -*-
"""Main test script."""
from dependenpy.cli import main
def test_main():
"""Main test method."""
main(['-lm', 'dependenpy'])
| # -*- coding: utf-8 -*-
"""Main test script."""
from dependenpy.cli import main
def test_main():
"""Main test method."""
main('dependenpy')
| isc | Python |
a6435a8713985464b8c37a438ac035d65f66b4cd | Add more user mapfiles and validate | geographika/mappyfile,geographika/mappyfile | tests/test_large_file.py | tests/test_large_file.py | import logging
import os
import cProfile
import glob
import json
import mappyfile
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
from mappyfile.validator import Validator
def output(fn):
"""
Parse, transform, and pretty print
the result
"""
p = Parser(expand_includes=False)
m = MapfileToDict()
v = Validator()
ast = p.parse_file(fn)
# print(ast)
d = m.transform(ast)
assert(v.validate(d))
output_file = fn + ".map"
try:
mappyfile.utils.write(d, output_file)
except Exception:
logging.warning(json.dumps(d, indent=4))
logging.warning("%s could not be successfully re-written", fn)
raise
# now try reading it again
ast = p.parse_file(output_file)
d = m.transform(ast)
assert(v.validate(d))
def main():
sample_dir = os.path.join(os.path.dirname(__file__), "mapfiles")
mapfiles = glob.glob(sample_dir + '/*.txt')
# mapfiles = ["map4.txt"]
for fn in mapfiles:
print("Processing {}".format(fn))
fn = os.path.join(sample_dir, fn)
pr = cProfile.Profile()
pr.enable()
output(fn)
pr.disable()
# pr.print_stats(sort='time')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
print("Done!")
| import logging
import cProfile
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
def output(fn):
"""
Parse, transform, and pretty print
the result
"""
p = Parser()
m = MapfileToDict()
ast = p.parse_file(fn)
# print(ast)
d = m.transform(ast)
# print(d)
pp = PrettyPrinter(indent=0, newlinechar=" ", quote="'")
pp.pprint(d)
def main():
fns = [r"D:\Temp\large_map1.txt", r"D:\Temp\large_map2.txt"]
for fn in fns:
pr = cProfile.Profile()
pr.enable()
output(fn)
pr.disable()
pr.print_stats(sort='time')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
print("Done!")
| mit | Python |
ddf311b4dc7c08f3f08516c702531053f8919720 | Tidy imports | lamby/django-slack | tests/test_validation.py | tests/test_validation.py | import json
from django.test import TestCase
from django_slack.exceptions import ChannelNotFound, MsgTooLong
from django_slack.backends import Backend
class TestOverride(TestCase):
def test_ok_result(self):
backend = Backend()
backend.validate('application/json', json.dumps({'ok': True}), {})
def test_msg_too_long_result(self):
# Arbitrarily chosen 'simple' error
backend = Backend()
with self.assertRaises(
MsgTooLong,
expected_regexp=r"MsgTooLong: msg_too_long",
):
backend.validate(
'application/json',
json.dumps({'ok': False, 'error': 'msg_too_long'}),
{},
)
def test_channel_not_found_result(self):
backend = Backend()
with self.assertRaises(
ChannelNotFound,
expected_regexp=r"ChannelNotFound: channel 'bad-channel' could not be found",
):
backend.validate(
'application/json',
json.dumps({'ok': False, 'error': 'channel_not_found'}),
{'channel': 'bad-channel'},
)
| import json
from django.conf import settings
from django.test import TestCase, override_settings
from django_slack.exceptions import ChannelNotFound, MsgTooLong
from django_slack.backends import Backend
class TestOverride(TestCase):
def test_ok_result(self):
backend = Backend()
backend.validate('application/json', json.dumps({'ok': True}), {})
def test_msg_too_long_result(self):
# Arbitrarily chosen 'simple' error
backend = Backend()
with self.assertRaises(
MsgTooLong,
expected_regexp=r"MsgTooLong: msg_too_long",
):
backend.validate(
'application/json',
json.dumps({'ok': False, 'error': 'msg_too_long'}),
{},
)
def test_channel_not_found_result(self):
backend = Backend()
with self.assertRaises(
ChannelNotFound,
expected_regexp=r"ChannelNotFound: channel 'bad-channel' could not be found",
):
backend.validate(
'application/json',
json.dumps({'ok': False, 'error': 'channel_not_found'}),
{'channel': 'bad-channel'},
)
| bsd-3-clause | Python |
4f8429e9cd17f207ef429bdf21508cfac4200c4c | improve display | UGentPortaal/django-ldapdb-archived,crito/django-ldapdb,chronossc/django-ldapdb,UGentPortaal/django-ldapdb,crito/django-ldapdb | examples/admin.py | examples/admin.py | # -*- coding: utf-8 -*-
#
# django-granadilla
# Copyright (C) 2009 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from examples.models import LdapGroup, LdapUser
class LdapGroupAdmin(admin.ModelAdmin):
exclude = ['dn', 'usernames']
list_display = ['name', 'gid']
search_fields = ['name']
class LdapUserAdmin(admin.ModelAdmin):
exclude = ['dn', 'password', 'photo']
list_display = ['username', 'first_name', 'last_name', 'email', 'uid']
search_fields = ['first_name', 'last_name', 'full_name', 'username']
admin.site.register(LdapGroup, LdapGroupAdmin)
admin.site.register(LdapUser, LdapUserAdmin)
| # -*- coding: utf-8 -*-
#
# django-granadilla
# Copyright (C) 2009 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from examples.models import LdapGroup, LdapUser
class LdapGroupAdmin(admin.ModelAdmin):
exclude = ['dn', 'usernames']
list_display = ['name', 'gid']
search_fields = ['name']
class LdapUserAdmin(admin.ModelAdmin):
exclude = ['dn', 'password', 'photo']
list_display = ['username', 'uid']
search_fields = ['first_name', 'last_name', 'full_name', 'username']
admin.site.register(LdapGroup, LdapGroupAdmin)
admin.site.register(LdapUser, LdapUserAdmin)
| bsd-3-clause | Python |
cbc8632a74f32415b2819b678340b6e4f0944dba | Use build_context factory | translationexchange/tml-python,translationexchange/tml-python | tests/unit/tools/list.py | tests/unit/tools/list.py | # encoding: UTF-8
import unittest
from tml.tools.list import List
from tml.tools.template import Template
from tests.mock import Client
from tml import build_context
class ListTest(unittest.TestCase):
def setUp(self):
self.context = build_context(client = Client.read_all(), locale = 'ru')
def test_render(self):
self.assertEquals('a, b, c', List(['a','b','c']).render(self.context), 'Just list')
self.assertEquals('a;b;c', List(['a','b','c'], separator = ';').render(self.context), 'Custom separator')
self.assertEquals('a, b and c', List(['a','b','c'], last_separator = 'and').render(self.context), 'Last separator')
self.assertEquals('a, b', List(['a','b','c'], limit = 2).render(self.context), 'Limit')
self.assertEquals('a and b', List(['a','b','c'], limit = 2, last_separator = 'and').render(self.context), 'Limit')
self.assertEquals('a', List(['a'], limit = 2, last_separator = 'and').render(self.context), 'One element')
def test_tpl(self):
list = List(['a','b','c'], tpl = Template('<b>{$0}</b>'))
self.assertEquals('<b>a</b>, <b>b</b>, <b>c</b>', list.render(self.context), 'Apply template')
list = List([{'name':'Вася','gender':'male'},{'name':'Андрей','gender':'male'},{'name':'Семен','gender':'male'}], tpl = Template('{$0::dat}'), last_separator = u'и')
self.assertEquals(u'Васе, Андрею и Семену', list.render(self.context), 'Apply context')
if __name__ == '__main__':
unittest.main()
| # encoding: UTF-8
import unittest
from tml.tools.list import List
from tml.tools.template import Template
from tests.mock import Client
from tml import Context
class list(unittest.TestCase):
def setUp(self):
self.context = Context(client = Client.read_all(), locale = 'ru')
def test_render(self):
self.assertEquals('a, b, c', List(['a','b','c']).render(self.context), 'Just list')
self.assertEquals('a;b;c', List(['a','b','c'], separator = ';').render(self.context), 'Custom separator')
self.assertEquals('a, b and c', List(['a','b','c'], last_separator = 'and').render(self.context), 'Last separator')
self.assertEquals('a, b', List(['a','b','c'], limit = 2).render(self.context), 'Limit')
self.assertEquals('a and b', List(['a','b','c'], limit = 2, last_separator = 'and').render(self.context), 'Limit')
self.assertEquals('a', List(['a'], limit = 2, last_separator = 'and').render(self.context), 'One element')
def test_tpl(self):
list = List(['a','b','c'], tpl = Template('<b>{$0}</b>'))
self.assertEquals('<b>a</b>, <b>b</b>, <b>c</b>', list.render(self.context), 'Apply template')
list = List([{'name':'Вася','gender':'male'},{'name':'Андрей','gender':'male'},{'name':'Семен','gender':'male'}], tpl = Template('{$0::dat}'), last_separator = u'и')
self.assertEquals(u'Васе, Андрею и Семену', list.render(self.context), 'Apply context')
if __name__ == '__main__':
unittest.main()
| mit | Python |
dbce79102efa8fee233af95939f1ff0b9d060b00 | Update example workflow to show you can use classes | botify-labs/simpleflow,botify-labs/simpleflow | examples/basic.py | examples/basic.py | import time
from simpleflow import (
activity,
Workflow,
futures,
)
@activity.with_attributes(task_list='quickstart', version='example')
def increment(x):
return x + 1
@activity.with_attributes(task_list='quickstart', version='example')
def double(x):
return x * 2
# A simpleflow activity can be any callable, so a function works, but a class
# will also work given the processing happens in __init__()
@activity.with_attributes(task_list='quickstart', version='example')
class Delay(object):
def __init__(self, t, x):
time.sleep(t)
        self.x = x  # __init__ must return None, so keep the value on the instance instead
class BasicWorkflow(Workflow):
name = 'basic'
version = 'example'
task_list = 'example'
def run(self, x, t=30):
y = self.submit(increment, x)
yy = self.submit(Delay, t, y)
z = self.submit(double, y)
print '({x} + 1) * 2 = {result}'.format(
x=x,
result=z.result)
futures.wait(yy, z)
return z.result
| import time
from simpleflow import (
activity,
Workflow,
futures,
)
@activity.with_attributes(task_list='quickstart', version='example')
def increment(x):
return x + 1
@activity.with_attributes(task_list='quickstart', version='example')
def double(x):
return x * 2
@activity.with_attributes(task_list='quickstart', version='example')
def delay(t, x):
time.sleep(t)
return x
class BasicWorkflow(Workflow):
name = 'basic'
version = 'example'
task_list = 'example'
def run(self, x, t=30):
y = self.submit(increment, x)
yy = self.submit(delay, t, y)
z = self.submit(double, y)
print '({x} + 1) * 2 = {result}'.format(
x=x,
result=z.result)
futures.wait(yy, z)
return z.result
| mit | Python |
9308152c67bc2ad2150a76e7897c8fd2568bf590 | Bump version: 0.0.4 -> 0.0.5 | polysquare/include-what-you-use-target-cmake,polysquare/iwyu-target-cmake | conanfile.py | conanfile.py | from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.5"
class IWYUCTargetCmakeConan(ConanFile):
name = "iwyu-target-cmake"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"tooling-find-pkg-util/master@smspillaz/tooling-find-pkg-util",
"tooling-cmake-util/master@smspillaz/tooling-cmake-util")
url = "http://github.com/polysquare/iwyu-target-cmake"
license = "MIT"
options = {
"dev": [True, False]
}
default_options = "dev=False"
def requirements(self):
if self.options.dev:
self.requires("cmake-module-common/master@smspillaz/cmake-module-common")
def source(self):
zip_name = "iwyu-target-cmake.zip"
download("https://github.com/polysquare/"
"iwyu-target-cmake/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="Find*.cmake",
dst="",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
self.copy(pattern="*.cmake",
dst="cmake/iwyu-target-cmake",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
| from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.4"
class IWYUCTargetCmakeConan(ConanFile):
name = "iwyu-target-cmake"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"tooling-find-pkg-util/master@smspillaz/tooling-find-pkg-util",
"tooling-cmake-util/master@smspillaz/tooling-cmake-util")
url = "http://github.com/polysquare/iwyu-target-cmake"
license = "MIT"
options = {
"dev": [True, False]
}
default_options = "dev=False"
def requirements(self):
if self.options.dev:
self.requires("cmake-module-common/master@smspillaz/cmake-module-common")
def source(self):
zip_name = "iwyu-target-cmake.zip"
download("https://github.com/polysquare/"
"iwyu-target-cmake/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="Find*.cmake",
dst="",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
self.copy(pattern="*.cmake",
dst="cmake/iwyu-target-cmake",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
| mit | Python |
09ab8f6290e3c5bf33e01857d11b124444a4c990 | add sendaddr support to isotp | commaai/panda,commaai/panda,commaai/panda,commaai/panda | examples/isotp.py | examples/isotp.py | DEBUG = False
def msg(x):
if DEBUG:
print "S:",x.encode("hex")
if len(x) <= 7:
ret = chr(len(x)) + x
else:
assert False
return ret.ljust(8, "\x00")
def isotp_send(panda, x, addr, bus=0):
if len(x) <= 7:
panda.can_send(addr, msg(x), bus)
else:
ss = chr(0x10 + (len(x)>>8)) + chr(len(x)&0xFF) + x[0:6]
x = x[6:]
idx = 1
sends = []
while len(x) > 0:
sends.append(((chr(0x20 + (idx&0xF)) + x[0:7]).ljust(8, "\x00")))
x = x[7:]
idx += 1
# actually send
panda.can_send(addr, ss, bus)
rr = recv(panda, 1, addr+8, bus)[0]
panda.can_send_many([(addr, None, s, 0) for s in sends])
kmsgs = []
def recv(panda, cnt, addr, nbus):
global kmsgs
ret = []
while len(ret) < cnt:
kmsgs += panda.can_recv()
nmsgs = []
for ids, ts, dat, bus in kmsgs:
if ids == addr and bus == nbus and len(ret) < cnt:
ret.append(dat)
else:
        nmsgs.append((ids, ts, dat, bus))  # keep frames for other addresses/buses around
kmsgs = nmsgs
return map(str, ret)
def isotp_recv(panda, addr, bus=0, sendaddr=None):
msg = recv(panda, 1, addr, bus)[0]
if sendaddr is None:
sendaddr = addr-8
if ord(msg[0])&0xf0 == 0x10:
# first
tlen = ((ord(msg[0]) & 0xf) << 8) | ord(msg[1])
dat = msg[2:]
# 0 block size?
CONTINUE = "\x30" + "\x00"*7
panda.can_send(sendaddr, CONTINUE, bus)
idx = 1
for mm in recv(panda, (tlen-len(dat) + 7)/8, addr, bus):
assert ord(mm[0]) == (0x20 | idx)
dat += mm[1:]
idx += 1
elif ord(msg[0])&0xf0 == 0x00:
# single
tlen = ord(msg[0]) & 0xf
dat = msg[1:]
else:
assert False
dat = dat[0:tlen]
if DEBUG:
print "R:",dat.encode("hex")
return dat
| DEBUG = False
def msg(x):
if DEBUG:
print "S:",x.encode("hex")
if len(x) <= 7:
ret = chr(len(x)) + x
else:
assert False
return ret.ljust(8, "\x00")
def isotp_send(panda, x, addr, bus=0):
if len(x) <= 7:
panda.can_send(addr, msg(x), bus)
else:
ss = chr(0x10 + (len(x)>>8)) + chr(len(x)&0xFF) + x[0:6]
x = x[6:]
idx = 1
sends = []
while len(x) > 0:
sends.append(((chr(0x20 + (idx&0xF)) + x[0:7]).ljust(8, "\x00")))
x = x[7:]
idx += 1
# actually send
panda.can_send(addr, ss, bus)
rr = recv(panda, 1, addr+8, bus)[0]
panda.can_send_many([(addr, None, s, 0) for s in sends])
kmsgs = []
def recv(panda, cnt, addr, nbus):
global kmsgs
ret = []
while len(ret) < cnt:
kmsgs += panda.can_recv()
nmsgs = []
for ids, ts, dat, bus in kmsgs:
if ids == addr and bus == nbus and len(ret) < cnt:
ret.append(dat)
else:
        nmsgs.append((ids, ts, dat, bus))  # keep frames for other addresses/buses around
kmsgs = nmsgs
return map(str, ret)
def isotp_recv(panda, addr, bus=0):
msg = recv(panda, 1, addr, bus)[0]
if ord(msg[0])&0xf0 == 0x10:
# first
tlen = ((ord(msg[0]) & 0xf) << 8) | ord(msg[1])
dat = msg[2:]
# 0 block size?
CONTINUE = "\x30" + "\x00"*7
panda.can_send(addr-8, CONTINUE, bus)
idx = 1
for mm in recv(panda, (tlen-len(dat) + 7)/8, addr, bus):
assert ord(mm[0]) == (0x20 | idx)
dat += mm[1:]
idx += 1
elif ord(msg[0])&0xf0 == 0x00:
# single
tlen = ord(msg[0]) & 0xf
dat = msg[1:]
else:
assert False
dat = dat[0:tlen]
if DEBUG:
print "R:",dat.encode("hex")
return dat
| mit | Python |
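Stripped of the CAN traffic, the framing done by msg() and isotp_send() reduces to a pure function (Python 2, like the module above):

def isotp_frames(x):
    # single frame: length nibble + payload, padded to 8 bytes
    if len(x) <= 7:
        return [(chr(len(x)) + x).ljust(8, "\x00")]
    # first frame carries a 12-bit length and the first 6 payload bytes;
    # consecutive frames carry a rolling 4-bit index and 7 bytes each
    frames = [chr(0x10 + (len(x) >> 8)) + chr(len(x) & 0xFF) + x[0:6]]
    x, idx = x[6:], 1
    while len(x) > 0:
        frames.append((chr(0x20 + (idx & 0xF)) + x[0:7]).ljust(8, "\x00"))
        x, idx = x[7:], idx + 1
    return frames

for f in isotp_frames("A" * 20):
    print f.encode("hex")  # 1014..., 21..., 22...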
ba1186c47e5f3466faeea9f2d5bf96948d5f7183 | Add --strict flag to raise exception on undefined variables | j4mie/confuzzle | confuzzle.py | confuzzle.py | import sys
import argparse
import yaml
import jinja2
def render(template_string, context_dict, strict=False):
template = jinja2.Template(template_string)
if strict:
template.environment.undefined = jinja2.StrictUndefined
return template.render(**context_dict)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('template', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="Config file template. If not supplied, stdin is used")
parser.add_argument('config', type=argparse.FileType('r'), help="YAML data file to read")
parser.add_argument('--out', '-o', dest='out', type=argparse.FileType('w'), default=sys.stdout, help="Output file to write. If not supplied, stdout is used")
parser.add_argument('--strict', dest='strict', action='store_true', default=False, help="Raise an exception on undefined variables")
args = parser.parse_args()
context_dict = yaml.load(args.config.read())
template_string = args.template.read()
rendered = render(template_string, context_dict, args.strict)
args.out.write(rendered)
if __name__ == "__main__":
main()
| import sys
import argparse
import yaml
from jinja2 import Template
def render(template_string, context_dict):
template = Template(template_string)
return template.render(**context_dict)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('template', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="Config file template. If not supplied, stdin is used")
parser.add_argument('config', type=argparse.FileType('r'), help="YAML data file to read")
parser.add_argument('--out', '-o', dest='out', type=argparse.FileType('w'), default=sys.stdout, help="Output file to write. If not supplied, stdout is used")
args = parser.parse_args()
context_dict = yaml.load(args.config.read())
template_string = args.template.read()
rendered = render(template_string, context_dict)
args.out.write(rendered)
if __name__ == "__main__":
main()
| unlicense | Python |
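A usage sketch of the new strict mode, assuming this module imports as confuzzle and jinja2 is installed:

import jinja2
from confuzzle import render

print(render("host={{ host }}", {"host": "db1"}, strict=True))  # host=db1
try:
    # 'port' is missing from the context, so StrictUndefined raises
    render("host={{ host }} port={{ port }}", {"host": "db1"}, strict=True)
except jinja2.UndefinedError as exc:
    print("undefined variable: %s" % exc)

Without strict=True the same template silently renders `port=` with an empty value.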
b336e83a63722b3a3e4d3f1779686149d5cef8d1 | Add compatibility for Python 2 | pypa/setuptools,pypa/setuptools,pypa/setuptools | setuptools/tests/test_setopt.py | setuptools/tests/test_setopt.py | # coding: utf-8
from __future__ import unicode_literals
import io
import six
from setuptools.command import setopt
from setuptools.extern.six.moves import configparser
class TestEdit:
@staticmethod
def parse_config(filename):
parser = configparser.ConfigParser()
with io.open(filename, encoding='utf-8') as reader:
(parser.read_file if six.PY3 else parser.readfp)(reader)
return parser
@staticmethod
def write_text(file, content):
with io.open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
"""
When editing a file, non-ASCII characters encoded in
UTF-8 should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(str(config), '[names]\njaraco=йарацо')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert parser.get('names', 'jaraco') == 'йарацо'
assert parser.get('names', 'other') == 'yes'
| # coding: utf-8
from __future__ import unicode_literals
import io
import six
from setuptools.command import setopt
from setuptools.extern.six.moves import configparser
class TestEdit:
@staticmethod
def parse_config(filename):
parser = configparser.ConfigParser()
with io.open(filename, encoding='utf-8') as reader:
(parser.read_file if six.PY3 else parser.readfp)(reader)
return parser
@staticmethod
def write_text(file, content):
with io.open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
"""
When editing a file, non-ASCII characters encoded in
UTF-8 should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(config, '[names]\njaraco=йарацо')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert parser['names']['jaraco'] == 'йарацо'
assert parser['names']['other'] == 'yes'
| mit | Python |
71f67f02dd26e29002ced50298b245c6114ece3b | Update mathfunctions.py | kaitheslayer/developingprojects | Python/Math/mathfunctions.py | Python/Math/mathfunctions.py | # File with the functions which will be used in math script
# Number to the power of
def po (number, pof):
b = number
for _ in range(pof - 1):
b = int(b) * int(number)
return b
# Factors of a number
def factors (number):
current, ao, nums = 0, 0, []
while current < number:
ao = ao + 1
current = number % ao
if current == 0:
nums.append(ao)
return nums
# Sqare root of number
def sqroot (number):
fac, f = factors (number), ''
for x in fac:
a = x * x
if a == number:
return (x)
f = True
if f != True:
return "No Square Root Found"
# Linear Patern Solver
def lseq (ls1, ls2, ls3, ls4):
if int(ls2) - int(ls1) == int(ls4) - int(ls3):
lsd1 = int(ls2) - int(ls1) # common difference
lsc = int(lsd1) - int(ls1) # constant e.g. Tn = xn + c
lsc = int(lsc) * -1
if lsd1 == 1: # added to change Tn = 1n to Tn = n
return("Tn = %sn+" % (lsd1) + ("%s" % (lsc)))
elif lsc == 0: # added to prevent problem where 0 is neither '+' or '-'. So a sequence: 1;2;3;4 -> Tn = n0
return("Tn = %sn" % (lsd1))
else:
return("Tn = %sn+" % (lsd1) + ("%s" % (lsc)))
elif ls2 - ls1 != ls4 - ls3:
return("This is not a Linear Equation!")
# THIS CAN SERIOUSLY BE DONE BETTER WITH CREATING OTHER FUCNTIONS, BUT LEAVING IT HERE FOR NOW...
def lineareq(numbers):
ai = numbers[3]
bi = numbers[1] * -1
ci = numbers[2] * -1
di = numbers[0]
# Calculate the Determinent of the inverse
de = ai * di - bi * ci
# Calculate the final answer, for easy eye viewing
xo = ai * numbers[4]
xoo = bi * numbers[5]
ans1 = xo + xoo
xo = ci * numbers[4]
xoo = di * numbers[5]
ans2 = xo + xoo
# Finish Equation
ans1 = ans1 / de
ans2 = ans2 / de
return ans1, ans2
| # File with the functions which will be used in math script
# Number to the power of
def po (number, pof):
b = number
for _ in range(pof - 1):
b = int(b) * int(number)
return b
# Factors of a number
def factors (number):
current, ao, nums = 0, 0, []
while current < number:
ao = ao + 1
current = number % ao
if current == 0:
nums.append(ao)
return nums
# Sqare root of number
def sqroot (number):
fac, f = factors (number), ''
for x in fac:
a = x * x
if a == number:
return (x)
f = True
if f != True:
return "No Square Root Found"
# THIS CAN SERIOUSLY BE DONE BETTER WITH CREATING OTHER FUCNTIONS, BUT LEAVING IT HERE FOR NOW...
def lineareq(numbers):
ai = numbers[3]
bi = numbers[1] * -1
ci = numbers[2] * -1
di = numbers[0]
# Calculate the Determinent of the inverse
de = ai * di - bi * ci
# Calculate the final answer, for easy eye viewing
xo = ai * numbers[4]
xoo = bi * numbers[5]
ans1 = xo + xoo
xo = ci * numbers[4]
xoo = di * numbers[5]
ans2 = xo + xoo
# Finish Equation
ans1 = ans1 / de
ans2 = ans2 / de
return ans1, ans2
| mit | Python |
1d3eb0bafd46f3e9cfb7d6395ad1a100052ff821 | Clean up parameter types (#52527) | thaim/ansible,thaim/ansible | lib/ansible/plugins/doc_fragments/online.py | lib/ansible/plugins/doc_fragments/online.py | # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = r'''
options:
api_token:
description:
- Online OAuth token.
type: str
aliases: [ oauth_token ]
api_url:
description:
- Online API URL
type: str
default: 'https://api.online.net'
aliases: [ base_url ]
api_timeout:
description:
- HTTP timeout to Online API in seconds.
type: int
default: 30
aliases: [ timeout ]
validate_certs:
description:
- Validate SSL certs of the Online API.
type: bool
default: yes
notes:
- Also see the API documentation on U(https://console.online.net/en/api/)
- If C(api_token) is not set within the module, the following
environment variables can be used in decreasing order of precedence
C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
- If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
environment variable.
'''
| # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
api_token:
description:
- Online OAuth token.
aliases: ['oauth_token']
api_url:
description:
- Online API URL
default: 'https://api.online.net'
aliases: ['base_url']
api_timeout:
description:
- HTTP timeout to Online API in seconds.
default: 30
aliases: ['timeout']
validate_certs:
description:
- Validate SSL certs of the Online API.
default: yes
type: bool
notes:
- Also see the API documentation on U(https://console.online.net/en/api/)
- If C(api_token) is not set within the module, the following
environment variables can be used in decreasing order of precedence
C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
- If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
environment variable.
'''
| mit | Python |
eb3f93ac64953eacecdd48e2cb8d5ca80554a95b | Update search-for-a-range.py | yiwen-luo/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,githubutilities/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,githubutilities/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,githubutilities/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,jaredkoontz/leetcode | Python/search-for-a-range.py | Python/search-for-a-range.py | # Time: O(logn)
# Space: O(1)
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
# Given [5, 7, 7, 8, 8, 10] and target value 8,
# return [3, 4].
#
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# Find the first index where target <= nums[idx]
left = self.binarySearch(lambda x, y: x >= y, nums, target)
if left >= len(nums) or nums[left] != target:
return [-1, -1]
# Find the first index where target < nums[idx]
right = self.binarySearch(lambda x, y: x > y, nums, target)
return [left, right - 1]
def binarySearch(self, compare, nums, target):
left, right = 0, len(nums)
while left < right:
mid = left + (right - left) / 2
if compare(nums[mid], target):
right = mid
else:
left = mid + 1
return left
def binarySearch2(self, compare, nums, target):
left, right = 0, len(nums) - 1
while left <= right:
mid = left + (right - left) / 2
if compare(nums[mid], target):
right = mid - 1
else:
left = mid + 1
return left
def binarySearch3(self, compare, nums, target):
left, right = -1, len(nums)
while left + 1 < right:
mid = left + (right - left) / 2
if compare(nums[mid], target):
right = mid
else:
left = mid
return right
if __name__ == "__main__":
print Solution().searchRange([2, 2], 3)
print Solution().searchRange([5, 7, 7, 8, 8, 10], 8)
| # Time: O(logn)
# Space: O(1)
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
# Given [5, 7, 7, 8, 8, 10] and target value 8,
# return [3, 4].
#
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# Find the first index where target <= nums[idx]
left = self.binarySearch(lambda x, y: x <= y, nums, target)
if left >= len(nums) or nums[left] != target:
return [-1, -1]
# Find the first index where target < nums[idx]
right = self.binarySearch(lambda x, y: x < y, nums, target)
return [left, right - 1]
def binarySearch(self, compare, nums, target):
left, right = 0, len(nums)
while left < right:
mid = left + (right - left) / 2
if compare(target, nums[mid]):
right = mid
else:
left = mid + 1
return left
def binarySearch2(self, compare, nums, target):
left, right = 0, len(nums) - 1
while left <= right:
mid = left + (right - left) / 2
if compare(target, nums[mid]):
right = mid - 1
else:
left = mid + 1
return left
def binarySearch3(self, compare, nums, target):
left, right = -1, len(nums)
while right - left > 1:
mid = left + (right - left) / 2
if compare(target, nums[mid]):
right = mid
else:
left = mid
return right
if __name__ == "__main__":
print Solution().searchRange([2, 2], 3)
print Solution().searchRange([5, 7, 7, 8, 8, 10], 8)
| mit | Python |
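The predicate-driven search above generalises both boundaries. A self-contained Python 3 restatement:

def binary_search(compare, nums, target):
    # returns the first index i for which compare(nums[i], target) is true
    left, right = 0, len(nums)
    while left < right:
        mid = left + (right - left) // 2
        if compare(nums[mid], target):
            right = mid
        else:
            left = mid + 1
    return left

nums = [5, 7, 7, 8, 8, 10]
left = binary_search(lambda x, y: x >= y, nums, 8)   # 3: first value >= 8
right = binary_search(lambda x, y: x > y, nums, 8)   # 5: first value > 8
print([left, right - 1])  # [3, 4]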
946b3867f464d96e85056b60d94593346a39cc51 | add map to tweet list | KingPixil/ice,KingPixil/ice | index.py | index.py | import os
import time
import TwitterAPI
import src.art.fluid
import src.art.gas
import src.art.map
# Configuration
twitterAPI = TwitterAPI.TwitterAPI(
consumer_key=os.environ["CONSUMER_KEY"],
consumer_secret=os.environ["CONSUMER_SECRET"],
access_token_key=os.environ["ACCESS_TOKEN_KEY"],
access_token_secret=os.environ["ACCESS_TOKEN_SECRET"]
)
# Generate
types = [src.art.fluid, src.art.gas, src.art.map]
totalTypes = len(types)
current = 0
while True:
print("\x1b[36mIce\x1b[0m Crafting Post 💡")
seedText = types[current].generate()
f = open("art.png", "rb")
twitterAPI.request("statuses/update_with_media", {
"status": seedText
}, {
"media[]": f.read()
})
f.close()
print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨\n")
current = (current + 1) % totalTypes
time.sleep(1020)
| import os
import time
import TwitterAPI
import src.art.fluid
import src.art.gas
# Configuration
twitterAPI = TwitterAPI.TwitterAPI(
consumer_key=os.environ["CONSUMER_KEY"],
consumer_secret=os.environ["CONSUMER_SECRET"],
access_token_key=os.environ["ACCESS_TOKEN_KEY"],
access_token_secret=os.environ["ACCESS_TOKEN_SECRET"]
)
# Generate
types = [src.art.fluid, src.art.gas]
totalTypes = len(types)
current = 0
while True:
print("\x1b[36mIce\x1b[0m Crafting Post 💡")
seedText = types[current].generate()
f = open("art.png", "rb")
twitterAPI.request("statuses/update_with_media", {
"status": seedText
}, {
"media[]": f.read()
})
f.close()
print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨\n")
current = (current + 1) % totalTypes
time.sleep(1020)
| mit | Python |
34adb8bb30860eb7748188a7d1a9345a09c4519f | Implement punctuation filtering | ikaruswill/vector-space-model,ikaruswill/boolean-retrieval | index.py | index.py | from nltk.tokenize import word_tokenize, sent_tokenize
import getopt
import sys
import os
import io
import string
def build_dict(docs):
dictionary = set()
for doc_id, doc in docs.items():
dictionary.update(doc)
dictionary = list(dictionary)
dictionary.sort()
return dictionary
def build_postings(dictionary):
postings = {}
for term in dictionary:
postings[term] = []
return postings
def populate_postings(docs, postings):
for doc_id, doc in docs.items():
for term in set(doc):
postings[term].append(doc_id)
def load_data(dir_doc):
docs = {}
for dirpath, dirnames, filenames in os.walk(dir_doc):
for name in filenames:
file = os.path.join(dirpath, name)
with io.open(file, 'r+') as f:
docs[name] = f.read()
return docs
def preprocess(docs):
punctuations = set(string.punctuation)
processed_docs = {}
for doc_id, doc in docs.items():
processed_docs[doc_id] = set(word_tokenize(doc.lower()))
processed_docs[doc_id].difference_update(punctuations)
return processed_docs
def usage():
print("usage: " + sys.argv[0] + " -i directory-of-documents -d dictionary-file -p postings-file")
if __name__ == '__main__':
dir_doc = dict_file = postings_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')
except getopt.GetoptError as err:
usage()
sys.exit(2)
for o, a in opts:
if o == '-i':
dir_doc = a
elif o == '-d':
dict_file = a
elif o == '-p':
postings_file = a
else:
assert False, "unhandled option"
if dir_doc == None or dict_file == None or postings_file == None:
usage()
sys.exit(2)
docs = load_data(dir_doc)
docs = preprocess(docs)
dictionary = build_dict(docs)
postings = build_postings(dictionary)
populate_postings(docs, postings)
| from nltk.tokenize import word_tokenize, sent_tokenize
import getopt
import sys
import os
import io
def build_dict(docs):
dictionary = set()
for doc_id, doc in docs.items():
dictionary.update(doc)
dictionary = list(dictionary)
dictionary.sort()
return dictionary
def build_postings(dictionary):
postings = {}
for term in dictionary:
postings[term] = []
return postings
def populate_postings(docs, postings):
for doc_id, doc in docs.items():
for term in set(doc):
postings[term].append(doc_id)
def load_data(dir_doc):
docs = {}
for dirpath, dirnames, filenames in os.walk(dir_doc):
for name in filenames:
file = os.path.join(dirpath, name)
with io.open(file, 'r+') as f:
docs[name] = f.read()
return docs
def preprocess(docs):
processed_docs = {}
for doc_id, doc in docs.items():
processed_docs[doc_id] = set(word_tokenize(doc.lower()))
return processed_docs
def usage():
print("usage: " + sys.argv[0] + " -i directory-of-documents -d dictionary-file -p postings-file")
if __name__ == '__main__':
dir_doc = dict_file = postings_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')
except getopt.GetoptError as err:
usage()
sys.exit(2)
for o, a in opts:
if o == '-i':
dir_doc = a
elif o == '-d':
dict_file = a
elif o == '-p':
postings_file = a
else:
assert False, "unhandled option"
if dir_doc == None or dict_file == None or postings_file == None:
usage()
sys.exit(2)
docs = load_data(dir_doc)
docs = preprocess(docs)
dictionary = build_dict(docs)
postings = build_postings(dictionary)
populate_postings(docs, postings)
| mit | Python |
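The punctuation filter can be exercised without NLTK by splitting on whitespace; the sentence is invented:

import string

punctuations = set(string.punctuation)
tokens = set("Hello , world ! This is a test .".lower().split())
tokens.difference_update(punctuations)  # drops ',', '!' and '.'
print(sorted(tokens))  # ['a', 'hello', 'is', 'test', 'this', 'world']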
e320c8558646233b78760e1c84c5334a3a743d6d | Fix test_ensemble on Python 3.5 | RasaHQ/rasa_nlu,RasaHQ/rasa_core,RasaHQ/rasa_core,RasaHQ/rasa_core,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu | tests/test_ensemble.py | tests/test_ensemble.py | import pytest
from rasa_core.policies import Policy
from rasa_core.policies.ensemble import PolicyEnsemble
class WorkingPolicy(Policy):
@classmethod
def load(cls, path):
return WorkingPolicy()
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def __eq__(self, other):
return isinstance(other, WorkingPolicy)
def test_policy_loading_simple(tmpdir):
original_policy_ensemble = PolicyEnsemble([WorkingPolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(str(tmpdir))
loaded_policy_ensemble = PolicyEnsemble.load(str(tmpdir))
assert original_policy_ensemble.policies == loaded_policy_ensemble.policies
class LoadReturnsNonePolicy(Policy):
@classmethod
def load(cls, path):
return None
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def test_policy_loading_load_returns_none(tmpdir):
original_policy_ensemble = PolicyEnsemble([LoadReturnsNonePolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(str(tmpdir))
with pytest.raises(Exception):
PolicyEnsemble.load(str(tmpdir))
class LoadReturnsWrongTypePolicy(Policy):
@classmethod
def load(cls, path):
return ""
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def test_policy_loading_load_returns_wrong_type(tmpdir):
original_policy_ensemble = PolicyEnsemble([LoadReturnsWrongTypePolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(str(tmpdir))
with pytest.raises(Exception):
PolicyEnsemble.load(str(tmpdir))
| import pytest
from rasa_core.policies import Policy
from rasa_core.policies.ensemble import PolicyEnsemble
class WorkingPolicy(Policy):
@classmethod
def load(cls, path):
return WorkingPolicy()
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def __eq__(self, other):
return isinstance(other, WorkingPolicy)
def test_policy_loading_simple(tmpdir):
original_policy_ensemble = PolicyEnsemble([WorkingPolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(tmpdir)
loaded_policy_ensemble = PolicyEnsemble.load(tmpdir)
assert original_policy_ensemble.policies == loaded_policy_ensemble.policies
class LoadReturnsNonePolicy(Policy):
@classmethod
def load(cls, path):
return None
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def test_policy_loading_load_returns_none(tmpdir):
original_policy_ensemble = PolicyEnsemble([LoadReturnsNonePolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(tmpdir)
with pytest.raises(Exception):
PolicyEnsemble.load(tmpdir)
class LoadReturnsWrongTypePolicy(Policy):
@classmethod
def load(cls, path):
return ""
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def test_policy_loading_load_returns_wrong_type(tmpdir):
original_policy_ensemble = PolicyEnsemble([LoadReturnsWrongTypePolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(tmpdir)
with pytest.raises(Exception):
PolicyEnsemble.load(tmpdir)
| apache-2.0 | Python |
ed410e81af61699a16c34c1edbbaa18a80bcdcfe | use global DocSimServer instance in views | ConsumerAffairs/django-document-similarity,ConsumerAffairs/django-document-similarity | docsim/documents/views.py | docsim/documents/views.py | from ujson import dumps
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from rest_framework.generics import ListAPIView, RetrieveAPIView
from .docsimserver import DocSimServer
from .models import Cluster, Document
from .serializers import ClusterSerializer
ACCEPTED = 202
DSS = DocSimServer()
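# Module-level singleton: one DocSimServer is created at import time and shared
# by every view, instead of constructing a fresh instance on each request.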
@csrf_exempt
@require_POST
def add_or_update(request):
id = request.POST.get('id')
text = request.POST.get('text')
if id and text:
Document(id=id, text=text).save()
return HttpResponse(status=ACCEPTED)
else:
return HttpResponseBadRequest()
class ClusterList(ListAPIView):
model = Cluster
serializer_class = ClusterSerializer
class ClusterDetail(RetrieveAPIView):
model = Cluster
serializer_class = ClusterSerializer
@csrf_exempt
@require_POST
def find_similar(request):
try:
text = request.POST['text']
min_score = float(request.POST.get('min_score', .8))
max_results = int(request.POST.get('max_results', 10))
except:
return HttpResponseBadRequest()
id = request.POST.get('id')
doc = Document(id=id, text=text)
tokens = doc.tokens()
similar = DSS.find_similar({'tokens': tokens}, min_score=min_score,
max_results=max_results)
if id:
doc.save()
DSS.server.index([{'id': id, 'tokens': tokens}])
return HttpResponse(content=dumps(similar), content_type='text/json')
| from ujson import dumps
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from rest_framework.generics import ListAPIView, RetrieveAPIView
from .docsimserver import DocSimServer
from .models import Cluster, Document
from .serializers import ClusterSerializer
ACCEPTED = 202
@csrf_exempt
@require_POST
def add_or_update(request):
id = request.POST.get('id')
text = request.POST.get('text')
if id and text:
Document(id=id, text=text).save()
return HttpResponse(status=ACCEPTED)
else:
return HttpResponseBadRequest()
class ClusterList(ListAPIView):
model = Cluster
serializer_class = ClusterSerializer
class ClusterDetail(RetrieveAPIView):
model = Cluster
serializer_class = ClusterSerializer
@csrf_exempt
@require_POST
def find_similar(request):
try:
text = request.POST['text']
min_score = float(request.POST.get('min_score', .8))
max_results = int(request.POST.get('max_results', 10))
except:
return HttpResponseBadRequest()
id = request.POST.get('id')
doc = Document(id=id, text=text)
dss = DocSimServer()
tokens = doc.tokens()
similar = dss.find_similar({'tokens': tokens}, min_score=min_score,
max_results=max_results)
if id:
doc.save()
dss.server.index([{'id': id, 'tokens': tokens}])
return HttpResponse(content=dumps(similar), content_type='text/json')
| agpl-3.0 | Python |
829d68f842c5076be7a8b2c3963c032977fe2f47 | Bump to 4.4-dp2. | gregoiresage/pebble-tool,pebble/pebble-tool,pebble/pebble-tool,gregoiresage/pebble-tool,gregoiresage/pebble-tool,pebble/pebble-tool,gregoiresage/pebble-tool,pebble/pebble-tool | pebble_tool/version.py | pebble_tool/version.py | version_base = (4, 4, 0)
version_suffix = 'dp2'
if version_suffix is None:
__version_info__ = version_base
else:
__version_info__ = version_base + (version_suffix,)
__version__ = '{}.{}'.format(*version_base)
if version_base[2] != 0:
__version__ += '.{}'.format(version_base[2])
if version_suffix is not None:
__version__ += '-{}'.format(version_suffix)
| version_base = (4, 4, 0)
version_suffix = 'dp1'
if version_suffix is None:
__version_info__ = version_base
else:
__version_info__ = version_base + (version_suffix,)
__version__ = '{}.{}'.format(*version_base)
if version_base[2] != 0:
__version__ += '.{}'.format(version_base[2])
if version_suffix is not None:
__version__ += '-{}'.format(version_suffix)
| mit | Python |
752132f83cacb15273625f819eed1dab1d558e97 | Make sure all relevant fields are shown in the admin interface | sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer,sbsdev/daisyproducer | dictionary/admin.py | dictionary/admin.py | from daisyproducer.dictionary.models import Word
from django.contrib import admin
class WordAdmin(admin.ModelAdmin):
list_display = ('untranslated', 'grade1', 'grade2', 'type', 'isConfirmed', 'isLocal')
ordering = ('untranslated',)
search_fields = ('untranslated',)
admin.site.register(Word, WordAdmin)
| from daisyproducer.dictionary.models import Word
from django.contrib import admin
class WordAdmin(admin.ModelAdmin):
list_display = ('untranslated', 'grade1', 'grade2', 'type', 'isConfirmed')
ordering = ('untranslated',)
search_fields = ('untranslated',)
admin.site.register(Word, WordAdmin)
| agpl-3.0 | Python |
bda269c5b745703cf517222e004caf0233b40699 | refactor p4io to io | michaelaye/planet4,michaelaye/planet4,michaelaye/planet4,michaelaye/planet4 | tests/test_get_data.py | tests/test_get_data.py | from planet4 import io
import datetime as dt
def test_get_numbers_from_date_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
assert io.split_date_from_fname(fname1) == [2014, 6, 2]
def test_get_datetime_object_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
dt_obj = dt.datetime(2014, 6, 2)
assert dt_obj == io.get_dt_from_fname(fname1)
def test_from_2_files_get_latest_file(monkeypatch):
import glob
fname1 = '/a/b/c/2014-06-02_some_name.h5'
fname2 = '/a/b/c/2014-06-09_some_name.h5'
def mockreturn(path):
return [fname1, fname2]
monkeypatch.setattr(glob, 'glob', mockreturn)
x = io.get_current_database_fname()
assert x == fname2
| from planet4 import p4io
import datetime as dt
def test_get_numbers_from_date_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
assert p4io.split_date_from_fname(fname1) == [2014, 6, 2]
def test_get_datetime_object_from_fname():
fname1 = '/a/b/c/2014-06-02_some_name.h5'
dt_obj = dt.datetime(2014, 6, 2)
assert dt_obj == p4io.get_dt_from_fname(fname1)
def test_from_2_files_get_latest_file(monkeypatch):
import glob
fname1 = '/a/b/c/2014-06-02_some_name.h5'
fname2 = '/a/b/c/2014-06-09_some_name.h5'
def mockreturn(path):
return [fname1, fname2]
monkeypatch.setattr(glob, 'glob', mockreturn)
x = p4io.get_current_database_fname()
assert x == fname2
| isc | Python |
3cc7e0cebc8a7a7410ce6b239e55db0cf55b1dc8 | Fix broken tests in test_messages | tysonholub/twilio-python,twilio/twilio-python | tests/test_messages.py | tests/test_messages.py | from datetime import date
import unittest
from mock import patch
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
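# Each test copies DEFAULT and fills in only the field under test before
# asserting the exact parameters forwarded to the mocked resource method.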
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_after(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_before(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_create(self):
with patch.object(self.resource, 'create_instance') as mock:
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
mock.assert_called_with(
{
'From': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
def test_delete(self):
with patch.object(self.resource, 'delete_instance') as mock:
self.resource.delete('MM123')
mock.assert_called_with('MM123')
def test_redact(self):
with patch.object(self.resource, 'update_instance') as mock:
self.resource.redact('MM123')
mock.assert_called_with(sid='MM123', body={'Body': ''})
| from datetime import date
import unittest
from mock import patch
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_after(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_list_before(self):
with patch.object(self.resource, 'get_instances') as mock:
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
mock.assert_called_with(self.params)
def test_create(self):
with patch.object(self.resource, 'create_instance') as mock:
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
mock.assert_called_with(
{
'from': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
def test_delete(self):
with patch.object(self.resource, 'delete_instance') as mock:
self.resource.delete('MM123')
mock.assert_called_with('MM123')
def test_redact(self):
with patch.object(self.resource, 'update_instance') as mock:
self.resource.redact('MM123')
mock.assert_called_with('MM123', {'Body': ''})
| mit | Python |
f6ecf6a45e2749261a20869aca5dfca6d7c03494 | Correct method doc. | ohsu-qin/qiprofile-rest-client | qiprofile_rest_client/helpers/database.py | qiprofile_rest_client/helpers/database.py | """Mongo Engine interaction utilities."""
def get_or_create(klass, key=None, **non_key):
"""
This function stands in for the Mongo Engine ``get_or_create``
collection method which was deprecated in mongoengine v0.8.0
and dropped in mongoengine v0.10.0, since MongoDB does not
support transactions.
If there is an object of the given Mongo Engine data model
class which matches the primary key, then that object is
returned. Otherwise, a new object is created with the content
prescribed by both the primary and non-primary parameters.
The create step is an upsert, i.e. a new object is created only
if it does not yet exist. The upsert allows for the small
possibility that an object is created after the fetch attempt
but before the create attempt. In that situation, the existing
object non-key content is modified and the modified object is
returned.
:Note: The idiom used in this function modifies the solution
proposed in http://stackoverflow.com/questions/25846462/mongoengine-replacing-get-or-create-with-upsert-update-one/25863633#25863633.
That StackOverflow work-around returns the following error:
ValueError: update only works with $ operators
The work-around to the StackOverflow work-around is to call
the data model class *update_one* method rather than *modify*.
:param klass: the Mongo Engine data model class
:param key: the secondary field key {attribute: value}
dictionary, or None if no fields comprise a secondary key
:param non_key: the non-key {attribute: value} dictionary
:return: the existing or new object
"""
try:
# Search by primary key.
return klass.objects.get(**key)
except klass.DoesNotExist:
# Create the new object as an upsert. Specify the MongoDB Engine
# set__*attribute* modification options for each non-primary
# key (attribute, value) pair.
mod_opts = {'set__' + attr: val for attr, val in non_key.iteritems()}
return klass.objects(**key).update_one(upsert=True, **mod_opts)
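# Example usage (hypothetical Subject model with a unique 'number' key field):
#   subject = get_or_create(Subject, key={'number': 1}, project='QIN-Test')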
| """Mongo Engine interaction utilities."""
def get_or_create(klass, pk, **non_pk):
"""
This function stands in for the Mongo Engine ``get_or_create``
collection method which was deprecated in mongoengine v0.8.0
and dropped in mongoengine v0.10.0, since MongoDB does not
support transactions.
If there is an object of the given Mongo Engine data model
class which matches the primary key, then that object is
returned. Otherwise, a new object is created with the content
prescribed by both the primary and non-primary parameters.
The create step is an upsert, i.e. a new object is created only
if it does not yet exist. The upsert allows for the small
possibility that an object is created after the fetch attempt
but before the create attempt. In that situation, the existing
object non-key content is modified and the modified object is
returned.
:Note: The idiom used in this function modifies the solution
proposed in http://stackoverflow.com/questions/25846462/mongoengine-replacing-get-or-create-with-upsert-update-one/25863633#25863633.
That StackOverflow work-around returns the following error:
ValueError: update only works with $ operators
The work-around to the StackOverflow work-around is to use
call *update* rather than *modify*.
:param klass: the Mongo Engine data model class
:param pk: the primary key {attribute: value} dictionary
:param non_pk: the non-key {attribute: value} dictionary
:return: the existing or new object
"""
try:
return klass.objects.get(**pk)
except klass.DoesNotExist:
mod_opts = {'set__' + attr: val for attr, val in non_pk.iteritems()}
return klass.objects(**pk).update_one(upsert=True, **mod_opts)
| bsd-2-clause | Python |
c2df896183f80fe3ca0eab259874bc4385d399e9 | Clean up detrius in parallel test file | bitprophet/fabric,MjAbuz/fabric,likesxuqiang/fabric,sdelements/fabric,opavader/fabric,TarasRudnyk/fabric,tekapo/fabric,haridsv/fabric,SamuelMarks/fabric,bspink/fabric,tolbkni/fabric,rane-hs/fabric-py3,mathiasertl/fabric,askulkarni2/fabric,fernandezcuesta/fabric,elijah513/fabric,xLegoz/fabric,raimon49/fabric,amaniak/fabric,pgroudas/fabric,hrubi/fabric,rodrigc/fabric,cgvarela/fabric,cmattoon/fabric,ploxiln/fabric,itoed/fabric,kxxoling/fabric,jaraco/fabric,bitmonk/fabric,felix-d/fabric,rbramwell/fabric,qinrong/fabric,StackStorm/fabric,pashinin/fabric,kmonsoor/fabric,akaariai/fabric,getsentry/fabric | tests/test_parallel.py | tests/test_parallel.py | from __future__ import with_statement
from fabric.api import run, parallel, env, hide
from utils import FabricTest, eq_
from server import server, RESPONSES
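# The lone @parallel task round-trips one command against the test SSH server
# and checks the canned response; env.pool_size just exercises the pool setup.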
class TestParallel(FabricTest):
@server()
@parallel
def test_parallel(self):
"""
Want to do a simple call and respond
"""
env.pool_size = 10
cmd = "ls /simple"
with hide('everything'):
eq_(run(cmd), RESPONSES[cmd])
| from __future__ import with_statement
from datetime import datetime
import copy
import getpass
import sys
import paramiko
from nose.tools import with_setup
from fudge import (Fake, clear_calls, clear_expectations, patch_object, verify,
with_patched_object, patched_context, with_fakes)
from fabric.context_managers import settings, hide, show
from fabric.network import (HostConnectionCache, join_host_strings, normalize,
denormalize)
from fabric.io import output_loop
import fabric.network # So I can call patch_object correctly. Sigh.
from fabric.state import env, output, _get_system_username
from fabric.operations import run, sudo
from fabric.decorators import parallel
from utils import *
from server import (server, PORT, RESPONSES, PASSWORDS, CLIENT_PRIVKEY, USER,
CLIENT_PRIVKEY_PASSPHRASE)
class TestParallel(FabricTest):
@server()
@parallel
def test_parallel(self):
"""
Want to do a simple call and respond
"""
env.pool_size = 10
cmd = "ls /simple"
with hide('everything'):
eq_(run(cmd), RESPONSES[cmd])
| bsd-2-clause | Python |
8f86eacf1b85a0c497f9e8586a59cc19e6a0484f | Stop passing a recorder argument unecessarily in tests | jstutters/Plumbium | tests/test_pipeline.py | tests/test_pipeline.py | from __future__ import print_function
import pytest
from plumbium.processresult import record, pipeline, call
class DummyRecorder(object):
def write(self, results):
self.results = results
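# DummyRecorder is kept for reference only; the tests below no longer pass a
# recorder to pipeline.run() now that the argument has been dropped.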
@pytest.fixture
def simple_pipeline():
@record('an_output')
def recorded_function():
call(['echo', 'test output'])
return 'test_result'
def a_pipeline():
recorded_function()
return a_pipeline
@pytest.fixture
def failing_pipeline():
@record('an_output')
def recorded_function():
raise IOError
def a_pipeline():
recorded_function()
return a_pipeline
def test_result(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', simple_pipeline, str(tmpdir))
print(pipeline.results)
assert pipeline.results[0]['an_output'] == 'test_result'
def test_stdout_captured(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', simple_pipeline, str(tmpdir))
proc = pipeline.results[0].as_dict()
assert proc['printed_output'] == 'test output\n'
def test_exception_captured(failing_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', failing_pipeline, str(tmpdir))
proc = pipeline.results[0].as_dict()
assert 'IOError' in proc['exception']
def test_save_filename(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'test': 1},
filename='result_file_{metadata[test]:03d}'
)
assert 'result_file_001.tar.gz' in [f.basename for f in tmpdir.listdir()]
| from __future__ import print_function
import pytest
from plumbium.processresult import record, pipeline, call
class DummyRecorder(object):
def write(self, results):
self.results = results
@pytest.fixture
def simple_pipeline():
@record('an_output')
def recorded_function():
call(['echo', 'test output'])
return 'test_result'
def a_pipeline():
recorded_function()
return a_pipeline
@pytest.fixture
def failing_pipeline():
@record('an_output')
def recorded_function():
raise IOError
def a_pipeline():
recorded_function()
return a_pipeline
def test_result(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run('test', simple_pipeline, str(tmpdir))
print(pipeline.results)
assert pipeline.results[0]['an_output'] == 'test_result'
def test_stdout_captured(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = DummyRecorder()
pipeline.run('test', simple_pipeline, str(tmpdir), recorder=recorder)
proc = pipeline.results[0].as_dict()
assert proc['printed_output'] == 'test output\n'
def test_exception_captured(failing_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = DummyRecorder()
pipeline.run('test', failing_pipeline, str(tmpdir), recorder=recorder)
proc = pipeline.results[0].as_dict()
assert 'IOError' in proc['exception']
def test_save_filename(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'test': 1},
filename='result_file_{metadata[test]:03d}'
)
assert 'result_file_001.tar.gz' in [f.basename for f in tmpdir.listdir()]
| mit | Python |
7b75f508bf651bdeb57bdc4d263ced26434054c8 | add pct test | SunPower/PVMismatch | tests/test_pvmodule.py | tests/test_pvmodule.py | """
Tests for pvmodules.
"""
from nose.tools import ok_
from pvmismatch.pvmismatch_lib.pvmodule import PVmodule, TCT96, PCT96
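# TCT96 and PCT96 are predefined 96-cell position layouts shipped with the
# library (cross-tied interconnection patterns; see pvmodule for details).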
def test_calc_mod():
pvmod = PVmodule()
ok_(isinstance(pvmod, PVmodule))
return pvmod
def test_calc_TCT_mod():
pvmod = PVmodule(cell_pos=TCT96)
ok_(isinstance(pvmod, PVmodule))
return pvmod
def test_calc_PCT_mod():
pvmod = PVmodule(cell_pos=PCT96)
ok_(isinstance(pvmod, PVmodule))
return pvmod
if __name__ == "__main__":
test_calc_mod()
test_calc_TCT_mod()
| """
Tests for pvmodules.
"""
from nose.tools import ok_
from pvmismatch.pvmismatch_lib.pvmodule import PVmodule, TCT96
def test_calc_mod():
pvmod = PVmodule()
ok_(isinstance(pvmod, PVmodule))
return pvmod
def test_calc_TCT_mod():
pvmod = PVmodule(cell_pos=TCT96)
ok_(isinstance(pvmod, PVmodule))
return pvmod
if __name__ == "__main__":
test_calc_mod()
test_calc_TCT_mod()
| bsd-3-clause | Python |
9744226621e27d4bd5d19a52b75b718e86bfef87 | Add extra filter for equipment | GETLIMS/LIMS-Backend,GETLIMS/LIMS-Backend | lims/equipment/views.py | lims/equipment/views.py |
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
import django_filters
from lims.permissions.permissions import IsInAdminGroupOrRO
from .models import Equipment, EquipmentReservation
from .serializers import EquipmentSerializer, EquipmentReservationSerializer
class EquipmentViewSet(viewsets.ModelViewSet):
queryset = Equipment.objects.all()
serializer_class = EquipmentSerializer
filter_fields = ('can_reserve', 'status',)
search_fields = ('name',)
permission_classes = (IsInAdminGroupOrRO,)
class EquipmentReservationFilter(django_filters.FilterSet):
class Meta:
model = EquipmentReservation
fields = {
'id': ['exact'],
'start': ['exact', 'gte'],
'end': ['exact', 'lte'],
'equipment_reserved': ['exact'],
}
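# The dict form of Meta.fields makes django-filter generate one filter per
# lookup, e.g. ?start__gte=...&end__lte=... for date-range reservation queries.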
class EquipmentReservationViewSet(viewsets.ModelViewSet):
queryset = EquipmentReservation.objects.all()
serializer_class = EquipmentReservationSerializer
filter_class = EquipmentReservationFilter
def perform_create(self, serializer):
if self.request.user.groups.filter(name='staff').exists():
serializer.validated_data['is_confirmed'] = True
serializer.validated_data['confirmed_by'] = self.request.user
serializer.save(reserved_by=self.request.user)
def perform_update(self, serializer):
if (serializer.instance.reserved_by == self.request.user or
self.request.user.groups.filter(name='staff').exists()):
serializer.save()
else:
raise PermissionDenied()
def destroy(self, request, pk=None):
if (request.user == self.get_object().reserved_by or
request.user.groups.filter(name='staff').exists()):
return super(EquipmentReservationViewSet, self).destroy(request, self.get_object().id)
else:
return Response({'message': 'You must have permission to delete'}, status=403)
|
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
import django_filters
from lims.permissions.permissions import IsInAdminGroupOrRO
from .models import Equipment, EquipmentReservation
from .serializers import EquipmentSerializer, EquipmentReservationSerializer
class EquipmentViewSet(viewsets.ModelViewSet):
queryset = Equipment.objects.all()
serializer_class = EquipmentSerializer
filter_fields = ('can_reserve',)
search_fields = ('name',)
permission_classes = (IsInAdminGroupOrRO,)
class EquipmentReservationFilter(django_filters.FilterSet):
class Meta:
model = EquipmentReservation
fields = {
'id': ['exact'],
'start': ['exact', 'gte'],
'end': ['exact', 'lte'],
'equipment_reserved': ['exact'],
}
class EquipmentReservationViewSet(viewsets.ModelViewSet):
queryset = EquipmentReservation.objects.all()
serializer_class = EquipmentReservationSerializer
filter_class = EquipmentReservationFilter
def perform_create(self, serializer):
if self.request.user.groups.filter(name='staff').exists():
serializer.validated_data['is_confirmed'] = True
serializer.validated_data['confirmed_by'] = self.request.user
serializer.save(reserved_by=self.request.user)
def perform_update(self, serializer):
if (serializer.instance.reserved_by == self.request.user or
self.request.user.groups.filter(name='staff').exists()):
serializer.save()
else:
raise PermissionDenied()
def destroy(self, request, pk=None):
if (request.user == self.get_object().reserved_by or
request.user.groups.filter(name='staff').exists()):
return super(EquipmentReservationViewSet, self).destroy(request, self.get_object().id)
else:
return Response({'message': 'You must have permission to delete'}, status=403)
| mit | Python |
368d46ba4bec2da22abfba306badf39a3a552e88 | Remove now-unused imports | mozilla/snippets-service,mozilla/snippets-service,mozmar/snippets-service,glogiotatidis/snippets-service,glogiotatidis/snippets-service,mozmar/snippets-service,mozilla/snippets-service,mozmar/snippets-service,glogiotatidis/snippets-service,glogiotatidis/snippets-service,mozilla/snippets-service,mozmar/snippets-service | tests/test_snippets.py | tests/test_snippets.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import pytest
import requests
from bs4 import BeautifulSoup
REQUESTS_TIMEOUT = 20
URL_TEMPLATE = '{}/{}/Firefox/default/default/default/en-US/{}/default/default/default/'
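# URL_TEMPLATE slots: base URL, snippets API version ('3' legacy, '5' Activity
# Stream), and release channel; the other path segments stay at their defaults.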
_user_agent_firefox = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'
def _get_redirect(url, user_agent=_user_agent_firefox, locale='en-US'):
headers = {'user-agent': user_agent,
'accept-language': locale}
return requests.get(url, headers=headers, timeout=REQUESTS_TIMEOUT)
def _parse_response(content):
return BeautifulSoup(content, 'html.parser')
@pytest.mark.parametrize(('version'), ['3', '5'], ids=['legacy', 'activitystream'])
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_response_codes(base_url, version, channel):
url = URL_TEMPLATE.format(base_url, version, channel)
r = _get_redirect(url)
assert r.status_code in (requests.codes.ok, requests.codes.no_content)
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_that_snippets_are_well_formed_xml(base_url, channel):
url = URL_TEMPLATE.format(base_url, '3', channel)
r = _get_redirect(url)
try:
print(r.content)
parseString('<div>{}</div>'.format(r.content))
except ExpatError as e:
raise AssertionError('Snippets at {0} do not contain well formed '
'xml: {1}'.format(url, e))
| # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import re
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import pytest
import requests
from bs4 import BeautifulSoup
REQUESTS_TIMEOUT = 20
URL_TEMPLATE = '{}/{}/Firefox/default/default/default/en-US/{}/default/default/default/'
_user_agent_firefox = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'
def _get_redirect(url, user_agent=_user_agent_firefox, locale='en-US'):
headers = {'user-agent': user_agent,
'accept-language': locale}
return requests.get(url, headers=headers, timeout=REQUESTS_TIMEOUT)
def _parse_response(content):
return BeautifulSoup(content, 'html.parser')
@pytest.mark.parametrize(('version'), ['3', '5'], ids=['legacy', 'activitystream'])
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_response_codes(base_url, version, channel):
url = URL_TEMPLATE.format(base_url, version, channel)
r = _get_redirect(url)
assert r.status_code in (requests.codes.ok, requests.codes.no_content)
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_that_snippets_are_well_formed_xml(base_url, channel):
url = URL_TEMPLATE.format(base_url, '3', channel)
r = _get_redirect(url)
try:
print(r.content)
parseString('<div>{}</div>'.format(r.content))
except ExpatError as e:
raise AssertionError('Snippets at {0} do not contain well formed '
'xml: {1}'.format(url, e))
| mpl-2.0 | Python |
142ef9b907868f53c696bd4426a7f08b7ef57528 | Change metatdata test | imageio/imageio | tests/test_tifffile.py | tests/test_tifffile.py | """ Test tifffile plugin functionality.
"""
import os
import numpy as np
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
from imageio.core import get_remote_file
import imageio
test_dir = get_test_dir()
def test_tifffile_format():
# Test selection
for name in ['tiff', '.tif']:
format = imageio.formats[name]
assert format.name == 'TIFF'
def test_tifffile_reading_writing():
""" Test reading and saving tiff """
need_internet() # We keep a test image in the imageio-binary repo
im2 = np.ones((10, 10, 3), np.uint8) * 2
filename1 = os.path.join(test_dir, 'test_tiff.tiff')
# One image
imageio.imsave(filename1, im2)
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 1
# Multiple images
imageio.mimsave(filename1, [im2, im2, im2])
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 3, ims[0].shape
# remote multipage rgb file
filename2 = get_remote_file('images/multipage_rgb.tif')
img = imageio.mimread(filename2)
assert len(img) == 2
assert img[0].shape == (3, 10, 10)
# Mixed
W = imageio.save(filename1)
W.set_meta_data({'planarconfig': 'planar'})
assert W.format.name == 'TIFF'
W.append_data(im2)
W.append_data(im2)
W.close()
#
R = imageio.read(filename1)
assert R.format.name == 'TIFF'
ims = list(R) # == [im for im in R]
assert (ims[0] == im2).all()
meta = R.get_meta_data()
assert meta['orientation'] == 'top_left'
# Fail
raises(IndexError, R.get_data, -1)
raises(IndexError, R.get_data, 3)
# Ensure imwrite write works round trip
filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
R = imageio.imread(filename1)
imageio.imwrite(filename3, R)
R2 = imageio.imread(filename3)
assert (R == R2).all()
run_tests_if_main()
| """ Test tifffile plugin functionality.
"""
import os
import numpy as np
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
from imageio.core import get_remote_file
import imageio
test_dir = get_test_dir()
def test_tifffile_format():
# Test selection
for name in ['tiff', '.tif']:
format = imageio.formats[name]
assert format.name == 'TIFF'
def test_tifffile_reading_writing():
""" Test reading and saving tiff """
need_internet() # We keep a test image in the imageio-binary repo
im2 = np.ones((10, 10, 3), np.uint8) * 2
filename1 = os.path.join(test_dir, 'test_tiff.tiff')
# One image
imageio.imsave(filename1, im2)
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 1
# Multiple images
imageio.mimsave(filename1, [im2, im2, im2])
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 3, ims[0].shape
# remote multipage rgb file
filename2 = get_remote_file('images/multipage_rgb.tif')
img = imageio.mimread(filename2)
assert len(img) == 2
assert img[0].shape == (3, 10, 10)
# Mixed
W = imageio.save(filename1)
W.set_meta_data({'planarconfig': 'planar'})
assert W.format.name == 'TIFF'
W.append_data(im2)
W.append_data(im2)
W.close()
#
R = imageio.read(filename1)
assert R.format.name == 'TIFF'
ims = list(R) # == [im for im in R]
assert (ims[0] == im2).all()
meta = R.get_meta_data()
print(meta)
assert meta['is_rgb']
# Fail
raises(IndexError, R.get_data, -1)
raises(IndexError, R.get_data, 3)
# Ensure imwrite write works round trip
filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
R = imageio.imread(filename1)
imageio.imwrite(filename3, R)
R2 = imageio.imread(filename3)
assert (R == R2).all()
run_tests_if_main()
| bsd-2-clause | Python |
8d09e745f24e663cb81ff5be6bc7b643c6c5bd76 | call it pennyblack 0.3.0 | nickburlett/pennyblack,nickburlett/pennyblack | pennyblack/__init__.py | pennyblack/__init__.py | VERSION = (0, 3, 0,)
__version__ = '.'.join(map(str, VERSION))
# Do not use Django settings at module level as recommended
try:
from django.utils.functional import LazyObject
except ImportError:
pass
else:
class LazySettings(LazyObject):
def _setup(self):
from pennyblack import default_settings
self._wrapped = Settings(default_settings)
class Settings(object):
def __init__(self, settings_module):
for setting in dir(settings_module):
if setting == setting.upper():
setattr(self, setting, getattr(settings_module, setting))
settings = LazySettings()
def send_newsletter(newsletter_name, *args, **kwargs):
"""
Gets a newsletter by its name and tries to send it to receiver
"""
from pennyblack.models import Newsletter
newsletter = Newsletter.objects.get_workflow_newsletter_by_name(newsletter_name)
if newsletter:
newsletter.send(*args, **kwargs) | VERSION = (0, 3, 0, 'pre')
__version__ = '.'.join(map(str, VERSION))
# Do not use Django settings at module level as recommended
try:
from django.utils.functional import LazyObject
except ImportError:
pass
else:
class LazySettings(LazyObject):
def _setup(self):
from pennyblack import default_settings
self._wrapped = Settings(default_settings)
class Settings(object):
def __init__(self, settings_module):
for setting in dir(settings_module):
if setting == setting.upper():
setattr(self, setting, getattr(settings_module, setting))
settings = LazySettings()
def send_newsletter(newsletter_name, *args, **kwargs):
"""
Gets a newsletter by its name and tries to send it to receiver
"""
from pennyblack.models import Newsletter
newsletter = Newsletter.objects.get_workflow_newsletter_by_name(newsletter_name)
if newsletter:
newsletter.send(*args, **kwargs) | bsd-3-clause | Python |
57d5622d205854eafd8babf8dfa1ad45bf05ebcb | Update ipc_lista1.15.py | any1m1c/ipc20161 | lista1/ipc_lista1.15.py | lista1/ipc_lista1.15.py | #ipc_lista1.15
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
##Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. Calcule e mostre o total do seu salário no referido mês, sabendo-se que são descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:
#salário bruto.
#quanto pagou ao INSS.
#quanto pagou ao sindicato.
#o salário líquido.
#calcule os descontos e o salário líquido, conforme a tabela abaixo:
#+ Salário Bruto : R$
#- IR (11%) : R$
#- INSS (8%) : R$
#- Sindicato ( 5%) : R$
#= Salário Liquido : R$
#Obs.: Salário Bruto - Descontos = Salário Líquido.
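# (In English: read the hourly wage and the hours worked, then print the gross
# salary, the IR/INSS/union deductions, and the resulting net salary.)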
qHora = input("Quanto você ganha por hora: ")
hT = input("Quantas horas você trabalhou: ")
SalBruto = qHora * hT
ir = (11/100.0 * SalBruto)
inss = (8/100.0 * SalBruto)
sindicato = (5/100.0 * SalBruto)
vT = ir + inss + sindicato
SalLiq = SalBruto - vT
print "------------------------"
print "Seu salário bruto e: ",SalBruto
print "------------------------"
print "Valor dos impostos"
print "-------------------------"
print "IR: ",ir
print "INSS: ",inss
print"--------------------------"
print"Se salario liquido e: ",SalLiq
| #ipc_lista1.15
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
##Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. Calcule e mostre o total do seu salário no referido mês, sabendo-se que são descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:
#salário bruto.
#quanto pagou ao INSS.
#quanto pagou ao sindicato.
#o salário líquido.
#calcule os descontos e o salário líquido, conforme a tabela abaixo:
#+ Salário Bruto : R$
#- IR (11%) : R$
#- INSS (8%) : R$
#- Sindicato ( 5%) : R$
#= Salário Liquido : R$
#Obs.: Salário Bruto - Descontos = Salário Líquido.
qHora = input("Quanto você ganha por hora: ")
hT = input("Quantas horas você trabalhou: ")
SalBruto = qHora * hT
ir = (11/100.0 * SalBruto)
inss = (8/100.0m* SalBruto)
sindicato = (5/100.0 * SalBruto)
vT = ir + inss + sindicato
SalLiq = SalBruto - vT
print "------------------------"
print "Seu salário bruto e: ",SalBruto
print "------------------------"
print "Valor dos impostos"
print "-------------------------"
print "IR: ",ir
print "INSS: ",inss
print"--------------------------"
print"Se salario liquido e: ",SalLiq
| apache-2.0 | Python |
b37432e914b6c6e45803a928f35fbaa8964780aa | test by uuid | kurin/py-raft | tests/unit/test_log.py | tests/unit/test_log.py | import pytest
from raft import log
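# mle() builds a minimal log-entry dict matching the schema RaftLog expects;
# the mutable msg={} default is safe here because the tests never mutate it.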
def mle(index, term, committed=False, msgid='', msg={}):
return dict(index=index, term=term, committed=committed,
msgid=msgid, msg=msg)
def test_le():
# a's term is greater than b's
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 3)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert ra > rb
# terms are equal
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert a <= b
assert b <= a
# terms equal but more commits in b
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4),
4: mle(4, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert rb > ra
def test_dump():
rl = log.RaftLog(None)
dump = {0: {'term': 0, 'msgid': '', 'committed': True,
'acked': [], 'msg': {}, 'index': 0}}
assert rl.dump() == dump
def test_get_max_index_term():
rl = log.RaftLog(None)
le = log.logentry(2, 'abcd', {})
rl.add(le)
assert rl.get_max_index_term() == (1, 2)
le = log.logentry(6, 'abcdefg', {})
rl.add(le)
assert rl.get_max_index_term() == (2, 6)
def test_has_uuid():
rl = log.RaftLog(None)
le = log.logentry(2, 'abcd', {})
rl.add(le)
assert rl.has_uuid('abcd') == True
assert rl.has_uuid('dcba') == False
| import pytest
from raft import log
def mle(index, term, committed=False, msgid='', msg={}):
return dict(index=index, term=term, committed=committed,
msgid=msgid, msg=msg)
def test_le():
# a's term is greater than b's
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 3)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert ra > rb
# terms are equal
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert a <= b
assert b <= a
# terms equal but more commits in b
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4),
4: mle(4, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert rb > ra
def test_dump():
rl = log.RaftLog(None)
dump = {0: {'term': 0, 'msgid': '', 'committed': True,
'acked': [], 'msg': {}, 'index': 0}}
assert rl.dump() == dump
def test_get_max_index_term():
rl = log.RaftLog(None)
le = log.logentry(2, 'abcd', {})
rl.add(le)
assert rl.get_max_index_term() == (1, 2)
le = log.logentry(6, 'abcdefg', {})
rl.add(le)
assert rl.get_max_index_term() == (2, 6)
| unlicense | Python |
62d5a4446c4c0a919557dd5f2e95d21c5a8259a8 | Test the optimized set | grigi/pypred,armon/pypred | tests/unit/test_set.py | tests/unit/test_set.py | import pytest
from pypred import OptimizedPredicateSet, PredicateSet, Predicate
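# TestPredicateSet covers the baseline set; TestOptPredicateSet repeats the
# same checks against the optimized variant and adds its cache rules: mutating
# the set invalidates the cached AST, and finalize() freezes it.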
class TestPredicateSet(object):
def test_two(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
def test_dup(self):
p1 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p1])
match = s.evaluate({'name': 'Jill'})
assert match == [p1]
class TestOptPredicateSet(object):
def test_two(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
def test_dup(self):
p1 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p1])
match = s.evaluate({'name': 'Jill'})
assert match == [p1]
def test_invalidate(self):
"AST is invalidated when set changes"
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
p3 = Predicate("name is 'Joe'")
s.add(p3)
assert s.ast is None
match = s.evaluate({'name': 'Joe'})
assert match == [p3]
def test_finalize(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p2])
s.finalize()
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
p3 = Predicate("name is 'Joe'")
with pytest.raises(Exception):
s.add(p3)
| from pypred import PredicateSet, Predicate
class TestPredicateSet(object):
def test_two(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
def test_dup(self):
p1 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p1])
match = s.evaluate({'name': 'Jill'})
assert match == [p1]
| bsd-3-clause | Python |
cae0764f2cbb8d00de1832079e55b8e4d45f55f2 | Fix for short OTU name when there is a species but no genus or higher | smdabdoub/phylotoast,akshayparopkari/phylotoast | phylotoast/otu_calc.py | phylotoast/otu_calc.py | from __future__ import division
import ast
from collections import defaultdict
from phylotoast import biom_calc as bc
def otu_name(tax):
"""
Determine a simple Genus-species identifier for an OTU, if possible.
If OTU is not identified to the species level, name it as
Unclassified (family/genus/etc...).
:type tax: list
:param tax: QIIME-style taxonomy identifiers, e.g.
["k__Bacteria", u"p__Firmicutes", u"c__Bacilli", ...
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
extract_name = lambda lvl: "_".join(lvl.split("_")[2:])
spname = "spp."
for lvl in tax[::-1]:
if len(lvl) <= 3:
continue
if lvl.startswith("s"):
spname = extract_name(lvl)
elif lvl.startswith("g"):
return "{}_{}".format(extract_name(lvl), spname)
else:
if spname != "spp.":
return spname
else:
return "Unclassified_{}".format(extract_name(lvl))
def load_core_file(core_fp):
"""
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
with open(core_fp, "rU") as in_f:
return {otu_name(ast.literal_eval(line.split("\t")[1]))
for line in in_f.readlines()[1:]}
def assign_otu_membership(biomfile):
"""
Determines the OTUIDs present in each sample.
:type biomfile: biom.table.Table
:param biomfile: BIOM table object from the biom-format library.
:rtype: dict
:return: Returns a dictionary keyed on Sample ID with sets containing
the IDs of OTUIDs found in each sample.
"""
samples = defaultdict(set)
_ = biomfile.pa()
for sid in biomfile.ids():
for otuid in biomfile.ids("observation"):
if biomfile.get_value_by_ids(otuid, sid) == 1:
samples[sid].add(otuid)
return samples
| from __future__ import division
import ast
from collections import defaultdict
from phylotoast import biom_calc as bc
def otu_name(tax):
"""
Determine a simple Genus-species identifier for an OTU, if possible.
If OTU is not identified to the species level, name it as
Unclassified (family/genus/etc...).
:type tax: list
:param tax: QIIME-style taxonomy identifiers, e.g.
["k__Bacteria", u"p__Firmicutes", u"c__Bacilli", ...
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
extract_name = lambda lvl: "_".join(lvl.split("_")[2:])
spname = "spp."
for lvl in tax[::-1]:
if len(lvl) <= 3:
continue
if lvl.startswith("s"):
spname = extract_name(lvl)
elif lvl.startswith("g"):
return "{}_{}".format(extract_name(lvl), spname)
else:
return "Unclassified_{}".format(extract_name(lvl))
def load_core_file(core_fp):
"""
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
with open(core_fp, "rU") as in_f:
return {otu_name(ast.literal_eval(line.split("\t")[1]))
for line in in_f.readlines()[1:]}
def assign_otu_membership(biomfile):
"""
Determines the OTUIDs present in each sample.
:type biomfile: biom.table.Table
:param biomfile: BIOM table object from the biom-format library.
:rtype: dict
:return: Returns a dictionary keyed on Sample ID with sets containing
the IDs of OTUIDs found in each sample.
"""
samples = defaultdict(set)
_ = biomfile.pa()
for sid in biomfile.ids():
for otuid in biomfile.ids("observation"):
if biomfile.get_value_by_ids(otuid, sid) == 1:
samples[sid].add(otuid)
return samples
| mit | Python |
7c4d3fffe62190b8c27317ed83bd5e7110b103ec | Update parser.py | Lagostra/MusicPlayer,Lagostra/MusicPlayer | MusicXMLParser/parser.py | MusicXMLParser/parser.py | '''
Takes a musicXML file, and creates a file that can be played by my MusicPlayer arduino library.
Written by Eivind Lie Andreassen, 2016
Licensed under the MIT license.
'''
import xml.dom.minidom
import valueHelper
xmlPath = input("Enter path to MusicXML file: ")
savePath = input("Enter save path of converted file: ")
domTree = xml.dom.minidom.parse(xmlPath)
collection = domTree.documentElement
if(collection.hasAttribute("example")):
print(collection.getAttribute("sample"))
notesXML = collection.getElementsByTagName("note")
notes = []
noteLengths = []
for note in notesXML:
if (len(note.getElementsByTagName("rest"))>0):
noteValue = '0'
else:
noteValue = note.getElementsByTagName("step")[0].childNodes[0].data + note.getElementsByTagName("octave")[0].childNodes[0].data
if len(note.getElementsByTagName("alter")) > 0:
index = valueHelper.noteValues.index(noteValue) + int(note.getElementsByTagName("alter")[0].childNodes[0].data)
if(index < 0):
index = 0
elif(index >= len(valueHelper.noteValues)):
index = len(valueHelper.noteValues) - 1
noteValue = valueHelper.noteValues[index]
if(len(note.getElementsByTagName("type")) == 0):
continue
noteLength = valueHelper.lengthValues[note.getElementsByTagName("type")[0].childNodes[0].data]
if(len(note.getElementsByTagName("dot")) > 1):
noteLength *= 1.75
elif(len(note.getElementsByTagName("dot")) > 0):
noteLength *= 1.5
notes.append(noteValue)
noteLengths.append(noteLength)
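# Serialise the collected notes and durations as brace-delimited, C-style array
# literals, the format the MusicPlayer Arduino library consumes.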
output = "NoteNumber: " + str(len(notes)) + "\n\n"
output += "Notes:\n{"
for i in range(len(notes)):
output += notes[i]
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};\n\n"
output += "NoteLengths:\n{"
for i in range(len(notes)):
output += str(noteLengths[i])
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};"
with open(savePath, "w") as file:
file.write(output)
| '''
Takes a musicXML file, and creates a file that can be played by my MusicPlayer arduino library.
Written by Eivind Lie Andreassen, 2016
Licensed under Creative Commons Attribution-ShareAlike 4.0 International. http://creativecommons.org/licenses/by-sa/4.0/
'''
import xml.dom.minidom
import valueHelper
xmlPath = input("Enter path to MusicXML file: ")
savePath = input("Enter save path of converted file: ")
domTree = xml.dom.minidom.parse(xmlPath)
collection = domTree.documentElement
if(collection.hasAttribute("example")):
print(collection.getAttribute("sample"))
notesXML = collection.getElementsByTagName("note")
notes = []
noteLengths = []
for note in notesXML:
if (len(note.getElementsByTagName("rest"))>0):
noteValue = '0'
else:
noteValue = note.getElementsByTagName("step")[0].childNodes[0].data + note.getElementsByTagName("octave")[0].childNodes[0].data
if len(note.getElementsByTagName("alter")) > 0:
index = valueHelper.noteValues.index(noteValue) + int(note.getElementsByTagName("alter")[0].childNodes[0].data)
if(index < 0):
index = 0
elif(index >= len(valueHelper.noteValues)):
index = len(valueHelper.noteValues) - 1
noteValue = valueHelper.noteValues[index]
if(len(note.getElementsByTagName("type")) == 0):
continue
noteLength = valueHelper.lengthValues[note.getElementsByTagName("type")[0].childNodes[0].data]
if(len(note.getElementsByTagName("dot")) > 1):
noteLength *= 1.75
elif(len(note.getElementsByTagName("dot")) > 0):
noteLength *= 1.5
notes.append(noteValue)
noteLengths.append(noteLength)
output = "NoteNumber: " + str(len(notes)) + "\n\n"
output += "Notes:\n{"
for i in range(len(notes)):
output += notes[i]
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};\n\n"
output += "NoteLengths:\n{"
for i in range(len(notes)):
output += str(noteLengths[i])
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};"
with open(savePath, "w") as file:
file.write(output) | mit | Python |
83b83cb3491bd4ccf39e2c6ade72f8f526ea27fe | Increase toolbox reporting | maphew/arcplus,maphew/arcplus | ArcToolbox/Scripts/ExportFolder2PDF.py | ArcToolbox/Scripts/ExportFolder2PDF.py | #Export a folder of maps to PDFs at their Map Document set sizes
#Written using ArcGIS 10 and Python 2.6.5
#by: Guest
# https://gis.stackexchange.com/questions/7147/how-to-batch-export-mxd-to-pdf-files
import arcpy, os
#Read input parameter from user.
path = arcpy.GetParameterAsText(0)
#Write MXD names in folder to txt log file.
writeLog=open(path+"\FileListLog.txt","w")
for fileName in os.listdir(path):
fullPath = os.path.join(path, fileName)
if os.path.isfile(fullPath):
basename, extension = os.path.splitext(fullPath)
if extension == ".mxd":
writeLog.write(fullPath+"\n")
mxd = arcpy.mapping.MapDocument(fullPath)
arcpy.AddMessage('Found: ' + fileName)
del mxd
arcpy.AddMessage("Done")
writeLog.close()
# Set all the parameters as variables here:
data_frame = 'PAGE_LAYOUT'
df_export_width = 1920
df_export_height = 1200
resolution = "300"
image_quality = "BETTER"
colorspace = "RGB"
compress_vectors = "True"
image_compression = "ADAPTIVE"
picture_symbol = 'VECTORIZE_BITMAP'
convert_markers = "False"
embed_fonts = "True"
layers_attributes = "LAYERS_ONLY"
georef_info = "False"
jpeg_compression_quality = 85
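# The variables above are passed positionally to arcpy.mapping.ExportToPDF in
# the loop below, so all export settings can be tweaked in one place.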
exportPath =arcpy.GetParameterAsText(1)
MXDread=open(path+"\FileListLog.txt","r")
for line in MXDread:
#Strip newline from line.
line=line.rstrip('\n')
if os.path.isfile(line):
basename, extension = os.path.splitext(line)
newName=basename.split('\\')[-1]
if extension.lower() == ".mxd":
# arcpy.AddMessage( "Basename:" +newName )
mxd = arcpy.mapping.MapDocument(line)
newPDF=exportPath+"\\"+newName+".pdf"
arcpy.AddMessage( 'Writing: ' + newPDF )
arcpy.mapping.ExportToPDF(mxd,newPDF, data_frame, df_export_width, df_export_height, resolution, image_quality, colorspace, compress_vectors, image_compression, picture_symbol, convert_markers, embed_fonts, layers_attributes, georef_info, jpeg_compression_quality)
arcpy.AddMessage( 'Finished: ' + line)
MXDread.close()
item=path+"\FileListLog.txt"
os.remove(item)
del mxd
arcpy.GetMessages() | #Export a folder of maps to PDFs at their Map Document set sizes
#Written using ArcGIS 10 and Python 2.6.5
#by: Guest
# https://gis.stackexchange.com/questions/7147/how-to-batch-export-mxd-to-pdf-files
import arcpy, os
#Read input parameter from user.
path = arcpy.GetParameterAsText(0)
#Write MXD names in folder to txt log file.
writeLog=open(path+"\FileListLog.txt","w")
for fileName in os.listdir(path):
fullPath = os.path.join(path, fileName)
if os.path.isfile(fullPath):
basename, extension = os.path.splitext(fullPath)
if extension == ".mxd":
writeLog.write(fullPath+"\n")
mxd = arcpy.mapping.MapDocument(fullPath)
print fileName + "\n"
del mxd
print "Done"
writeLog.close()
# Set all the parameters as variables here:
data_frame = 'PAGE_LAYOUT'
df_export_width = 1920
df_export_height = 1200
resolution = "300"
image_quality = "BETTER"
colorspace = "RGB"
compress_vectors = "True"
image_compression = "ADAPTIVE"
picture_symbol = 'VECTORIZE_BITMAP'
convert_markers = "False"
embed_fonts = "True"
layers_attributes = "LAYERS_ONLY"
georef_info = "False"
jpeg_compression_quality = 85
exportPath =arcpy.GetParameterAsText(1)
MXDread=open(path+"\FileListLog.txt","r")
for line in MXDread:
#Strip newline from line.
line=line.rstrip('\n')
if os.path.isfile(line):
basename, extension = os.path.splitext(line)
newName=basename.split('\\')[-1]
if extension.lower() == ".mxd":
print "Basename:" +newName
mxd = arcpy.mapping.MapDocument(line)
newPDF=exportPath+"\\"+newName+".pdf"
print newPDF
arcpy.mapping.ExportToPDF(mxd,newPDF, data_frame, df_export_width, df_export_height, resolution, image_quality, colorspace, compress_vectors, image_compression, picture_symbol, convert_markers, embed_fonts, layers_attributes, georef_info, jpeg_compression_quality)
print line + "Export Done"
MXDread.close()
item=path+"\FileListLog.txt"
os.remove(item)
del mxd | mit | Python |
b614436766e8ee3316936c5718262b35cfae3869 | Add slug field on save | 0x0mar/memex-explorer,0x0mar/memex-explorer,firebitsbr/memex-explorer,kod3r/memex-explorer,firebitsbr/memex-explorer,firebitsbr/memex-explorer,YongchaoShang/memex-explorer,0x0mar/memex-explorer,firebitsbr/memex-explorer,YongchaoShang/memex-explorer,0x0mar/memex-explorer,memex-explorer/memex-explorer,0x0mar/memex-explorer,YongchaoShang/memex-explorer,0x0mar/memex-explorer,kod3r/memex-explorer,memex-explorer/memex-explorer,kod3r/memex-explorer,kod3r/memex-explorer,YongchaoShang/memex-explorer,memex-explorer/memex-explorer,memex-explorer/memex-explorer | memex_explorer/base/models.py | memex_explorer/base/models.py | from django.db import models
from django.utils.text import slugify
class Project(models.Model):
name = models.CharField(max_length=64)
slug = models.SlugField(max_length=64, unique=True)
description = models.TextField()
icon = models.CharField(max_length=64)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.id:
# Newly created object, so save to get self.id
super(Project, self).save(*args, **kwargs)
self.slug = '%i-%s' % (
self.id, slugify(self.name)
)
super(Project, self).save(*args, **kwargs)
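# Two-phase save: the primary key needed for the slug only exists after the
# first INSERT, so the slug is built and persisted with a second save.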
class DataModel(models.Model):
name = models.CharField(max_length=64)
project = models.ForeignKey(Project)
def __str__(self):
return self.name
class Crawl(models.Model):
name = models.CharField(max_length=64)
slug = models.CharField(max_length=64)
description = models.TextField()
crawler = models.CharField(max_length=64)
status = models.CharField(max_length=64)
config = models.CharField(max_length=64)
seeds_list = models.CharField(max_length=64)
pages_crawled = models.BigIntegerField()
harvest_rate = models.FloatField()
project = models.ForeignKey(Project)
data_model = models.ForeignKey(DataModel)
def __str__(self):
return self.name
class DataSource(models.Model):
name = models.CharField(max_length=64)
data_uri = models.CharField(max_length=200)
description = models.TextField()
project = models.ForeignKey(Project)
crawl = models.ForeignKey(Crawl)
def __str__(self):
return self.name
| from django.db import models
class Project(models.Model):
name = models.CharField(max_length=64)
slug = models.SlugField(max_length=64, unique=True)
description = models.TextField()
icon = models.CharField(max_length=64)
def __str__(self):
return self.name
class DataModel(models.Model):
name = models.CharField(max_length=64)
project = models.ForeignKey(Project)
def __str__(self):
return self.name
class Crawl(models.Model):
name = models.CharField(max_length=64)
slug = models.CharField(max_length=64)
description = models.TextField()
crawler = models.CharField(max_length=64)
status = models.CharField(max_length=64)
config = models.CharField(max_length=64)
seeds_list = models.CharField(max_length=64)
pages_crawled = models.BigIntegerField()
harvest_rate = models.FloatField()
project = models.ForeignKey(Project)
data_model = models.ForeignKey(DataModel)
def __str__(self):
return self.name
class DataSource(models.Model):
name = models.CharField(max_length=64)
data_uri = models.CharField(max_length=200)
description = models.TextField()
project = models.ForeignKey(Project)
crawl = models.ForeignKey(Crawl)
def __str__(self):
return self.name
| bsd-2-clause | Python |
1018d6bde32a8d18a2315dafd084826443209ba1 | Update clock.py | lmperez2/Pi-Seven-Segment | examples/clock.py | examples/clock.py | #!/usr/bin/python
import time
import datetime
from Adafruit_LED_Backpack import SevenSegment
# ===========================================================================
# Clock Example
# ===========================================================================
segment = SevenSegment.SevenSegment(address=0x70)
# Initialize the display. Must be called once before using the display.
segment.begin()
print "Press CTRL+Z to exit"
# Continually update the time on a 4 char, 7-segment display
while(True):
now = datetime.datetime.now()
hour = now.hour
minute = now.minute
second = now.second
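# Dim the display overnight; 'and' binds tighter than 'or', so the minute
# check below only constrains the early-morning (hour < 7) half of the window.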
if hour >= 22 or hour < 7 and minute == 26:
segment.set_brightness(0)
else:
segment.set_brightness(5)
if hour == 24:
hour = 12
else:
hour = hour % 12
A = int(hour / 10)
if A == 0:
A = ' '
segment.clear()
# Set hours
segment.set_digit(0, A) # Tens
segment.set_digit(1, hour % 10) # Ones
# Set minutes
segment.set_digit(2, int(minute / 10)) # Tens
segment.set_digit(3, minute % 10) # Ones
# Toggle colon
segment.set_colon(second % 2) # Toggle colon at 1Hz
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
segment.write_display()
# Wait a quarter second (less than 1 second to prevent colon blinking getting out of sync)
time.sleep(0.25)
| #!/usr/bin/python
import time
import datetime
from Adafruit_LED_Backpack import SevenSegment
# ===========================================================================
# Clock Example
# ===========================================================================
segment = SevenSegment.SevenSegment(address=0x70)
# Initialize the display. Must be called once before using the display.
segment.begin()
print "Press CTRL+Z to exit"
# Continually update the time on a 4 char, 7-segment display
while(True):
    now = datetime.datetime.now()
    hour = now.hour
    minute = now.minute
    second = now.second
    if hour >= 22 or hour < 7:
        segment.set_brightness(0)
    else:
        segment.set_brightness(5)
    if hour == 24:
        hour = 12
    else:
        hour = hour % 12
    A = int(hour / 10)
    if A == 0:
        A = ' '
    segment.clear()
    # Set hours
    segment.set_digit(0, A) # Tens
    segment.set_digit(1, hour % 10) # Ones
    # Set minutes
    segment.set_digit(2, int(minute / 10)) # Tens
    segment.set_digit(3, minute % 10) # Ones
    # Toggle colon
    segment.set_colon(second % 2) # Toggle colon at 1Hz
    # Write the display buffer to the hardware. This must be called to
    # update the actual display LEDs.
    segment.write_display()
    # Wait a quarter second (less than 1 second to prevent colon blinking getting$
    time.sleep(0.25)
| mit | Python |
37da8a56f127a871c4133f0ba58921779e9b487c | Update __init__.py | uchicago-cs/deepdish,uchicago-cs/deepdish,agarbuno/deepdish,agarbuno/deepdish | deepdish/io/__init__.py | deepdish/io/__init__.py | from __future__ import division, print_function, absolute_import
from .mnist import load_mnist
from .norb import load_small_norb
from .casia import load_casia
from .cifar import load_cifar_10
try:
    import tables
    _pytables_ok = True
    del tables
except ImportError:
    _pytables_ok = False
if _pytables_ok:
    from .hdf5io import load, save
else:
    def _f(*args, **kwargs):
        raise ImportError("You need PyTables for this function")
    load = save = _f
__all__ = ['load_mnist', 'load_small_norb', 'load_casia', 'load_cifar_10']
| from __future__ import division, print_function, absolute_import
from .mnist import load_mnist
from .norb import load_small_norb
from .casia import load_casia
from .cifar import load_cifar_10
try:
    import tables
    _pytables_ok = True
except ImportError:
    _pytables_ok = False
del tables
if _pytables_ok:
    from .hdf5io import load, save
else:
    def _f(*args, **kwargs):
        raise ImportError("You need PyTables for this function")
    load = save = _f
__all__ = ['load_mnist', 'load_small_norb', 'load_casia', 'load_cifar_10']
| bsd-3-clause | Python |
0d914a4843e5959c108077e8c5275a1ddd05f617 | Upgrade version number | ljean/djaloha,ljean/djaloha,ljean/djaloha,ljean/djaloha | djaloha/__init__.py | djaloha/__init__.py | # -*- coding: utf-8 -*-
VERSION = (0, 2)
def get_version():
    version = '%s.%s' % (VERSION[0], VERSION[1])
    # if VERSION[2]:
    #     version = '%s.%s' % (version, VERSION[2])
    # if VERSION[3] != "final":
    #     version = '%s%s%s' % (version, VERSION[3], VERSION[4])
    return version
__version__ = get_version()
| # -*- coding: utf-8 -*-
VERSION = (0, 1)
def get_version():
    version = '%s.%s' % (VERSION[0], VERSION[1])
    # if VERSION[2]:
    #     version = '%s.%s' % (version, VERSION[2])
    # if VERSION[3] != "final":
    #     version = '%s%s%s' % (version, VERSION[3], VERSION[4])
    return version
__version__ = get_version()
| bsd-3-clause | Python |
0f5433458be9add6a879e8e490017663714d7664 | fix cron job FailedRunsNotificationCronJob to import get_class routine from new place | Tivix/django-cron,eriktelepovsky/django-cron,mozillazg/django-cron | django_cron/cron.py | django_cron/cron.py | from django.conf import settings
from django_cron import CronJobBase, Schedule, get_class
from django_cron.models import CronJobLog
from django_common.helper import send_mail
class FailedRunsNotificationCronJob(CronJobBase):
    """
    Send email if cron failed to run X times in a row
    """
    RUN_EVERY_MINS = 30
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
    code = 'django_cron.FailedRunsNotificationCronJob'
    def do(self):
        CRONS_TO_CHECK = map(lambda x: get_class(x), settings.CRON_CLASSES)
        EMAILS = [admin[1] for admin in settings.ADMINS]
        try:
            FAILED_RUNS_CRONJOB_EMAIL_PREFIX = settings.FAILED_RUNS_CRONJOB_EMAIL_PREFIX
        except:
            FAILED_RUNS_CRONJOB_EMAIL_PREFIX = ''
        for cron in CRONS_TO_CHECK:
            try:
                min_failures = cron.MIN_NUM_FAILURES
            except AttributeError:
                min_failures = 10
            failures = 0
            jobs = CronJobLog.objects.filter(code=cron.code).order_by('-end_time')[:min_failures]
            message = ''
            for job in jobs:
                if not job.is_success:
                    failures += 1
                    message += 'Job ran at %s : \n\n %s \n\n' % (job.start_time, job.message)
            if failures == min_failures:
                send_mail(
                    '%s%s failed %s times in a row!' % (FAILED_RUNS_CRONJOB_EMAIL_PREFIX, cron.code, \
                        min_failures), message,
                    settings.DEFAULT_FROM_EMAIL, EMAILS
                )
| from django.conf import settings
from django_cron import CronJobBase, Schedule
from django_cron.models import CronJobLog
from django_cron.management.commands.runcrons import get_class
from django_common.helper import send_mail
class FailedRunsNotificationCronJob(CronJobBase):
    """
    Send email if cron failed to run X times in a row
    """
    RUN_EVERY_MINS = 30
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
    code = 'django_cron.FailedRunsNotificationCronJob'
    def do(self):
        CRONS_TO_CHECK = map(lambda x: get_class(x), settings.CRON_CLASSES)
        EMAILS = [admin[1] for admin in settings.ADMINS]
        try:
            FAILED_RUNS_CRONJOB_EMAIL_PREFIX = settings.FAILED_RUNS_CRONJOB_EMAIL_PREFIX
        except:
            FAILED_RUNS_CRONJOB_EMAIL_PREFIX = ''
        for cron in CRONS_TO_CHECK:
            try:
                min_failures = cron.MIN_NUM_FAILURES
            except AttributeError:
                min_failures = 10
            failures = 0
            jobs = CronJobLog.objects.filter(code=cron.code).order_by('-end_time')[:min_failures]
            message = ''
            for job in jobs:
                if not job.is_success:
                    failures += 1
                    message += 'Job ran at %s : \n\n %s \n\n' % (job.start_time, job.message)
            if failures == min_failures:
                send_mail(
                    '%s%s failed %s times in a row!' % (FAILED_RUNS_CRONJOB_EMAIL_PREFIX, cron.code, \
                        min_failures), message,
                    settings.DEFAULT_FROM_EMAIL, EMAILS
                )
| mit | Python |
7f3b2b0ab21e4dadffb55da912684eb84ce6da3d | Check if remote git is already on commit | dbravender/gitric | gitric/api.py | gitric/api.py | from __future__ import with_statement
from fabric.state import env
from fabric.api import local, run, abort, task, cd, puts
from fabric.context_managers import settings
@task
def allow_dirty():
    '''allow pushing even when the working copy is dirty'''
    env.gitric_allow_dirty = True
@task
def force_push():
    '''allow pushing even when history will be lost'''
    env.gitric_force_push = True
def git_seed(repo_path, commit=None, ignore_untracked_files=False):
    '''seed a remote git repository'''
    commit = _get_commit(commit)
    force = ('gitric_force_push' in env) and '-f' or ''
    dirty_working_copy = _is_dirty(commit, ignore_untracked_files)
    if dirty_working_copy and 'gitric_allow_dirty' not in env:
        abort(
            'Working copy is dirty. This check can be overridden by\n'
            'importing gitric.api.allow_dirty and adding allow_dirty to your '
            'call.')
    # initialize the remote repository (idempotent)
    run('git init %s' % repo_path)
    # finish execution if the remote git repo is already on the commit
    with cd(repo_path):
        if run('git rev-parse HEAD') == commit:
            puts('Remote already on commit %s' % commit)
            return
    # silence git complaints about pushes coming in on the current branch
    # the pushes only seed the immutable object store and do not modify the
    # working copy
    run('GIT_DIR=%s/.git git config receive.denyCurrentBranch ignore' %
        repo_path)
    # a target doesn't need to keep track of which branch it is on so we always
    # push to its "master"
    with settings(warn_only=True):
        push = local(
            'git push git+ssh://%s@%s:%s%s %s:refs/heads/master %s' % (
                env.user, env.host, env.port, repo_path, commit, force))
    if push.failed:
        abort(
            '%s is a non-fast-forward\n'
            'push. The seed will abort so you don\'t lose information. '
            'If you are doing this\nintentionally import '
            'gitric.api.force_push and add it to your call.' % commit)
def git_reset(repo_path, commit=None):
    '''checkout a sha1 on a remote git repo'''
    commit = _get_commit(commit)
    run('cd %s && git reset --hard %s' % (repo_path, commit))
def _get_commit(commit):
    if commit is None:
        # if no commit is specified we will push HEAD
        commit = local('git rev-parse HEAD', capture=True)
    return commit
def _is_dirty(commit, ignore_untracked_files):
    untracked_files = '--untracked-files=no' if ignore_untracked_files else ''
    return local('git status %s --porcelain' % untracked_files, capture=True) != ''
| from __future__ import with_statement
from fabric.state import env
from fabric.api import local, run, abort, task
from fabric.context_managers import settings
@task
def allow_dirty():
    '''allow pushing even when the working copy is dirty'''
    env.gitric_allow_dirty = True
@task
def force_push():
    '''allow pushing even when history will be lost'''
    env.gitric_force_push = True
def git_seed(repo_path, commit=None, ignore_untracked_files=False):
    '''seed a remote git repository'''
    commit = _get_commit(commit)
    force = ('gitric_force_push' in env) and '-f' or ''
    dirty_working_copy = _is_dirty(commit, ignore_untracked_files)
    if dirty_working_copy and 'gitric_allow_dirty' not in env:
        abort(
            'Working copy is dirty. This check can be overridden by\n'
            'importing gitric.api.allow_dirty and adding allow_dirty to your '
            'call.')
    # initialize the remote repository (idempotent)
    run('git init %s' % repo_path)
    # silence git complaints about pushes coming in on the current branch
    # the pushes only seed the immutable object store and do not modify the
    # working copy
    run('GIT_DIR=%s/.git git config receive.denyCurrentBranch ignore' %
        repo_path)
    # a target doesn't need to keep track of which branch it is on so we always
    # push to its "master"
    with settings(warn_only=True):
        push = local(
            'git push git+ssh://%s@%s:%s%s %s:refs/heads/master %s' % (
                env.user, env.host, env.port, repo_path, commit, force))
    if push.failed:
        abort(
            '%s is a non-fast-forward\n'
            'push. The seed will abort so you don\'t lose information. '
            'If you are doing this\nintentionally import '
            'gitric.api.force_push and add it to your call.' % commit)
def git_reset(repo_path, commit=None):
    '''checkout a sha1 on a remote git repo'''
    commit = _get_commit(commit)
    run('cd %s && git reset --hard %s' % (repo_path, commit))
def _get_commit(commit):
    if commit is None:
        # if no commit is specified we will push HEAD
        commit = local('git rev-parse HEAD', capture=True)
    return commit
def _is_dirty(commit, ignore_untracked_files):
    untracked_files = '--untracked-files=no' if ignore_untracked_files else ''
    return local('git status %s --porcelain' % untracked_files, capture=True) != ''
| mit | Python |
47e7fcc3b837b459a2800e09ee87c2a6f87cdfba | Update SController.py | nvthanh1/Skypybot | skype_controller/SController.py | skype_controller/SController.py | """Import some important packages"""
import Skype4Py
import config as gbconfig
import json
from common import get_project_path
# Get Skype class instance
SKYPE_OBJ = Skype4Py.Skype()
# Establish the connection from the Skype object to the Skype client.
SKYPE_OBJ.Attach()
# Get all contact from object. This function might not be used in this case
def get_file():
    """Function to get file contains list of skype's contact"""
    returndata = {}
    try:
        root_path = get_project_path()
        # print root_path
        file_path = "%s/%s" % (root_path, gbconfig.FILE_CONTACT)
        filename = open(file_path, 'r')
        returndata = json.loads(filename.read())
        filename.close()
    except Exception as ex:
        print 'What the fuck? I could not load your file: %s - %s' % (gbconfig.FILE_CONTACT, ex)
    return returndata
def main_function():
    """Runable function"""
    get_file()
    for contact, message in get_file().iteritems():
        SKYPE_OBJ.SendMessage(contact, message)
    print "Message has been sent"
if __name__ == "__main__":
    main_function()
| """Import somes important packages"""
import Skype4Py
import config as gbconfig
import json
from common import get_project_path
# Get Skype class instance
SKYPE_OBJ = Skype4Py.Skype()
# Establish the connection from the Skype object to the Skype client.
SKYPE_OBJ.Attach()
# Get all contact from object. This function might not be used in this case
"""Function to get file contains list of skype's contact"""
returndata = {}
try:
root_path = get_project_path()
# print root_path
file_path = "%s/%s" % (root_path, gbconfig.FILE_CONTACT)
filename = open(file_path, 'r')
returndata = json.loads(filename.read())
filename.close()
except Exception as ex:
print 'What the fuck? I could not load your file: %s - %s' % (gbconfig.FILE_CONTACT, ex)
return returndata
def main_function():
    """Runable function"""
    get_file()
    for contact, message in get_file().iteritems():
        SKYPE_OBJ.SendMessage(contact, message)
    print "Message has been sent"
if __name__ == "__main__":
    main_function()
| mit | Python |
f21e732eada64a18e08524052ec66ce8705d9e9b | make imagemagick env var default to 'convert' instead of None | leovoel/glc.py | glc/config.py | glc/config.py | """
glc.config
==========
At the moment this only houses the environmental variable
for the ImageMagick binary. If you don't want to set that,
or can't for some reason, you can replace ``"convert"`` with the
path where the ``convert`` application that comes with it
lives in, if it doesn't happen to be in your PATH.
(c) 2016 LeoV
https://github.com/leovoel/
"""
import os
IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", "convert")
| """
glc.config
==========
At the moment this only houses the environmental variable
for the ImageMagick binary. If you don't want to set that,
or can't for some reason, you can replace ``None`` with the
path where the ``convert`` application that comes with it
lives in.
(c) 2016 LeoV
https://github.com/leovoel/
"""
import os
IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", None)
| mit | Python |
09a313a2cd74c391c12761306cb8ae641e9f0d28 | fix logs app prompt | synctree/synctree-awsebcli,synctree/synctree-awsebcli | ebcli/controllers/logs.py | ebcli/controllers/logs.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, flag_text
from ..operations import logsops
from ..objects.exceptions import InvalidOptionsError, NotFoundError
class LogsController(AbstractBaseController):
    class Meta:
        label = 'logs'
        description = strings['logs.info']
        usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
        arguments = AbstractBaseController.Meta.arguments + [
            (['-a', '--all'], dict(
                action='store_true', help=flag_text['logs.all'])),
            (['-z', '--zip'], dict(
                action='store_true', help=flag_text['logs.zip'])),
            (['-i', '--instance'], dict(help=flag_text['logs.instance'])),
            (['--stream'], dict(action='store_true',
                                help=flag_text['logs.stream'])),
        ]
        epilog = strings['logs.epilog']
    def do_command(self):
        app_name = self.get_app_name()
        env_name = self.get_env_name()
        if self.app.pargs.stream:
            try:
                return logsops.stream_logs(env_name)
            except NotFoundError:
                raise NotFoundError(strings['cloudwatch-stream.notsetup'])
        all = self.app.pargs.all
        instance = self.app.pargs.instance
        zip = self.app.pargs.zip
        if all and instance:
            raise InvalidOptionsError(strings['logs.allandinstance'])
        if zip:
            info_type = 'bundle'
            do_zip = True
        elif all:
            info_type = 'bundle'
            do_zip = False
        else:
            info_type = 'tail'
            do_zip = False
        logsops.logs(env_name, info_type, do_zip=do_zip,
                     instance_id=instance) | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, flag_text
from ..operations import logsops
from ..objects.exceptions import InvalidOptionsError, NotFoundError
class LogsController(AbstractBaseController):
    class Meta:
        label = 'logs'
        description = strings['logs.info']
        usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
        arguments = AbstractBaseController.Meta.arguments + [
            (['-a', '--all'], dict(
                action='store_true', help=flag_text['logs.all'])),
            (['-z', '--zip'], dict(
                action='store_true', help=flag_text['logs.zip'])),
            (['-i', '--instance'], dict(help=flag_text['logs.instance'])),
            (['--stream'], dict(action='store_true',
                                help=flag_text['logs.stream'])),
        ]
        epilog = strings['logs.epilog']
    def do_command(self):
        env_name = self.get_env_name()
        if self.app.pargs.stream:
            try:
                return logsops.stream_logs(env_name)
            except NotFoundError:
                raise NotFoundError(strings['cloudwatch-stream.notsetup'])
        all = self.app.pargs.all
        instance = self.app.pargs.instance
        zip = self.app.pargs.zip
        if all and instance:
            raise InvalidOptionsError(strings['logs.allandinstance'])
        if zip:
            info_type = 'bundle'
            do_zip = True
        elif all:
            info_type = 'bundle'
            do_zip = False
        else:
            info_type = 'tail'
            do_zip = False
        logsops.logs(env_name, info_type, do_zip=do_zip,
                     instance_id=instance) | apache-2.0 | Python |
d3cd1778f4ccb1651feb2186ecfdd0c81f86088c | Improve Instruction parsing | mossberg/spym,mossberg/spym | instruction.py | instruction.py | class Instruction(object):
    def __init__(self, line):
        instr = line.split(' ')
        self.name = instr[0]
        self.ops = []
        if len(instr) > 4:
            raise Exception('too many operands: {}'.format(line))
        # iterate through operands, perform some loose checks, and append
        # to self.ops
        for each in instr[1:]:
            if each.endswith(','):
                each = each[:-1]
            self.ops.append(each[1:] if each.startswith('$') else each)
| class Instruction(object):
    def __init__(self, line):
        instr = line.split(' ')
        self.name = instr[0]
        self.ops = []
        if len(instr) > 4:
            raise Exception('too many operands: {}'.format(line))
        # iterate through operands, perform some loose checks, and append
        # to self.ops
        for i, each in enumerate(instr[1:]):
            if each.endswith(','):
                each = each[:-1]
            if each.startswith('$'):
                self.ops.append(each[1:])
            else:
                self.ops.append(each)
| mit | Python |
8d0a41391fae5c66c296d5dfacc0ac6f82a6b355 | fix gridsearch path | rshkv/goethe | gridsearch.py | gridsearch.py | import time
import itertools as it
from gensim.models import word2vec
from goethe.corpora import Corpus
model_config = {
    'size': [200, 300, 400, 500, 600],
    'window': [5, 10, 20],
    'sg': [0, 1] # Skip-gram or CBOW
}
sample_size = 10000000
epochs = 10
def train_model(config):
    size, window, sg = config
    sentences = Corpus('../corpora/eval', limit=sample_size)
    model = word2vec.Word2Vec(sentences=sentences, size=size, window=window,
                              iter=epochs, workers=4)
    name = 'n{}_size{}_epochs{}_sg{}_window{}'.format(sample_size, size, epochs, sg, window)
    return name, model
def minutes(t0):
    t1 = time.time()
    return int((t1-t0)/60)
if __name__ == '__main__':
    parameters = it.product(model_config['size'], model_config['window'],
                            model_config['sg'])
    t0 = time.time()
    for p in parameters:
        name, model = train_model(p)
        model.save('models/' + name + '.model')
        print('{}\', saved model: {}'.format(minutes(t0), name))
| import time
import itertools as it
from gensim.models import word2vec
from goethe.corpora import Corpus
model_config = {
    'size': [200, 300, 400, 500, 600],
    'window': [5, 10, 20],
    'sg': [0, 1] # Skip-gram or CBOW
}
sample_size = 10000000
epochs = 10
def train_model(config):
    size, window, sg = config
    sentences = Corpus('../corpora/eval/eval.tokens.txt', limit=sample_size)
    model = word2vec.Word2Vec(sentences=sentences, size=size, window=window,
                              iter=epochs, workers=4)
    name = 'n{}_size{}_epochs{}_sg{}_window{}'.format(sample_size, size, epochs, sg, window)
    return name, model
def minutes(t0):
    t1 = time.time()
    return int((t1-t0)/60)
if __name__ == '__main__':
    parameters = it.product(model_config['size'], model_config['window'],
                            model_config['sg'])
    t0 = time.time()
    for p in parameters:
        name, model = train_model(p)
        model.save('models/' + name + '.model')
        print('{}\', saved model: {}'.format(minutes(t0), name))
| mit | Python |