commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
1cb55e4feec981efdf629b01ed7508f825b6c2c0
|
add comment TODO
|
z0rkuM/stockbros,z0rkuM/stockbros,z0rkuM/stockbros,z0rkuM/stockbros
|
StockIndicators/StockIndicators.py
|
StockIndicators/StockIndicators.py
|
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
#TODO: probar a meter token de seguridad a ver se funciona
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
mit
|
Python
|
13a5e797bf3c268ae42dda79c75959ca0602096f
|
Update unit tests script.
|
ciechowoj/master,ciechowoj/master,ciechowoj/master
|
unit_test.py
|
unit_test.py
|
#!/usr/bin/python3
import glob
import re
import subprocess
import os
import os.path
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
test_cases = sorted(glob.glob("models/TestCase*.blend"), key=natural_keys)
if not os.path.exists("test_results"):
os.makedirs("test_results")
num_samples = 10000
def run_test(test_case, technique, beta):
output = os.path.join("test_results", os.path.basename(test_case[:-6]) + "." + technique + str(beta) + ".exr")
if not os.path.exists(output):
command = ["master", test_case, "--" + technique, "--parallel", "--beta=" + str(beta), "--output=" + output, "--num-samples=" + str(num_samples), "--batch"]
print(" ".join(command))
subprocess.run(command)
for test_case in test_cases:
for technique in ["PT", "BPT", "UPG"]:
for beta in [0, 1, 2]:
run_test(test_case, technique, beta)
|
#!/usr/bin/python3
import glob
import re
import subprocess
import os
import os.path
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split('(\d+)', text) ]
test_cases = sorted(glob.glob("models/TestCase*.blend"), key=natural_keys)
if not os.path.exists("test_results"):
os.makedirs("test_results")
num_samples = 10000
def run_test(test_case, technique, beta):
output = os.path.join("test_results", os.path.basename(test_case[:-6]) + "." + technique + str(beta) + ".exr")
command = ["master", test_case, "--" + technique, "--parallel", "--beta=" + str(beta), "--output=" + output, "--num-samples=" + str(num_samples), "--batch"]
print(" ".join(command))
subprocess.run(command)
for test_case in test_cases:
for technique in ["PT", "BPT", "UPG"]:
for beta in [0, 1, 2]:
run_test(test_case, technique, beta)
|
mit
|
Python
|
7ccb8d443fe3dda236d05ff4ed13e067a6893872
|
create a changeset against local before changing local; we'll use this later
|
sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary
|
updatecmd.py
|
updatecmd.py
|
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import changeset
import os
import sys
import util
import versions
def doUpdate(repos, db, cfg, pkg, versionStr = None):
cs = None
if not os.path.exists(cfg.root):
util.mkdirChain(cfg.root)
if os.path.exists(pkg):
# there is a file, try to read it as a changeset file
if versionStr:
sys.stderr.write("Verison should not be specified when a SRS "
"change set is being installed.\n")
return 1
try:
cs = changeset.ChangeSetFromFile(pkg)
except KeyError:
# invalid changeset file
pass
else:
if cs.isAbstract():
newcs = db.rootChangeSet(cs, cfg.defaultbranch)
if newcs:
cs = newcs
list = []
list = map(lambda x: list.append(x), cs.getPackageList())
if not cs:
# so far no changeset (either the path didn't exist or we could not
# read it
if pkg and pkg[0] != ":":
pkg = cfg.packagenamespace + ":" + pkg
if versionStr and versionStr[0] != "/":
versionStr = cfg.defaultbranch.asString() + "/" + versionStr
if versionStr:
newVersion = versions.VersionFromString(versionStr)
else:
newVersion = None
list = []
bail = 0
for pkgName in repos.getPackageList(pkg):
if not newVersion:
newVersion = repos.pkgLatestVersion(pkgName, cfg.defaultbranch)
if not repos.hasPackageVersion(pkgName, newVersion):
sys.stderr.write("package %s does not contain version %s\n" %
(pkgName, newVersion.asString()))
bail = 1
else:
if db.hasPackage(pkgName):
currentVersion = db.pkgLatestVersion(pkgName,
newVersion.branch())
else:
currentVersion = None
list.append((pkgName, currentVersion, newVersion))
if bail:
return
if not list:
sys.stderr.write("repository does not contain a package called %s\n" % pkg)
return
cs = repos.createChangeSet(list)
# permute the list into a list of just package names
list = map(lambda x: x[0], list)
# create a change set between what is in the database and what is
# on the disk
localChanges = changeset.CreateAgainstLocal(cfg, db, list)
if cs.isAbstract():
db.commitChangeSet(cfg.sourcepath, cs, eraseOld = 0)
else:
inverse = cs.invert(db)
db.addRollback(inverse)
db.commitChangeSet(cfg.sourcepath, cs, eraseOld = 1)
|
#
# Copyright (c) 2004 Specifix, Inc.
# All rights reserved
#
import changeset
import os
import sys
import util
import versions
def doUpdate(repos, db, cfg, pkg, versionStr = None):
cs = None
if not os.path.exists(cfg.root):
util.mkdirChain(cfg.root)
if os.path.exists(pkg):
# there is a file, try to read it as a changeset file
if versionStr:
sys.stderr.write("Verison should not be specified when a SRS "
"change set is being installed.\n")
return 1
try:
cs = changeset.ChangeSetFromFile(pkg)
except KeyError:
# invalid changeset file
pass
else:
if cs.isAbstract():
newcs = db.rootChangeSet(cs, cfg.defaultbranch)
if newcs:
cs = newcs
if not cs:
# so far no changeset (either the path didn't exist or we could not
# read it
if pkg and pkg[0] != ":":
pkg = cfg.packagenamespace + ":" + pkg
if versionStr and versionStr[0] != "/":
versionStr = cfg.defaultbranch.asString() + "/" + versionStr
if versionStr:
newVersion = versions.VersionFromString(versionStr)
else:
newVersion = None
list = []
bail = 0
for pkgName in repos.getPackageList(pkg):
if not newVersion:
newVersion = repos.pkgLatestVersion(pkgName, cfg.defaultbranch)
if not repos.hasPackageVersion(pkgName, newVersion):
sys.stderr.write("package %s does not contain version %s\n" %
(pkgName, newVersion.asString()))
bail = 1
else:
if db.hasPackage(pkgName):
currentVersion = db.pkgLatestVersion(pkgName,
newVersion.branch())
else:
currentVersion = None
list.append((pkgName, currentVersion, newVersion))
if bail:
return
if not list:
sys.stderr.write("repository does not contain a package called %s\n" % pkg)
return
cs = repos.createChangeSet(list)
if cs.isAbstract():
db.commitChangeSet(cfg.sourcepath, cs, eraseOld = 0)
else:
inverse = cs.invert(db)
db.addRollback(inverse)
db.commitChangeSet(cfg.sourcepath, cs, eraseOld = 1)
|
apache-2.0
|
Python
|
6aea68d6c1de498583c42839a3a31ef25f51e17e
|
Complete alg_breadth_first_search.py
|
bowen0701/algorithms_data_structures
|
alg_breadth_first_search.py
|
alg_breadth_first_search.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
def bfs(graph_adj_d, start_vertex):
visit_queue = []
visit_queue.insert(0, start_vertex)
distance_d = {v: np.inf for v in graph_adj_d.keys()}
distance_d[start_vertex] = 0
while visit_queue:
v_visit = visit_queue.pop()
for v_neighbor in graph_adj_d[v_visit]:
if np.isinf(distance_d[v_neighbor]):
visit_queue.insert(0, v_neighbor)
distance_d[v_neighbor] = distance_d[v_visit] + 1
return distance_d
def main():
# Small word ladder graph.
graph_adj_d = {
'fool': ['cool', 'pool', 'foil', 'foul'],
'foul': ['fool', 'foil'],
'foil': ['fool', 'foul', 'fail'],
'cool': ['fool', 'pool'],
'fail': ['foil', 'fall'],
'fall': ['fail', 'pall'],
'pool': ['fool', 'cool', 'poll'],
'poll': ['pool', 'pall', 'pole'],
'pall': ['fall', 'pale', 'poll'],
'pole': ['poll', 'pope', 'pale'],
'pope': ['pole'],
'pale': ['pall', 'pole', 'sale', 'page'],
'sale': ['pale', 'sage'],
'page': ['pale', 'sage'],
'sage': ['sale', 'page']
}
print('Graph: {}'.format(graph_adj_d))
start_vertex = 'fool'
print('Start vertex: {}'.format(start_vertex))
distance_d = bfs(graph_adj_d, start_vertex)
print('By BFS, the distance dict is {}'.format(distance_d))
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def bfs():
pass
def main():
# Small word ladder graph.
graph_adj_d = {
'fool': {'cool', 'pool', 'foil', 'foul'},
'foul': {'fool', 'foil'},
'foil': {'fool', 'foul', 'fail'},
'cool': {'fool', 'pool'},
'fail': {'foil', 'fall'},
'fall': {'fail', 'pall'},
'pool': {'fool', 'cool', 'poll'},
'poll': {'pool', 'pall', 'pole'},
'pall': {'fall', 'pale', 'poll'},
'pole': {'poll', 'pope', 'pale'},
'pope': {'pole'},
'pale': {'pall', 'pole', 'sale', 'page'},
'sale': {'pale', 'sage'},
'page': {'pale', 'sage'},
'sage': {'sale', 'page'}
}
if __name__ == '__main__':
main()
|
bsd-2-clause
|
Python
|
649029d2ad04eb5afd618b40ccd62993d69e389f
|
Complete alg_percentile_selection.py
|
bowen0701/algorithms_data_structures
|
alg_percentile_selection.py
|
alg_percentile_selection.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
def select_percentile(ls, k):
"""Kth percentile selection algorithm.
Just select the kth element, without caring about
the relative ordering of the rest of them.
The algorithm performs in place without allocating
new memory for the three sublists using three pointers.
"""
v = random.sample(ls, 1)[0]
idx_eq_v = [i for i, a in enumerate(ls) if a == v]
idx_le_v = [i for i, a in enumerate(ls) if a < v]
idx_ge_v = [i for i, a in enumerate(ls) if a > v]
if k <= len(idx_le_v):
le_v_ls = [ls[idx] for idx in idx_le_v]
return select_percentile(le_v_ls, k)
elif len(idx_le_v) < k <= len(idx_le_v) + len(idx_eq_v):
return v
elif k > len(idx_le_v) + len(idx_eq_v):
ge_v_ls = [ls[idx] for idx in idx_ge_v]
return select_percentile(ge_v_ls, k - len(idx_le_v) - len(idx_eq_v))
def main():
n = 100
ls = range(n)
random.shuffle(ls)
print('List: {}'.format(ls))
print('Get median by selection:')
print(select_percentile(ls, n // 2))
print('Get min by selection:')
print(select_percentile(ls, 1))
print('Get max by selection:')
print(select_percentile(ls, n))
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def select_percentile(a_list, k):
"""Select list's kth percentile.
Just select the kth element, without caring about
the relative ordering of the rest of them.
"""
pass
def main():
pass
if __name__ == '__main__':
main()
|
bsd-2-clause
|
Python
|
7de37a5ba8164b757a6a8ed64f80ee379ff7a3ad
|
fix some bugs
|
XiaowenLin/cs598rk
|
scripts/extract_computer_and_accessories.py
|
scripts/extract_computer_and_accessories.py
|
from scripts.rake import *
import json
# get review texts aggregated by asin id
def get_rdd(base, input, num_part):
base_dir = os.path.join(base)
input_path = os.path.join(input)
file_name = os.path.join(base_dir, input_path)
# load data
rdd = sc.textFile(file_name, num_part)
rdd_j = rdd.map(json.loads)
rdd_j.cache()
return rdd_j
num_part = 16
revs = get_rdd('data', 'reviews_electronics.json', num_part)
rev_texts = revs.map(lambda x: (x['asin'], x['reviewText']))
rev_agg_texts = rev_texts.map(lambda (asin, text): (asin, [text])).reduceByKey(lambda x, y: x + y)
rev_agg_texts.cache()
prods = get_rdd('data', 'meta_electronics.json', num_part)
categ = prods.map( lambda x: (x.get('asin'), x.get('categories')) )
categ = categ.flatMapValues(lambda x: x)
computers = categ.filter( lambda (asin, cats): 'Computers & Accessories' in cats )
prods_ = prods.join(computers)
prods_.cache()
# (asin, ([review], (d_prod, [category])) )
items = rev_agg_texts.join(prods_)
items = items.map( lambda (asin, (reviews, (d_prod, categories))): (asin, reviews, d_prod, categories) )
# 1. RAKE: keyword. use rake algorithm to extract keywords and take top 10 keywords from each asin
rake = Rake('data/MergedStopList.txt') # TODO: add more into this list
items_wk = items.map( lambda (asin, reviews, d_prod, categories): (asin, rake.run(' '.join(reviews)), reviews, d_prod, categories) )
# 2. NP: noun phrasee among these keywords
import nltk
from scripts.np_extractor import *
items_wk.cache
items_np = items_wk.map(lambda (asin, pairs, reviews, d_prod, categories):
(asin, [(NPExtractor(string).extract(), score) for (string, score) in pairs], reviews, d_prod, categories)
)
items_np = items_np.map(lambda (asin, pairs, reviews, d_prod, categories):
(asin, [(toks, scr) for (toks, scr) in pairs if len(toks) > 0], reviews, d_prod, categories)
)
# 3. output
import pandas as pd
df = pd.DataFrame(items_np.collect())
df.to_csv('data/processed/computers_kw.csv')
|
from scripts.rake import *
import json
# get review texts aggregated by asin id
def get_rdd(base, input, num_part):
base_dir = os.path.join(base)
input_path = os.path.join(input)
file_name = os.path.join(base_dir, input_path)
# load data
rdd = sc.textFile(file_name, num_part)
rdd_j = rdd.map(json.loads)
rdd_j.cache()
return rdd_j
num_part = 16
revs = get_rdd('data', 'reviews_electronics.json', num_part)
rev_texts = revs.map(lambda x: (x['asin'], x['reviewText']))
rev_agg_texts = rev_texts.map(lambda (asin, text): (asin, [text])).reduceByKey(lambda x, y: x + y)
rev_agg_texts.cache()
prods = get_rdd('data', 'meta_electronics.json', num_part)
categ = prods.map( lambda x: (x.get('asin'), x.get('categories')) )
categ = categ.flapMapValues(lambda x: x)
computers = categ_.filter( lambda (asin, cats): 'Computers & Accessories' in cats )
prods_ = prods_.join(computers)
prods.cache()
# (asin, ([review], (d_prod, [category])) )
items = rev_agg_texts.join(prods_)
items = items.map( lambda (asin, (reviews, (d_prod, categories))): (asin, reviews, d_prod, categories) )
# 1. RAKE: keyword. use rake algorithm to extract keywords and take top 10 keywords from each asin
rake = Rake('data/MergedStopList.txt') # TODO: add more into this list
items_wk = items.map( lambda (asin, reviews, d_prod, categories): (asin, rake.run(' '.join(reviews)), reviews, d_prod, categories) )
# 2. NP: noun phrasee among these keywords
import nltk
from scripts.np_extractor import *
items_wk.cache()
items_np = items_wk.map(lambda (asin, pairs, reviews, d_prod, categories):
(asin, [(NPExtractor(string).extract(), score) for (string, score) in pairs], reviews, d_prod, categories)
)
items_np = items_np.map(lambda (asin, pairs, reviews, d_prod, categories):
(asin, [(toks, scr) for (toks, scr) in pairs if len(toks) > 0], reviews, d_prod, categories)
)
# 3. output
import pandas as pd
df = pd.DataFrame(items_np.collect())
df.to_csv('data/processed/computers_kw.csv')
|
mit
|
Python
|
ab533cf55e87571e757f545d5eaad6f8f62cc31f
|
Make worker resource plot responsive
|
mrocklin/distributed,broxtronix/distributed,dask/distributed,mrocklin/distributed,amosonn/distributed,broxtronix/distributed,mrocklin/distributed,amosonn/distributed,blaze/distributed,dask/distributed,dask/distributed,blaze/distributed,broxtronix/distributed,dask/distributed,amosonn/distributed
|
distributed/diagnostics/worker_monitor.py
|
distributed/diagnostics/worker_monitor.py
|
from __future__ import print_function, division, absolute_import
from collections import defaultdict
from itertools import chain
from toolz import pluck
from ..utils import ignoring
with ignoring(ImportError):
from bokeh.models import ColumnDataSource, DataRange1d, Range1d
from bokeh.palettes import Spectral9
from bokeh.plotting import figure
def resource_profile_plot(width=600, height=300):
names = ['time', 'cpu', 'memory-percent']
source = ColumnDataSource({k: [] for k in names})
x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
y_range = Range1d(0, 100)
p = figure(width=width, height=height, x_axis_type='datetime',
responsive=True, tools='xpan,xwheel_zoom,box_zoom,resize,reset',
x_range=x_range, y_range=y_range)
p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,
color=Spectral9[7], legend='Avg Memory Usage', source=source)
p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,
color=Spectral9[0], legend='Avg CPU Usage', source=source)
p.legend[0].location = 'top_left'
p.yaxis[0].axis_label = 'Percent'
p.xaxis[0].axis_label = 'Time'
p.min_border_right = 10
return source, p
def resource_profile_update(source, worker_buffer, times_buffer):
data = defaultdict(list)
workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
for name in ['cpu', 'memory-percent']:
data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
for msg in worker_buffer]
for w in workers]
data['workers'] = workers
data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'
for i, t in enumerate(times_buffer)]
for w in workers]
source.data.update(data)
def resource_append(lists, msg):
L = list(msg.values())
if not L:
return
for k in ['cpu', 'memory-percent']:
lists[k].append(mean(pluck(k, L)))
lists['time'].append(mean(pluck('time', L)) * 1000)
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
|
from __future__ import print_function, division, absolute_import
from collections import defaultdict
from itertools import chain
from toolz import pluck
from ..utils import ignoring
with ignoring(ImportError):
from bokeh.models import ColumnDataSource, DataRange1d, Range1d
from bokeh.palettes import Spectral9
from bokeh.plotting import figure
def resource_profile_plot(width=600, height=300):
names = ['time', 'cpu', 'memory-percent']
source = ColumnDataSource({k: [] for k in names})
x_range = DataRange1d(follow='end', follow_interval=30000, range_padding=0)
y_range = Range1d(0, 100)
p = figure(width=width, height=height, x_axis_type='datetime',
tools='xpan,xwheel_zoom,box_zoom,resize,reset',
x_range=x_range, y_range=y_range)
p.line(x='time', y='memory-percent', line_width=2, line_alpha=0.8,
color=Spectral9[7], legend='Memory Usage', source=source)
p.line(x='time', y='cpu', line_width=2, line_alpha=0.8,
color=Spectral9[0], legend='CPU Usage', source=source)
p.legend[0].location = 'top_left'
p.yaxis[0].axis_label = 'Percent'
p.xaxis[0].axis_label = 'Time'
p.min_border_right = 10
return source, p
def resource_profile_update(source, worker_buffer, times_buffer):
data = defaultdict(list)
workers = sorted(list(set(chain(*list(w.keys() for w in worker_buffer)))))
for name in ['cpu', 'memory-percent']:
data[name] = [[msg[w][name] if w in msg and name in msg[w] else 'null'
for msg in worker_buffer]
for w in workers]
data['workers'] = workers
data['times'] = [[t * 1000 if w in worker_buffer[i] else 'null'
for i, t in enumerate(times_buffer)]
for w in workers]
source.data.update(data)
def resource_append(lists, msg):
L = list(msg.values())
if not L:
return
for k in ['cpu', 'memory-percent']:
lists[k].append(mean(pluck(k, L)))
lists['time'].append(mean(pluck('time', L)) * 1000)
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
|
bsd-3-clause
|
Python
|
6e65bf0ce5334e2242fee0c36886cfda29a1f4a4
|
Make build.py baseimage pulling optional
|
avatao/challenge-engine,avatao-content/challenge-toolbox,avatao/challenge-engine,avatao/challenge-engine,avatao-content/challenge-toolbox,avatao/challenge-engine,avatao-content/challenge-toolbox,avatao/challenge-engine,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao/challenge-engine,avatao-content/challenge-toolbox,avatao/challenge-engine,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao-content/challenge-toolbox,avatao/challenge-engine
|
build.py
|
build.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: python; -*-
import logging
import subprocess
import sys
import time
import os
from common import get_sys_args, yield_dockerfiles
from common import run_cmd, init_logger
def build_image(repo_path, repo_name):
for dockerfile, image in yield_dockerfiles(repo_path, repo_name):
try:
build_cmd = ['docker', 'build', '-t', image, '-f', dockerfile, repo_path]
if os.environ.get('PULL_BASEIMAGES', '0') == '1':
build_cmd.append('--pull')
run_cmd(build_cmd)
except subprocess.CalledProcessError:
logging.error('Failed to build %s!' % dockerfile)
sys.exit(1)
time.sleep(1)
if __name__ == '__main__':
"""
Build solvable and controller docker images from an avatao challenge repository.
Simply add the challenge repository path as the first argument and the script does the rest.
If a controller or solvable is missing, we skip it.
After a successful build you can use the start.py to run your containers.
"""
init_logger()
build_image(*get_sys_args())
logging.info('Finished. Everything is built.')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: python; -*-
import logging
import subprocess
import sys
import time
from common import get_sys_args, yield_dockerfiles
from common import run_cmd, init_logger
def build_image(repo_path, repo_name):
for dockerfile, image in yield_dockerfiles(repo_path, repo_name):
try:
run_cmd(['docker', 'build', '--pull', '-t', image, '-f', dockerfile, repo_path])
except subprocess.CalledProcessError:
logging.error('Failed to build %s!' % dockerfile)
sys.exit(1)
time.sleep(1)
if __name__ == '__main__':
"""
Build solvable and controller docker images from an avatao challenge repository.
Simply add the challenge repository path as the first argument and the script does the rest.
If a controller or solvable is missing, we skip it.
After a successful build you can use the start.py to run your containers.
"""
init_logger()
build_image(*get_sys_args())
logging.info('Finished. Everything is built.')
|
apache-2.0
|
Python
|
b68669f07aba05a4e96f900df71d6179382cd6f1
|
bump version
|
arnehilmann/yum-repos,arnehilmann/yumrepos,arnehilmann/yumrepos,arnehilmann/yum-repos
|
build.py
|
build.py
|
from pybuilder.core import use_plugin, init, Author, task
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.integrationtest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
use_plugin("python.distutils")
use_plugin('copy_resources')
use_plugin('filter_resources')
name = "yum-repos"
summary = "yum-repos: simple yum repositories with minimal rest api"
url = "https://github.com/arnehilmann/yum-repos"
version = "0.8.4"
authors = [Author('Arne Hilmann', '[email protected]')]
description = """yum-repos
- serve yum repositories as simple folders
- ... via web server
- offer rest api for
- create/remove/link of repositories
- upload/stage/remove of rpms
"""
default_task = ["clean", "analyze", "publish"]
@task
def gittag(project, logger):
logger.info("The following commands create a new release, triggering all the fun stuff:")
logger.info("git tag -a v{0} -m v{0}".format(project.version))
logger.info("git push --tags")
@init
def set_properties(project):
project.build_depends_on('requests')
project.depends_on("flask")
try:
import functools.lru_cache
except ImportError:
pass
# project.depends_on("backports.functools_lru_cache")
project.set_property('copy_resources_target', '$dir_dist')
project.get_property('copy_resources_glob').extend(['setup.*cfg'])
project.get_property('filter_resources_glob').extend(['**/setup.*cfg'])
|
from pybuilder.core import use_plugin, init, Author, task
use_plugin("python.core")
use_plugin("python.unittest")
use_plugin("python.integrationtest")
use_plugin("python.install_dependencies")
use_plugin("python.flake8")
use_plugin("python.coverage")
use_plugin("python.distutils")
use_plugin('copy_resources')
use_plugin('filter_resources')
name = "yum-repos"
summary = "yum-repos: simple yum repositories with minimal rest api"
url = "https://github.com/arnehilmann/yum-repos"
version = "0.8.2"
authors = [Author('Arne Hilmann', '[email protected]')]
description = """yum-repos
- serve yum repositories as simple folders
- ... via web server
- offer rest api for
- create/remove/link of repositories
- upload/stage/remove of rpms
"""
default_task = ["clean", "analyze", "publish"]
@task
def gittag(project, logger):
logger.info("The following commands create a new release, triggering all the fun stuff:")
logger.info("git tag -a v{0} -m v{0}".format(project.version))
logger.info("git push --tags")
@init
def set_properties(project):
project.build_depends_on('requests')
project.depends_on("flask")
try:
import functools.lru_cache
except ImportError:
pass
# project.depends_on("backports.functools_lru_cache")
project.set_property('copy_resources_target', '$dir_dist')
project.get_property('copy_resources_glob').extend(['setup.*cfg'])
project.get_property('filter_resources_glob').extend(['**/setup.*cfg'])
|
apache-2.0
|
Python
|
83b12f568ba8843ac6bff4c5179d8200d88505b0
|
Fix an issue where the .VERSION file output from build.py had multiple commit hashes
|
jenkinsci/coverity-plugin,jenkinsci/coverity-plugin,jenkinsci/coverity-plugin,jenkinsci/coverity-plugin
|
build.py
|
build.py
|
#/*******************************************************************************
# * Copyright (c) 2016 Synopsys, Inc
# * All rights reserved. This program and the accompanying materials
# * are made available under the terms of the Eclipse Public License v1.0
# * which accompanies this distribution, and is available at
# * http://www.eclipse.org/legal/epl-v10.html
# *
# * Contributors:
# * Synopsys, Inc - initial implementation and documentation
# *******************************************************************************/
import sys
import subprocess
import re
import json
import shutil
if __name__ == "__main__":
# Check to make sure we have the correct number of arguments
if len(sys.argv) != 4:
print "Incorrect number of arguments given. Build.py takes three arguments, first is the version number, second is the build number $BUILD_NUMBER and third is build id $BUILD_ID"
sys.exit(-1)
# Save version, build number and id that was passed in from jenkins
version = sys.argv[1]
build_number = sys.argv[2]
build_id = sys.argv[3]
# git log for the current commit id hash
output = subprocess.Popen("git log --pretty=format:'%H' -n 1", stdout=subprocess.PIPE, shell=True)
commit_id = output.stdout.read()
# Generate the json output text
json_output = json.dumps({ "commit_id" : commit_id.strip(), "build_number" : build_number, "build_id" : build_id }, indent=4)
# Run the typical build for jenkins
subprocess.check_call("mvn clean install", shell=True)
# write the version output file
version_file = open("./target/coverity.hpi.VERSION","w")
version_file.write(json_output)
# move the .hpi file to a versioned file
shutil.move("./target/coverity.hpi", "./target/coverity-{0}.hpi".format(version))
|
#/*******************************************************************************
# * Copyright (c) 2016 Synopsys, Inc
# * All rights reserved. This program and the accompanying materials
# * are made available under the terms of the Eclipse Public License v1.0
# * which accompanies this distribution, and is available at
# * http://www.eclipse.org/legal/epl-v10.html
# *
# * Contributors:
# * Synopsys, Inc - initial implementation and documentation
# *******************************************************************************/
import sys
import subprocess
import re
import json
import shutil
if __name__ == "__main__":
# Check to make sure we have the correct number of arguments
if len(sys.argv) != 4:
print "Incorrect number of arguments given. Build.py takes three arguments, first is the version number, second is the build number $BUILD_NUMBER and third is build id $BUILD_ID"
sys.exit(-1)
# Save version, build number and id that was passed in from jenkins
version = sys.argv[1]
build_number = sys.argv[2]
build_id = sys.argv[3]
# Grep git commit id
output = subprocess.Popen("git log --name-status HEAD^..HEAD | grep \"commit*\"", stdout=subprocess.PIPE, shell=True)
commit = output.stdout.read()
# Remove all head information, so that only the commit id is left
commit_id = re.sub(r'\(.*?\)','',commit)
commit_id = re.sub("commit","",commit_id)
# Generate the json output text
json_output = json.dumps({ "commit_id" : commit_id.strip(), "build_number" : build_number, "build_id" : build_id }, indent=4)
# Run the typical build for jenkins
subprocess.check_call("mvn clean install", shell=True)
# write the version output file
version_file = open("./target/coverity.hpi.VERSION","w")
version_file.write(json_output)
# move the .hpi file to a versioned file
shutil.move("./target/coverity.hpi", "./target/coverity-{0}.hpi".format(version))
|
epl-1.0
|
Python
|
fe55a3e5ba9f4a368d39fcc3316a471df547d714
|
Bump version to 0.8.1+dev
|
python-hyper/h11
|
h11/_version.py
|
h11/_version.py
|
# This file must be kept very simple, because it is consumed from several
# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
# We use a simple scheme:
# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
# where the +dev versions are never released into the wild, they're just what
# we stick into the VCS in between releases.
#
# This is compatible with PEP 440:
# http://legacy.python.org/dev/peps/pep-0440/
# via the use of the "local suffix" "+dev", which is disallowed on index
# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
# 1.0.0.)
__version__ = "0.8.1+dev"
|
# This file must be kept very simple, because it is consumed from several
# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc.
# We use a simple scheme:
# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev
# where the +dev versions are never released into the wild, they're just what
# we stick into the VCS in between releases.
#
# This is compatible with PEP 440:
# http://legacy.python.org/dev/peps/pep-0440/
# via the use of the "local suffix" "+dev", which is disallowed on index
# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we
# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before*
# 1.0.0.)
__version__ = "0.8.1"
|
mit
|
Python
|
0e3e9028598c8ebe49c3aec98fbfb584e8f5223b
|
Check both category_links and resource_link
|
uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw
|
myuw/management/commands/check_reslinks.py
|
myuw/management/commands/check_reslinks.py
|
"""
Test all the links in the CSV for non-200 status codes (after redirects).
"""
import logging
import sys
import urllib3
from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from myuw.dao.category_links import Res_Links, Resource_Links
from myuw.util.settings import get_cronjob_recipient, get_cronjob_sender
# Disable SSL warnings
urllib3.disable_warnings()
# Need limit of 1, otherwise sdb gives us a 403
http = urllib3.PoolManager(1, timeout=8)
# Need to override UA for some links, e.g. LinkedIn
ua = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
messages = []
links = Res_Links.get_all_links()
messages.append(Res_Links.csv_filename)
verify_links(links, messages)
links = Resource_Links.get_all_links()
messages.append("\n\n{}".format(Resource_Links.csv_filename))
verify_links(links, messages)
send_mail("Check Cetegory and Resource Links",
"\n".join(messages),
"{}@uw.edu".format(get_cronjob_sender()),
["{}@uw.edu".format(get_cronjob_recipient())])
def verify_links(links, messages):
for link in links:
if link.url.startswith("https://sdb."):
continue
status = get_http_status(link.url, messages)
if status not in [200]:
msg = {"title": link.title,
"campus": make_campus_human_readable(link.campus),
"url": link.url,
"status": status}
logger.error(msg)
messages.append("{}\n\n".format(msg))
def get_http_status(url, messages):
"""
Given a url, get the HTTP status code or a human-readable exception.
"""
try:
result = http.request(
'GET',
url,
headers={'User-Agent': ua},
retries=urllib3.Retry(redirect=3, connect=2, read=2)
)
return result.status
except Exception as ex:
logger.error(ex)
messages.append(str(ex))
def make_campus_human_readable(campus):
if campus is None:
return 'All Campuses'
else:
# Capitalize first letter
return campus[0:1].upper() + campus[1:]
|
"""
Test all the links in the CSV for non-200 status codes (after redirects).
"""
import logging
import sys
import urllib3
from django.core.mail import send_mail
from django.core.management.base import BaseCommand, CommandError
from myuw.dao.category_links import Res_Links
from myuw.util.settings import get_cronjob_recipient, get_cronjob_sender
# Disable SSL warnings
urllib3.disable_warnings()
# Need limit of 1, otherwise sdb gives us a 403
http = urllib3.PoolManager(1, timeout=8)
# Need to override UA for some links, e.g. LinkedIn
ua = 'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
messages = []
links = Res_Links.get_all_links()
for link in links:
if link.url.startswith("https://sdb."):
continue
status = get_http_status(link.url, messages)
if status not in [200]:
msg = "{}, {}, URL: {} =status=> {}\n\n".format(
link.title, make_campus_human_readable(link.campus),
link.url, status)
logger.error(msg)
messages.append(msg)
if len(messages):
send_mail("Check Cetegory Links Cron",
"\n".join(messages),
"{}@uw.edu".format(get_cronjob_sender()),
["{}@uw.edu".format(get_cronjob_recipient())])
def get_http_status(url, messages):
"""
Given a url, get the HTTP status code or a human-readable exception.
"""
try:
result = http.request(
'GET',
url,
headers={'User-Agent': ua},
retries=urllib3.Retry(redirect=3, connect=2, read=2)
)
return result.status
except Exception as ex:
messages.append(str(ex))
def make_campus_human_readable(campus):
if campus is None:
return 'All Campuses'
else:
# Capitalize first letter
return campus[0:1].upper() + campus[1:]
|
apache-2.0
|
Python
|
d242870d99634edc4b077b045fe5489039a3c821
|
add shebang to targetselection script
|
desihub/desitarget,desihub/desitarget
|
bin/targetselection.py
|
bin/targetselection.py
|
#!/usr/bin/env python
import numpy
from desitarget.io import read_tractor, write_targets
from desitarget.cuts import LRG, ELG, BGS, QSO
from desitarget import targetmask
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument("--type", choices=["tractor"], default="tractor", help="Assume a type for src files")
ap.add_argument("src", help="File that stores Candidates/Objects")
ap.add_argument("dest", help="File that stores targets")
TYPES = {
'LRG': LRG,
'ELG': ELG,
'BGS': BGS,
'QSO': QSO,
}
def main():
ns = ap.parse_args()
candidates = read_tractor(ns.src)
# FIXME: fits doesn't like u8; there must be a workaround
# but lets stick with i8 for now.
tsbits = numpy.zeros(len(candidates), dtype='i8')
for t in TYPES.keys():
cut = TYPES[t]
bitfield = targetmask.mask(t)
with numpy.errstate(all='ignore'):
mask = cut.apply(candidates)
tsbits[mask] |= bitfield
assert ((tsbits & bitfield) != 0).sum() == mask.sum()
print (t, 'selected', mask.sum())
write_targets(ns.dest, candidates, tsbits)
print ('written to', ns.dest)
if __name__ == "__main__":
main()
|
import numpy
from desitarget.io import read_tractor, write_targets
from desitarget.cuts import LRG, ELG, BGS, QSO
from desitarget import targetmask
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument("--type", choices=["tractor"], default="tractor", help="Assume a type for src files")
ap.add_argument("src", help="File that stores Candidates/Objects")
ap.add_argument("dest", help="File that stores targets")
TYPES = {
'LRG': LRG,
'ELG': ELG,
'BGS': BGS,
'QSO': QSO,
}
def main():
ns = ap.parse_args()
candidates = read_tractor(ns.src)
# FIXME: fits doesn't like u8; there must be a workaround
# but lets stick with i8 for now.
tsbits = numpy.zeros(len(candidates), dtype='i8')
for t in TYPES.keys():
cut = TYPES[t]
bitfield = targetmask.mask(t)
with numpy.errstate(all='ignore'):
mask = cut.apply(candidates)
tsbits[mask] |= bitfield
assert ((tsbits & bitfield) != 0).sum() == mask.sum()
print (t, 'selected', mask.sum())
write_targets(ns.dest, candidates, tsbits)
print ('written to', ns.dest)
if __name__ == "__main__":
main()
|
bsd-3-clause
|
Python
|
22a76a55c373af8f64717b10542b7230e63e9583
|
update person caches to 1hr
|
uw-it-aca/canvas-sis-provisioner,uw-it-aca/canvas-sis-provisioner,uw-it-aca/canvas-sis-provisioner,uw-it-aca/canvas-sis-provisioner
|
sis_provisioner/cache.py
|
sis_provisioner/cache.py
|
from restclients.cache_implementation import TimedCache
import re
class RestClientsCache(TimedCache):
""" A custom cache implementation for Canvas """
url_policies = {}
url_policies["sws"] = (
(re.compile(r"^/student/v5/term/"), 60 * 60 * 10),
(re.compile(r"^/student/v5/course/"), 60 * 5),
)
url_policies["pws"] = (
(re.compile(r"^/identity/v1/person/"), 60 * 60),
(re.compile(r"^/identity/v1/entity/"), 60 * 60),
)
url_policies["canvas"] = (
(re.compile(r"^/api/v1/accounts/\d+/roles"), 60 * 60 * 4),
)
def _get_cache_policy(self, service, url):
for policy in RestClientsCache.url_policies.get(service, []):
if policy[0].match(url):
return policy[1]
return 0
def getCache(self, service, url, headers):
cache_policy = self._get_cache_policy(service, url)
return self._response_from_cache(service, url, headers, cache_policy)
def processResponse(self, service, url, response):
if self._get_cache_policy(service, url):
return self._process_response(service, url, response)
|
from restclients.cache_implementation import TimedCache
import re
class RestClientsCache(TimedCache):
""" A custom cache implementation for Canvas """
url_policies = {}
url_policies["sws"] = (
(re.compile(r"^/student/v5/term/"), 60 * 60 * 10),
(re.compile(r"^/student/v5/course/"), 60 * 5),
)
url_policies["pws"] = (
(re.compile(r"^/identity/v1/person/"), 60 * 60 * 10),
(re.compile(r"^/identity/v1/entity/"), 60 * 60 * 10),
)
url_policies["canvas"] = (
(re.compile(r"^/api/v1/accounts/\d+/roles"), 60 * 60 * 4),
)
def _get_cache_policy(self, service, url):
for policy in RestClientsCache.url_policies.get(service, []):
if policy[0].match(url):
return policy[1]
return 0
def getCache(self, service, url, headers):
cache_policy = self._get_cache_policy(service, url)
return self._response_from_cache(service, url, headers, cache_policy)
def processResponse(self, service, url, response):
if self._get_cache_policy(service, url):
return self._process_response(service, url, response)
|
apache-2.0
|
Python
|
909b14f4d4b82c72e8f3987c1ef82cc075520cb1
|
add @
|
h4llow3En/IAmTalkingToYouBot
|
botpi.py
|
botpi.py
|
import re
re_pi = re.compile(r'(?:^|\s)(((P|p)i)|(π)|(3(\.|,)14\d*))').search
message = "@{name}, eigentlich is π ja 3.1415926535897932384626433832795028841971693\
9937510582097494... aber rechne lieber mit 3, das ist wesentlich einfacher!"
def check_pi(bot_stuff):
match = re_pi(bot_stuff['message'])
print(match)
if match is not None:
bot_stuff['sendMessage'](chat_id=bot_stuff['chat_id'],
text=message.format(
name=bot_stuff['username']))
|
import re
re_pi = re.compile(r'(?:^|\s)(((P|p)i)|(π)|(3(\.|,)14\d*))').search
message = "{name}, eigentlich is π ja 3.1415926535897932384626433832795028841971693\
9937510582097494... aber rechne lieber mit 3, das ist wesentlich einfacher!"
def check_pi(bot_stuff):
match = re_pi(bot_stuff['message'])
print(match)
if match is not None:
bot_stuff['sendMessage'](chat_id=bot_stuff['chat_id'],
text=message.format(
name=bot_stuff['username']))
|
mit
|
Python
|
d43fecb6f645dabf1740cd42aaf25191353a1b77
|
add curriculum to cache policy
|
uw-it-aca/canvas-sis-provisioner,uw-it-aca/canvas-sis-provisioner,uw-it-aca/canvas-sis-provisioner,uw-it-aca/canvas-sis-provisioner
|
sis_provisioner/cache.py
|
sis_provisioner/cache.py
|
from django.conf import settings
from memcached_clients import RestclientPymemcacheClient
from uw_kws import ENCRYPTION_KEY_URL, ENCRYPTION_CURRENT_KEY_URL
import re
ONE_MINUTE = 60
ONE_HOUR = 60 * 60
ONE_DAY = 60 * 60 * 24
ONE_WEEK = 60 * 60 * 24 * 7
ONE_MONTH = 60 * 60 * 24 * 30
NONPERSONAL_NETID_EXCEPTION_GROUP = getattr(
settings, 'NONPERSONAL_NETID_EXCEPTION_GROUP', 'none')
class RestClientsCache(RestclientPymemcacheClient):
def get_cache_expiration_time(self, service, url, status=200):
if 'sws' == service:
if re.match(r'^/student/v\d/course/', url):
return ONE_MINUTE * 5
if re.match(
r'^/student/v\d/(?:campus|college|department|curriculum|term)',
url):
return ONE_DAY
if 'pws' == service:
return ONE_HOUR
if 'kws' == service:
if re.match(r'{}'.format(
ENCRYPTION_KEY_URL.format(r'[\-\da-fA-F]{36}')), url):
return ONE_MONTH
if re.match(r'{}'.format(
ENCRYPTION_CURRENT_KEY_URL.format(r'[\-\da-zA-Z]+')), url):
return ONE_WEEK
if 'gws' == service:
if re.match(r'^/group_sws/v\d/group/{}/effective_member/'.format(
NONPERSONAL_NETID_EXCEPTION_GROUP), url):
return ONE_HOUR
if 'canvas' == service:
if re.match(r'^/api/v\d/accounts/sis_account_id:', url):
return ONE_HOUR * 10
if re.match(r'^/api/v\d/accounts/\d+/roles', url):
return ONE_MONTH
def delete_cached_kws_current_key(self, resource_type):
self.deleteCache('kws', ENCRYPTION_CURRENT_KEY_URL.format(
resource_type))
|
from django.conf import settings
from memcached_clients import RestclientPymemcacheClient
from uw_kws import ENCRYPTION_KEY_URL, ENCRYPTION_CURRENT_KEY_URL
import re
ONE_MINUTE = 60
ONE_HOUR = 60 * 60
ONE_DAY = 60 * 60 * 24
ONE_WEEK = 60 * 60 * 24 * 7
ONE_MONTH = 60 * 60 * 24 * 30
NONPERSONAL_NETID_EXCEPTION_GROUP = getattr(
settings, 'NONPERSONAL_NETID_EXCEPTION_GROUP', 'none')
class RestClientsCache(RestclientPymemcacheClient):
def get_cache_expiration_time(self, service, url, status=200):
if 'sws' == service:
if re.match(r'^/student/v\d/course/', url):
return ONE_MINUTE * 5
if re.match(r'^/student/v\d/(?:campus|college|department|term)',
url):
return ONE_HOUR * 10
if 'pws' == service:
return ONE_HOUR
if 'kws' == service:
if re.match(r'{}'.format(
ENCRYPTION_KEY_URL.format(r'[\-\da-fA-F]{36}')), url):
return ONE_MONTH
if re.match(r'{}'.format(
ENCRYPTION_CURRENT_KEY_URL.format(r'[\-\da-zA-Z]+')), url):
return ONE_WEEK
if 'gws' == service:
if re.match(r'^/group_sws/v\d/group/{}/effective_member/'.format(
NONPERSONAL_NETID_EXCEPTION_GROUP), url):
return ONE_HOUR
if 'canvas' == service:
if re.match(r'^/api/v\d/accounts/sis_account_id:', url):
return ONE_HOUR * 10
if re.match(r'^/api/v\d/accounts/\d+/roles', url):
return ONE_MONTH
def delete_cached_kws_current_key(self, resource_type):
self.deleteCache('kws', ENCRYPTION_CURRENT_KEY_URL.format(
resource_type))
|
apache-2.0
|
Python
|
9bb76df67c436d091d85d75c6968ede89d9194b7
|
add testing framework
|
niilohlin/objective-tools,niilohlin/objective-tools
|
statistics/test.py
|
statistics/test.py
|
import unittest
from numberOfPublicMethods import *
class Tests(unittest.TestCase):
@staticmethod
def listOfOCClasses():
return [
ObjectiveCClass(publicMethods=4, className="TestClass4"),
ObjectiveCClass(publicMethods=5, className="TestClass5"),
ObjectiveCClass(publicMethods=2, className="TestClass2"),
ObjectiveCClass(publicMethods=1, className="TestClass1"),
ObjectiveCClass(publicMethods=4, className="TestClass3")]
def test_headersIsClass(self):
self.assertEqual(headers()[0], "class.h")
def test_propertiesIs4(self):
self.assertEqual(parseFile("class.h"), ObjectiveCClass(publicMethods=4, className="TestClass"))
def test_average(self):
self.assertEqual(calculateAveragePublic([ObjectiveCClass(publicMethods=5, className="")]), 5)
def test_findWorst(self):
classes = Tests.listOfOCClasses()
self.assertEqual(findMostPublic(classes, 1), [ObjectiveCClass(publicMethods=5, className="TestClass5")])
if __name__ == "__main__":
unittest.main()
|
from numberOfPublicMethods import *
def headersIsClass():
return headers()[0] == "class.h"
def propertiesIs4():
return parseFile("class.h") == ObjectiveCClass(publicMethods=4, className="TestClass")
if __name__ == "__main__":
print(headersIsClass())
print(propertiesIs4())
print(calculateAveragePublic([ObjectiveCClass(publicMethods=5, className="")]) == 5)
|
mit
|
Python
|
b689cadb696ce07372588b368a6d3709f636ca8a
|
Edit descriptions
|
nerevu/prometheus-api,nerevu/prometheus-api,nerevu/prometheus-api
|
manage.py
|
manage.py
|
from os.path import abspath
from flask import current_app as app
from app import create_app, db
# from app.model import init_db, populate_db()
from flask.ext.script import Manager
manager = Manager(create_app)
manager.add_option('-m', '--cfgmode', dest='config_mode', default='Development')
manager.add_option('-f', '--cfgfile', dest='config_file', type=abspath)
@manager.command
def createdb():
with app.app_context():
"""Creates database"""
db.create_all()
print 'Database created'
@manager.command
def cleardb():
with app.app_context():
"""Removes all content from database"""
db.drop_all()
print 'Database cleared'
@manager.command
def resetdb():
with app.app_context():
"""Removes all content from database and creates new tables"""
db.drop_all()
db.create_all()
print 'Database reset'
@manager.command
def initdb():
with app.app_context():
"""Initializes database with default values"""
db.drop_all()
db.create_all()
init_db()
print 'Database initialized'
@manager.command
def popdb():
with app.app_context():
"""Populates database with sample data"""
db.drop_all()
db.create_all()
init_db()
populate_db()
print 'Database populated'
if __name__ == '__main__':
manager.run()
|
from os.path import abspath
from flask import current_app as app
from app import create_app, db
# from app.model import init_db, populate_db()
from flask.ext.script import Manager
manager = Manager(create_app)
manager.add_option('-m', '--cfgmode', dest='config_mode', default='Development')
manager.add_option('-f', '--cfgfile', dest='config_file', type=abspath)
@manager.command
def createdb():
with app.app_context():
"""Creates database"""
db.create_all()
print 'Database created'
@manager.command
def cleardb():
with app.app_context():
"""Deletes all database tables"""
db.drop_all()
print 'Database cleared'
@manager.command
def resetdb():
with app.app_context():
"""Removes all content from database"""
db.drop_all()
db.create_all()
print 'Database reset'
@manager.command
def initdb():
with app.app_context():
"""Initializes database with default values"""
db.drop_all()
db.create_all()
init_db()
print 'Database initialized'
@manager.command
def popdb():
with app.app_context():
"""Populates database with sample data"""
db.drop_all()
db.create_all()
init_db()
populate_db()
print 'Database populated'
if __name__ == '__main__':
manager.run()
|
mit
|
Python
|
15a37d1e86d9217eec218aadbe53d633335460ae
|
Fix block name.
|
alexsilva/django-xadmin,alexsilva/django-xadmin,alexsilva/django-xadmin,alexsilva/django-xadmin
|
xadmin/templatetags/xadmin_tags.py
|
xadmin/templatetags/xadmin_tags.py
|
from django import template
from django.template import Library
from django.utils import six
from django.utils.safestring import mark_safe
from xadmin.util import static, vendor as util_vendor
register = Library()
@register.simple_tag(takes_context=True)
def view_block(context, block_name, *args, **kwargs):
if 'admin_view' not in context:
return ""
admin_view = context['admin_view']
nodes = []
method_name = 'block_%s' % block_name.replace('-', '_')
cls_str = str if six.PY3 else basestring
for view in [admin_view] + admin_view.plugins:
if hasattr(view, method_name) and callable(getattr(view, method_name)):
block_func = getattr(view, method_name)
result = block_func(context, nodes, *args, **kwargs)
if result and isinstance(result, cls_str):
nodes.append(result)
if nodes:
return mark_safe(''.join(nodes))
else:
return ""
@register.filter
def admin_urlname(value, arg):
return 'xadmin:%s_%s_%s' % (value.app_label, value.model_name, arg)
static = register.simple_tag(static)
@register.simple_tag(takes_context=True)
def vendor(context, *tags):
return util_vendor(*tags).render()
class BlockcaptureNode(template.Node):
"""https://chriskief.com/2013/11/06/conditional-output-of-a-django-block/"""
def __init__(self, nodelist, varname):
self.nodelist = nodelist
self.varname = varname
def render(self, context):
output = self.nodelist.render(context)
context[self.varname] = str(output)
return ''
@register.tag(name='blockcapture')
def do_blockcapture(parser, token):
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("'blockcapture' node requires a variable name.")
nodelist = parser.parse(('endblockcapture',))
parser.delete_first_token()
return BlockcaptureNode(nodelist, args)
|
from django import template
from django.template import Library
from django.utils import six
from django.utils.safestring import mark_safe
from xadmin.util import static, vendor as util_vendor
register = Library()
@register.simple_tag(takes_context=True)
def view_block(context, block_name, *args, **kwargs):
if 'admin_view' not in context:
return ""
admin_view = context['admin_view']
nodes = []
method_name = 'block_%s' % block_name
cls_str = str if six.PY3 else basestring
for view in [admin_view] + admin_view.plugins:
if hasattr(view, method_name) and callable(getattr(view, method_name)):
block_func = getattr(view, method_name)
result = block_func(context, nodes, *args, **kwargs)
if result and isinstance(result, cls_str):
nodes.append(result)
if nodes:
return mark_safe(''.join(nodes))
else:
return ""
@register.filter
def admin_urlname(value, arg):
return 'xadmin:%s_%s_%s' % (value.app_label, value.model_name, arg)
static = register.simple_tag(static)
@register.simple_tag(takes_context=True)
def vendor(context, *tags):
return util_vendor(*tags).render()
class BlockcaptureNode(template.Node):
"""https://chriskief.com/2013/11/06/conditional-output-of-a-django-block/"""
def __init__(self, nodelist, varname):
self.nodelist = nodelist
self.varname = varname
def render(self, context):
output = self.nodelist.render(context)
context[self.varname] = str(output)
return ''
@register.tag(name='blockcapture')
def do_blockcapture(parser, token):
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("'blockcapture' node requires a variable name.")
nodelist = parser.parse(('endblockcapture',))
parser.delete_first_token()
return BlockcaptureNode(nodelist, args)
|
bsd-3-clause
|
Python
|
1eb025811e5cc7df5b0185d34f053379d52b26ab
|
Remove create_admin command
|
renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar
|
manage.py
|
manage.py
|
from flask_script import Manager
from radar.app import create_app
from radar.lib.database import db
from radar.lib import fixtures
app = create_app('settings.py')
manager = Manager(app)
@manager.command
def create_tables():
db.drop_all()
db.create_all()
@manager.command
def drop_tables():
db.drop_all()
@manager.command
def load_data():
fixtures.create_fixtures()
db.session.commit()
@manager.command
def reload_data():
create_tables()
load_data()
if __name__ == '__main__':
manager.run()
|
from flask_script import Manager
from radar.app import create_app
from radar.lib.database import db
from radar.models.users import User
from radar.lib import fixtures
app = create_app('settings.py')
manager = Manager(app)
@manager.command
def create_tables():
db.drop_all()
db.create_all()
@manager.command
def drop_tables():
db.drop_all()
@manager.command
def create_admin():
user = User()
user.username = 'admin'
user.email = '[email protected]'
user.set_password('password')
user.is_admin = True
db.session.add(user)
db.session.commit()
@manager.command
def load_data():
fixtures.create_fixtures()
db.session.commit()
@manager.command
def reload_data():
create_tables()
load_data()
if __name__ == '__main__':
manager.run()
|
agpl-3.0
|
Python
|
74959fa6f12d5be7491f1fbf3d99b1678486c311
|
bump version for release
|
srikalyan/slacksocket,bcicen/slacksocket,vektorlab/slacksocket,graywizardx/slacksocket,hfwang/slacksocket
|
slacksocket/version.py
|
slacksocket/version.py
|
version = '0.5.0'
|
version = '0.4.4'
|
mit
|
Python
|
e3e3b59654133bd33c708343976825bb0c68d6f1
|
use development config as default
|
cenkalti/pypi-notifier,cenkalti/pypi-notifier
|
manage.py
|
manage.py
|
#!/usr/bin/env python
import os
import errno
import logging
from flask import current_app
from flask.ext.script import Manager
from pypi_notifier import create_app, db, models, cache
logging.basicConfig(level=logging.DEBUG)
manager = Manager(create_app)
try:
# Must be a class name from config.py
config = os.environ['PYPI_NOTIFIER_CONFIG']
except KeyError:
print "PYPI_NOTIFIER_CONFIG is not found in env, using DevelopmentConfig."
print 'If you want to use another config please set it as ' \
'"export PYPI_NOTIFIER_CONFIG=ProductionConfig".'
config = 'DevelopmentConfig'
manager.add_option('-c', '--config', dest='config', required=False,
default=config)
@manager.shell
def make_shell_context():
return dict(app=current_app, db=db, models=models)
@manager.command
def init_db():
db.create_all()
@manager.command
def drop_db():
try:
os.unlink('/tmp/pypi_notifier.db')
except OSError as e:
if e.errno != errno.ENOENT:
raise
@manager.command
def fetch_package_list():
models.Package.get_all_names()
@manager.command
def clear_cache():
cache.clear()
@manager.command
def find_latest(name):
print models.Package(name).find_latest_version()
@manager.command
def update_users():
models.User.update_all_users_from_github()
@manager.command
def update_repos():
models.Repo.update_all_repos()
@manager.command
def update_packages():
models.Package.update_all_packages()
@manager.command
def send_emails():
models.User.send_emails()
if __name__ == '__main__':
manager.run()
|
#!/usr/bin/env python
import os
import errno
import logging
from flask import current_app
from flask.ext.script import Manager
from pypi_notifier import create_app, db, models, cache
logging.basicConfig(level=logging.DEBUG)
manager = Manager(create_app)
# Must be a class name from config.py
config = os.environ['PYPI_NOTIFIER_CONFIG']
manager.add_option('-c', '--config', dest='config', required=False,
default=config)
@manager.shell
def make_shell_context():
return dict(app=current_app, db=db, models=models)
@manager.command
def init_db():
db.create_all()
@manager.command
def drop_db():
try:
os.unlink('/tmp/pypi_notifier.db')
except OSError as e:
if e.errno != errno.ENOENT:
raise
@manager.command
def fetch_package_list():
models.Package.get_all_names()
@manager.command
def clear_cache():
cache.clear()
@manager.command
def find_latest(name):
print models.Package(name).find_latest_version()
@manager.command
def update_users():
models.User.update_all_users_from_github()
@manager.command
def update_repos():
models.Repo.update_all_repos()
@manager.command
def update_packages():
models.Package.update_all_packages()
@manager.command
def send_emails():
models.User.send_emails()
if __name__ == '__main__':
manager.run()
|
mit
|
Python
|
26fc40f3ca729147e838af4d98362484bed776df
|
Simplify main function in problem58.py
|
mjwestcott/projecteuler,mjwestcott/projecteuler,mjwestcott/projecteuler
|
euler_python/problem58.py
|
euler_python/problem58.py
|
"""
problem58.py
Starting with 1 and spiralling anticlockwise in the following way, a square
spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right
diagonal, but what is more interesting is that 8 out of the 13 numbers lying
along both diagonals are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above, a square spiral
with side length 9 will be formed. If this process is continued, what is the
side length of the square spiral for which the ratio of primes along both
diagonals first falls below 10%?
"""
from itertools import count
from math import sqrt
from toolset import is_prime, quantify
def square_length(n):
"Given the bottom right corner number, return the square length"
return int(sqrt(n))
def corners(n):
"Given the bottom right corner number, return the four corner numbers"
# 49 --> [49, 43, 37, 31]
x = square_length(n) - 1
return [n, n-x, n-(2*x), n-(3*x)]
def problem58():
length = 7
primes = 8
total = 13
while primes/total > 0.1:
length += 2
primes += quantify(corners(length**2), pred=is_prime)
total += 4
return length
|
"""
problem58.py
Starting with 1 and spiralling anticlockwise in the following way, a square
spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom right
diagonal, but what is more interesting is that 8 out of the 13 numbers lying
along both diagonals are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above, a square spiral
with side length 9 will be formed. If this process is continued, what is the
side length of the square spiral for which the ratio of primes along both
diagonals first falls below 10%?
"""
from itertools import count
from math import sqrt
from toolset import is_prime, quantify
def square_length(n):
"Given the bottom right corner number, return the square length"
return int(sqrt(n))
def corners(n):
"Given the bottom right corner number, return the four corner numbers"
# 49 --> [49, 43, 37, 31]
x = square_length(n) - 1
return [n, n-x, n-(2*x), n-(3*x)]
def problem58():
# Yields all four corners from each new layer, starting at fifth layer.
# next(all_corners) --> [81, 73, 65, 57], [121, 111, 101, 91], ...
all_corners = (corners(x**2) for x in count(start=9, step=2))
primes, total = 8, 13
while True:
cs = next(all_corners)
primes += quantify(cs, pred=is_prime)
total += 4
if primes / total < 0.10:
# cs[0] is the bottom right corner number
return square_length(cs[0])
|
mit
|
Python
|
68e32ab4c763461ffbbea6a3ed698f66fdb48d4d
|
Use only user_id and course_id during the kNN computation: speedup is 5x Previously most time was consumed in db queries and suprisingly in __hash__ methods (seem that hashing a django model takes longer that hashing a int)
|
UrLab/beta402,UrLab/DocHub,UrLab/DocHub,UrLab/beta402,UrLab/beta402,UrLab/DocHub,UrLab/DocHub
|
catalog/predictions.py
|
catalog/predictions.py
|
from catalog.models import Course
import collections
from django.contrib.contenttypes.models import ContentType
from actstream.models import Follow
def distance(v1, v2):
absolute_difference = [abs(c1 - c2) for c1, c2 in zip(v1, v2)]
distance = sum(absolute_difference)
return distance
def get_users_following_dict():
course_type = ContentType.objects.get(app_label="catalog", model="course")
follows = Follow.objects.filter(content_type=course_type).only('user_id', 'object_id')
following_dict = collections.defaultdict(set)
for follow in follows:
following_dict[follow.user_id].add(int(follow.object_id))
return following_dict
def suggest(target_user, K=15):
courses = Course.objects.only('id')
users_following = get_users_following_dict()
vectors = {}
for user_id, following in users_following.items():
vectors[user_id] = [course.id in following for course in courses]
target_vector = vectors[target_user.id]
distances = {user_id: distance(target_vector, vector) for user_id, vector in vectors.items()}
non_null_distances = {user_id: distance for user_id, distance in distances.items() if distance > 0}
get_score = lambda x: x[1]
neighbors = sorted(non_null_distances.items(), key=get_score)[:K]
best_matches = collections.Counter()
target_set = users_following[target_user.id]
for user_id, score in neighbors:
differences = users_following[user_id] - target_set
best_matches.update(differences)
return [(Course.objects.get(id=course_id), hits) for course_id, hits in best_matches.most_common()]
|
from users.models import User
from catalog.models import Course
import collections
from django.contrib.contenttypes.models import ContentType
from actstream.models import Follow
def distance(v1, v2):
absolute_difference = [abs(c1 - c2) for c1, c2 in zip(v1, v2)]
distance = sum(absolute_difference)
return distance
def get_users_following_dict():
course_type = ContentType.objects.get(app_label="catalog", model="course")
follows = Follow.objects\
.filter(content_type=course_type)\
.select_related('user')\
.prefetch_related('follow_object')
following_dict = collections.defaultdict(set)
for follow in follows:
following_dict[follow.user.netid].add(follow.follow_object)
return following_dict
def suggest(target_user, K=15):
courses = Course.objects.all()
users = {user.netid: user for user in User.objects.all()}
users_following = get_users_following_dict()
vectors = {}
for netid, user in users.items():
following = users_following[netid]
vectors[netid] = [course in following for course in courses]
target_vector = vectors[target_user.netid]
distances = {netid: distance(target_vector, vector) for netid, vector in vectors.items()}
non_null_distances = {netid: distance for netid, distance in distances.items() if distance > 0}
get_score = lambda x: x[1]
neighbors = sorted(non_null_distances.items(), key=get_score)[:K]
best_matches = collections.Counter()
target_set = users_following[target_user.netid]
for netid, score in neighbors:
differences = users_following[netid] - target_set
best_matches.update(differences)
return best_matches
|
agpl-3.0
|
Python
|
797c4405aa2b75da9b7bdbb7e0e26f8bae3308b6
|
handle BadPickleGet on restore
|
felipecruz/coopy,felipecruz/coopy
|
coopy/restore.py
|
coopy/restore.py
|
import logging
import fileutils
from snapshot import SnapshotManager
from foundation import RestoreClock
from cPickle import Unpickler, BadPickleGet
logger = logging.getLogger("coopy")
LOG_PREFIX = '[RESTORE] '
def restore(system, basedir):
#save current clock
current_clock = system._clock
#restore from snapshot
system = SnapshotManager(basedir).recover_snapshot()
files = fileutils.last_log_files(basedir)
logger.debug(LOG_PREFIX + "Files found: " + str(files))
if not files:
return system
actions = []
for file in files:
logger.debug(LOG_PREFIX + "Opening " + str(file))
unpickler = Unpickler(open(file,'rb'))
try:
while True:
action = unpickler.load()
logger.debug(LOG_PREFIX + action.action)
actions.append(action)
except BadPickleGet:
logger.error(LOG_PREFIX + "Error unpickling %s" % (str(file)))
except EOFError:
pass
if not actions:
return system
logger.debug(LOG_PREFIX + "Actions re-execution")
for action in actions:
try:
system._clock = RestoreClock(action.timestamps)
action.execute_action(system)
except Exception as e:
logger.debug(LOG_PREFIX + 'Error executing :' + str(action))
system._clock = current_clock
return system
|
import logging
import fileutils
from snapshot import SnapshotManager
from foundation import RestoreClock
from cPickle import Unpickler
logger = logging.getLogger("coopy")
LOG_PREFIX = '[RESTORE] '
def restore(system, basedir):
#save current clock
current_clock = system._clock
#restore from snapshot
system = SnapshotManager(basedir).recover_snapshot()
files = fileutils.last_log_files(basedir)
logger.debug(LOG_PREFIX + "Files found: " + str(files))
if not files:
return system
actions = []
for file in files:
logger.debug(LOG_PREFIX + "Opening " + str(file))
unpickler = Unpickler(open(file,'rb'))
try:
while True:
action = unpickler.load()
logger.debug(LOG_PREFIX + action.action)
actions.append(action)
except EOFError:
pass
if not actions:
return system
logger.debug(LOG_PREFIX + "Actions re-execution")
for action in actions:
try:
system._clock = RestoreClock(action.timestamps)
action.execute_action(system)
except Exception as e:
logger.debug(LOG_PREFIX + 'Error executing :' + str(action))
system._clock = current_clock
return system
|
bsd-3-clause
|
Python
|
4c9b8a55d26df7421decdc05236499f61583ab38
|
fix smart folder content getter
|
ecreall/nova-ideo,ecreall/nova-ideo,ecreall/nova-ideo,ecreall/nova-ideo,ecreall/nova-ideo
|
novaideo/utilities/smart_folder_utility.py
|
novaideo/utilities/smart_folder_utility.py
|
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# avalaible on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from hypatia.util import ResultSet
from substanced.util import get_oid, find_objectmap
from novaideo.views.filter import (
find_entities)
from novaideo.utilities.util import (
deepcopy)
from novaideo.views.filter.util import QUERY_OPERATORS
def get_adapted_filter(folder, user):
return {'select': ['metadata_filter',
'contribution_filter', 'temporal_filter',
'text_filter', 'other_filter']}
def get_folder_content(folder, user,
add_query=None,
**args):
_filters = deepcopy(getattr(folder, 'filters', []))
objects = []
if _filters:
query = None
if add_query:
query = QUERY_OPERATORS['and'](query, add_query)
objects = find_entities(
user=user,
add_query=query,
filters=_filters,
filter_op='or',
**args)
oids = [get_oid(c) for c in folder.contents]
if args:
contents = find_entities(
user=user,
intersect=oids,
**args)
oids = contents.ids if not isinstance(contents, list) else contents
if isinstance(objects, list):
objectmap = find_objectmap(folder)
objects = ResultSet(oids, len(oids), objectmap.object_for)
else: # ResultSet
objects.ids = list(objects.ids)
objects.ids.extend(oids)
objects.numids += len(oids)
return objects
|
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# avalaible on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from substanced.util import get_oid
from dace.util import get_obj
from novaideo.views.filter import (
find_entities)
from novaideo.utilities.util import (
deepcopy)
from novaideo.views.filter.util import QUERY_OPERATORS
def get_adapted_filter(folder, user):
return {'select': ['metadata_filter',
'contribution_filter', 'temporal_filter',
'text_filter', 'other_filter']}
def get_folder_content(folder, user,
add_query=None,
**args):
_filters = deepcopy(getattr(folder, 'filters', []))
objects = []
if _filters:
query = None
if add_query:
query = QUERY_OPERATORS['and'](query, add_query)
objects = find_entities(
user=user,
add_query=query,
filters=_filters,
filter_op='or',
**args)
oids = [get_oid(c) for c in folder.contents]
if args:
contents = find_entities(
user=user,
intersect=oids,
**args)
oids = contents.ids if not isinstance(contents, list) else contents
if isinstance(objects, list):
objects.extend([get_obj(o) for o in oids])
else: # ResultSet
objects.ids = list(objects.ids)
objects.ids.extend(oids)
objects.numids += len(oids)
return objects
|
agpl-3.0
|
Python
|
70db54eba970f8e8f6c42587675f1525002ea12f
|
Update wrong status code stated in docstring
|
timothycrosley/hug,timothycrosley/hug,timothycrosley/hug
|
hug/redirect.py
|
hug/redirect.py
|
"""hug/redirect.py
Implements convience redirect methods that raise a redirection exception when called
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import falcon
def to(location, code=falcon.HTTP_302):
"""Redirects to the specified location using the provided http_code (defaults to HTTP_302 FOUND)"""
raise falcon.http_status.HTTPStatus(code, {'location': location})
def permanent(location):
"""Redirects to the specified location using HTTP 301 status code"""
to(location, falcon.HTTP_301)
def found(location):
"""Redirects to the specified location using HTTP 302 status code"""
to(location, falcon.HTTP_302)
def see_other(location):
"""Redirects to the specified location using HTTP 303 status code"""
to(location, falcon.HTTP_303)
def temporary(location):
"""Redirects to the specified location using HTTP 307 status code"""
to(location, falcon.HTTP_307)
def not_found(*args, **kwargs):
"""Redirects request handling to the not found render"""
raise falcon.HTTPNotFound()
|
"""hug/redirect.py
Implements convience redirect methods that raise a redirection exception when called
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import falcon
def to(location, code=falcon.HTTP_302):
"""Redirects to the specified location using the provided http_code (defaults to HTTP_302 FOUND)"""
raise falcon.http_status.HTTPStatus(code, {'location': location})
def permanent(location):
"""Redirects to the specified location using HTTP 301 status code"""
to(location, falcon.HTTP_301)
def found(location):
"""Redirects to the specified location using HTTP 302 status code"""
to(location, falcon.HTTP_302)
def see_other(location):
"""Redirects to the specified location using HTTP 303 status code"""
to(location, falcon.HTTP_303)
def temporary(location):
"""Redirects to the specified location using HTTP 304 status code"""
to(location, falcon.HTTP_307)
def not_found(*args, **kwargs):
"""Redirects request handling to the not found render"""
raise falcon.HTTPNotFound()
|
mit
|
Python
|
1db7ef99aef8691600d74b23a751c6a753e2a5da
|
Update : Enhancing timer function and exec times storage
|
oleiade/Hurdles
|
hurdles/base.py
|
hurdles/base.py
|
# -*- coding:utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
import time
from collections import namedtuple
from functools import wraps
from inspect import getmembers, ismethod
ExecTimeCollection = namedtuple('ExecTimeCollection', ['times', 'scale'])
def extra_setup(setup_code):
"""Allows to setup some extra context to it's decorated function.
As a convention, the bench decorated function should always handle
*args and **kwargs. Kwargs will be updated with the extra context
set by the decorator.
Example:
@extra_setup("l = [x for x in xrange(100)]")
def bench_len(self, *args, **kwargs):
print len(kwargs['l'])
"""
def decorator(func):
@wraps(func)
def decorated_function(*args, **kwargs):
exec setup_code in {}, kwargs
return func(*args, **kwargs)
return decorated_function
return decorator
class BenchCase(object):
def __init__(self):
self._benchmarks = []
self.results = {
'exec_times': {},
'averages': {},
}
def setUp(self):
"""Hook method for setting up the benchmark
fixture before exercising it."""
pass
def tearDown(self):
"""Hook method for deconstructing the benchmark
fixture after testing it."""
pass
@property
def benchmarks(self):
if not self._benchmarks:
bench_case_methods = getmembers(self.__class__, predicate=ismethod)
for (method_name, method_value) in bench_case_methods:
if method_name.startswith('bench_'):
self._benchmarks.append((method_name, method_value))
return self._benchmarks
def tick(self, func, *args, **kwargs):
"""Times a function execution in miliseconds"""
start = time.time()
func(*args, **kwargs)
end = time.time()
exec_time = round(((end - start) * 1000), 2)
return exec_time
def run(self, repeat=10):
for method_name, method_value in self.benchmarks:
self.setUp()
exec_times = ExecTimeCollection(times=[self.tick(method_value, self) for x in [0.0] * repeat],
scale='ms')
average = sum(exec_times.times) / repeat
self.results['exec_times'].update({method_name: exec_times})
self.results['averages'].update({method_name: average})
self.tearDown()
|
# -*- coding:utf-8 -*-
# Copyright (c) 2012 theo crevon
#
# See the file LICENSE for copying permission.
import time
from functools import wraps
from inspect import getmembers, ismethod
def time_it(func, *args, **kwargs):
"""
Decorator whichs times a function execution.
"""
start = time.time()
func(*args, **kwargs)
end = time.time()
exec_time = "%s (%0.3f ms)" % (func.func_name, (end - start) * 1000)
return exec_time
def extra_setup(setup_code):
"""Allows to setup some extra context to it's decorated function.
As a convention, the bench decorated function should always handle
*args and **kwargs. Kwargs will be updated with the extra context
set by the decorator.
Example:
@extra_setup("l = [x for x in xrange(100)]")
def bench_len(self, *args, **kwargs):
print len(kwargs['l'])
"""
def decorator(func):
@wraps(func)
def decorated_function(*args, **kwargs):
exec setup_code in {}, kwargs
return func(*args, **kwargs)
return decorated_function
return decorator
class BenchCase(object):
def __init__(self):
self._benchmarks = []
def setUp(self):
"""Hook method for setting up the benchmark
fixture before exercising it."""
pass
def tearDown(self):
"""Hook method for deconstructing the benchmark
fixture after testing it."""
pass
@property
def benchmarks(self):
if not self._benchmarks:
bench_case_methods = getmembers(self.__class__, predicate=ismethod)
for (method_name, method_value) in bench_case_methods:
if method_name.startswith('bench_'):
self._benchmarks.append((method_name, method_value))
return self._benchmarks
def run(self):
for method_name, method_value in self.benchmarks:
self.setUp()
time_it(method_value, self)
self.tearDown()
|
mit
|
Python
|
aebe8c0b586b408b55b13d8ddd3a974c194455a6
|
Update categorical_braninhoo_example.py
|
schevalier/Whetlab-Python-Client,schevalier/Whetlab-Python-Client
|
examples/categorical_braninhoo_example.py
|
examples/categorical_braninhoo_example.py
|
# In this example we will optimize the 'Braninhoo' optimization benchmark with a small twist to
# demonstrate how to set up a categorical variable. There is also a constraint on the function.
import whetlab
import numpy as np
# Define parameters to optimize
parameters = { 'X' : {'type':'float','min':0,'max':15,'size':1},
'Y' : {'type':'float','min':-5,'max':10,'size':1},
'Z' : {'type': 'enum', 'options': ['bad','Good!','OK']}}
#access_token = ''
name = 'Categorical Braninhoo'
description = 'Optimize the categorical braninhoo optimization benchmark'
outcome = {'name':'Negative Categorical Braninhoo output', 'type':'float'}
scientist = whetlab.Experiment(name=name, description=description, parameters=parameters, outcome=outcome)
# Braninhoo function
def categorical_braninhoo(X,Y,Z):
if X > 10:
return np.nan
Z = 1 if Z == 'Good!' else 2 if Z == 'OK' else 3
return np.square(Y - (5.1/(4*np.square(np.pi)))*np.square(X) + (5/np.pi)*X - 6) + 10*(1-(1./(8*np.pi)))*np.cos(X) + 10*Z;
for i in range(10000):
# Get suggested new experiment
job = scientist.suggest()
# Perform experiment
print job
outcome = -categorical_braninhoo(**job)
print outcome
# Inform scientist about the outcome
scientist.update(job,outcome)
|
import whetlab
import numpy as np
# Define parameters to optimize
parameters = { 'X' : {'type':'float','min':0,'max':15,'size':1},
'Y' : {'type':'float','min':-5,'max':10,'size':1},
'Z' : {'type': 'enum', 'options': ['bad','Good!','OK']}}
#access_token = ''
name = 'Categorical Braninhoo'
description = 'Optimize the categorical braninhoo optimization benchmark'
outcome = {'name':'Negative Categorical Braninhoo output', 'type':'float'}
scientist = whetlab.Experiment(name=name, description=description, parameters=parameters, outcome=outcome)
# Braninhoo function
def categorical_braninhoo(X,Y,Z):
if X > 10:
return np.nan
Z = 1 if Z == 'Good!' else 2 if Z == 'OK' else 3
return np.square(Y - (5.1/(4*np.square(np.pi)))*np.square(X) + (5/np.pi)*X - 6) + 10*(1-(1./(8*np.pi)))*np.cos(X) + 10*Z;
for i in range(10000):
# Get suggested new experiment
job = scientist.suggest()
# Perform experiment
print job
outcome = -categorical_braninhoo(**job)
print outcome
# Inform scientist about the outcome
scientist.update(job,outcome)
|
bsd-3-clause
|
Python
|
c3c986e08dadf3ecdd4f94eca6abf36c22a1c209
|
Update DB/ Deploy
|
alexbotello/BastionBot
|
models.py
|
models.py
|
from sqlalchemy import create_engine, Column, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def db_connect():
"""
Performs database connection
Returns sqlalchemy engine instance
"""
return create_engine('postgres://fcvxvbdsuotypy:a3b010cca1fa4e6949ff39c11c6'
'e0b9edf9ce67a650535436dc349ba29b8c751@ec2-54-243-253-'
'17.compute-1.amazonaws.com:5432/'
'dfl66jjoa0etqc', echo=False)
def create_battletag_table(engine):
Base.metadata.create_all(engine)
class Battletags(Base):
"""
Table to store user battletags
"""
__tablename__ = 'Battletags'
disc_name = Column(String, primary_key=True)
battletag = Column(String, unique=True)
|
from sqlalchemy import create_engine, Column, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def db_connect():
"""
Performs database connection
Returns sqlalchemy engine instance
"""
return create_engine('postgres://fbcmeskynsvati:aURfAdENt6-kumO0j224GuXRWH'
'@ec2-54-221-235-135.compute-1.amazonaws.com'
':5432/d2cc1tb2t1iges', echo=False)
def create_battletag_table(engine):
Base.metadata.create_all(engine)
class Battletags(Base):
"""
Table to store user battletags
"""
__tablename__ = 'Battletags'
disc_name = Column(String, primary_key=True)
battletag = Column(String, unique=True)
|
mit
|
Python
|
fac8a172a16da011bab9e41afc52f24f833687fc
|
Simplify the date calculation
|
bbolli/twitter-monday
|
monday.py
|
monday.py
|
#! /usr/bin/env python
"""Run this during the week to write last week's short-form entry"""
from __future__ import print_function
from datetime import datetime, timedelta
import errno
import operator
import os
import sys
def sunday_after(dt, offset=1):
"""offset == 3 means 3rd Sunday from now, -2 means two Sundays back"""
if offset == 0:
raise ArgumentError("offset must be nonzero")
if offset > 0:
offset -= 1
dt += timedelta(days=offset * 7)
# 23:59:59 on next Sunday
s = dt + timedelta(days=6 - dt.weekday())
s = s.replace(hour=23, minute=59, second=59, microsecond=0)
# Watch out for DST transition
#s -= s.gmtoff - t.gmtoff
return s
class Week:
# Monday-to-Sunday week of tweets around mid_week
def __init__(self, mid_week):
latest = sunday_after(mid_week, 1)
earliest = sunday_after(mid_week, -1)
"""r = Reader.new
@tweets = []
while true do
tweet = r.next
break if tweet.time <= earliest
@tweets << tweet if tweet.time <= latest
end"""
class MockedTweet:
def __init__(self, nr):
self.time = latest - timedelta(days=nr, hours=nr * 2, minutes=nr * 3)
def __repr__(self):
return "\nMockedTweet from %s" % self.time
self.tweets = [MockedTweet(1), MockedTweet(2), MockedTweet(3), MockedTweet(4)]
self.tweets.sort(key=operator.attrgetter('time'))
@property
def sunday(self):
return sunday_after(self.tweets[0].time)
def entry(tweets, sunday):
return "blosxom entry for week ending %s:\n%r" % (sunday, tweets)
def main():
w = Week(datetime.now() - timedelta(days=7))
sunday = w.sunday
year = '%04d' % sunday.year
path = os.path.join('tweets', year[:-1] + 'x', year)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(path, '%02d-%02d.txt' % (sunday.month, sunday.day))
with open(path, 'w') as f:
f.write(entry(w.tweets, sunday))
print("Wrote", path)
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
"""Run this during the week to write last week's short-form entry"""
from __future__ import print_function
from datetime import datetime, timedelta
import errno
import operator
import os
import sys
def sunday_after(dt, offset=1):
"""offset == 3 means 3rd Sunday from now, -2 means two Sundays back"""
if offset == 0:
raise ArgumentError("offset must be nonzero")
if offset > 0:
offset -= 1
dt += timedelta(days=offset * 7)
# 23:59:59 on next Sunday
days = 6 - dt.weekday()
hours = 23 - dt.hour
mins = 59 - dt.minute
sec = 59 - dt.second
s = dt + timedelta(days=days, hours=hours, minutes=mins, seconds=sec)
s = s.replace(microsecond=0)
# Watch out for DST transition
#s -= s.gmtoff - t.gmtoff
return s
class Week:
# Monday-to-Sunday week of tweets around mid_week
def __init__(self, mid_week):
latest = sunday_after(mid_week, 1)
earliest = sunday_after(mid_week, -1)
"""r = Reader.new
@tweets = []
while true do
tweet = r.next
break if tweet.time <= earliest
@tweets << tweet if tweet.time <= latest
end"""
class MockedTweet:
def __init__(self, nr):
self.time = latest - timedelta(days=nr, hours=nr * 2, minutes=nr * 3)
def __repr__(self):
return "\nMockedTweet from %s" % self.time
self.tweets = [MockedTweet(1), MockedTweet(2), MockedTweet(3), MockedTweet(4)]
self.tweets.sort(key=operator.attrgetter('time'))
@property
def sunday(self):
return sunday_after(self.tweets[0].time)
def entry(tweets, sunday):
return "blosxom entry for week ending %s:\n%r" % (sunday, tweets)
def main():
w = Week(datetime.now() - timedelta(days=7))
sunday = w.sunday
year = '%04d' % sunday.year
path = os.path.join('tweets', year[:-1] + 'x', year)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(path, '%02d-%02d.txt' % (sunday.month, sunday.day))
with open(path, 'w') as f:
f.write(entry(w.tweets, sunday))
print("Wrote", path)
if __name__ == '__main__':
main()
|
mit
|
Python
|
d4ef5e2cb956d7ac7b28497cdc849f7c2bc85712
|
add radio by default
|
dinoperovic/django-shop-catalog,dinoperovic/django-shop-catalog
|
shop_catalog/settings.py
|
shop_catalog/settings.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
SLUG_FIELD_HELP_TEXT = _(
'Can only contain the letters a-z, A-Z, digits, minus and underscores, '
'and can\'t start with a digit.')
PRODUCT_CHANGE_FORM_TEMPLATE = (
'admin/shop_catalog/product_change_form.html')
ATTRIBUTE_TEMPLATE_CHOICES = getattr(
settings, 'SHOP_CATALOG_ATTRIBUTE_TEMPLATE_CHOICES', (
('radio', _('Radio')),
))
HAS_CATEGORIES = getattr(settings, 'SHOP_CATALOG_HAS_CATEGORIES', True)
HAS_BRANDS = getattr(settings, 'SHOP_CATALOG_HAS_BRANDS', True)
HAS_MANUFACTURERS = getattr(settings, 'SHOP_CATALOG_HAS_MANUFACTURERS', True)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
SLUG_FIELD_HELP_TEXT = _(
'Can only contain the letters a-z, A-Z, digits, minus and underscores, '
'and can\'t start with a digit.')
PRODUCT_CHANGE_FORM_TEMPLATE = (
'admin/shop_catalog/product_change_form.html')
ATTRIBUTE_TEMPLATE_CHOICES = ()
HAS_CATEGORIES = getattr(settings, 'SHOP_CATALOG_HAS_CATEGORIES', True)
HAS_BRANDS = getattr(settings, 'SHOP_CATALOG_HAS_BRANDS', True)
HAS_MANUFACTURERS = getattr(settings, 'SHOP_CATALOG_HAS_MANUFACTURERS', True)
|
bsd-3-clause
|
Python
|
0b635ffb77acb362c34769a72dfd6d0063c32f38
|
Handle range of success codes
|
chargehound/chargehound-python
|
chargehound/api_requestor.py
|
chargehound/api_requestor.py
|
from __future__ import unicode_literals
import chargehound
import requests
from chargehound.error import create_chargehound_error
from chargehound.version import VERSION
class APIRequestor(object):
def parse_response(self, response):
payload = response.json()
if response.status_code < 400:
return payload
else:
raise create_chargehound_error(payload)
def handle_callback(self, callback):
def handle_response(response, **kwargs):
parsed = self.parse_response(response)
callback(parsed)
return handle_response
def get_url(self, path):
return 'https://' + chargehound.host + chargehound.base_path + path
def make_request(self, method, path, params=None, data=None,
callback=None):
headers = {
'accept': 'application/json',
'user-agent': 'Chargehound/v1 PythonBindings/%s' % VERSION
}
auth = (chargehound.api_key, '')
if callback:
hooks = dict(response=self.handle_callback(callback))
else:
hooks = None
if method == 'get':
return self.parse_response(requests.get(self.get_url(path),
auth=auth,
params=params,
headers=headers,
hooks=hooks))
elif method == 'post':
return self.parse_response(requests.post(self.get_url(path),
auth=auth,
json=data,
headers=headers,
hooks=hooks))
def request(self, method, path, params=None, data=None, callback=None):
if callback is None:
return self.make_request(method, path, params, data)
else:
return self.make_request(method, path, params, data, callback)
|
from __future__ import unicode_literals
import chargehound
import requests
from chargehound.error import create_chargehound_error
from chargehound.version import VERSION
class APIRequestor(object):
def parse_response(self, response):
payload = response.json()
if response.status_code == 200:
return payload
else:
raise create_chargehound_error(payload)
def handle_callback(self, callback):
def handle_response(response, **kwargs):
parsed = self.parse_response(response)
callback(parsed)
return handle_response
def get_url(self, path):
return 'https://' + chargehound.host + chargehound.base_path + path
def make_request(self, method, path, params=None, data=None,
callback=None):
headers = {
'accept': 'application/json',
'user-agent': 'Chargehound/v1 PythonBindings/%s' % VERSION
}
auth = (chargehound.api_key, '')
if callback:
hooks = dict(response=self.handle_callback(callback))
else:
hooks = None
if method == 'get':
return self.parse_response(requests.get(self.get_url(path),
auth=auth,
params=params,
headers=headers,
hooks=hooks))
elif method == 'post':
return self.parse_response(requests.post(self.get_url(path),
auth=auth,
json=data,
headers=headers,
hooks=hooks))
def request(self, method, path, params=None, data=None, callback=None):
if callback is None:
return self.make_request(method, path, params, data)
else:
return self.make_request(method, path, params, data, callback)
|
mit
|
Python
|
41537854b93137b9455c194645778d05e94ec33c
|
Fix error when deleting directories.
|
Sable/mclab-ide,Sable/mclab-ide,Sable/mclab-ide,Sable/mclab-ide,Sable/mclab-ide,Sable/mclab-ide
|
ide/projects.py
|
ide/projects.py
|
import errno
import os
import shutil
WORKSPACE_DIR = os.path.expanduser('~/mclab-ide-projects')
def get_all_projects():
mkdir_p(WORKSPACE_DIR)
return map(Project, os.listdir(WORKSPACE_DIR))
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
class Project(object):
def __init__(self, name):
self.name = name
self.root = os.path.join(WORKSPACE_DIR, self.name)
def exists(self):
return os.path.exists(self.root)
def create(self):
mkdir_p(self.root)
self.write_file('ide_entry_point.m', '''
function ide_entry_point()
% This function is used as an entry point for profiling runs, which
% provide the data powering features such as jump-to-definition and
% find callers. You should fill it in with code that exercises as
% much of your project as possible.
end'''[1:])
def delete(self):
shutil.rmtree(self.root)
def files(self):
for dirpath, _, paths in os.walk(self.root):
for path in paths:
if not path.startswith('.'):
yield os.path.join(dirpath, path)[len(self.root) + 1:]
def path(self, file):
return os.path.join(self.root, file)
def read_file(self, file):
with open(self.path(file)) as f:
return f.read()
def write_file(self, file, contents):
path = self.path(file)
mkdir_p(os.path.dirname(path))
with open(path, 'w') as f:
f.write(contents)
def delete_file(self, file):
path = self.path(file)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def rename_file(self, src, dest):
src, dest = self.path(src), self.path(dest)
mkdir_p(os.path.dirname(dest))
shutil.move(src, dest)
|
import errno
import os
import shutil
WORKSPACE_DIR = os.path.expanduser('~/mclab-ide-projects')
def get_all_projects():
mkdir_p(WORKSPACE_DIR)
return map(Project, os.listdir(WORKSPACE_DIR))
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
class Project(object):
def __init__(self, name):
self.name = name
self.root = os.path.join(WORKSPACE_DIR, self.name)
def exists(self):
return os.path.exists(self.root)
def create(self):
mkdir_p(self.root)
self.write_file('ide_entry_point.m', '''
function ide_entry_point()
% This function is used as an entry point for profiling runs, which
% provide the data powering features such as jump-to-definition and
% find callers. You should fill it in with code that exercises as
% much of your project as possible.
end'''[1:])
def delete(self):
shutil.rmtree(self.root)
def files(self):
for dirpath, _, paths in os.walk(self.root):
for path in paths:
if not path.startswith('.'):
yield os.path.join(dirpath, path)[len(self.root) + 1:]
def path(self, file):
return os.path.join(self.root, file)
def read_file(self, file):
with open(self.path(file)) as f:
return f.read()
def write_file(self, file, contents):
path = self.path(file)
mkdir_p(os.path.dirname(path))
with open(path, 'w') as f:
f.write(contents)
def delete_file(self, file):
os.remove(self.path(file))
def rename_file(self, src, dest):
src, dest = self.path(src), self.path(dest)
mkdir_p(os.path.dirname(dest))
shutil.move(src, dest)
|
apache-2.0
|
Python
|
30089f005f62e84367aa6affd5acbd4920f8086a
|
fix installation on Arch
|
rr-/dotfiles,rr-/dotfiles,rr-/dotfiles
|
cfg/search/__main__.py
|
cfg/search/__main__.py
|
from pathlib import Path
from libdotfiles import HOME_DIR, PKG_DIR, packages, util
FZF_DIR = HOME_DIR / ".fzf"
if util.distro_name() == "arch":
packages.try_install("fzf") # super opener
packages.try_install("the_silver_searcher") # fzf dependency
packages.try_install("ripgrep") # super grep (shell)
elif util.distro_name() == "linuxmint":
packages.try_install("silversearcher-ag")
if not packages.has_installed("ripgrep"):
util.run_verbose(
[
"curl",
"-LO",
"https://github.com/BurntSushi/ripgrep/releases/download/11.0.2/ripgrep_11.0.2_amd64.deb",
]
)
util.run_verbose(["sudo", "dpkg", "-i", "ripgrep_11.0.2_amd64.deb"])
util.run_verbose(
[
"git",
"clone",
"--depth",
"1",
"https://github.com/junegunn/fzf.git",
FZF_DIR,
]
)
util.run_verbose(
[
FZF_DIR / "install",
"--key-bindings",
"--completion",
"--no-update-rc",
]
)
util.create_symlink(PKG_DIR / "agignore", HOME_DIR / ".agignore")
|
from pathlib import Path
from libdotfiles import HOME_DIR, PKG_DIR, packages, util
FZF_DIR = HOME_DIR / ".fzf"
if util.distro_name() == "arch":
packages.try_install("fzf") # super opener
packages.try_install(
"silver-searcher-git"
) # super grep (vim-fzf dependency)
packages.try_install("ripgrep") # super grep (shell)
elif util.distro_name() == "linuxmint":
packages.try_install("silversearcher-ag")
if not packages.has_installed("ripgrep"):
util.run_verbose(
[
"curl",
"-LO",
"https://github.com/BurntSushi/ripgrep/releases/download/11.0.2/ripgrep_11.0.2_amd64.deb",
]
)
util.run_verbose(["sudo", "dpkg", "-i", "ripgrep_11.0.2_amd64.deb"])
util.run_verbose(
[
"git",
"clone",
"--depth",
"1",
"https://github.com/junegunn/fzf.git",
FZF_DIR,
]
)
util.run_verbose(
[
FZF_DIR / "install",
"--key-bindings",
"--completion",
"--no-update-rc",
]
)
util.create_symlink(PKG_DIR / "agignore", HOME_DIR / ".agignore")
|
mit
|
Python
|
a2efba8d942171249b3ed2f28497d84f81cbbb06
|
Add lint test and format generated code (#4114)
|
googleapis/google-cloud-java,googleapis/google-cloud-java,googleapis/google-cloud-java
|
java-language/google-cloud-language/synth.py
|
java-language/google-cloud-language/synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.java as java
gapic = gcp.GAPICGenerator()
service = 'language'
versions = ['v1', 'v1beta2']
config_pattern = '/google/cloud/language/artman_language_{version}.yaml'
for version in versions:
library = gapic.java_library(
service=service,
version=version,
config_path=config_pattern.format(version=version),
artman_output_name='')
s.copy(library / f'gapic-google-cloud-{service}-{version}/src', 'src')
s.copy(library / f'grpc-google-cloud-{service}-{version}/src', f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
s.copy(library / f'proto-google-cloud-{service}-{version}/src', f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
java.format_code('./src')
java.format_code(f'../../google-api-grpc/grpc-google-cloud-{service}-{version}/src')
java.format_code(f'../../google-api-grpc/proto-google-cloud-{service}-{version}/src')
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
for version in ["v1", "v1beta2"]:
library = gapic.java_library(
service='language',
version=version,
config_path=f'/google/cloud/language/artman_language_{version}.yaml',
artman_output_name='')
s.copy(library / f'gapic-google-cloud-language-{version}/src', 'src')
s.copy(library / f'grpc-google-cloud-language-{version}/src', f'../../google-api-grpc/grpc-google-cloud-language-{version}/src')
s.copy(library / f'proto-google-cloud-language-{version}/src', f'../../google-api-grpc/proto-google-cloud-language-{version}/src')
|
apache-2.0
|
Python
|
743b2593113a19382e60c3968897c871d98e20a8
|
add alternate shorthand arguments, simplify shell execution
|
willfarrell/alfred-debugger,willfarrell/alfred-debugger
|
alfred.py
|
alfred.py
|
#!/usr/bin/python
import argparse, subprocess, re, sys
VERSION="0.1";
parser = argparse.ArgumentParser(description='''Execute a test''',)
parser.add_argument('-f', '--file', type=str, required=True, help="Filename of php file (ex 'script.php')")
parser.add_argument('-q', '--query', type=str, required=True, help="Value to replace {query} with")
parser.add_argument('--version', action='version', version=VERSION, help="Return version of script")
args = parser.parse_args()
if not re.search(r"\.php$", args.file, re.IGNORECASE):
print 'Common batman, you need a file extension.'
sys.exit(1)
process = subprocess.Popen('cat %s | sed -e "s/{query}/%s/" > .tmp && $EXT .tmp && rm .tmp;' % (args.file, args.query), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print process.communicate()[0]
|
#!/usr/bin/python
import argparse, subprocess, re, sys
VERSION="0.1";
parser = argparse.ArgumentParser(description='''Execute a test''',)
parser.add_argument('--file', type=str, required=True, help="Filename of php file (ex 'script.php')")
parser.add_argument('--query', type=str, required=True, help="Value to replace {query} with")
parser.add_argument('--version', action='version', version=VERSION, help="Return version of script")
args = parser.parse_args()
if not re.search(r"\.php$", args.file, re.IGNORECASE):
print 'Common batman, you need a file extension.'
sys.exit(1)
p = subprocess.Popen('cat %s | sed -e "s/{query}/%s/" > .tmp && $EXT .tmp && rm .tmp;' % (args.file, args.query), shell=True)
if not args.background:
out, err = p.communicate()
if out: print "stdout=", out
if err: print "stderr=", err
|
mit
|
Python
|
9c720cf806364e4eaf40da24691bc224a9288485
|
Clean combo script
|
ustwo/mastermind,ustwo/mastermind
|
combo.py
|
combo.py
|
from libmproxy import filt
import proxyswitch as pswitch
def enable():
settings = ('127.0.0.1', '8080')
pswitch.enable(*settings)
def disable():
pswitch.disable()
def start(context, argv):
context.log(">>> start")
enable()
context.filter = filt.parse("~d ustwo.com")
def request(context, flow):
if flow.match(context.filter):
context.log(">>> request")
def done(context):
disable()
context.log(">>> done")
|
from flask import Flask
from libmproxy import filt
import proxyswitch as pswitch
# app = Flask('proxapp')
# @app.route('/')
# def hello_world():
# return 'Hello World!'
# @app.route('/foo')
# def foo():
# return 'foo'
def enable():
settings = ('127.0.0.1', '8080')
pswitch.enable(*settings)
def disable():
pswitch.disable()
def start(context, argv):
context.log(">>> start")
enable()
context.filter = filt.parse("~d ustwo.com")
# context.app_registry.add(app, "proxapp", 80)
def request(context, flow):
if flow.match(context.filter):
context.log(">>> request")
def done(context):
disable()
context.log(">>> done")
|
mit
|
Python
|
250e166f02642e9d3c41f3854b6e9e12c560daba
|
update script for battle
|
haozai309/hello_python
|
battle.py
|
battle.py
|
# Welcome to Battleship
# http://www.codecademy.com/courses/python-beginner-en-4XuFm/0/1?curriculum_id=4f89dab3d788890003000096
""" In this project you will build a simplified, one-player version of the
classic board game Battleship! In this version of the game, there will be a
single ship hidden in a random location on a 5x5 grid. The player will have 10
guesses to try to sink the ship. """
from random import randint
board = []
for i in range(5):
board.append(["O"]*5)
def print_board(board):
for row in board:
print " ".join(row)
print_board(board)
def random_row(board):
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
print "Battle ship is in row %s, col %s" % (ship_row, ship_col)
def check_guess(row, col, turn):
guess_row = int(raw_input("Guess Row: "))
guess_col = int(raw_input("Guess Col: "))
if guess_row == ship_row and guess_col == ship_col:
print "Congratulations! You sank my battleship"
return True
else:
print "You missed my battleship!"
if guess_row not in range(5) or \
guess_col not in range(5):
print "Oops, that's not even in the ocean."
elif board[guess_row][guess_col] == "X":
print "You guessed that one already."
else:
if guess_row in range(5) and guess_col in range(5):
board[guess_row][guess_col] = "X"
if turn == 3:
print "Game Over"
return False
for turn in range(4):
print
print "%s" % (turn + 1)
if check_guess(ship_row, ship_col, turn):
break
|
# Welcome to Battleship
# http://www.codecademy.com/courses/python-beginner-en-4XuFm/0/1?curriculum_id=4f89dab3d788890003000096
""" In this project you will build a simplified, one-player version of the
classic board game Battleship! In this version of the game, there will be a
single ship hidden in a random location on a 5x5 grid. The player will have 10
guesses to try to sink the ship. """
from random import randint
board = []
for i in range(5):
board.append(["O"]*5)
def print_board(board):
for row in board:
print " ".join(row)
print_board(board)
def random_row(board):
return randint(0, len(board) - 1)
def random_col(board):
return randint(0, len(board) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
guess_row = int(raw_input("Guess Row: "))
guess_col = int(raw_input("Guess Col: "))
print "Battle ship is in row %s, col %s" % (ship_row, ship_col)
if guess_row == ship_row and guess_col == ship_col:
print "Congratulations! You sank my battleship"
|
apache-2.0
|
Python
|
3566e9a4d59779c1fca5cfa3031d03a1bb5a72ba
|
remove process executor option
|
sciyoshi/CheesePrism,whitmo/CheesePrism,sciyoshi/CheesePrism,whitmo/CheesePrism,whitmo/CheesePrism
|
cheeseprism/wsgiapp.py
|
cheeseprism/wsgiapp.py
|
from .jenv import EnvFactory
from cheeseprism.auth import BasicAuthenticationPolicy
from cheeseprism.resources import App
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from pyramid.settings import asbool
import futures
import logging
import os
logger = logging.getLogger(__name__)
def main(global_config, **settings):
settings = dict(global_config, **settings)
settings.setdefault('jinja2.i18n.domain', 'CheesePrism')
session_factory = UnencryptedCookieSessionFactoryConfig('cheeseprism')
config = Configurator(root_factory=App, settings=settings,
session_factory=session_factory,
authentication_policy=\
BasicAuthenticationPolicy(BasicAuthenticationPolicy.noop_check))
setup_workers(config.registry)
config.add_translation_dirs('locale/')
config.include('.request')
config.include('.views')
config.include('.index')
tempspec = settings.get('cheeseprism.index_templates', '')
config.registry['cp.index_templates'] = EnvFactory.from_str(tempspec)
if asbool(settings.get('cheeseprism.pipcache_mirror', False)):
config.include('.sync.pip')
if asbool(settings.get('cheeseprism.auto_sync', False)):
config.include('.sync.auto')
return config.make_wsgi_app()
def ping_proc(i):
pid = os.getpid()
logger.debug("worker %s up: %s", i, pid)
return pid
def setup_workers(registry):
"""
now thread only
"""
settings = registry.settings
registry['cp.executor_type'] = 'thread'
executor = futures.ThreadPoolExecutor
workers = int(settings.get('cheeseprism.futures.workers', 5))
logging.info("Starting thread executor w/ %s workers", workers)
executor = registry['cp.executor'] = executor(workers)
# -- This initializes our processes/threads
workers = [str(pid) for pid in executor.map(ping_proc, range(workers))]
logger.info("workers: %s", " ".join(workers))
|
from .jenv import EnvFactory
from cheeseprism.auth import BasicAuthenticationPolicy
from cheeseprism.resources import App
from functools import partial
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from pyramid.settings import asbool
import futures
import logging
import multiprocessing
import os
import signal
logger = logging.getLogger(__name__)
def main(global_config, **settings):
settings = dict(global_config, **settings)
settings.setdefault('jinja2.i18n.domain', 'CheesePrism')
session_factory = UnencryptedCookieSessionFactoryConfig('cheeseprism')
config = Configurator(root_factory=App, settings=settings,
session_factory=session_factory,
authentication_policy=\
BasicAuthenticationPolicy(BasicAuthenticationPolicy.noop_check))
setup_workers(config.registry)
config.add_translation_dirs('locale/')
config.include('.request')
config.include('.views')
config.include('.index')
tempspec = settings.get('cheeseprism.index_templates', '')
config.registry['cp.index_templates'] = EnvFactory.from_str(tempspec)
if asbool(settings.get('cheeseprism.pipcache_mirror', False)):
config.include('.sync.pip')
if asbool(settings.get('cheeseprism.auto_sync', False)):
config.include('.sync.auto')
return config.make_wsgi_app()
def sig_handler(executor, sig, frame, wait=True):
logger.warn("Signal %d recieved: wait: %s", sig, wait)
executor.shutdown(wait)
logger.info("Executor shutdown complete")
def ping_proc(i):
pid = os.getpid()
logger.debug("worker %s up: %s", i, pid)
return pid
def setup_workers(registry, handler=sig_handler):
"""
ATT: Sensitive voodoo. Workers must be setup before any other
threads are launched. Workers must be initialized before signals
are registered.
"""
settings = registry.settings
registry['cp.executor_type'] = executor_type =\
settings.get('cheeseprism.futures', 'thread')
executor = executor_type != 'process' and futures.ThreadPoolExecutor \
or futures.ProcessPoolExecutor
workers = int(settings.get('cheeseprism.futures.workers', 0))
if executor_type == 'process' and workers <= 0:
workers = multiprocessing.cpu_count() + 1
else:
workers = workers <= 0 and 10 or workers
logging.info("PID %s using %s executor with %s workers", os.getpid(), executor_type, workers)
executor = registry['cp.executor'] = executor(workers)
# -- This initializes our processes/threads
workers = [str(pid) for pid in executor.map(ping_proc, range(workers))]
logger.info("workers: %s", " ".join(workers))
# -- Register signals after init so to not have an echo effect
for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
signal.signal(sig, partial(sig_handler, executor))
|
bsd-2-clause
|
Python
|
8eb938086a77a11cb2df8c83f872b9daa519f858
|
fix pyhon3 compat issues in rename.
|
cournape/Bento,cournape/Bento,cournape/Bento,cournape/Bento
|
bento/compat/rename.py
|
bento/compat/rename.py
|
import os.path
import os
import random
import errno
def rename(src, dst):
"Atomic rename on windows."
# This is taken from mercurial
try:
os.rename(src, dst)
except OSError:
# If dst exists, rename will fail on windows, and we cannot
# unlink an opened file. Instead, the destination is moved to
# a temporary location if it already exists.
def tempname(prefix):
for i in range(5):
fn = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
if not os.path.exists(fn):
return fn
raise IOError((errno.EEXIST, "No usable temporary filename found"))
temp = tempname(dst)
os.rename(dst, temp)
try:
os.unlink(temp)
except:
# Some rude AV-scanners on Windows may cause the unlink to
# fail. Not aborting here just leaks the temp file, whereas
# aborting at this point may leave serious inconsistencies.
# Ideally, we would notify the user here.
pass
os.rename(src, dst)
|
import os.path
import os
import random
def rename(src, dst):
"Atomic rename on windows."
# This is taken from mercurial
try:
os.rename(src, dst)
except OSError, err:
# If dst exists, rename will fail on windows, and we cannot
# unlink an opened file. Instead, the destination is moved to
# a temporary location if it already exists.
def tempname(prefix):
for i in range(5):
fn = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
if not os.path.exists(fn):
return fn
raise IOError, (errno.EEXIST, "No usable temporary filename found")
temp = tempname(dst)
os.rename(dst, temp)
try:
os.unlink(temp)
except:
# Some rude AV-scanners on Windows may cause the unlink to
# fail. Not aborting here just leaks the temp file, whereas
# aborting at this point may leave serious inconsistencies.
# Ideally, we would notify the user here.
pass
os.rename(src, dst)
|
bsd-3-clause
|
Python
|
b61a45c13ffa82356f896f0914d2f28dabea7a7f
|
Include credentials for heat calling self
|
redhat-openstack/heat,pshchelo/heat,gonzolino/heat,cwolferh/heat-scratch,cryptickp/heat,takeshineshiro/heat,openstack/heat,steveb/heat,cryptickp/heat,dims/heat,rh-s/heat,maestro-hybrid-cloud/heat,jasondunsmore/heat,rdo-management/heat,pratikmallya/heat,jasondunsmore/heat,cwolferh/heat-scratch,takeshineshiro/heat,noironetworks/heat,srznew/heat,miguelgrinberg/heat,openstack/heat,pshchelo/heat,steveb/heat,gonzolino/heat,rh-s/heat,miguelgrinberg/heat,pratikmallya/heat,maestro-hybrid-cloud/heat,srznew/heat,noironetworks/heat,rdo-management/heat,redhat-openstack/heat,dragorosson/heat,dragorosson/heat,dims/heat
|
heat/engine/clients/os/heat_plugin.py
|
heat/engine/clients/os/heat_plugin.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as hc
from heatclient import exc
from heat.engine.clients import client_plugin
class HeatClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exc
def _create(self):
args = {
'auth_url': self.context.auth_url,
'token': self.auth_token,
'username': None,
'password': None,
'ca_file': self._get_client_option('heat', 'ca_file'),
'cert_file': self._get_client_option('heat', 'cert_file'),
'key_file': self._get_client_option('heat', 'key_file'),
'insecure': self._get_client_option('heat', 'insecure')
}
endpoint = self.get_heat_url()
if self._get_client_option('heat', 'url'):
# assume that the heat API URL is manually configured because
# it is not in the keystone catalog, so include the credentials
# for the standalone auth_password middleware
args['username'] = self.context.username
args['password'] = self.context.password
del(args['token'])
return hc.Client('1', endpoint, **args)
def is_not_found(self, ex):
return isinstance(ex, exc.HTTPNotFound)
def is_over_limit(self, ex):
return isinstance(ex, exc.HTTPOverLimit)
def get_heat_url(self):
heat_url = self._get_client_option('heat', 'url')
if heat_url:
tenant_id = self.context.tenant_id
heat_url = heat_url % {'tenant_id': tenant_id}
else:
endpoint_type = self._get_client_option('heat', 'endpoint_type')
heat_url = self.url_for(service_type='orchestration',
endpoint_type=endpoint_type)
return heat_url
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import client as hc
from heatclient import exc
from heat.engine.clients import client_plugin
class HeatClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exc
def _create(self):
args = {
'auth_url': self.context.auth_url,
'token': self.auth_token,
'username': None,
'password': None,
'ca_file': self._get_client_option('heat', 'ca_file'),
'cert_file': self._get_client_option('heat', 'cert_file'),
'key_file': self._get_client_option('heat', 'key_file'),
'insecure': self._get_client_option('heat', 'insecure')
}
endpoint = self.get_heat_url()
return hc.Client('1', endpoint, **args)
def is_not_found(self, ex):
return isinstance(ex, exc.HTTPNotFound)
def is_over_limit(self, ex):
return isinstance(ex, exc.HTTPOverLimit)
def get_heat_url(self):
heat_url = self._get_client_option('heat', 'url')
if heat_url:
tenant_id = self.context.tenant_id
heat_url = heat_url % {'tenant_id': tenant_id}
else:
endpoint_type = self._get_client_option('heat', 'endpoint_type')
heat_url = self.url_for(service_type='orchestration',
endpoint_type=endpoint_type)
return heat_url
|
apache-2.0
|
Python
|
05c210f1a5f83ebbea2319f48ba58eb054b32ce2
|
fix indent
|
komsit37/sublime-q,komsit37/sublime-q,komsit37/sublime-q
|
q_out_panel.py
|
q_out_panel.py
|
from . import chain
#show_q_output
class QOutPanelCommand(chain.ChainCommand):
def do(self, edit, input=None):
panel = self.view.window().get_output_panel("q")
syntax_file = "Packages/q KDB/syntax/q_output.tmLanguage"
try:
panel.set_syntax_file(syntax_file)
except Exception:
print("Unable to load syntax file: ", syntax_file)
panel.settings().set("word_wrap", False)
panel.set_read_only(False)
panel.insert(edit, panel.size(), input)
panel.set_read_only(True)
self.view.window().run_command("show_panel", {"panel": "output.q"})
return '' #return something so that the chain will continue
|
from . import chain
#show_q_output
class QOutPanelCommand(chain.ChainCommand):
def do(self, edit, input=None):
panel = self.view.window().get_output_panel("q")
syntax_file = "Packages/q KDB/syntax/q_output.tmLanguage"
try:
sublime.load_binary_resource(syntax_file)
except Exception:
continue
panel.set_syntax_file(syntax_file)
panel.settings().set("word_wrap", False)
panel.set_read_only(False)
panel.insert(edit, panel.size(), input)
panel.set_read_only(True)
self.view.window().run_command("show_panel", {"panel": "output.q"})
return '' #return something so that the chain will continue
|
mit
|
Python
|
62398ce1fde0402a9c4b77ff018e47716ba1fdd3
|
allow restricting the refresh_useractions command by user
|
tndatacommons/tndata_backend,tndatacommons/tndata_backend,izzyalonso/tndata_backend,tndatacommons/tndata_backend,izzyalonso/tndata_backend,izzyalonso/tndata_backend,izzyalonso/tndata_backend,tndatacommons/tndata_backend
|
tndata_backend/goals/management/commands/refresh_useractions.py
|
tndata_backend/goals/management/commands/refresh_useractions.py
|
import logging
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from goals.models import CustomAction, UserAction
logger = logging.getLogger("loggly_logs")
class Command(BaseCommand):
help = 'Updates the next_trigger_date field for stale UserActions and CustomActions.'
def add_arguments(self, parser):
parser.add_argument(
'--user',
action='store',
dest='user',
default=None,
help=("Restrict this command to the given User. "
"Accepts a username, email, or id")
)
def _user_kwargs(self, user):
User = get_user_model()
kwargs = {} # Keyword arguments that get passed to our action querysets.
if user:
try:
if user.isnumeric():
criteria = Q(id=user)
else:
criteria = (Q(username=user) | Q(email=user))
kwargs['user'] = User.objects.get(criteria)
except User.DoesNotExist:
msg = "Could not find user: {0}".format(user)
raise CommandError(msg)
return kwargs
def handle(self, *args, **options):
count = 0
kwargs = self._user_kwargs(options['user'])
for ua in UserAction.objects.stale(**kwargs):
count += 1
ua.save(update_triggers=True) # fields get refreshed on save.
msg = "Refreshed Trigger Date for {0} UserActions".format(count)
logger.error(msg)
self.stderr.write(msg)
count = 0
for ca in CustomAction.objects.stale(**kwargs):
count += 1
ca.save() # fields get refreshed on save.
msg = "Refreshed Trigger Date for {0} CustomActions".format(count)
logger.error(msg)
self.stderr.write(msg)
|
import logging
from django.core.management.base import BaseCommand
from goals.models import CustomAction, UserAction
logger = logging.getLogger("loggly_logs")
class Command(BaseCommand):
help = 'Updates the next_trigger_date field for stale UserActions and CustomActions.'
def handle(self, *args, **options):
count = 0
for ua in UserAction.objects.stale():
count += 1
ua.save(update_triggers=True) # fields get refreshed on save.
msg = "Refreshed Trigger Date for {0} UserActions".format(count)
logger.error(msg)
self.stderr.write(msg)
count = 0
for ca in CustomAction.objects.stale():
count += 1
ca.save() # fields get refreshed on save.
msg = "Refreshed Trigger Date for {0} CustomActions".format(count)
logger.error(msg)
self.stderr.write(msg)
|
mit
|
Python
|
5b6587cbe03ff79a29a400fb1f9b29d889b4edc5
|
Make executable
|
Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed
|
appid.py
|
appid.py
|
#!/usr/bin/env python3
# Find a Steam appid given its name
import json
import os.path
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
CACHE_FILE = os.path.abspath(__file__ + "/../appid.json")
try:
with open(CACHE_FILE) as f:
appids = json.load(f)
except FileNotFoundError:
import requests # ImportError? pip install requests
print("Downloading Steam appid list...")
r = requests.get("https://api.steampowered.com/ISteamApps/GetAppList/v0001/")
r.raise_for_status()
data = r.json()
appids = {app["name"]: app["appid"] for app in data["applist"]["apps"]["app"]}
with open(CACHE_FILE, "w") as f:
json.dump(appids, f)
print("Downloaded and cached.")
if len(sys.argv) == 1:
print("TODO: Use os.getcwd()")
sys.exit(0)
appnames = list(appids)
def shortest_token_set_ratio(query, choice):
"""Like fuzz.token_set_ratio, but breaks ties by choosing the shortest"""
return fuzz.token_set_ratio(query, choice) * 1000 + 1000 - len(choice)
def show_matches(target):
for name, score in process.extract(target, appnames, limit=10, scorer=shortest_token_set_ratio):
print("\t[%3d%% - %7s] %s" % (score//1000, appids[name], name))
# for arg in sys.argv[1:]: show_matches(arg) # Allow multiple args
show_matches(" ".join(sys.argv[1:])) # Allow unquoted multi-word names
|
# Find a Steam appid given its name
import json
import os.path
import sys
from fuzzywuzzy import process, fuzz # ImportError? pip install 'fuzzywuzzy[speedup]'
CACHE_FILE = os.path.abspath(__file__ + "/../appid.json")
try:
with open(CACHE_FILE) as f:
appids = json.load(f)
except FileNotFoundError:
import requests # ImportError? pip install requests
print("Downloading Steam appid list...")
r = requests.get("https://api.steampowered.com/ISteamApps/GetAppList/v0001/")
r.raise_for_status()
data = r.json()
appids = {app["name"]: app["appid"] for app in data["applist"]["apps"]["app"]}
with open(CACHE_FILE, "w") as f:
json.dump(appids, f)
print("Downloaded and cached.")
if len(sys.argv) == 1:
print("TODO: Use os.getcwd()")
sys.exit(0)
appnames = list(appids)
def shortest_token_set_ratio(query, choice):
"""Like fuzz.token_set_ratio, but breaks ties by choosing the shortest"""
return fuzz.token_set_ratio(query, choice) * 1000 + 1000 - len(choice)
def show_matches(target):
for name, score in process.extract(target, appnames, limit=10, scorer=shortest_token_set_ratio):
print("\t[%3d%% - %7s] %s" % (score//1000, appids[name], name))
# for arg in sys.argv[1:]: show_matches(arg) # Allow multiple args
show_matches(" ".join(sys.argv[1:])) # Allow unquoted multi-word names
|
mit
|
Python
|
d2d090383d93e89bd8ce07d533715612cf472152
|
Support lists of nodes in astpp
|
Suor/flaws
|
astpp.py
|
astpp.py
|
"""
A pretty-printing dump function for the ast module. The code was copied from
the ast.dump function and modified slightly to pretty-print.
Alex Leone (acleone ~AT~ gmail.com), 2010-01-30
"""
from ast import *
def dump(node, annotate_fields=True, include_attributes=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, AST):
fields = [(a, _format(b, level)) for a, b in iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if isinstance(node, list):
return '\n'.join(_format(n) for n in node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
if __name__ == '__main__':
import sys
for filename in sys.argv[1:]:
print('=' * 50)
print('AST tree for', filename)
print('=' * 50)
f = open(filename, 'r')
fstr = f.read()
f.close()
print(dump(parse(fstr, filename=filename), include_attributes=True))
print()
|
"""
A pretty-printing dump function for the ast module. The code was copied from
the ast.dump function and modified slightly to pretty-print.
Alex Leone (acleone ~AT~ gmail.com), 2010-01-30
"""
from ast import *
def dump(node, annotate_fields=True, include_attributes=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, AST):
fields = [(a, _format(b, level)) for a, b in iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
if __name__ == '__main__':
import sys
for filename in sys.argv[1:]:
print('=' * 50)
print('AST tree for', filename)
print('=' * 50)
f = open(filename, 'r')
fstr = f.read()
f.close()
print(dump(parse(fstr, filename=filename), include_attributes=True))
print()
|
bsd-2-clause
|
Python
|
a1996022dd288b5a986cd07b2694f5af514296e4
|
Delete unnecessary annotation in examples/bucket_policy.py
|
aliyun/aliyun-oss-python-sdk
|
examples/bucket_policy.py
|
examples/bucket_policy.py
|
import os
import oss2
import json
# 以下代码展示了bucket_policy相关API的用法,
# 具体policy书写规则参考官网文档说明
# 首先初始化AccessKeyId、AccessKeySecret、Endpoint等信息。
# 通过环境变量获取,或者把诸如“<你的AccessKeyId>”替换成真实的AccessKeyId等。
#
# 以杭州区域为例,Endpoint可以是:
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')
# 确认上面的参数都填写正确了
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, '请设置参数:' + param
# 创建Bucket对象,所有Object相关的接口都可以通过Bucket对象来进行
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
# 创建policy_text
policy=dict()
policy["Version"] = "1"
policy["Statement"] = []
statement = dict()
statement["Action"] = ["oss:PutObject"]
statement["Effect"] = "Allow"
statement["Resource"] = ["acs:oss:*:*:*/*"]
policy["Statement"].append(statement)
policy_text = json.dumps(policy)
# Put bolicy_text
print("Put policy text : ", policy_text)
bucket.put_bucket_policy(policy_text)
# Get bucket Policy
result = bucket.get_bucket_policy()
policy_json = json.loads(result.policy)
print("Get policy text: ", policy_json)
# 校验返回的policy
assert len(policy["Statement"]) == len(policy_json["Statement"])
assert policy["Version"] == policy_json["Version"]
policy_resource = policy["Statement"][0]["Resource"][0]
policy_json_resource = policy_json["Statement"][0]["Resource"][0]
assert policy_resource == policy_json_resource
# 删除policy
result = bucket.delete_bucket_policy()
assert int(result.status)//100 == 2
|
import os
import oss2
import json
# 以下代码展示了bucket_policy相关API的用法,
# 具体policy书写规则参考官网文档说明
# 首先初始化AccessKeyId、AccessKeySecret、Endpoint等信息。
# 通过环境变量获取,或者把诸如“<你的AccessKeyId>”替换成真实的AccessKeyId等。
#
# 以杭州区域为例,Endpoint可以是:
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com
# 分别以HTTP、HTTPS协议访问。
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')
# 确认上面的参数都填写正确了
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, '请设置参数:' + param
# 创建Bucket对象,所有Object相关的接口都可以通过Bucket对象来进行
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
# 创建policy_text
policy=dict()
policy["Version"] = "1"
policy["Statement"] = []
statement = dict()
statement["Action"] = ["oss:PutObject"]
statement["Effect"] = "Allow"
statement["Resource"] = ["acs:oss:*:*:*/*"]
policy["Statement"].append(statement)
policy_text = json.dumps(policy)
# Put bolicy_text
print("Put policy text : ", policy_text)
bucket.put_bucket_policy(policy_text)
# Get bucket Policy
result = bucket.get_bucket_policy()
policy_json = json.loads(result.policy)
print("Get policy text: ", policy_json)
# 校验返回的policy
assert len(policy["Statement"]) == len(policy_json["Statement"])
assert policy["Version"] == policy_json["Version"]
policy_resource = policy["Statement"][0]["Resource"][0]
policy_json_resource = policy_json["Statement"][0]["Resource"][0]
assert policy_resource == policy_json_resource
# 删除policy
result = bucket.delete_bucket_policy()
assert int(result.status)//100 == 2
|
mit
|
Python
|
3665b8859f72ec416682857ab22f7e29fc30f0df
|
Add field on cached alignments to store more information
|
cmunk/protwis,protwis/protwis,fosfataza/protwis,cmunk/protwis,fosfataza/protwis,protwis/protwis,protwis/protwis,fosfataza/protwis,cmunk/protwis,cmunk/protwis,fosfataza/protwis
|
alignment/models.py
|
alignment/models.py
|
from django.db import models
# Create your models here.
class AlignmentConsensus(models.Model):
slug = models.SlugField(max_length=100, unique=True)
alignment = models.BinaryField()
gn_consensus = models.BinaryField(blank=True) # Store conservation calculation for each GN
|
from django.db import models
# Create your models here.
class AlignmentConsensus(models.Model):
slug = models.SlugField(max_length=100, unique=True)
alignment = models.BinaryField()
|
apache-2.0
|
Python
|
61e9b3db58c124cf41ede9fc9a3ad9c01e5bff81
|
add select related query for group social links
|
tomaszroszko/django-social-links
|
sociallinks/templatetags/sociallink_tags.py
|
sociallinks/templatetags/sociallink_tags.py
|
# -*- coding: utf-8 -*-
from django import template
from django.contrib.contenttypes.models import ContentType
from sociallinks.models import SocialLink, SocialLinkGroup
register = template.Library()
@register.assignment_tag
def obj_social_links(obj):
"""return list of social links for obj. Obj is instance of any model
registred in project
Usage:
{% obj_social_links user as user_links %}
{% for link in user_links %}
<a href="{{ link.link }}" class="{{ link.link_type.css_class }}">
{{ link.link_type.name }}
</a>
{% endfor %}
"""
content_type = ContentType.objects.get_for_model(obj.__class__)
return SocialLink.objects.filter(
content_type=content_type,
object_pk=obj.pk).select_related('link_type')
@register.assignment_tag
def group_social_links(slug):
"""return list of social links for slug
sociallinks.models.SocialLinkGroup.slug"""
group = SocialLinkGroup.objects.get(slug=slug)
return SocialLink.objects.filter(
link_group=group).select_related('link_type')
|
# -*- coding: utf-8 -*-
from django import template
from django.contrib.contenttypes.models import ContentType
from sociallinks.models import SocialLink, SocialLinkGroup
register = template.Library()
@register.assignment_tag
def obj_social_links(obj):
"""return list of social links for obj. Obj is instance of any model
registred in project
Usage:
{% obj_social_links user as user_links %}
{% for link in user_links %}
<a href="{{ link.link }}" class="{{ link.link_type.css_class }}">
{{ link.link_type.name }}
</a>
{% endfor %}
"""
content_type = ContentType.objects.get_for_model(obj.__class__)
return SocialLink.objects.filter(
content_type=content_type,
object_pk=obj.pk).select_related('link_type')
@register.assignment_tag
def group_social_links(slug):
"""return list of social links for slug
sociallinks.models.SocialLinkGroup.slug"""
group = SocialLinkGroup.objects.get(slug=slug)
return SocialLink.objects.filter(
link_group=group).select_related('link_type')
|
bsd-3-clause
|
Python
|
709a7139b4f3acaace53e79c7ca1adafd8f24027
|
Use tempfile to save modifications
|
malramsay64/MD-Molecules-Hoomd,malramsay64/MD-Molecules-Hoomd
|
basic.py
|
basic.py
|
"""Run a basic simulation"""
import os
import tempfile
import hoomd
from hoomd import md
import molecule
import numpy as np
import pandas
import TimeDep
import gsd.hoomd
from StepSize import generate_steps
def run_npt(snapshot, temp, steps, **kwargs):
"""Initialise a hoomd simulation"""
with hoomd.context.initialize(kwargs.get('init_args', '')):
system = hoomd.init.read_gsd(snapshot, time_step=0)
md.update.enforce2d()
mol = kwargs.get('mol', molecule.Trimer())
mol.initialize(create=False)
md.integrate.mode_standard(kwargs.get('dt', 0.005))
md.integrate.npt(
group=hoomd.group.rigid_center(),
kT=temp,
tau=kwargs.get('tau', 1.),
P=kwargs.get('press', 13.5),
tauP=kwargs.get('tauP', 1.)
)
dynamics = TimeDep.TimeDep2dRigid(system.take_snapshot(all=True), 0)
for curr_step in generate_steps(steps):
hoomd.run_upto(curr_step)
dynamics.append(system.take_snapshot(all=True), curr_step)
return dynamics.get_all_data()
def read_snapshot(fname, rand=False):
"""Read a hoomd snapshot from a hoomd gsd file
Args:
fname (string): Filename of GSD file to read in
Returns:
class:`hoomd.data.Snapshot`: Hoomd snapshot
"""
with gsd.hoomd.open(fname) as trj:
snapshot = trj.read_frame(0)
if rand:
nbodies = snapshot.particles.body.max() + 1
np.random.shuffle(snapshot.particles.velocity[:nbodies])
np.random.shuffle(snapshot.particles.angmom[:nbodies])
tmp = tempfile.NamedTemporaryFile(delete=False)
with gsd.hoomd.open(tmp.name, 'wb') as tfile:
tfile.append(snapshot)
return tmp.name
def main(directory, temp, steps, iterations=2):
"""Main function to run stuff"""
init_file = directory + "/Trimer-{press:.2f}-{temp:.2f}.gsd".format(
press=13.50, temp=temp)
for iteration in range(iterations):
dynamics = run_npt(read_snapshot(init_file, rand=True), temp, steps)
with pandas.HDFStore(os.path.splitext(init_file)[0]+'.hdf5') as store:
store['dyn{i}'.format(i=iteration)] = dynamics
if __name__ == '__main__':
main(".", 1.30, 1000, 20)
|
"""Run a basic simulation"""
import os
import hoomd
import molecule
import numpy as np
import pandas
import TimeDep
from hoomd import md
import gsd.hoomd
from StepSize import generate_steps
def run_npt(snapshot, temp, steps, **kwargs):
"""Initialise a hoomd simulation"""
with hoomd.context.initialize(kwargs.get('init_args', '')):
system = hoomd.init.read_snapshot(snapshot)
md.update.enforce2d()
mol = kwargs.get('mol', molecule.Trimer())
mol.initialize(create=False)
md.integrate.mode_standard(kwargs.get('dt', 0.005))
md.integrate.npt(
group=hoomd.group.rigid_center(),
kT=temp,
tau=kwargs.get('tau', 1.),
P=kwargs.get('press', 13.5),
tauP=kwargs.get('tauP', 1.)
)
dynamics = TimeDep.TimeDep2dRigid(snapshot, 0)
for curr_step in generate_steps(steps):
hoomd.run_upto(curr_step)
dynamics.append(system.take_snapshot(all=True), curr_step)
return dynamics.get_all_data()
def read_snapshot(fname, rand=False):
"""Read a hoomd snapshot from a hoomd gsd file
Args:
fname (string): Filename of GSD file to read in
Returns:
class:`hoomd.data.Snapshot`: Hoomd snapshot
"""
with gsd.hoomd.open(fname) as trj:
snapshot = trj.read_frame(0)
if rand:
snapshot.particles.angmom
nbodies = snapshot.particles.body.max() + 1
np.random.shuffle(snapshot.particles.velocity[:nbodies])
np.random.shuffle(snapshot.particles.angmom[:nbodies])
return snapshot
def main(directory, temp, steps, iterations=2):
"""Main function to run stuff"""
init_file = directory + "/Trimer-{press}-{temp}.gsd".format(
press=13.50, temp=temp)
for iteration in range(iterations):
dynamics = run_npt(read_snapshot(init_file, rand=True), temp, steps)
with pandas.HDFStore(os.path.splitext(init_file)[0]+'.hdf5') as store:
store['dyn{i}'.format(i=iteration)] = dynamics.get_all_data()
if __name__ == '__main__':
main(".", 1.30, 1000, 20)
|
mit
|
Python
|
5d8e6e47964d80f380db27acd120136a43e80550
|
Fix tool description in argparse help
|
sot/aimpoint_mon,sot/aimpoint_mon
|
aimpoint_mon/make_web_page.py
|
aimpoint_mon/make_web_page.py
|
#!/usr/bin/env python
import os
import argparse
import json
from pathlib import Path
from jinja2 import Template
import pyyaks.logger
def get_opt():
parser = argparse.ArgumentParser(description='Make aimpoint monitor web page')
parser.add_argument("--data-root",
default=".",
help="Root directory for asol and index files")
return parser.parse_args()
# Options
opt = get_opt()
# Set up logging
loglevel = pyyaks.logger.INFO
logger = pyyaks.logger.get_logger(name='make_web_page', level=loglevel,
format="%(asctime)s %(message)s")
def main():
# Files
index_template_file = Path(__file__).parent / 'data' / 'index_template.html'
index_file = os.path.join(opt.data_root, 'index.html')
info_file = os.path.join(opt.data_root, 'info.json')
# Jinja template context
logger.info('Loading info file {}'.format(info_file))
context = json.load(open(info_file, 'r'))
template = Template(open(index_template_file).read())
context['static'] = True
html = template.render(**context)
logger.info('Writing index file {}'.format(index_file))
with open(index_file, 'w') as fh:
fh.write(html)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import os
import argparse
import json
from pathlib import Path
from jinja2 import Template
import pyyaks.logger
def get_opt():
parser = argparse.ArgumentParser(description='Get aimpoint drift data '
'from aspect solution files')
parser.add_argument("--data-root",
default=".",
help="Root directory for asol and index files")
return parser.parse_args()
# Options
opt = get_opt()
# Set up logging
loglevel = pyyaks.logger.INFO
logger = pyyaks.logger.get_logger(name='make_web_page', level=loglevel,
format="%(asctime)s %(message)s")
def main():
# Files
index_template_file = Path(__file__).parent / 'data' / 'index_template.html'
index_file = os.path.join(opt.data_root, 'index.html')
info_file = os.path.join(opt.data_root, 'info.json')
# Jinja template context
logger.info('Loading info file {}'.format(info_file))
context = json.load(open(info_file, 'r'))
template = Template(open(index_template_file).read())
context['static'] = True
html = template.render(**context)
logger.info('Writing index file {}'.format(index_file))
with open(index_file, 'w') as fh:
fh.write(html)
if __name__ == '__main__':
main()
|
bsd-2-clause
|
Python
|
b31217a1a0ed68e0dfee2fdea87aad569c73573f
|
update batch timing
|
codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator,codeforamerica/westsac-urban-land-locator
|
clock.py
|
clock.py
|
from apscheduler.schedulers.blocking import BlockingScheduler
from farmsList.imports import every_night_at_1am
from rq import Queue
from worker import conn
import logging
logging.basicConfig()
q = Queue(connection=conn)
sched = BlockingScheduler()
@sched.scheduled_job('cron', hour=21, minute=14)# hour=1)
def scheduled_job():
q.enqueue(every_night_at_1am)
sched.start()
|
from apscheduler.schedulers.blocking import BlockingScheduler
from farmsList.imports import every_night_at_1am
from rq import Queue
from worker import conn
import logging
logging.basicConfig()
q = Queue(connection=conn)
sched = BlockingScheduler()
@sched.scheduled_job('cron', hour=21, minute=11)# hour=1)
def scheduled_job():
q.enqueue(every_night_at_1am)
sched.start()
|
bsd-3-clause
|
Python
|
a34994bb6ae23f04627ab384fa2c2905997925a9
|
Revert rendering.
|
NejcZupec/tictactoe,NejcZupec/tictactoe,NejcZupec/tictactoe
|
web/views.py
|
web/views.py
|
import json
from django.http import Http404, HttpResponse
from django.views.generic import TemplateView
from django.shortcuts import redirect, render
from .models import Game, Player
from .utils import create_new_game, generate_unique_anonymous_username, calculate_stats
class HomeView(TemplateView):
template_name = 'home.html'
class GameView(TemplateView):
template_name = 'game.html'
def get(self, request, game_id, *args, **kwargs):
game = Game.objects.get(id=game_id)
board = [[game.get_field_state(row_index, column_index) for column_index in range(3)] for row_index in range(3)]
game_finished = True if game.get_winner_or_draw() else False
ai_player = game.get_ai_player()
stats = calculate_stats(game)
return render(request, self.template_name, locals())
class Leaderboard(TemplateView):
template_name = 'leaderboard.html'
def get(self, request, *args, **kwargs):
players = Player.objects.all()
return render(request, self.template_name, locals())
def new_game(request, p1_type, p2_type):
"""
Start a new game. Create a Game object and redirects to it.
"""
if p1_type == 'anonymous' and p2_type == 'anonymous':
game = create_new_game('anonymous', 'anonymous')
return redirect(game)
if p1_type == 'anonymous' and p2_type == 'ai_random':
player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type)
player2, created = Player.objects.get_or_create(username="AI Random", type=p2_type)
game = Game.objects.create(player1=player1, player2=player2)
return redirect(game)
raise Http404
def new_move(request, game_id):
"""
Save a new game's move to database.
"""
game = Game.objects.get(id=game_id)
player = request.POST.get('player')
x = request.POST.get('x')
y = request.POST.get('y')
m, action = game.add_move_and_get_action(player, x, y)
return HttpResponse(str(action))
def rematch(request, game_id):
old_game = Game.objects.get(id=game_id)
game = Game.objects.create(
player1=old_game.player2,
player2=old_game.player1,
)
return redirect(game)
def ai_next_move(request, game_id):
game = Game.objects.get(id=game_id)
x, y = game.get_next_random_move()
return HttpResponse(json.dumps({'x': x, 'y': y}), content_type='application/json')
|
import json
from django.http import Http404, HttpResponse
from django.views.generic import TemplateView
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from .models import Game, Player
from .utils import create_new_game, generate_unique_anonymous_username, calculate_stats
class HomeView(TemplateView):
template_name = 'home.html'
class GameView(TemplateView):
template_name = 'game.html'
def get(self, request, game_id, *args, **kwargs):
game = Game.objects.get(id=game_id)
board = [[game.get_field_state(row_index, column_index) for column_index in range(3)] for row_index in range(3)]
game_finished = True if game.get_winner_or_draw() else False
ai_player = game.get_ai_player()
stats = calculate_stats(game)
return render(request, self.template_name, locals())
class Leaderboard(TemplateView):
template_name = 'leaderboard.html'
def get(self, request, *args, **kwargs):
players = Player.objects.all()
return render(request, self.template_name, locals())
def new_game(request, p1_type, p2_type):
"""
Start a new game. Create a Game object and redirects to it.
"""
if p1_type == 'anonymous' and p2_type == 'anonymous':
game = create_new_game('anonymous', 'anonymous')
return redirect(game)
if p1_type == 'anonymous' and p2_type == 'ai_random':
player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type)
player2, created = Player.objects.get_or_create(username="AI Random", type=p2_type)
game = Game.objects.create(player1=player1, player2=player2)
return redirect(game)
raise Http404
def new_move(request, game_id):
"""
Save a new game's move to database.
"""
game = Game.objects.get(id=game_id)
player = request.POST.get('player')
x = request.POST.get('x')
y = request.POST.get('y')
m, action = game.add_move_and_get_action(player, x, y)
return render_to_string(request, str(action))
def rematch(request, game_id):
old_game = Game.objects.get(id=game_id)
game = Game.objects.create(
player1=old_game.player2,
player2=old_game.player1,
)
return redirect(game)
def ai_next_move(request, game_id):
game = Game.objects.get(id=game_id)
x, y = game.get_next_random_move()
return HttpResponse(json.dumps({'x': x, 'y': y}), content_type='application/json')
|
apache-2.0
|
Python
|
f958ef0179f72adb4b8c7243fc30395de1c31d6b
|
add authentication now required by https://issues.apache.org/jira
|
remibergsma/cloudstack-docs-rn,apache/cloudstack-docs-rn,remibergsma/cloudstack-docs-rn,apache/cloudstack-docs-rn,apache/cloudstack-docs-rn,remibergsma/cloudstack-docs-rn
|
utils/jira.py
|
utils/jira.py
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information#
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""jira.py: Output jira issues from https://issues.apache.org/jira into RST format for Apche CloudStack Release-Notes.
Usage:
jira.py FILTERID -p USERNAME -u PASSWORD
jira.py (-h | --help)
jira.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
import requests
import json
import sys
import pprint
if __name__ == '__main__':
arguments = docopt(__doc__, version='jira.py 2.0')
#print(arguments)
#print arguments['FILTERID']
#print arguments['PASSWORD']
#print arguments['USERNAME']
filterurl='https://issues.apache.org/jira/rest/api/2/filter/' + arguments['FILTERID']
r=requests.get(filterurl, auth=(arguments['USERNAME'],arguments['PASSWORD']))
rlist=r.json()['searchUrl']
get_all=requests.get(rlist, auth=(arguments['USERNAME'],arguments['PASSWORD'])).json()
count=get_all['total']
#print count
n, m = divmod(count, 50)
for i in range(n+1):
issueslist=get_all['issues']
for issue in issueslist:
'''assignee=issue['fields']['assignee']['displayName']
reporter=issue['fields']['reporter']['displayName']
'''
print '`'+ issue['key'] + ' <https://issues.apache.org/jira/browse/' + issue['key'] + '>`_' + ' ' + issue['fields']['summary'][:80] + '...'
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information#
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import requests
import json
import sys
import pprint
filterid=str(sys.argv[1])
filterurl='https://issues.apache.org/jira/rest/api/2/filter/' + filterid
r=requests.get(filterurl)
rlist=r.json()['searchUrl']
count=requests.get(rlist).json()['total']
n, m = divmod(count, 50)
for i in range(n+1):
issueslist=requests.get(rlist+'&startAt='+str(i*50)).json()['issues']
for issue in issueslist:
'''assignee=issue['fields']['assignee']['displayName']
reporter=issue['fields']['reporter']['displayName']
'''
print '`'+ issue['key'] + ' <https://issues.apache.org/jira/browse/' + issue['key'] + '>`_' + ' ' + issue['fields']['summary'][:80] + '...'
|
apache-2.0
|
Python
|
b631482e00e59224cb32682f6a9d221748368158
|
Remove os package
|
prontotools/zendesk-tickets-machine,prontotools/zendesk-tickets-machine,prontotools/zendesk-tickets-machine,prontotools/zendesk-tickets-machine
|
fabfile.py
|
fabfile.py
|
from datetime import datetime
from fabric.api import (
cd,
env,
local,
put,
run,
sudo,
task
)
PRODUCTION_IP = '54.154.235.243'
PROJECT_DIRECTORY = '/home/ubuntu/ztm/'
BACKUP_DIRECTORY = '/home/ubuntu/backup/'
COMPOSE_FILE = 'compose-production.yml'
@task
def production():
env.run = sudo
env.hosts = [
'ubuntu@' + PRODUCTION_IP + ':22',
]
@task
def create_project_directory():
run('mkdir -p ' + PROJECT_DIRECTORY)
@task
def update_compose_file():
put('./' + COMPOSE_FILE, PROJECT_DIRECTORY)
@task
def backup():
backup_time = datetime.now().strftime('%Y-%m-%d_%H%M')
with cd(BACKUP_DIRECTORY):
command = 'tar -cjvf ztm-' + backup_time + \
'.tar.bz2 ' + PROJECT_DIRECTORY
env.run(command)
command = 's3cmd sync ' + BACKUP_DIRECTORY + ' ' \
's3://zendesk-tickets-machine'
run(command)
@task
def build():
command = 'docker build -t ' \
'133506877714.dkr.ecr.eu-west-1.amazonaws.com/ztm ' \
'-f ./compose/django/Dockerfile .'
local(command)
@task
def push():
local('docker push 133506877714.dkr.ecr.eu-west-1.amazonaws.com/ztm')
@task
def compose_up():
with cd(PROJECT_DIRECTORY):
env.run('docker-compose -f ' + COMPOSE_FILE + ' pull')
env.run('docker-compose -f ' + COMPOSE_FILE + ' up -d')
@task
def deploy():
build()
push()
create_project_directory()
update_compose_file()
compose_up()
|
from datetime import datetime
import os
from fabric.api import (
cd,
env,
local,
put,
run,
sudo,
task
)
PRODUCTION_IP = '54.154.235.243'
PROJECT_DIRECTORY = '/home/ubuntu/ztm/'
BACKUP_DIRECTORY = '/home/ubuntu/backup/'
COMPOSE_FILE = 'compose-production.yml'
@task
def production():
env.run = sudo
env.hosts = [
'ubuntu@' + PRODUCTION_IP + ':22',
]
@task
def create_project_directory():
run('mkdir -p ' + PROJECT_DIRECTORY)
@task
def update_compose_file():
put('./' + COMPOSE_FILE, PROJECT_DIRECTORY)
@task
def backup():
backup_time = datetime.now().strftime('%Y-%m-%d_%H%M')
with cd(BACKUP_DIRECTORY):
command = 'tar -cjvf ztm-' + backup_time + \
'.tar.bz2 ' + PROJECT_DIRECTORY
env.run(command)
command = 's3cmd sync ' + BACKUP_DIRECTORY + ' ' \
's3://zendesk-tickets-machine'
run(command)
@task
def build():
command = 'docker build -t ' \
'133506877714.dkr.ecr.eu-west-1.amazonaws.com/ztm ' \
'-f ./compose/django/Dockerfile .'
local(command)
@task
def push():
local('docker push 133506877714.dkr.ecr.eu-west-1.amazonaws.com/ztm')
@task
def compose_up():
with cd(PROJECT_DIRECTORY):
env.run('docker-compose -f ' + COMPOSE_FILE + ' pull')
env.run('docker-compose -f ' + COMPOSE_FILE + ' up -d')
@task
def deploy():
build()
push()
create_project_directory()
update_compose_file()
compose_up()
|
mit
|
Python
|
ea2b67cd016492f1869f50f899360c3151977e79
|
Fix docstring term
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
csunplugged/topics/management/commands/_GlossaryTermsLoader.py
|
csunplugged/topics/management/commands/_GlossaryTermsLoader.py
|
"""Custom loader for loading glossary terms."""
import os.path
from django.db import transaction
from utils.BaseLoader import BaseLoader
from topics.models import GlossaryTerm
class GlossaryTermsLoader(BaseLoader):
"""Custom loader for loading glossary terms."""
def __init__(self, glossary_folder_path, glossary_terms, structure_file_path, BASE_PATH):
"""Create the loader for loading glossary terms.
Args:
glossary_folder_path: Folder path to definition files (string).
glossary_terms: List of glossary term slugs (list).
structure_file_path: Path to the config file, used for errors.
BASE_PATH: Base file path (string).
"""
super().__init__(BASE_PATH)
self.glossary_terms = glossary_terms
self.structure_file_path = structure_file_path
self.BASE_PATH = os.path.join(self.BASE_PATH, glossary_folder_path)
@transaction.atomic
def load(self):
"""Load the glossary content into the database."""
for glossary_slug in self.glossary_terms:
filename = "{term}.md".format(term=glossary_slug)
definition_file_path = os.path.join(
self.BASE_PATH,
filename
)
glossary_term_content = self.convert_md_file(
definition_file_path,
self.structure_file_path
)
# Create glossary term and save to database
glossary_term = GlossaryTerm(
slug=glossary_slug,
term=glossary_term_content.title,
definition=glossary_term_content.html_string
)
glossary_term.save()
self.log("Added Glossary Term: {}".format(glossary_term.__str__()))
# Print log output
self.print_load_log()
|
"""Custom loader for loading glossary terms."""
import os.path
from django.db import transaction
from utils.BaseLoader import BaseLoader
from topics.models import GlossaryTerm
class GlossaryTermsLoader(BaseLoader):
"""Custom loader for loading glossary terms."""
def __init__(self, glossary_folder_path, glossary_terms, structure_file_path, BASE_PATH):
"""Create the loader for loading programming exercises.
Args:
glossary_folder_path: Folder path to definition files (string).
glossary_terms: List of glossary term slugs (list).
structure_file_path: Path to the config file, used for errors.
BASE_PATH: Base file path (string).
"""
super().__init__(BASE_PATH)
self.glossary_terms = glossary_terms
self.structure_file_path = structure_file_path
self.BASE_PATH = os.path.join(self.BASE_PATH, glossary_folder_path)
@transaction.atomic
def load(self):
"""Load the glossary content into the database."""
for glossary_slug in self.glossary_terms:
filename = "{term}.md".format(term=glossary_slug)
definition_file_path = os.path.join(
self.BASE_PATH,
filename
)
glossary_term_content = self.convert_md_file(
definition_file_path,
self.structure_file_path
)
# Create glossary term and save to database
glossary_term = GlossaryTerm(
slug=glossary_slug,
term=glossary_term_content.title,
definition=glossary_term_content.html_string
)
glossary_term.save()
self.log("Added Glossary Term: {}".format(glossary_term.__str__()))
# Print log output
self.print_load_log()
|
mit
|
Python
|
179308a7061b2e0b1bb10d5c7757a611196608db
|
change fabfile
|
fabiansinz/pipecontrol,fabiansinz/pipecontrol,fabiansinz/pipecontrol
|
fabfile.py
|
fabfile.py
|
from distutils.util import strtobool
from fabric.api import local, abort, run, sudo
from fabric.context_managers import cd, settings, hide, shell_env
from fabric.contrib.console import confirm
from getpass import getpass
from fabric.utils import puts
from fabric.state import env
env.control_dir = 'pipecontrol'
def with_sudo():
"""
Prompts and sets the sudo password for all following commands.
Use like
fab with_sudo command
"""
env.sudo_password = getpass('Please enter sudo password: ')
env.password = env.sudo_password
def down():
with cd(env.control_dir):
sudo('docker-compose down')
def get_branch(gitdir):
"""
Gets the branch of a git directory.
Args:
gitdir: path of the git directory
Returns: current active branch
"""
return local('git symbolic-ref --short HEAD', capture=True)
def pull():
with cd(env.control_dir):
branch = get_branch(env.control_dir)
sudo('git reset --hard')
sudo('git clean -fd')
sudo('git checkout {}'.format(branch))
sudo('git pull origin ' + branch)
def build():
with cd(env.control_dir):
sudo('docker-compose build pipecontrol')
def start():
with cd(env.control_dir):
sudo('docker-compose up -d pipecontrol')
def sync_files():
local('scp dj_local_conf.json ' + env.host_string + ':' + env.control_dir)
def deploy():
with settings(warn_only=True):
pull()
sync_files()
with_sudo()
down()
build()
start()
|
from distutils.util import strtobool
from fabric.api import local, abort, run, sudo
from fabric.context_managers import cd, settings, hide, shell_env
from fabric.contrib.console import confirm
from getpass import getpass
from fabric.utils import puts
from fabric.state import env
env.control_dir = 'pipecontrol'
def with_sudo():
"""
Prompts and sets the sudo password for all following commands.
Use like
fab with_sudo command
"""
env.sudo_password = getpass('Please enter sudo password: ')
env.password = env.sudo_password
def down():
with cd(env.control_dir):
sudo('docker-compose down')
def get_branch(gitdir):
"""
Gets the branch of a git directory.
Args:
gitdir: path of the git directory
Returns: current active branch
"""
return local('git symbolic-ref --short HEAD', capture=True)
def pull():
with cd(env.control_dir):
branch = get_branch(env.control_dir)
sudo('git reset --hard')
sudo('git clean -fd')
sudo('git checkout {}'.format(branch))
sudo('git pull origin ' + branch)
def build():
with cd(env.control_dir):
sudo('docker-compose build pipecontrol')
def start():
with cd(env.control_dir):
sudo('docker-compose up -d pipecontrol')
def sync_files():
local('scp dj_local_conf.json ' + env.host_string + ':' + env.control_dir)
def deploy():
with settings(warn_only=True):
with_sudo()
down()
pull()
sync_files()
build()
start()
|
mit
|
Python
|
3d2b08a971ded9fa4bf3a3d7c69c15e589b6adab
|
add v0.8.1 (#21798)
|
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/py-parso/package.py
|
var/spack/repos/builtin/packages/py-parso/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyParso(PythonPackage):
"""Parso is a Python parser that supports error recovery and round-trip parsing
for different Python versions (in multiple Python versions).
Parso is also able to list multiple syntax errors
in your python file."""
pypi = "parso/parso-0.6.1.tar.gz"
version('0.8.1', sha256='8519430ad07087d4c997fda3a7918f7cfa27cb58972a8c89c2a0295a1c940e9e')
version('0.6.1', sha256='56b2105a80e9c4df49de85e125feb6be69f49920e121406f15e7acde6c9dfc57')
version('0.4.0', sha256='2e9574cb12e7112a87253e14e2c380ce312060269d04bd018478a3c92ea9a376')
depends_on('[email protected]:', type=('build', 'run'), when='@0.8.1:')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'), when='@0.6.1:')
depends_on('[email protected]:2.8,3.3:', type=('build', 'run'), when='@0.4.0:')
depends_on('py-setuptools', type='build')
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyParso(PythonPackage):
"""Parso is a Python parser that supports error recovery and round-trip parsing
for different Python versions (in multiple Python versions).
Parso is also able to list multiple syntax errors
in your python file."""
pypi = "parso/parso-0.6.1.tar.gz"
version('0.6.1', sha256='56b2105a80e9c4df49de85e125feb6be69f49920e121406f15e7acde6c9dfc57')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
|
lgpl-2.1
|
Python
|
deb2bb30b6f584a7a899d7c161605efadee468cf
|
add optional /geo suffix to /pollingstations
|
DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
|
polling_stations/api/pollingstations.py
|
polling_stations/api/pollingstations.py
|
from rest_framework.decorators import list_route
from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import GenericViewSet
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from pollingstations.models import PollingStation
class PollingStationSerializer(GeoFeatureModelSerializer):
class Meta:
model = PollingStation
geo_field = 'location'
fields = ('council', 'postcode', 'address', 'location')
class PollingStationViewSet(GenericViewSet, ListModelMixin):
queryset = PollingStation.objects.all()
serializer_class = PollingStationSerializer
@list_route(url_path='geo')
def geo(self, request, format=None):
return self.list(request, format=None)
|
from rest_framework.mixins import ListModelMixin
from rest_framework.viewsets import GenericViewSet
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from pollingstations.models import PollingStation
class PollingStationSerializer(GeoFeatureModelSerializer):
class Meta:
model = PollingStation
geo_field = 'location'
fields = ('council', 'postcode', 'address', 'location')
class PollingStationViewSet(GenericViewSet, ListModelMixin):
queryset = PollingStation.objects.all()
serializer_class = PollingStationSerializer
|
bsd-3-clause
|
Python
|
f961c1031285c5852e46f880668b963d27245ace
|
Rename _table to table
|
davidrobles/mlnd-capstone-code
|
examples/tictactoe-qlearning.py
|
examples/tictactoe-qlearning.py
|
import random
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.player import AlphaBeta, RandPlayer
from capstone.util import ZobristHashing
class TabularQLearning(object):
def __init__(self, env, policy=RandPlayer(), alpha=0.1, gamma=0.99, n_episodes=1000):
self.env = env
self.policy = RandPlayer()
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
self.table = {}
def max_q_value(self, state, actions):
if not actions:
return 0
best_value = -100000
for next_action in actions:
temp_value = self.table.get((state, next_action), random.random() - 0.5)
if temp_value > best_value:
best_value = temp_value
return best_value
def learn(self):
import random
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
while not self.env.is_terminal():
print('Step {}'.format(step))
state = self.env.cur_state()
action = random.choice(self.env.actions())
reward, next_state = self.env.do_action(action)
max_q_value = self.max_q_value(next_state, self.env.actions())
q_value = self.table.get((state, action), 0.1)
update_value = reward + (self.gamma * max_q_value) - q_value
self.table[(state, action)] = q_value + (self.alpha * update_value)
step += 1
print('Results:')
for key, value in self.table.iteritems():
print(key)
print(value)
print('*' * 60)
game = TicTacToe(
'X-O'
'XO-'
'-XO'
)
ab = AlphaBeta()
mdp = GameMDP(game, ab, 1)
env = Environment(mdp)
td0 = TabularQLearning(env)
td0.learn()
|
import random
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.player import AlphaBeta, RandPlayer
from capstone.util import ZobristHashing
class TabularQLearning(object):
def __init__(self, env, policy=RandPlayer(), alpha=0.1, gamma=0.99, n_episodes=1000):
self.env = env
self.policy = RandPlayer()
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
self._table = {}
def max_q_value(self, state, actions):
if not actions:
return 0
best_value = -100000
for next_action in actions:
temp_value = self._table.get((state, next_action), random.random() - 0.5)
if temp_value > best_value:
best_value = temp_value
return best_value
def learn(self):
import random
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
while not self.env.is_terminal():
print('Step {}'.format(step))
state = self.env.cur_state()
action = random.choice(self.env.actions())
reward, next_state = self.env.do_action(action)
max_q_value = self.max_q_value(next_state, self.env.actions())
q_value = self._table.get((state, action), 0.1)
update_value = reward + (self.gamma * max_q_value) - q_value
self._table[(state, action)] = q_value + (self.alpha * update_value)
step += 1
print('Results:')
for key, value in self._table.iteritems():
print(key)
print(value)
print('*' * 60)
game = TicTacToe(
'X-O'
'XO-'
'-XO'
)
ab = AlphaBeta()
mdp = GameMDP(game, ab, 1)
env = Environment(mdp)
td0 = TabularQLearning(env)
td0.learn()
|
mit
|
Python
|
dc40793ad27704c83dbbd2e923bf0cbcd7cb00ed
|
Handle both event instanciation from object and from serialized events
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
polyaxon/event_manager/event_service.py
|
polyaxon/event_manager/event_service.py
|
from libs.services import Service
class EventService(Service):
__all__ = ('record', 'setup')
event_manager = None
def can_handle(self, event_type):
return isinstance(event_type, str) and self.event_manager.knows(event_type)
def get_event(self, event_type, event_data=None, instance=None, **kwargs):
if instance or not event_data:
return self.event_manager.get(
event_type,
).from_instance(instance, **kwargs)
return self.event_manager.get(
event_type,
).from_event_data(event_data=event_data, **kwargs)
def record(self, event_type, event_data=None, instance=None, **kwargs):
""" Validate and record an event.
>>> record('event.action', object_instance)
"""
if not self.is_setup:
return
if not self.can_handle(event_type=event_type):
return
event = self.get_event(event_type=event_type,
event_data=event_data,
instance=instance,
**kwargs)
self.record_event(event)
return event
def record_event(self, event):
""" Record an event.
>>> record_event(Event())
"""
pass
|
from libs.services import Service
class EventService(Service):
__all__ = ('record', 'setup')
event_manager = None
def can_handle(self, event_type):
return isinstance(event_type, str) and self.event_manager.knows(event_type)
def get_event(self, event_type, instance, **kwargs):
return self.event_manager.get(
event_type,
).from_instance(instance, **kwargs)
def record(self, event_type, instance=None, **kwargs):
""" Validate and record an event.
>>> record('event.action', object_instance)
"""
if not self.is_setup:
return
if not self.can_handle(event_type=event_type):
return
event = self.get_event(event_type=event_type, instance=instance, **kwargs)
self.record_event(event)
def record_event(self, event):
""" Record an event.
>>> record_event(Event())
"""
pass
|
apache-2.0
|
Python
|
19e5608b2d8d16e2c80390927658e8f322dd10e6
|
Add new encodings. (#3292)
|
tswast/google-cloud-python,calpeyser/google-cloud-python,tartavull/google-cloud-python,dhermes/google-cloud-python,dhermes/google-cloud-python,calpeyser/google-cloud-python,googleapis/google-cloud-python,GoogleCloudPlatform/gcloud-python,dhermes/gcloud-python,jonparrott/google-cloud-python,googleapis/google-cloud-python,tswast/google-cloud-python,tartavull/google-cloud-python,jonparrott/google-cloud-python,tseaver/gcloud-python,jonparrott/gcloud-python,tseaver/google-cloud-python,tswast/google-cloud-python,jonparrott/gcloud-python,tseaver/google-cloud-python,GoogleCloudPlatform/gcloud-python,tseaver/gcloud-python,dhermes/google-cloud-python,dhermes/gcloud-python,tseaver/google-cloud-python
|
speech/google/cloud/speech/encoding.py
|
speech/google/cloud/speech/encoding.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encodings used by the Google Cloud Speech API."""
class Encoding(object):
"""Audio encoding types.
See:
https://cloud.google.com/speech/reference/rest/v1/RecognitionConfig#AudioEncoding
"""
LINEAR16 = 'LINEAR16'
"""LINEAR16 encoding type."""
FLAC = 'FLAC'
"""FLAC encoding type."""
MULAW = 'MULAW'
"""MULAW encoding type."""
AMR = 'AMR'
"""AMR encoding type."""
AMR_WB = 'AMR_WB'
"""AMR_WB encoding type."""
OGG_OPUS = 'OGG_OPUS'
"""OGG_OPUS encoding type."""
SPEEX_WITH_HEADER_BYTE = 'SPEEX_WITH_HEADER_BYTE'
"""SPEEX_WITH_HEADER_BYTE encoding type."""
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encodings used by the Google Cloud Speech API."""
class Encoding(object):
"""Audio encoding types.
See:
https://cloud.google.com/speech/reference/rest/v1/RecognitionConfig#AudioEncoding
"""
LINEAR16 = 'LINEAR16'
"""LINEAR16 encoding type."""
FLAC = 'FLAC'
"""FLAC encoding type."""
MULAW = 'MULAW'
"""MULAW encoding type."""
AMR = 'AMR'
"""AMR encoding type."""
AMR_WB = 'AMR_WB'
"""AMR_WB encoding type."""
|
apache-2.0
|
Python
|
b2f2fcd9322837ea096a28ae584c8d236232c5a5
|
remove jython2.5 compat code
|
bozzzzo/qpid-proton,astitcher/qpid-proton,bozzzzo/qpid-proton,gemmellr/qpid-proton-j,Karm/qpid-proton,ssorj/qpid-proton,gemmellr/qpid-proton,alanconway/qpid-proton,prestona/qpid-proton,kgiusti/qpid-proton,apache/qpid-proton,prestona/qpid-proton,prestona/qpid-proton,Karm/qpid-proton,ChugR/qpid-proton,ChugR/qpid-proton,astitcher/qpid-proton,alanconway/qpid-proton,ChugR/qpid-proton,Karm/qpid-proton,alanconway/qpid-proton,kgiusti/qpid-proton,bozzzzo/qpid-proton,gemmellr/qpid-proton,bozzzzo/qpid-proton,astitcher/qpid-proton,bozzzzo/qpid-proton,Karm/qpid-proton,ChugR/qpid-proton,bozzzzo/qpid-proton,prestona/qpid-proton,Karm/qpid-proton,Karm/qpid-proton,kgiusti/qpid-proton,prestona/qpid-proton,gemmellr/qpid-proton,prestona/qpid-proton,apache/qpid-proton,alanconway/qpid-proton,kgiusti/qpid-proton,alanconway/qpid-proton,prestona/qpid-proton,gemmellr/qpid-proton-j,prestona/qpid-proton,apache/qpid-proton,prestona/qpid-proton,Karm/qpid-proton,Karm/qpid-proton,gemmellr/qpid-proton,prestona/qpid-proton,Karm/qpid-proton,gemmellr/qpid-proton,ssorj/qpid-proton,bozzzzo/qpid-proton,ssorj/qpid-proton,prestona/qpid-proton,Karm/qpid-proton,bozzzzo/qpid-proton,bozzzzo/qpid-proton,kgiusti/qpid-proton,apache/qpid-proton,gemmellr/qpid-proton,apache/qpid-proton,alanconway/qpid-proton,ssorj/qpid-proton,bozzzzo/qpid-proton,Karm/qpid-proton,ChugR/qpid-proton,astitcher/qpid-proton,ssorj/qpid-proton,ssorj/qpid-proton,ChugR/qpid-proton,kgiusti/qpid-proton,astitcher/qpid-proton,Karm/qpid-proton,apache/qpid-proton,prestona/qpid-proton,bozzzzo/qpid-proton,astitcher/qpid-proton
|
proton-j/src/main/resources/cproton.py
|
proton-j/src/main/resources/cproton.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
The cproton module defines a java implementation of the C interface as
exposed to python via swig. This allows tests defined in python to run
against both the C and Java protocol implementations.
"""
# @todo(kgiusti) dynamically set these via filters in the pom.xml file
PN_VERSION_MAJOR = 0
PN_VERSION_MINOR = 0
from ctypes import *
from cobject import *
from cerror import *
from ccodec import *
from cengine import *
from csasl import *
from cssl import *
from cdriver import *
from cmessenger import *
from cmessage import *
from curl import *
from creactor import *
from chandlers import *
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
The cproton module defines a java implementation of the C interface as
exposed to python via swig. This allows tests defined in python to run
against both the C and Java protocol implementations.
"""
# @todo(kgiusti) dynamically set these via filters in the pom.xml file
PN_VERSION_MAJOR = 0
PN_VERSION_MINOR = 0
from ctypes import *
from cobject import *
from cerror import *
from ccodec import *
from cengine import *
from csasl import *
from cssl import *
from cdriver import *
from cmessenger import *
from cmessage import *
from curl import *
from creactor import *
from chandlers import *
# XXX: this is for compatibility, apparently the version of jython we
# use doesn't have next, we should remove this when we upgrade
_DEF = object()
def next(iter, default=_DEF):
try:
return iter.next()
except StopIteration:
if default is _DEF:
raise
else:
return default
|
apache-2.0
|
Python
|
27354d34f4f9117c0a3167f4af7d3e3c83d51c59
|
bump version # from 0.6.0 -> 0.7.0
|
simonsdave/cloudfeaster,simonsdave/cloudfeaster,simonsdave/cloudfeaster
|
cloudfeaster/__init__.py
|
cloudfeaster/__init__.py
|
__version__ = '0.7.0'
|
__version__ = '0.6.0'
|
mit
|
Python
|
38d298a81aa8fcd85b16b3879c1665085e5450be
|
Add description where student should add logic
|
introprogramming/exercises,introprogramming/exercises,introprogramming/exercises
|
exercises/control_flow/prime.py
|
exercises/control_flow/prime.py
|
#!/bin/python
def is_prime(integer):
"""Determines weather integer is prime, returns a boolean value"""
# add logic here to make sure number < 2 are not prime
for i in range(2, integer):
if integer % i == 0:
return False
return True
print("Should be False (0): %r" % is_prime(0))
print("Should be False (1): %r" % is_prime(1))
print("Should be True (2): %r" % is_prime(2))
print("Should be False (8): %r" % is_prime(8))
print("Should be True (17): %r"% is_prime(17))
# Your code below:
|
#!/bin/python
def is_prime(integer):
"""Determines weather integer is prime, returns a boolean value"""
for i in range(2, integer):
if integer % i == 0:
return False
return True
print("Should be False (0): %r" % is_prime(0))
print("Should be False (1): %r" % is_prime(1))
print("Should be True (2): %r" % is_prime(2))
print("Should be False (8): %r" % is_prime(8))
print("Should be True (17): %r"% is_prime(17))
# Your code below:
|
mit
|
Python
|
d5b326d8d368d2ac75c6e078572df8c28704c163
|
Use the app string version of foreign keying. It prevents a circular import.
|
AeroNotix/django-timetracker,AeroNotix/django-timetracker,AeroNotix/django-timetracker
|
vcs/models.py
|
vcs/models.py
|
from django.db import models
class Activity(models.Model):
group = models.CharField(max_length=4)
grouptype = models.TextField()
groupdetail = models.TextField()
details = models.TextField()
disabled = models.BooleanField()
time = models.DecimalField(decimal_places=2, max_digits=10)
unique_together = (("group", "grouptype", "disabled", "time"),)
class ActivityEntry(models.Model):
user = models.ManyToManyField(
'tracker.Tbluser',
related_name="user_foreign"
)
activity = models.ManyToManyField(
Activity,
related_name="activity_foreign"
)
amount = models.BigIntegerField()
def time(self):
return self.activity.time * self.amount
|
from django.db import models
class Activity(models.Model):
group = models.CharField(max_length=4)
grouptype = models.TextField()
groupdetail = models.TextField()
details = models.TextField()
disabled = models.BooleanField()
time = models.DecimalField(decimal_places=2, max_digits=10)
unique_together = (("group", "grouptype", "disabled", "time"),)
class ActivityEntry(models.Model):
from timetracker.tracker.models import Tbluser
user = models.ManyToManyField(
Tbluser,
related_name="user_foreign"
)
activity = models.ManyToManyField(
Activity,
related_name="activity_foreign"
)
amount = models.BigIntegerField()
def time(self):
return self.activity.time * self.amount
|
bsd-3-clause
|
Python
|
3b001eac78349e1e3f6235b14def0fa6752f6fba
|
add more dedup special cases
|
total-impact/software,total-impact/software,total-impact/software,total-impact/depsy,Impactstory/depsy,total-impact/depsy,Impactstory/depsy,total-impact/depsy,Impactstory/depsy,total-impact/software,total-impact/depsy,Impactstory/depsy
|
models/dedup_special_cases.py
|
models/dedup_special_cases.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
data = [
{
"main_profile": ("Hadley Wickham", "cran:reshape"),
"people_to_merge": [
("Hadley Wickham", "cran:GGally")
]
},
{
"main_profile": ("Barry Rowlingson", "cran:geonames"),
"people_to_merge": [
("B. S. Rowlingson", "cran:lgcp"),
("Barry Rowlingson", "cran:stpp")
]
},
{
"main_profile": ("Thomas Robitaille", "pypi:ATpy"),
"people_to_merge": [
("Thomas Robitaille", "pypi:PyAVM"),
("Tom Robitaille", "pypi:spectral-cube")
]
},
{
"main_profile": (u"Eduard Szöcs", "cran:webchem"),
"people_to_merge": [
("Eduard Szoecs", "cran:fortunes")
]
},
{
"main_profile": (u"Daniel Münch", "cran:webchem"),
"people_to_merge": [
("Daniel Muench", "cran:webchem")
]
},
{
"main_profile": (u"Daniel Münch", "cran:webchem"),
"people_to_merge": [
("Daniel Muench", "cran:webchem")
]
},
{
"main_profile": (u"Zhian N. Kamvar", "cran:poppr"),
"people_to_merge": [
("Zhian Kamvar", "cran:mmod")
]
},
{
"main_profile": (u"Min RK", "pypi:ggplot"),
"people_to_merge": [
(u"Min Ragan-Kelley", "pypi:pyzmq")
]
},
{
"main_profile": (u"Benjamin M. Taylor", "cran:cruts"),
"people_to_merge": [
(u"B. M. Taylor", "cran:lgcp")
]
}
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
data = [
{
"main_profile": ("Hadley Wickham", "cran:reshape"),
"people_to_merge": [
("Hadley Wickham", "cran:GGally")
]
},
{
"main_profile": ("Barry Rowlingson", "cran:geonames"),
"people_to_merge": [
("B. S. Rowlingson", "cran:lgcp"),
("Barry Rowlingson", "cran:stpp")
]
},
{
"main_profile": ("Thomas Robitaille", "pypi:ATpy"),
"people_to_merge": [
("Thomas Robitaille", "pypi:PyAVM"),
("Tom Robitaille", "pypi:spectral-cube")
]
},
{
"main_profile": (u"Eduard Szöcs", "cran:webchem"),
"people_to_merge": [
("Eduard Szoecs", "cran:fortunes")
]
},
{
"main_profile": (u"Daniel Münch", "cran:webchem"),
"people_to_merge": [
("Daniel Muench", "cran:webchem")
]
}
]
|
mit
|
Python
|
1163bc40a15eb2461c6ead570db8a8d211f1f5be
|
Sort room table by room name by default
|
agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft
|
web/blueprints/facilities/tables.py
|
web/blueprints/facilities/tables.py
|
from web.blueprints.helpers.table import BootstrapTable, Column
class SiteTable(BootstrapTable):
def __init__(self, *a, **kw):
super().__init__(*a, columns=[
Column('site', 'Site', formatter='table.linkFormatter'),
Column('buildings', 'Buildings', formatter='table.multiBtnFormatter'),
], **kw)
class BuildingLevelRoomTable(BootstrapTable):
def __init__(self, *a, **kw):
super().__init__(*a, columns=[
Column('room', 'Raum', formatter='table.linkFormatter'),
Column('inhabitants', 'Bewohner', formatter='table.multiBtnFormatter'),
], table_args={
'data-sort-name': 'room',
'data-query-params': 'perhaps_all_users_query_params',
}, **kw)
def generate_toolbar(self):
"""Generate a toolbar with a "Display all users" button
"""
yield '<a href="#" id="rooms-toggle-all-users" class="btn btn-default" role="button">'
yield '<span class="glyphicon glyphicon-user"></span>'
yield 'Display all users'
yield '</a>'
class RoomLogTable(BootstrapTable):
def __init__(self, *a, **kw):
super().__init__(*a, columns=[
Column('created_at', 'Erstellt um'),
Column('user', 'Nutzer', formatter='table.linkFormatter'),
Column('message', 'Nachricht'),
], **kw)
|
from web.blueprints.helpers.table import BootstrapTable, Column
class SiteTable(BootstrapTable):
def __init__(self, *a, **kw):
super().__init__(*a, columns=[
Column('site', 'Site', formatter='table.linkFormatter'),
Column('buildings', 'Buildings', formatter='table.multiBtnFormatter'),
], **kw)
class BuildingLevelRoomTable(BootstrapTable):
def __init__(self, *a, **kw):
super().__init__(*a, columns=[
Column('room', 'Raum', formatter='table.linkFormatter'),
Column('inhabitants', 'Bewohner', formatter='table.multiBtnFormatter'),
], table_args={
'data-query-params': 'perhaps_all_users_query_params',
}, **kw)
def generate_toolbar(self):
"""Generate a toolbar with a "Display all users" button
"""
yield '<a href="#" id="rooms-toggle-all-users" class="btn btn-default" role="button">'
yield '<span class="glyphicon glyphicon-user"></span>'
yield 'Display all users'
yield '</a>'
class RoomLogTable(BootstrapTable):
def __init__(self, *a, **kw):
super().__init__(*a, columns=[
Column('created_at', 'Erstellt um'),
Column('user', 'Nutzer', formatter='table.linkFormatter'),
Column('message', 'Nachricht'),
], **kw)
|
apache-2.0
|
Python
|
1abbca6200fa3da0a3216b18b1385f3575edb49a
|
Move import of Django's get_version into django-registration's get_version, to avoid dependency-order problems.
|
myimages/django-registration,Troyhy/django-registration,futurecolors/django-registration,hacklabr/django-registration,akvo/django-registration,sandipagr/django-registration,futurecolors/django-registration,liberation/django-registration,euanlau/django-registration,tdruez/django-registration,Troyhy/django-registration,gone/django-registration,hacklabr/django-registration,mypebble/djregs,ubernostrum/django-registration,euanlau/django-registration,sandipagr/django-registration,dirtycoder/django-registration,danielsamuels/django-registration,liberation/django-registration,awakeup/django-registration,gone/django-registration,kennydude/djregs,akvo/django-registration
|
registration/__init__.py
|
registration/__init__.py
|
VERSION = (0, 9, 0, 'beta', 1)
def get_version():
from django.utils.version import get_version as django_get_version
return django_get_version(VERSION) # pragma: no cover
|
from django.utils.version import get_version as django_get_version
VERSION = (0, 9, 0, 'beta', 1)
def get_version():
return django_get_version(VERSION) # pragma: no cover
|
bsd-3-clause
|
Python
|
db5cb125fb9b0dd4d3b781d0b85250bdc7a52cba
|
fix error msg
|
efiop/dvc,dmpetrov/dataversioncontrol,dataversioncontrol/dvc,dmpetrov/dataversioncontrol,dataversioncontrol/dvc,efiop/dvc
|
dvc/command/add.py
|
dvc/command/add.py
|
from dvc.exceptions import DvcException
from dvc.command.common.base import CmdBase
class CmdAdd(CmdBase):
def run(self):
for target in self.args.targets:
try:
self.project.add(target)
except DvcException as ex:
self.project.logger.error('Failed to add \'{}\''.format(target), ex)
return 1
return 0
|
from dvc.exceptions import DvcException
from dvc.command.common.base import CmdBase
class CmdAdd(CmdBase):
def run(self):
for target in self.args.targets:
try:
self.project.add(target)
except DvcException as ex:
self.project.logger.error('Failed to add {}', ex)
return 1
return 0
|
apache-2.0
|
Python
|
b5d32a3b1b8e85222497c4736c0c6707003dc848
|
Fix broken database IO tests.
|
live-clones/pybtex
|
pybtex/tests/database_test/__init__.py
|
pybtex/tests/database_test/__init__.py
|
# Copyright (C) 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pkgutil
from unittest import TestCase
import yaml
from io import BytesIO, TextIOWrapper, BufferedWriter
from pybtex.plugin import find_plugin
class DatabaseIOTest(TestCase):
def setUp(self):
reference_data = pkgutil.get_data('pybtex', 'tests/database_test/reference_data.yaml')
self.reference_data = yaml.load(reference_data)
def _test_input(self, plugin):
parser = find_plugin('pybtex.database.input', plugin)(encoding='UTF-8')
writer = find_plugin('pybtex.database.output', plugin)(encoding='UTF-8')
stream = BytesIO()
writer_stream = TextIOWrapper(stream, 'UTF-8') if writer.unicode_io else stream
parser_stream = TextIOWrapper(stream, 'UTF-8') if parser.unicode_io else stream
writer.write_stream(self.reference_data, writer_stream)
writer_stream.flush()
stream.seek(0)
parser.parse_stream(parser_stream)
loaded_data = parser.data
self.assertEqual(loaded_data, self.reference_data)
def test_bibtex_input(self):
self._test_input('bibtex')
def test_bibyaml_input(self):
self._test_input('bibyaml')
def test_bibtexml_input(self):
# BibTeXML does not support TeX preambles AFAIK
self.reference_data._preamble = []
self._test_input('bibtexml')
|
# Copyright (C) 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pkgutil
from unittest import TestCase
import yaml
from io import BytesIO, TextIOWrapper, BufferedWriter
from pybtex.plugin import find_plugin
class DatabaseIOTest(TestCase):
def setUp(self):
reference_data = pkgutil.get_data('pybtex', 'tests/database_test/reference_data.yaml')
self.reference_data = yaml.load(reference_data)
def _test_input(self, plugin):
parser = find_plugin('pybtex.database.input', plugin).Parser(encoding='UTF-8')
writer = find_plugin('pybtex.database.output', plugin).Writer(encoding='UTF-8')
stream = BytesIO()
writer_stream = TextIOWrapper(stream, 'UTF-8') if writer.unicode_io else stream
parser_stream = TextIOWrapper(stream, 'UTF-8') if parser.unicode_io else stream
writer.write_stream(self.reference_data, writer_stream)
writer_stream.flush()
stream.seek(0)
parser.parse_stream(parser_stream)
loaded_data = parser.data
self.assertEqual(loaded_data, self.reference_data)
def test_bibtex_input(self):
self._test_input('bibtex')
def test_bibyaml_input(self):
self._test_input('bibyaml')
def test_bibtexml_input(self):
# BibTeXML does not support TeX preambles AFAIK
self.reference_data._preamble = []
self._test_input('bibtexml')
|
mit
|
Python
|
37d01f6088b1cf5673f66f4532dd51c73a0156f1
|
Fix grammar in login error message
|
sehmaschine/django-rest-framework,arpheno/django-rest-framework,ajaali/django-rest-framework,adambain-vokal/django-rest-framework,buptlsl/django-rest-framework,aericson/django-rest-framework,YBJAY00000/django-rest-framework,VishvajitP/django-rest-framework,werthen/django-rest-framework,xiaotangyuan/django-rest-framework,thedrow/django-rest-framework-1,maryokhin/django-rest-framework,James1345/django-rest-framework,hnakamur/django-rest-framework,andriy-s/django-rest-framework,abdulhaq-e/django-rest-framework,tomchristie/django-rest-framework,HireAnEsquire/django-rest-framework,kgeorgy/django-rest-framework,cyberj/django-rest-framework,johnraz/django-rest-framework,rhblind/django-rest-framework,canassa/django-rest-framework,nhorelik/django-rest-framework,jness/django-rest-framework,potpath/django-rest-framework,krinart/django-rest-framework,justanr/django-rest-framework,vstoykov/django-rest-framework,leeahoward/django-rest-framework,canassa/django-rest-framework,qsorix/django-rest-framework,paolopaolopaolo/django-rest-framework,canassa/django-rest-framework,sheppard/django-rest-framework,pombredanne/django-rest-framework,ambivalentno/django-rest-framework,linovia/django-rest-framework,HireAnEsquire/django-rest-framework,agconti/django-rest-framework,cyberj/django-rest-framework,ticosax/django-rest-framework,gregmuellegger/django-rest-framework,ticosax/django-rest-framework,rafaelcaricio/django-rest-framework,sbellem/django-rest-framework,xiaotangyuan/django-rest-framework,kezabelle/django-rest-framework,gregmuellegger/django-rest-framework,wzbozon/django-rest-framework,xiaotangyuan/django-rest-framework,yiyocx/django-rest-framework,yiyocx/django-rest-framework,jerryhebert/django-rest-framework,wedaly/django-rest-framework,rafaelang/django-rest-framework,jpulec/django-rest-framework,callorico/django-rest-framework,buptlsl/django-rest-framework,wangpanjun/django-rest-framework,jpadilla/django-rest-framework,hnakamur/django-rest-framework,delinhabit/django-rest-framework,jerryhebert/django-rest-framework,ossanna16/django-rest-framework,jerryhebert/django-rest-framework,werthen/django-rest-framework,jness/django-rest-framework,elim/django-rest-framework,lubomir/django-rest-framework,uploadcare/django-rest-framework,agconti/django-rest-framework,jpulec/django-rest-framework,tigeraniya/django-rest-framework,waytai/django-rest-framework,akalipetis/django-rest-framework,sheppard/django-rest-framework,atombrella/django-rest-framework,d0ugal/django-rest-framework,pombredanne/django-rest-framework,kgeorgy/django-rest-framework,antonyc/django-rest-framework,wangpanjun/django-rest-framework,nryoung/django-rest-framework,alacritythief/django-rest-framework,tigeraniya/django-rest-framework,adambain-vokal/django-rest-framework,cyberj/django-rest-framework,rafaelcaricio/django-rest-framework,hnakamur/django-rest-framework,VishvajitP/django-rest-framework,linovia/django-rest-framework,jpadilla/django-rest-framework,rhblind/django-rest-framework,maryokhin/django-rest-framework,ticosax/django-rest-framework,wwj718/django-rest-framework,potpath/django-rest-framework,tcroiset/django-rest-framework,tomchristie/django-rest-framework,bluedazzle/django-rest-framework,andriy-s/django-rest-framework,uruz/django-rest-framework,raphaelmerx/django-rest-framework,kennydude/django-rest-framework,mgaitan/django-rest-framework,raphaelmerx/django-rest-framework,YBJAY00000/django-rest-framework,damycra/django-rest-framework,iheitlager/django-rest-framework,tcroiset/django-rest-framework,HireAnEsquire/django-rest-framework,simudream/django-rest-framework,werthen/django-rest-framework,kgeorgy/django-rest-framework,justanr/django-rest-framework,iheitlager/django-rest-framework,ebsaral/django-rest-framework,edx/django-rest-framework,kennydude/django-rest-framework,James1345/django-rest-framework,jpulec/django-rest-framework,edx/django-rest-framework,callorico/django-rest-framework,rafaelang/django-rest-framework,hunter007/django-rest-framework,rafaelcaricio/django-rest-framework,hnarayanan/django-rest-framework,akalipetis/django-rest-framework,sehmaschine/django-rest-framework,hnarayanan/django-rest-framework,edx/django-rest-framework,pombredanne/django-rest-framework,tcroiset/django-rest-framework,antonyc/django-rest-framework,zeldalink0515/django-rest-framework,AlexandreProenca/django-rest-framework,rubendura/django-rest-framework,hnarayanan/django-rest-framework,ezheidtmann/django-rest-framework,justanr/django-rest-framework,yiyocx/django-rest-framework,jpadilla/django-rest-framework,paolopaolopaolo/django-rest-framework,kennydude/django-rest-framework,ossanna16/django-rest-framework,nhorelik/django-rest-framework,AlexandreProenca/django-rest-framework,atombrella/django-rest-framework,gregmuellegger/django-rest-framework,callorico/django-rest-framework,kylefox/django-rest-framework,simudream/django-rest-framework,dmwyatt/django-rest-framework,douwevandermeij/django-rest-framework,douwevandermeij/django-rest-framework,agconti/django-rest-framework,wangpanjun/django-rest-framework,nryoung/django-rest-framework,sbellem/django-rest-framework,wedaly/django-rest-framework,damycra/django-rest-framework,cheif/django-rest-framework,davesque/django-rest-framework,zeldalink0515/django-rest-framework,sehmaschine/django-rest-framework,alacritythief/django-rest-framework,uploadcare/django-rest-framework,cheif/django-rest-framework,mgaitan/django-rest-framework,vstoykov/django-rest-framework,elim/django-rest-framework,jness/django-rest-framework,kezabelle/django-rest-framework,uploadcare/django-rest-framework,fishky/django-rest-framework,aericson/django-rest-framework,paolopaolopaolo/django-rest-framework,damycra/django-rest-framework,MJafarMashhadi/django-rest-framework,ambivalentno/django-rest-framework,delinhabit/django-rest-framework,abdulhaq-e/django-rest-framework,krinart/django-rest-framework,rhblind/django-rest-framework,sheppard/django-rest-framework,ajaali/django-rest-framework,arpheno/django-rest-framework,akalipetis/django-rest-framework,tomchristie/django-rest-framework,wwj718/django-rest-framework,ossanna16/django-rest-framework,alacritythief/django-rest-framework,mgaitan/django-rest-framework,arpheno/django-rest-framework,cheif/django-rest-framework,bluedazzle/django-rest-framework,ambivalentno/django-rest-framework,uruz/django-rest-framework,rubendura/django-rest-framework,vstoykov/django-rest-framework,fishky/django-rest-framework,elim/django-rest-framework,jtiai/django-rest-framework,qsorix/django-rest-framework,lubomir/django-rest-framework,antonyc/django-rest-framework,waytai/django-rest-framework,rubendura/django-rest-framework,nhorelik/django-rest-framework,bluedazzle/django-rest-framework,atombrella/django-rest-framework,aericson/django-rest-framework,d0ugal/django-rest-framework,jtiai/django-rest-framework,rafaelang/django-rest-framework,d0ugal/django-rest-framework,wzbozon/django-rest-framework,ashishfinoit/django-rest-framework,dmwyatt/django-rest-framework,wedaly/django-rest-framework,kylefox/django-rest-framework,raphaelmerx/django-rest-framework,hunter007/django-rest-framework,nryoung/django-rest-framework,wzbozon/django-rest-framework,hunter007/django-rest-framework,ebsaral/django-rest-framework,zeldalink0515/django-rest-framework,abdulhaq-e/django-rest-framework,buptlsl/django-rest-framework,lubomir/django-rest-framework,douwevandermeij/django-rest-framework,tigeraniya/django-rest-framework,thedrow/django-rest-framework-1,johnraz/django-rest-framework,iheitlager/django-rest-framework,kylefox/django-rest-framework,dmwyatt/django-rest-framework,maryokhin/django-rest-framework,ashishfinoit/django-rest-framework,ebsaral/django-rest-framework,MJafarMashhadi/django-rest-framework,davesque/django-rest-framework,YBJAY00000/django-rest-framework,sbellem/django-rest-framework,andriy-s/django-rest-framework,kezabelle/django-rest-framework,krinart/django-rest-framework,leeahoward/django-rest-framework,brandoncazander/django-rest-framework,wwj718/django-rest-framework,uruz/django-rest-framework,brandoncazander/django-rest-framework,fishky/django-rest-framework,adambain-vokal/django-rest-framework,jtiai/django-rest-framework,James1345/django-rest-framework,ashishfinoit/django-rest-framework,thedrow/django-rest-framework-1,linovia/django-rest-framework,simudream/django-rest-framework,davesque/django-rest-framework,delinhabit/django-rest-framework,ezheidtmann/django-rest-framework,potpath/django-rest-framework,VishvajitP/django-rest-framework,leeahoward/django-rest-framework,AlexandreProenca/django-rest-framework,ajaali/django-rest-framework,waytai/django-rest-framework,johnraz/django-rest-framework,qsorix/django-rest-framework,ezheidtmann/django-rest-framework,brandoncazander/django-rest-framework,MJafarMashhadi/django-rest-framework
|
rest_framework/authtoken/serializers.py
|
rest_framework/authtoken/serializers.py
|
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class AuthTokenSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
if username and password:
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg)
attrs['user'] = user
return attrs
else:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg)
else:
msg = _('Must include "username" and "password"')
raise serializers.ValidationError(msg)
|
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class AuthTokenSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
if username and password:
user = authenticate(username=username, password=password)
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise serializers.ValidationError(msg)
attrs['user'] = user
return attrs
else:
msg = _('Unable to login with provided credentials.')
raise serializers.ValidationError(msg)
else:
msg = _('Must include "username" and "password"')
raise serializers.ValidationError(msg)
|
bsd-2-clause
|
Python
|
059090cd945f51ed0281a967e1ba9502d2dc0a40
|
Fix unittest
|
gpetretto/pymatgen,mbkumar/pymatgen,aykol/pymatgen,dongsenfo/pymatgen,matk86/pymatgen,xhqu1981/pymatgen,nisse3000/pymatgen,tschaume/pymatgen,davidwaroquiers/pymatgen,mbkumar/pymatgen,gVallverdu/pymatgen,ndardenne/pymatgen,johnson1228/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,richardtran415/pymatgen,montoyjh/pymatgen,mbkumar/pymatgen,Bismarrck/pymatgen,matk86/pymatgen,setten/pymatgen,fraricci/pymatgen,mbkumar/pymatgen,davidwaroquiers/pymatgen,setten/pymatgen,montoyjh/pymatgen,Bismarrck/pymatgen,tallakahath/pymatgen,vorwerkc/pymatgen,tschaume/pymatgen,Bismarrck/pymatgen,dongsenfo/pymatgen,czhengsci/pymatgen,fraricci/pymatgen,tschaume/pymatgen,davidwaroquiers/pymatgen,aykol/pymatgen,Bismarrck/pymatgen,czhengsci/pymatgen,gVallverdu/pymatgen,xhqu1981/pymatgen,montoyjh/pymatgen,tschaume/pymatgen,matk86/pymatgen,blondegeek/pymatgen,gpetretto/pymatgen,ndardenne/pymatgen,matk86/pymatgen,blondegeek/pymatgen,tschaume/pymatgen,tallakahath/pymatgen,montoyjh/pymatgen,vorwerkc/pymatgen,blondegeek/pymatgen,aykol/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,dongsenfo/pymatgen,richardtran415/pymatgen,gmatteo/pymatgen,Bismarrck/pymatgen,nisse3000/pymatgen,setten/pymatgen,richardtran415/pymatgen,gVallverdu/pymatgen,johnson1228/pymatgen,gpetretto/pymatgen,tallakahath/pymatgen,dongsenfo/pymatgen,vorwerkc/pymatgen,nisse3000/pymatgen,xhqu1981/pymatgen,czhengsci/pymatgen,johnson1228/pymatgen,ndardenne/pymatgen,nisse3000/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,gpetretto/pymatgen,gmatteo/pymatgen,czhengsci/pymatgen,fraricci/pymatgen,setten/pymatgen,johnson1228/pymatgen
|
pymatgen/apps/borg/tests/test_queen.py
|
pymatgen/apps/borg/tests/test_queen.py
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 18, 2012"
import unittest2 as unittest
import os
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
def test_get_data(self):
data = self.queen.get_data()
self.assertEqual(len(data), 7)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 18, 2012"
import unittest2 as unittest
import os
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
def test_get_data(self):
data = self.queen.get_data()
self.assertEqual(len(data), 6)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
mit
|
Python
|
098219137bdf30d7ac1c321f7973e14bfc82bda4
|
Add configuration for PyBullet Ant.
|
google-research/batch-ppo
|
agents/scripts/configs.py
|
agents/scripts/configs.py
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example configurations using the PPO algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-variable
import tensorflow as tf
from agents import algorithms
from agents.scripts import networks
def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
policy_layers = 200, 100
value_layers = 200, 100
init_mean_factor = 0.1
init_std = 0.35
# Optimization
update_every = 30
update_epochs = 25
optimizer = tf.train.AdamOptimizer
learning_rate = 1e-4
# Losses
discount = 0.995
kl_target = 1e-2
kl_cutoff_factor = 2
kl_cutoff_coef = 1000
kl_init_penalty = 1
return locals()
def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 2e6 # 2M
return locals()
def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v1'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals()
def cheetah():
"""Configuration for MuJoCo's half cheetah task."""
locals().update(default())
# Environment
env = 'HalfCheetah-v1'
max_length = 1000
steps = 1e7 # 10M
discount = 0.99
return locals()
def walker():
"""Configuration for MuJoCo's walker task."""
locals().update(default())
# Environment
env = 'Walker2d-v1'
max_length = 1000
steps = 1e7 # 10M
return locals()
def hopper():
"""Configuration for MuJoCo's hopper task."""
locals().update(default())
# Environment
env = 'Hopper-v1'
max_length = 1000
steps = 1e7 # 10M
update_every = 60
return locals()
def ant():
"""Configuration for MuJoCo's ant task."""
locals().update(default())
# Environment
env = 'Ant-v1'
max_length = 1000
steps = 2e7 # 20M
return locals()
def humanoid():
"""Configuration for MuJoCo's humanoid task."""
locals().update(default())
# Environment
env = 'Humanoid-v1'
max_length = 1000
steps = 5e7 # 50M
update_every = 60
return locals()
def bullet_ant():
locals().update(default())
# Environment
import pybullet_envs # noqa pylint: disable=unused-import
env = 'AntBulletEnv-v0'
max_length = 1000
steps = 3e7 # 30M
update_every = 60
return locals()
|
# Copyright 2017 The TensorFlow Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example configurations using the PPO algorithm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-variable
import tensorflow as tf
from agents import algorithms
from agents.scripts import networks
def default():
"""Default configuration for PPO."""
# General
algorithm = algorithms.PPO
num_agents = 30
eval_episodes = 30
use_gpu = False
# Network
network = networks.feed_forward_gaussian
weight_summaries = dict(
all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*')
policy_layers = 200, 100
value_layers = 200, 100
init_mean_factor = 0.1
init_std = 0.35
# Optimization
update_every = 30
update_epochs = 25
optimizer = tf.train.AdamOptimizer
learning_rate = 1e-4
# Losses
discount = 0.995
kl_target = 1e-2
kl_cutoff_factor = 2
kl_cutoff_coef = 1000
kl_init_penalty = 1
return locals()
def pendulum():
"""Configuration for the pendulum classic control task."""
locals().update(default())
# Environment
env = 'Pendulum-v0'
max_length = 200
steps = 2e6 # 2M
return locals()
def reacher():
"""Configuration for MuJoCo's reacher task."""
locals().update(default())
# Environment
env = 'Reacher-v1'
max_length = 1000
steps = 5e6 # 5M
discount = 0.985
update_every = 60
return locals()
def cheetah():
"""Configuration for MuJoCo's half cheetah task."""
locals().update(default())
# Environment
env = 'HalfCheetah-v1'
max_length = 1000
steps = 1e7 # 10M
discount = 0.99
return locals()
def walker():
"""Configuration for MuJoCo's walker task."""
locals().update(default())
# Environment
env = 'Walker2d-v1'
max_length = 1000
steps = 1e7 # 10M
return locals()
def hopper():
"""Configuration for MuJoCo's hopper task."""
locals().update(default())
# Environment
env = 'Hopper-v1'
max_length = 1000
steps = 1e7 # 10M
update_every = 60
return locals()
def ant():
"""Configuration for MuJoCo's ant task."""
locals().update(default())
# Environment
env = 'Ant-v1'
max_length = 1000
steps = 2e7 # 20M
return locals()
def humanoid():
"""Configuration for MuJoCo's humanoid task."""
locals().update(default())
# Environment
env = 'Humanoid-v1'
max_length = 1000
steps = 5e7 # 50M
update_every = 60
return locals()
|
apache-2.0
|
Python
|
215439b43c27271c95fc208bf683a19619c81b8d
|
Add 3D slic tests (gray not working yet)
|
almarklein/scikit-image,chriscrosscutler/scikit-image,michaelaye/scikit-image,emon10005/scikit-image,blink1073/scikit-image,almarklein/scikit-image,keflavich/scikit-image,GaZ3ll3/scikit-image,SamHames/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,juliusbierk/scikit-image,ofgulban/scikit-image,keflavich/scikit-image,michaelaye/scikit-image,chintak/scikit-image,Hiyorimi/scikit-image,SamHames/scikit-image,almarklein/scikit-image,Midafi/scikit-image,robintw/scikit-image,rjeli/scikit-image,oew1v07/scikit-image,dpshelio/scikit-image,juliusbierk/scikit-image,Britefury/scikit-image,bennlich/scikit-image,ClinicalGraphics/scikit-image,michaelpacer/scikit-image,Britefury/scikit-image,jwiggins/scikit-image,chriscrosscutler/scikit-image,ClinicalGraphics/scikit-image,youprofit/scikit-image,warmspringwinds/scikit-image,dpshelio/scikit-image,rjeli/scikit-image,michaelpacer/scikit-image,jwiggins/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,bennlich/scikit-image,bsipocz/scikit-image,blink1073/scikit-image,Midafi/scikit-image,ajaybhat/scikit-image,ofgulban/scikit-image,rjeli/scikit-image,warmspringwinds/scikit-image,newville/scikit-image,newville/scikit-image,paalge/scikit-image,SamHames/scikit-image,ajaybhat/scikit-image,almarklein/scikit-image,chintak/scikit-image,youprofit/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,chintak/scikit-image,Hiyorimi/scikit-image,robintw/scikit-image,paalge/scikit-image,bsipocz/scikit-image,oew1v07/scikit-image,vighneshbirodkar/scikit-image,WarrenWeckesser/scikits-image,emon10005/scikit-image,pratapvardhan/scikit-image,vighneshbirodkar/scikit-image,GaZ3ll3/scikit-image,vighneshbirodkar/scikit-image
|
skimage/segmentation/tests/test_slic.py
|
skimage/segmentation/tests/test_slic.py
|
import itertools as it
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from skimage.segmentation import slic
def test_color_2d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4)
# we expect 4 segments
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
def test_gray_2d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21))
img[:10, :10] = 0.33
img[10:, :10] = 0.67
img[10:, 10:] = 1.00
img += 0.0033 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4, ratio=20.0, multichannel=False)
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
def test_color_3d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 22, 3))
slices = []
for dim_size in img.shape[:-1]:
midpoint = dim_size // 2
slices.append((slice(None, midpoint), slice(midpoint, None)))
slices = list(it.product(*slices))
colors = list(it.product(*(([0, 1],) * 3)))
for s, c in zip(slices, colors):
img[s] = c
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=8)
assert_equal(len(np.unique(seg)), 8)
for s, c in zip(slices, range(8)):
assert_array_equal(seg[s], c)
def test_gray_3d():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 22))
slices = []
for dim_size in img.shape[:-1]:
midpoint = dim_size // 2
slices.append((slice(None, midpoint), slice(midpoint, None)))
slices = list(it.product(*slices))
shades = np.arange(0, 1.000001, 1.0/7)
for s, sh in zip(slices, shades):
img[s] = sh
img += 0.001 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=8, ratio=40.0, multichannel=False)
assert_equal(len(np.unique(seg)), 8)
for s, c in zip(slices, range(8)):
assert_array_equal(seg[s], c)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from skimage.segmentation import slic
def test_color():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4)
# we expect 4 segments
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
def test_gray():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21))
img[:10, :10] = 0.33
img[10:, :10] = 0.67
img[10:, 10:] = 1.00
img += 0.0033 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = slic(img, sigma=0, n_segments=4, ratio=20.0, multichannel=False)
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
|
bsd-3-clause
|
Python
|
52eb6a3d8188c5e8fbabbe4f4822d0c4ececf48b
|
Add option to see all available ldap attributes via ldapsearch command
|
Princeton-CDH/django-pucas,Princeton-CDH/django-pucas
|
pucas/management/commands/ldapsearch.py
|
pucas/management/commands/ldapsearch.py
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from pucas.ldap import LDAPSearch, LDAPSearchException
class Command(BaseCommand):
help = 'Look up one or more users in LDAP by netid'
def add_arguments(self, parser):
parser.add_argument('netid', nargs='+')
parser.add_argument('--all', '-a', action='store_true',
help='Retrieve all available LDAP attributes')
def handle(self, *args, **options):
ldap_search = LDAPSearch()
print(options['all'])
for netid in options['netid']:
print('\nLooking for %s...' % netid)
try:
info = ldap_search.find_user(netid, all_attributes=options['all'])
# if all attributes were requested, just print the returned
# ldap search object
if options['all']:
print(info)
# otherwise, display attributes configured in settings
else:
for attr in settings.PUCAS_LDAP['ATTRIBUTES']:
print('%-15s %s' % (attr, getattr(info, attr)))
except LDAPSearchException as err:
print(err)
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from pucas.ldap import LDAPSearch, LDAPSearchException
class Command(BaseCommand):
help = 'Look up one or more users in LDAP by netid'
def add_arguments(self, parser):
parser.add_argument('netid', nargs='+')
def handle(self, *args, **options):
ldap_search = LDAPSearch()
for netid in options['netid']:
print('\nLooking for %s...' % netid)
try:
info = ldap_search.find_user(netid)
# display attributes configured in settings
for attr in settings.PUCAS_LDAP['ATTRIBUTES']:
print('%-15s %s' % (attr, getattr(info, attr)))
except LDAPSearchException as err:
print(err)
|
apache-2.0
|
Python
|
ea3a390a88c63a566df567d245e22e505f17dcfe
|
Remove test_TextFile_bad_extension
|
jstutters/Plumbium
|
tests/test_artefacts.py
|
tests/test_artefacts.py
|
import pytest
from pirec import artefacts
def test_Artefact_basename():
"""basename() should strip the extension from an artefact filename."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img.basename == 'foo'
def test_Artefact_dirname():
"""dirname() should return the path components up to the filename."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.dirname == 'dir'
img2 = artefacts.Artefact('/dir1/dir2/foo.nii.gz', '.nii.gz', exists=False)
assert img2.dirname == '/dir1/dir2'
img3 = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img3.dirname == ''
def test_Artefact_basename_with_dir():
"""basename() should still work in a subdirectory."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.basename == 'dir/foo'
def test_Artefact_justname():
"""justname() should work like basename() with no directory components."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img.justname == 'foo'
def test_Artefact_justname_with_dir():
"""justname() should strip extension and directory components."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.justname == 'foo'
def test_Artefact_repr():
"""Make sure __repr__() looks correct."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert repr(img) == "Artefact('foo.nii.gz')"
def test_NiiGzImage_bad_extension():
"""__init__() should raise a ValueError if filename doesn't have the expected extension."""
with pytest.raises(ValueError):
img = artefacts.NiiGzImage('foo.nii.gx', exists=False)
def test_exists(tmpdir):
"""If the file is present and exists=True __init__ should work."""
f = tmpdir.join('foo.txt')
f.write('foo')
filename = str(f)
art = artefacts.Artefact(filename, '.txt')
def test_not_exists(tmpdir):
"""If the file is not present and exists=True __init__ should raise IOError."""
f = tmpdir.join('foo.txt')
filename = str(f)
with pytest.raises(IOError):
art = artefacts.Artefact(filename, '.txt')
def test_not_exists_ok(tmpdir):
"""If the file is not present and exists=False __init__ should work."""
filename = str(tmpdir.join('foo.txt'))
art = artefacts.Artefact(filename, '.txt', exists=False)
|
import pytest
from pirec import artefacts
def test_Artefact_basename():
"""basename() should strip the extension from an artefact filename."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img.basename == 'foo'
def test_Artefact_dirname():
"""dirname() should return the path components up to the filename."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.dirname == 'dir'
img2 = artefacts.Artefact('/dir1/dir2/foo.nii.gz', '.nii.gz', exists=False)
assert img2.dirname == '/dir1/dir2'
img3 = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img3.dirname == ''
def test_Artefact_basename_with_dir():
"""basename() should still work in a subdirectory."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.basename == 'dir/foo'
def test_Artefact_justname():
"""justname() should work like basename() with no directory components."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert img.justname == 'foo'
def test_Artefact_justname_with_dir():
"""justname() should strip extension and directory components."""
img = artefacts.Artefact('dir/foo.nii.gz', '.nii.gz', exists=False)
assert img.justname == 'foo'
def test_Artefact_repr():
"""Make sure __repr__() looks correct."""
img = artefacts.Artefact('foo.nii.gz', '.nii.gz', exists=False)
assert repr(img) == "Artefact('foo.nii.gz')"
def test_NiiGzImage_bad_extension():
"""__init__() should raise a ValueError if filename doesn't have the expected extension."""
with pytest.raises(ValueError):
img = artefacts.NiiGzImage('foo.nii.gx', exists=False)
def test_TextFile_bad_extension():
with pytest.raises(ValueError):
img = artefacts.NiiGzImage('foo.txx', exists=False)
def test_exists(tmpdir):
"""If the file is present and exists=True __init__ should work."""
f = tmpdir.join('foo.txt')
f.write('foo')
filename = str(f)
art = artefacts.Artefact(filename, '.txt')
def test_not_exists(tmpdir):
"""If the file is not present and exists=True __init__ should raise IOError."""
f = tmpdir.join('foo.txt')
filename = str(f)
with pytest.raises(IOError):
art = artefacts.Artefact(filename, '.txt')
def test_not_exists_ok(tmpdir):
"""If the file is not present and exists=False __init__ should work."""
filename = str(tmpdir.join('foo.txt'))
art = artefacts.Artefact(filename, '.txt', exists=False)
|
mit
|
Python
|
cd9a51ab2fe6b99c0665b8f499363a4d557b4a4d
|
Modify script which split your region in smaller sample
|
aguijarro/DataSciencePython
|
DataWrangling/CaseStudy/sample_file.py
|
DataWrangling/CaseStudy/sample_file.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET # Use cElementTree or lxml if too slow
import os
OSM_FILE = "san-francisco-bay_california.osm" # Replace this with your osm file
SAMPLE_FILE = "sample_sfb.osm"
k = 20 # Parameter: take every k-th top level element
def get_element(osm_file, tags=('node', 'way', 'relation')):
"""Yield element if it is the right type of tag
Reference:
http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
"""
context = iter(ET.iterparse(osm_file, events=('start', 'end')))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def main():
os.chdir('./data')
with open(SAMPLE_FILE, 'wb') as output:
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
output.write('<osm>\n ')
# Write every kth top level element
for i, element in enumerate(get_element(OSM_FILE)):
if i % k == 0:
output.write(ET.tostring(element, encoding='utf-8'))
output.write('</osm>')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET # Use cElementTree or lxml if too slow
import os
OSM_FILE = "san-francisco-bay_california.osm" # Replace this with your osm file
SAMPLE_FILE = "sample_sfb.osm"
k = 20 # Parameter: take every k-th top level element
def get_element(osm_file, tags=('node', 'way', 'relation')):
"""Yield element if it is the right type of tag
Reference:
http://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python
"""
context = iter(ET.iterparse(osm_file, events=('start', 'end')))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def main():
os.chdir('./data')
with open(SAMPLE_FILE, 'wb') as output:
output.write('<?xml version="1.0" encoding="UTF-8"?>\n')
output.write('<osm>\n ')
# Write every kth top level element
for i, element in enumerate(get_element(OSM_FILE)):
if i % k == 0:
output.write(ET.tostring(element, encoding='utf-8'))
output.write('</osm>')
|
mit
|
Python
|
cd79441f0c11fbc36f2f0b0098196373006718e4
|
Update binstar-push.py
|
INGEOTEC/b4msa
|
continuous-integration/binstar-push.py
|
continuous-integration/binstar-push.py
|
import os
import glob
# import subprocess
# import traceback
from binstar_client.scripts import cli
def get_token():
token = None
if os.environ.get('TRAVIS_BRANCH', None) == 'master' or os.environ.get('APPVEYOR_REPO_BRANCH', None) == 'master':
token = os.environ.get('BINSTAR_TOKEN', None)
return token
token = get_token()
if token is not None:
cmd = ['-t', token, 'upload', '--force', '-u', 'ingeotec']
cmd.extend(glob.glob('*.tar.bz2'))
# print(cmd)
cli.main(args=cmd)
# try:
# print('*', cmd, platform.system())
# subprocess.check_call(cmd)
# except subprocess.CalledProcessError:
# traceback.print_exc()
|
import os
import glob
# import subprocess
# import traceback
from binstar_client.scripts import cli
def get_token():
token = None
if os.environ.get('TRAVIS_BRANCH', None) == 'master' or os.environ.get('APPVEYOR_REPO_BRANCH', None) == 'master':
token = os.environ.get('BINSTAR_TOKEN', None)
return token
token = get_token()
if token is not None:
cmd = ['-t', token, 'upload', '--force', '-u', 'ingeotec']
cmd.extend(glob.glob('*.tar.bz2'))
print(cmd)
cli.main(args=cmd)
# try:
# print('*', cmd, platform.system())
# subprocess.check_call(cmd)
# except subprocess.CalledProcessError:
# traceback.print_exc()
|
apache-2.0
|
Python
|
b7777486ef36a20e148e3a3d81846f2b330e8622
|
Enable fullpath to be used in get_filenames
|
jason-neal/equanimous-octo-tribble,jason-neal/equanimous-octo-tribble,jason-neal/equanimous-octo-tribble
|
octotribble/Get_filenames.py
|
octotribble/Get_filenames.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""Get names of files that match regular expression.
Possibly better to use the glob module.
"""
import fnmatch
import os
from typing import List
# TODO: Try glob.glob
def get_filenames(path, regexp, regexp2=None, fullpath=False):
# type: (str, str, str) -> List[str]
"""Regexp must be a regular expression as a string.
eg '*.ms.*', '*_2.*', '*.ms.norm.fits*'
regexp2 is if want to match two expressions such as
'*_1*' and '*.ms.fits*'
"""
current_path= os.getcwd()
os.chdir(path)
filelist = []
for file in os.listdir('.'):
if regexp2 is not None: # Match two regular expressions
if fnmatch.fnmatch(file, regexp) and fnmatch.fnmatch(file, regexp2):
filelist.append(file)
else:
if fnmatch.fnmatch(file, regexp):
filelist.append(file)
filelist.sort()
os.chdir(current_path)
if fullpath:
filelist = [os.path.join(path, f) for f in filelist]
return filelist
def main():
# type: () -> None
"""Some test examples."""
path = "/home/jneal/data/BrownDwarfs-PedrosCode/HD30501-1/"
list1 = get_filenames(path, "*.ms.*")
for file in list1:
pass # print file
list2 = get_filenames(path, "*.norm.*", "*_1.*")
for file in list2:
pass # print file
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""Get names of files that match regular expression.
Possibly better to use the glob module.
"""
import fnmatch
import os
from typing import List
# TODO: Try glob.glob
def get_filenames(path, regexp, regexp2=None):
# type: (str, str, str) -> List[str]
"""Regexp must be a regular expression as a string.
eg '*.ms.*', '*_2.*', '*.ms.norm.fits*'
regexp2 is if want to match two expressions such as
'*_1*' and '*.ms.fits*'
"""
os.chdir(path)
filelist = []
for file in os.listdir('.'):
if regexp2 is not None: # Match two regular expressions
if fnmatch.fnmatch(file, regexp) and fnmatch.fnmatch(file, regexp2):
filelist.append(file)
else:
if fnmatch.fnmatch(file, regexp):
filelist.append(file)
filelist.sort()
return filelist
def main():
# type: () -> None
"""Some test examples."""
path = "/home/jneal/data/BrownDwarfs-PedrosCode/HD30501-1/"
list1 = get_filenames(path, "*.ms.*")
for file in list1:
pass # print file
list2 = get_filenames(path, "*.norm.*", "*_1.*")
for file in list2:
pass # print file
if __name__ == '__main__':
main()
|
mit
|
Python
|
0f0ef471e6bb9d873c890df2f538a00bbcae9637
|
print removed
|
akshaybabloo/gollahalli-com,akshaybabloo/gollahalli-com,akshaybabloo/gollahalli-com
|
viewer/templatetags/custom_tags.py
|
viewer/templatetags/custom_tags.py
|
from django import template
import markdown
import datetime
register = template.Library()
@register.filter()
def custom_date(value):
date = datetime.datetime.strptime(value, '%a, %d %b %Y %H:%M:%S %z')
return date.strftime('%d, %b %Y')
@register.filter()
def markdown_data(value):
return markdown.markdown(value)
@register.filter()
def url_replace(value):
value = value.replace("http://", "https://")
return value
|
from django import template
import markdown
import datetime
register = template.Library()
@register.filter()
def custom_date(value):
date = datetime.datetime.strptime(value, '%a, %d %b %Y %H:%M:%S %z')
return date.strftime('%d, %b %Y')
@register.filter()
def markdown_data(value):
return markdown.markdown(value)
@register.filter()
def url_replace(value):
value = value.replace("http://", "https://")
print(value)
return value
|
mit
|
Python
|
0bac442df6fec974aec8cf6d9e4147a2e75cf139
|
Switch from VERSION to $VERSION in model migration.
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
go/vumitools/conversation/migrators.py
|
go/vumitools/conversation/migrators.py
|
from vumi.persist.model import ModelMigrator
class ConversationMigrator(ModelMigrator):
def migrate_from_unversioned(self, mdata):
# Copy stuff that hasn't changed between versions
mdata.copy_values(
'conversation_type',
'start_timestamp', 'end_timestamp', 'created_at',
'delivery_class', 'delivery_tag_pool', 'delivery_tag')
mdata.copy_indexes('user_account_bin', 'groups_bin', 'batches_bin')
# Add stuff that's new in this version
mdata.set_value('$VERSION', 1)
mdata.set_value('name', mdata.old_data['subject'])
config = (mdata.old_data['metadata'] or {}).copy()
config['content'] = mdata.old_data['message']
mdata.set_value('config', config)
# We don't use the constants here because they may change or disappear
# underneath us in the future.
status = u'draft'
if mdata.new_index['batches_bin']:
# ^^^ This kind of hackery is part of the reason for the migration.
status = u'running'
if mdata.new_data['end_timestamp'] is not None:
status = u'finished'
mdata.set_value('status', status, index='status_bin')
# Add indexes for fields with new (or updated) indexes
mdata.add_index('end_timestamp_bin', mdata.new_data['end_timestamp'])
mdata.add_index(
'start_timestamp_bin', mdata.new_data['start_timestamp'])
mdata.add_index('created_at_bin', mdata.new_data['created_at'])
return mdata
|
from vumi.persist.model import ModelMigrator
class ConversationMigrator(ModelMigrator):
def migrate_from_unversioned(self, mdata):
# Copy stuff that hasn't changed between versions
mdata.copy_values(
'conversation_type',
'start_timestamp', 'end_timestamp', 'created_at',
'delivery_class', 'delivery_tag_pool', 'delivery_tag')
mdata.copy_indexes('user_account_bin', 'groups_bin', 'batches_bin')
# Add stuff that's new in this version
mdata.set_value('VERSION', 1)
mdata.set_value('name', mdata.old_data['subject'])
config = (mdata.old_data['metadata'] or {}).copy()
config['content'] = mdata.old_data['message']
mdata.set_value('config', config)
# We don't use the constants here because they may change or disappear
# underneath us in the future.
status = u'draft'
if mdata.new_index['batches_bin']:
# ^^^ This kind of hackery is part of the reason for the migration.
status = u'running'
if mdata.new_data['end_timestamp'] is not None:
status = u'finished'
mdata.set_value('status', status, index='status_bin')
# Add indexes for fields with new (or updated) indexes
mdata.add_index('end_timestamp_bin', mdata.new_data['end_timestamp'])
mdata.add_index(
'start_timestamp_bin', mdata.new_data['start_timestamp'])
mdata.add_index('created_at_bin', mdata.new_data['created_at'])
return mdata
|
bsd-3-clause
|
Python
|
fd7348951e46763dcd06cb673a6b01f6894efe4e
|
Set version as 0.8.8.1
|
Alignak-monitoring-contrib/alignak-webui,Alignak-monitoring-contrib/alignak-webui,Alignak-monitoring-contrib/alignak-webui
|
alignak_webui/__init__.py
|
alignak_webui/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=global-statement
# Copyright (c) 2015-2017:
# Frederic Mohier, [email protected]
#
"""
Alignak - Web User Interface
"""
# Package name
__pkg_name__ = u"alignak_webui"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"demo"
# Application manifest
__application__ = u"Alignak-WebUI"
VERSION = (0, 8, 8, 1)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__author__ = u"Frédéric Mohier"
__author_email__ = u"[email protected]"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-webui"
__doc_url__ = "http://alignak-web-ui.readthedocs.io/?badge=latest"
__description__ = u"Alignak - Web User Interface"
__releasenotes__ = u"""Alignak monitoring framework Web User Interface"""
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
__manifest__ = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'url': __git_url__,
'doc': __doc_url__
}
# Application configuration object
# Global variable to be used with accessor functions ...
# ... to make it package/module global!
# pylint: disable=invalid-name
app_config = None
def get_app_config():
# pylint: disable=global-variable-not-assigned
"""Return global application configuration"""
global app_config
return app_config
def set_app_config(config):
# pylint: disable=global-statement
"""Update global application configuration"""
global app_config
app_config = config
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=global-statement
# Copyright (c) 2015-2017:
# Frederic Mohier, [email protected]
#
"""
Alignak - Web User Interface
"""
# Package name
__pkg_name__ = u"alignak_webui"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"demo"
# Application manifest
__application__ = u"Alignak-WebUI"
VERSION = (0, 8, 8)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__author__ = u"Frédéric Mohier"
__author_email__ = u"[email protected]"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-webui"
__doc_url__ = "http://alignak-web-ui.readthedocs.io/?badge=latest"
__description__ = u"Alignak - Web User Interface"
__releasenotes__ = u"""Alignak monitoring framework Web User Interface"""
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Bottle',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
__manifest__ = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'url': __git_url__,
'doc': __doc_url__
}
# Application configuration object
# Global variable to be used with accessor functions ...
# ... to make it package/module global!
# pylint: disable=invalid-name
app_config = None
def get_app_config():
# pylint: disable=global-variable-not-assigned
"""Return global application configuration"""
global app_config
return app_config
def set_app_config(config):
# pylint: disable=global-statement
"""Update global application configuration"""
global app_config
app_config = config
|
agpl-3.0
|
Python
|
2ed5c92aabd349337579792b20613854370aa2ac
|
add log test
|
ll1l11/pymysql-test,ll1l11/pymysql-test
|
kfdda/views/general.py
|
kfdda/views/general.py
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask.views import MethodView
from ..core import db, logger
from ..exceptions import NoError, FormValidationError
from ..forms.login import LoginForm
from ..models.user import User
from ..tasks import add
bp = Blueprint('general', __name__)
class IndexView(MethodView):
def get(self):
users = User.query.all()
return ''.join(x.phone for x in users)
class AddView(MethodView):
def get(self):
phone = '13800138000'
email = '[email protected]'
password = '123456'
user = User(phone=phone, email=email, password=password)
db.session.add(user)
db.session.commit()
return 'ok'
class FormErrorView(MethodView):
def get(self):
form = LoginForm()
if not form.validate():
raise FormValidationError(form)
raise NoError()
class CeleryTestView(MethodView):
def get(self):
add.delay(1, 3)
return 'ok'
class ExceptionView(MethodView):
def get(self):
logger.error('this is error')
assert 1 == 2
return '1 == 2'
class LoggerView(MethodView):
def get(self):
logger.debug('log level debug')
logger.info('log level info')
logger.warn('log level warn')
logger.error('log level error')
return 'ok'
bp.add_url_rule('/', view_func=IndexView.as_view('index'))
bp.add_url_rule('/add', view_func=AddView.as_view('error'))
bp.add_url_rule('/form-error', view_func=FormErrorView.as_view('form_error'))
bp.add_url_rule('/celery-test',
view_func=CeleryTestView.as_view('celery_test'))
bp.add_url_rule('/exception', view_func=ExceptionView.as_view('excepiton'))
bp.add_url_rule('/log', view_func=LogView.as_view('log'))
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask.views import MethodView
from ..core import db, logger
from ..exceptions import NoError, FormValidationError
from ..forms.login import LoginForm
from ..models.user import User
from ..tasks import add
bp = Blueprint('general', __name__)
class IndexView(MethodView):
def get(self):
users = User.query.all()
return ''.join(x.phone for x in users)
class AddView(MethodView):
def get(self):
phone = '13800138000'
email = '[email protected]'
password = '123456'
user = User(phone=phone, email=email, password=password)
db.session.add(user)
db.session.commit()
return 'ok'
class FormErrorView(MethodView):
def get(self):
form = LoginForm()
if not form.validate():
raise FormValidationError(form)
raise NoError()
class CeleryTestView(MethodView):
def get(self):
add.delay(1, 3)
return 'ok'
class ExceptionView(MethodView):
def get(self):
logger.error('this is error')
assert 1 == 2
return '1 == 2'
bp.add_url_rule('/', view_func=IndexView.as_view('index'))
bp.add_url_rule('/add', view_func=AddView.as_view('error'))
bp.add_url_rule('/form-error', view_func=FormErrorView.as_view('form_error'))
bp.add_url_rule('/celery-test',
view_func=CeleryTestView.as_view('celery_test'))
bp.add_url_rule('/exception', view_func=ExceptionView.as_view('excepiton'))
|
mit
|
Python
|
83a62e80d1b7551f0ccebf4bc95bba27c6bf94bc
|
Add compound nouns tests
|
pwdyson/inflect.py,jazzband/inflect
|
tests/test_compounds.py
|
tests/test_compounds.py
|
import inflect
p = inflect.engine()
def test_compound_1():
assert p.singular_noun("hello-out-there") == "hello-out-there"
def test_compound_2():
assert p.singular_noun("hello out there") == "hello out there"
def test_compound_3():
assert p.singular_noun("continue-to-operate") == "continue-to-operate"
def test_compound_4():
assert p.singular_noun("case of diapers") == "case of diapers"
def test_unit_handling_degree():
test_cases = {
"degree celsius": "degrees celsius",
# 'degree Celsius': 'degrees Celsius',
"degree fahrenheit": "degrees fahrenheit",
"degree rankine": "degrees rankine",
"degree fahrenheit second": "degree fahrenheit seconds",
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_handling_fractional():
test_cases = {
"pound per square inch": "pounds per square inch",
"metre per second": "metres per second",
"kilometre per hour": "kilometres per hour",
"cubic metre per second": "cubic metres per second",
"dollar a year": "dollars a year",
# Correct pluralization of denominator
"foot per square second": "feet per square second",
"mother-in-law per lifetime": "mothers-in-law per lifetime",
"pound-force per square inch": "pounds-force per square inch",
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_handling_combined():
test_cases = {
# Heat transfer coefficient unit
"watt per square meter degree celsius": "watts per square meter degree celsius",
"degree celsius per hour": "degrees celsius per hour",
"degree fahrenheit hour square foot per btuit inch": (
"degree fahrenheit hour square feet per btuit inch"
),
# 'degree Celsius per hour': 'degrees Celsius per hour',
# 'degree Fahrenheit hour square foot per BtuIT inch':
# 'degree Fahrenheit hour square feet per BtuIT inch'
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_open_compound_nouns():
test_cases = {
"high school": "high schools",
"master genie": "master genies",
"MASTER genie": "MASTER genies",
"Blood brother": "Blood brothers",
"prima donna": "prima donnas",
"prima DONNA": "prima DONNAS"
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_open_compound_nouns_classical():
p.classical(all=True)
test_cases = {
"master genie": "master genii",
"MASTER genie": "MASTER genii",
"Blood brother": "Blood brethren",
"prima donna": "prime donne",
"prima DONNA": "prime DONNE"
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
p.classical(all=False)
|
import inflect
p = inflect.engine()
def test_compound_1():
assert p.singular_noun("hello-out-there") == "hello-out-there"
def test_compound_2():
assert p.singular_noun("hello out there") == "hello out there"
def test_compound_3():
assert p.singular_noun("continue-to-operate") == "continue-to-operate"
def test_compound_4():
assert p.singular_noun("case of diapers") == "case of diapers"
def test_unit_handling_degree():
test_cases = {
"degree celsius": "degrees celsius",
# 'degree Celsius': 'degrees Celsius',
"degree fahrenheit": "degrees fahrenheit",
"degree rankine": "degrees rankine",
"degree fahrenheit second": "degree fahrenheit seconds",
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_handling_fractional():
test_cases = {
"pound per square inch": "pounds per square inch",
"metre per second": "metres per second",
"kilometre per hour": "kilometres per hour",
"cubic metre per second": "cubic metres per second",
"dollar a year": "dollars a year",
# Correct pluralization of denominator
"foot per square second": "feet per square second",
"mother-in-law per lifetime": "mothers-in-law per lifetime",
"pound-force per square inch": "pounds-force per square inch",
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
def test_unit_handling_combined():
test_cases = {
# Heat transfer coefficient unit
"watt per square meter degree celsius": "watts per square meter degree celsius",
"degree celsius per hour": "degrees celsius per hour",
"degree fahrenheit hour square foot per btuit inch": (
"degree fahrenheit hour square feet per btuit inch"
),
# 'degree Celsius per hour': 'degrees Celsius per hour',
# 'degree Fahrenheit hour square foot per BtuIT inch':
# 'degree Fahrenheit hour square feet per BtuIT inch'
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
|
mit
|
Python
|
828e75919bd71912baf75a64010efcfcd93d07f1
|
Update library magic to be recursive
|
baubie/SpikeDB,baubie/SpikeDB,baubie/SpikeDB,baubie/SpikeDB
|
library_magic.py
|
library_magic.py
|
import sys
import subprocess
import shutil
copied = []
def update_libraries(executable):
# Find all the dylib files and recursively add dependencies
print "\nChecking dependencies of " + executable
otool_cmd = ["otool", "-L",executable]
execfolder = executable.rsplit("/",1)[0]
otool_out = subprocess.check_output(otool_cmd).split("\n\t")
execname = executable.rsplit("/",1)[1]
for l in otool_out:
s = l.split(".dylib")
if len(s) > 1:
lib = s[0]+".dylib"
libname = lib.rsplit("/",1)[1]
if libname not in copied:
print "Requires: " + lib
new_lib = execfolder+"/"+libname
if (lib != new_lib):
shutil.copyfile(lib, new_lib)
copied.append(libname)
install_name_tool = ["install_name_tool", "-change", lib, "./"+libname, executable]
print "Installing "+lib
subprocess.call(install_name_tool)
new_library = execfolder+"/"+libname
print "Calling on " + new_library
update_libraries(new_library)
# Update libraries on the default executable
update_libraries(sys.argv[1])
|
import sys
import subprocess
import shutil
executable = sys.argv[1]
execfolder = sys.argv[1].rsplit("/",1)[0]
libdir = execfolder+"/lib"
otool_cmd = ["otool", "-L",executable]
# Run otool
otool_out = subprocess.check_output(otool_cmd).split("\n\t")
# Find all the dylib files
for l in otool_out:
s = l.split(".dylib")
if len(s) > 1:
lib = s[0]+".dylib"
libname = lib.rsplit("/",1)[1]
shutil.copyfile(lib, libdir+"/"+libname)
install_name_tool = ["install_name_tool", "-change", lib, "@executable_path/lib/"+libname, executable]
subprocess.call(install_name_tool)
|
bsd-3-clause
|
Python
|
62cd48f8a0fc83261af5c4275a38102fc983d3ff
|
Increase failure tolerance for the Dashboard
|
IATI/IATI-Website-Tests
|
tests/test_dashboard.py
|
tests/test_dashboard.py
|
from datetime import datetime, timedelta
from dateutil import parser as date_parser
import pytest
import pytz
from web_test_base import *
class TestIATIDashboard(WebTestBase):
requests_to_load = {
'Dashboard Homepage': {
'url': 'http://dashboard.iatistandard.org/'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
assert "https://github.com/IATI/IATI-Dashboard/" in result
def test_recently_generated(self, loaded_request):
"""
Tests that the dashboard was generated in the past 7 days.
"""
max_delay = timedelta(days=7)
generation_time_xpath = '//*[@id="footer"]/div/p/em[1]'
data_time_xpath = '//*[@id="footer"]/div/p/em[2]'
generation_time_arr = utility.get_text_from_xpath(loaded_request, generation_time_xpath)
data_time_arr = utility.get_text_from_xpath(loaded_request, data_time_xpath)
generation_time = date_parser.parse(generation_time_arr[0])
data_time = date_parser.parse(data_time_arr[0])
now = datetime.now(pytz.utc)
assert len(generation_time_arr) == 1
assert len(data_time_arr) == 1
assert (now - max_delay) < generation_time
assert (now - max_delay) < data_time
|
from datetime import datetime, timedelta
from dateutil import parser as date_parser
import pytest
import pytz
from web_test_base import *
class TestIATIDashboard(WebTestBase):
requests_to_load = {
'Dashboard Homepage': {
'url': 'http://dashboard.iatistandard.org/'
}
}
def test_contains_links(self, loaded_request):
"""
Test that each page contains links to the defined URLs.
"""
result = utility.get_links_from_page(loaded_request)
assert "https://github.com/IATI/IATI-Dashboard/" in result
def test_recently_generated(self, loaded_request):
"""
Tests that the dashboard was generated in the past 2 days.
"""
max_delay = timedelta(days=2)
generation_time_xpath = '//*[@id="footer"]/div/p/em[1]'
data_time_xpath = '//*[@id="footer"]/div/p/em[2]'
generation_time_arr = utility.get_text_from_xpath(loaded_request, generation_time_xpath)
data_time_arr = utility.get_text_from_xpath(loaded_request, data_time_xpath)
generation_time = date_parser.parse(generation_time_arr[0])
data_time = date_parser.parse(data_time_arr[0])
now = datetime.now(pytz.utc)
assert len(generation_time_arr) == 1
assert len(data_time_arr) == 1
assert (now - max_delay) < generation_time
assert (now - max_delay) < data_time
|
mit
|
Python
|
c076fb75d40b85b593bd569eaf7f6e13ab95cdd8
|
Replace Pykka internals misuse with proxies
|
glogiotatidis/mopidy,SuperStarPL/mopidy,tkem/mopidy,rawdlite/mopidy,mokieyue/mopidy,jmarsik/mopidy,priestd09/mopidy,jcass77/mopidy,liamw9534/mopidy,swak/mopidy,ali/mopidy,diandiankan/mopidy,jmarsik/mopidy,dbrgn/mopidy,bencevans/mopidy,jmarsik/mopidy,pacificIT/mopidy,pacificIT/mopidy,kingosticks/mopidy,pacificIT/mopidy,vrs01/mopidy,ZenithDK/mopidy,vrs01/mopidy,diandiankan/mopidy,mokieyue/mopidy,jodal/mopidy,jodal/mopidy,bencevans/mopidy,bencevans/mopidy,priestd09/mopidy,glogiotatidis/mopidy,jmarsik/mopidy,quartz55/mopidy,vrs01/mopidy,bacontext/mopidy,mokieyue/mopidy,quartz55/mopidy,mopidy/mopidy,rawdlite/mopidy,adamcik/mopidy,swak/mopidy,quartz55/mopidy,ZenithDK/mopidy,liamw9534/mopidy,dbrgn/mopidy,dbrgn/mopidy,ali/mopidy,swak/mopidy,ZenithDK/mopidy,glogiotatidis/mopidy,jcass77/mopidy,ZenithDK/mopidy,adamcik/mopidy,swak/mopidy,woutervanwijk/mopidy,diandiankan/mopidy,rawdlite/mopidy,rawdlite/mopidy,bencevans/mopidy,quartz55/mopidy,dbrgn/mopidy,hkariti/mopidy,ali/mopidy,pacificIT/mopidy,ali/mopidy,hkariti/mopidy,adamcik/mopidy,SuperStarPL/mopidy,woutervanwijk/mopidy,mokieyue/mopidy,SuperStarPL/mopidy,mopidy/mopidy,abarisain/mopidy,hkariti/mopidy,priestd09/mopidy,SuperStarPL/mopidy,tkem/mopidy,abarisain/mopidy,bacontext/mopidy,vrs01/mopidy,hkariti/mopidy,tkem/mopidy,diandiankan/mopidy,kingosticks/mopidy,jodal/mopidy,glogiotatidis/mopidy,bacontext/mopidy,jcass77/mopidy,bacontext/mopidy,kingosticks/mopidy,mopidy/mopidy,tkem/mopidy
|
mopidy/frontends/mpd/actor.py
|
mopidy/frontends/mpd/actor.py
|
import logging
import sys
import pykka
from mopidy import settings
from mopidy.core import CoreListener
from mopidy.frontends.mpd import session
from mopidy.utils import encoding, network, process
logger = logging.getLogger('mopidy.frontends.mpd')
class MpdFrontend(pykka.ThreadingActor, CoreListener):
"""
The MPD frontend.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
"""
def __init__(self, core):
super(MpdFrontend, self).__init__()
hostname = network.format_hostname(settings.MPD_SERVER_HOSTNAME)
port = settings.MPD_SERVER_PORT
try:
network.Server(
hostname, port,
protocol=session.MpdSession, protocol_kwargs={'core': core},
max_connections=settings.MPD_SERVER_MAX_CONNECTIONS)
except IOError as error:
logger.error(
u'MPD server startup failed: %s',
encoding.locale_decode(error))
sys.exit(1)
logger.info(u'MPD server running at [%s]:%s', hostname, port)
def on_stop(self):
process.stop_actors_by_class(session.MpdSession)
def send_idle(self, subsystem):
listeners = pykka.ActorRegistry.get_by_class(session.MpdSession)
for listener in listeners:
getattr(listener.proxy(), 'on_idle')(subsystem)
def playback_state_changed(self, old_state, new_state):
self.send_idle('player')
def playlist_changed(self):
self.send_idle('playlist')
def options_changed(self):
self.send_idle('options')
def volume_changed(self):
self.send_idle('mixer')
|
import logging
import sys
import pykka
from mopidy import settings
from mopidy.core import CoreListener
from mopidy.frontends.mpd import session
from mopidy.utils import encoding, network, process
logger = logging.getLogger('mopidy.frontends.mpd')
class MpdFrontend(pykka.ThreadingActor, CoreListener):
"""
The MPD frontend.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
"""
def __init__(self, core):
super(MpdFrontend, self).__init__()
hostname = network.format_hostname(settings.MPD_SERVER_HOSTNAME)
port = settings.MPD_SERVER_PORT
try:
network.Server(
hostname, port,
protocol=session.MpdSession, protocol_kwargs={'core': core},
max_connections=settings.MPD_SERVER_MAX_CONNECTIONS)
except IOError as error:
logger.error(
u'MPD server startup failed: %s',
encoding.locale_decode(error))
sys.exit(1)
logger.info(u'MPD server running at [%s]:%s', hostname, port)
def on_stop(self):
process.stop_actors_by_class(session.MpdSession)
def send_idle(self, subsystem):
# FIXME this should be updated once pykka supports non-blocking calls
# on proxies or some similar solution
pykka.ActorRegistry.broadcast({
'command': 'pykka_call',
'attr_path': ('on_idle',),
'args': [subsystem],
'kwargs': {},
}, target_class=session.MpdSession)
def playback_state_changed(self, old_state, new_state):
self.send_idle('player')
def playlist_changed(self):
self.send_idle('playlist')
def options_changed(self):
self.send_idle('options')
def volume_changed(self):
self.send_idle('mixer')
|
apache-2.0
|
Python
|
c68833f3c464720b676080705d2df4f7e37c4392
|
fix template render() expect Context and not dict
|
nickburlett/feincms,feincms/feincms,pjdelport/feincms,feincms/feincms,nickburlett/feincms,michaelkuty/feincms,hgrimelid/feincms,matthiask/feincms2-content,mjl/feincms,joshuajonah/feincms,michaelkuty/feincms,nickburlett/feincms,mjl/feincms,hgrimelid/feincms,matthiask/django-content-editor,hgrimelid/feincms,matthiask/feincms2-content,mjl/feincms,matthiask/django-content-editor,michaelkuty/feincms,michaelkuty/feincms,joshuajonah/feincms,joshuajonah/feincms,matthiask/feincms2-content,matthiask/django-content-editor,joshuajonah/feincms,feincms/feincms,pjdelport/feincms,matthiask/django-content-editor,pjdelport/feincms,nickburlett/feincms
|
feincms/tests/applicationcontent_urls.py
|
feincms/tests/applicationcontent_urls.py
|
"""
This is a dummy module used to test the ApplicationContent
"""
from django import template
from django.conf.urls.defaults import *
from django.http import HttpResponse, HttpResponseRedirect
def module_root(request):
return 'module_root'
def args_test(request, kwarg1, kwarg2):
return HttpResponse(u'%s-%s' % (kwarg1, kwarg2))
def reverse_test(request):
t = template.Template('home:{% url ac_module_root %} args:{% url ac_args_test "xy" "zzy" %} base:{% url feincms.views.applicationcontent.handler "test" %}')
return t.render(template.Context())
def raises(request):
raise NotImplementedError, 'not really not implemented, but it is as good as anything for the test'
def fragment(request):
t = template.Template('{% load applicationcontent_tags %}{% fragment request "something" %}some things{% endfragment %}')
return t.render(template.Context({'request': request}))
def redirect(request):
return HttpResponseRedirect('../')
urlpatterns = patterns('',
url(r'^$', module_root, name='ac_module_root'),
url(r'^args_test/([^/]+)/([^/]+)/$', args_test, name='ac_args_test'),
url(r'^kwargs_test/(?P<kwarg2>[^/]+)/(?P<kwarg1>[^/]+)/$', args_test),
url(r'^reverse_test/$', reverse_test),
url(r'^raises/$', raises),
url(r'^fragment/$', fragment),
url(r'^redirect/$', redirect),
)
|
"""
This is a dummy module used to test the ApplicationContent
"""
from django import template
from django.conf.urls.defaults import *
from django.http import HttpResponse, HttpResponseRedirect
def module_root(request):
return 'module_root'
def args_test(request, kwarg1, kwarg2):
return HttpResponse(u'%s-%s' % (kwarg1, kwarg2))
def reverse_test(request):
t = template.Template('home:{% url ac_module_root %} args:{% url ac_args_test "xy" "zzy" %} base:{% url feincms.views.applicationcontent.handler "test" %}')
return t.render(template.Context())
def raises(request):
raise NotImplementedError, 'not really not implemented, but it is as good as anything for the test'
def fragment(request):
t = template.Template('{% load applicationcontent_tags %}{% fragment request "something" %}some things{% endfragment %}')
return t.render({'request': request})
def redirect(request):
return HttpResponseRedirect('../')
urlpatterns = patterns('',
url(r'^$', module_root, name='ac_module_root'),
url(r'^args_test/([^/]+)/([^/]+)/$', args_test, name='ac_args_test'),
url(r'^kwargs_test/(?P<kwarg2>[^/]+)/(?P<kwarg1>[^/]+)/$', args_test),
url(r'^reverse_test/$', reverse_test),
url(r'^raises/$', raises),
url(r'^fragment/$', fragment),
url(r'^redirect/$', redirect),
)
|
bsd-3-clause
|
Python
|
8b364a2b9eaff5e038d47a114746458de56b4ed5
|
fix apitype casing for underscore classes
|
telamonian/saga-python,mehdisadeghi/saga-python,luis-rr/saga-python,luis-rr/saga-python,telamonian/saga-python,mehdisadeghi/saga-python,luis-rr/saga-python
|
saga/base.py
|
saga/base.py
|
import string
import saga.utils.logger
import saga.engine.engine
class SimpleBase (object) :
""" This is a very simple API base class which just initializes
the self._logger and self._engine members, but does not perform any further
initialization, nor any adaptor binding. This base is used for API classes
which are not backed by a (single) adaptor (session, task, etc).
"""
def __init__ (self) :
self._apitype = self._get_apitype ()
self._engine = saga.engine.engine.Engine ()
self._logger = saga.utils.logger.getLogger (self._apitype)
print self._engine
self._logger.debug ("[saga.Base] %s.__init__()" % self._apitype)
def get_session (self) :
"""
Returns the session which is managing the object instance. For objects
which do not accept a session handle on construction, this call returns
None.
The object's session is also available via the `session` property.
"""
return self._adaptor.get_session ()
session = property (get_session)
def _get_apitype (self) :
apitype = self.__module__ + '.' + self.__class__.__name__
name_parts = apitype.split ('.')
l = len(name_parts)
if len > 2 :
t1 = name_parts [l-1]
t2 = name_parts [l-2]
t2 = t2.replace ('_', ' ')
t2 = string.capwords (t2)
t2 = t2.replace (' ', '')
if t1 == t2 :
del name_parts[l-2]
apitype = string.join (name_parts, '.')
return apitype
class Base (SimpleBase) :
def __init__ (self, schema, adaptor, adaptor_state, *args, **kwargs) :
print "schema2: %s" % schema
SimpleBase.__init__ (self)
self._adaptor = adaptor
print "schema3: %s" % schema
self._adaptor = self._engine.bind_adaptor (self, self._apitype, schema, adaptor)
self._init_task = self._adaptor.init_instance (adaptor_state, *args, **kwargs)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
import string
import saga.utils.logger
import saga.engine.engine
class SimpleBase (object) :
""" This is a very simple API base class which just initializes
the self._logger and self._engine members, but does not perform any further
initialization, nor any adaptor binding. This base is used for API classes
which are not backed by a (single) adaptor (session, task, etc).
"""
def __init__ (self) :
self._apitype = self._get_apitype ()
self._engine = saga.engine.engine.Engine ()
self._logger = saga.utils.logger.getLogger (self._apitype)
print self._engine
self._logger.debug ("[saga.Base] %s.__init__()" % self._apitype)
def get_session (self) :
"""
Returns the session which is managing the object instance. For objects
which do not accept a session handle on construction, this call returns
None.
The object's session is also available via the `session` property.
"""
return self._adaptor.get_session ()
session = property (get_session)
def _get_apitype (self) :
apitype = self.__module__ + '.' + self.__class__.__name__
name_parts = apitype.split ('.')
l = len(name_parts)
if len > 2 :
t1 = name_parts [l-1]
t2 = name_parts [l-2]
if t1 == string.capwords (t2) :
del name_parts[l-2]
apitype = string.join (name_parts, '.')
return apitype
class Base (SimpleBase) :
def __init__ (self, schema, adaptor, adaptor_state, *args, **kwargs) :
print "schema2: %s" % schema
SimpleBase.__init__ (self)
self._adaptor = adaptor
print "schema3: %s" % schema
self._adaptor = self._engine.bind_adaptor (self, self._apitype, schema, adaptor)
self._init_task = self._adaptor.init_instance (adaptor_state, *args, **kwargs)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
mit
|
Python
|
bfc248e92afdc02a8b3089d7a2a5194b7f55c2fe
|
add in the split out setup_ufw_rules to the api and setupnode
|
bretth/woven
|
woven/api.py
|
woven/api.py
|
#!/usr/bin/env python
"""
The full public woven api
"""
from fabric.state import env
from woven.decorators import run_once_per_node, run_once_per_version
from woven.deployment import deploy_files, mkdirs
from woven.deployment import upload_template
from woven.environment import check_settings, deployment_root, set_env, patch_project, get_project_version, server_state, set_server_state
from woven.environment import set_version_state, version_state, get_packages
from woven.project import deploy_static, deploy_media, deploy_project, deploy_db, deploy_templates
from woven.linux import add_user, install_package, port_is_open, skip_disable_root
from woven.linux import install_packages, post_install_package, post_setupnode, uninstall_packages
from woven.linux import upgrade_packages, setup_ufw, setup_ufw_rules, disable_root
from woven.linux import add_repositories, restrict_ssh, upload_ssh_key
from woven.linux import change_ssh_port, set_timezone, lsb_release, upload_etc
from woven.virtualenv import activate, active_version
from woven.virtualenv import mkvirtualenv, rmvirtualenv, pip_install_requirements
from woven.virtualenv import post_deploy
from woven.webservers import deploy_wsgi, deploy_webconf, start_webserver, stop_webserver, reload_webservers
from woven.webservers import webserver_list
def deploy(overwrite=False):
"""
deploy a versioned project on the host
"""
check_settings()
if overwrite:
rmvirtualenv()
deploy_funcs = [deploy_project,deploy_templates, deploy_static, deploy_media, deploy_webconf, deploy_wsgi]
if not patch_project() or overwrite:
deploy_funcs = [deploy_db,mkvirtualenv,pip_install_requirements] + deploy_funcs
for func in deploy_funcs: func()
def setupnode(overwrite=False):
"""
Install a baseline host. Can be run multiple times
"""
if not port_is_open():
if not skip_disable_root():
disable_root()
port_changed = change_ssh_port()
#avoid trying to take shortcuts if setupnode did not finish
#on previous execution
if server_state('setupnode-incomplete'):
env.overwrite=True
else: set_server_state('setupnode-incomplete')
upload_ssh_key()
restrict_ssh()
add_repositories()
upgrade_packages()
setup_ufw()
uninstall_packages()
install_packages()
upload_etc()
post_install_package()
setup_ufw_rules()
set_timezone()
set_server_state('setupnode-incomplete',delete=True)
#stop and start webservers - and reload nginx
for s in webserver_list():
stop_webserver(s)
start_webserver(s)
|
#!/usr/bin/env python
"""
The full public woven api
"""
from fabric.state import env
from woven.decorators import run_once_per_node, run_once_per_version
from woven.deployment import deploy_files, mkdirs
from woven.deployment import upload_template
from woven.environment import check_settings, deployment_root, set_env, patch_project, get_project_version, server_state, set_server_state
from woven.environment import set_version_state, version_state, get_packages
from woven.project import deploy_static, deploy_media, deploy_project, deploy_db, deploy_templates
from woven.linux import add_user, install_package, port_is_open, skip_disable_root
from woven.linux import install_packages, post_install_package, post_setupnode, uninstall_packages
from woven.linux import upgrade_packages, setup_ufw, disable_root
from woven.linux import add_repositories, restrict_ssh, upload_ssh_key
from woven.linux import change_ssh_port, set_timezone, lsb_release, upload_etc
from woven.virtualenv import activate, active_version
from woven.virtualenv import mkvirtualenv, rmvirtualenv, pip_install_requirements
from woven.virtualenv import post_deploy
from woven.webservers import deploy_wsgi, deploy_webconf, start_webserver, stop_webserver, reload_webservers
from woven.webservers import webserver_list
def deploy(overwrite=False):
"""
deploy a versioned project on the host
"""
check_settings()
if overwrite:
rmvirtualenv()
deploy_funcs = [deploy_project,deploy_templates, deploy_static, deploy_media, deploy_webconf, deploy_wsgi]
if not patch_project() or overwrite:
deploy_funcs = [deploy_db,mkvirtualenv,pip_install_requirements] + deploy_funcs
for func in deploy_funcs: func()
def setupnode(overwrite=False):
"""
Install a baseline host. Can be run multiple times
"""
if not port_is_open():
if not skip_disable_root():
disable_root()
port_changed = change_ssh_port()
#avoid trying to take shortcuts if setupnode did not finish
#on previous execution
if server_state('setupnode-incomplete'): env.overwrite=True
else: set_server_state('setupnode-incomplete')
upload_ssh_key()
restrict_ssh()
add_repositories()
upgrade_packages()
setup_ufw()
uninstall_packages()
install_packages()
upload_etc()
post_install_package()
set_timezone()
set_server_state('setupnode-incomplete',delete=True)
#stop and start webservers - and reload nginx
for s in webserver_list():
stop_webserver(s)
start_webserver(s)
|
bsd-3-clause
|
Python
|
dbb153b4681a1fa73f93855e86d3657e4fff9bfb
|
remove self
|
SiLab-Bonn/pyBAR
|
tests/test_interface.py
|
tests/test_interface.py
|
''' Script to check the readout system interface (software + FPGA firmware).
A global register test is performed with pyBAR and a simulation of the FPGA + FE-I4.
'''
import unittest
import shutil
import mock
from Queue import Empty
import subprocess
import time
import os
from pybar.run_manager import RunManager
from pybar.fei4.register_utils import FEI4RegisterUtils
from pybar.scans.test_register import RegisterTest
def configure_pixel(same_mask_for_all_dc=False):
return
def send_commands(commands, repeat=1, wait_for_finish=True, concatenate=True, byte_padding=False, clear_memory=False, use_timeout=True):
# no timeout for simulation
use_timeout = False
# append some zeros since simulation is more slow
commands = commands.extend(self.register.get_commands("zeros", length=20))
return FEI4RegisterUtils.send_commands(self, commands=commands, repeat=repeat, wait_for_finish=wait_for_finish, concatenate=concatenate, byte_padding=byte_padding, clear_memory=clear_memory, use_timeout=use_timeout)
class TestInterface(unittest.TestCase):
@classmethod
def setUpClass(cls):
subprocess.call('unzip -o test_interface/sim_build.zip', shell=True)
subprocess.Popen(['make', '-f', '../firmware/mio/cosim/Makefile', 'sim_only'])
time.sleep(10) # some time for simulator to start
@classmethod
def tearDownClass(cls):
shutil.rmtree('test_interface/module_test', ignore_errors=True)
shutil.rmtree('./sim_build', ignore_errors=True)
try:
os.remove('./results.xml')
except OSError:
pass
# keep waveform file
# shutil.rmtree('./tb.vcd', ignore_errors=True)
@mock.patch('pybar.fei4.register_utils.FEI4RegisterUtils.configure_pixel', side_effect=lambda *args, **kwargs: configure_pixel(*args, **kwargs))
@mock.patch('pybar.fei4.register_utils.FEI4RegisterUtils.send_commands', side_effect=lambda *args, **kwargs: send_commands(*args, **kwargs))
def test_global_register(self, mock_send_commands, mock_configure_pixel):
run_manager = RunManager('test_interface/configuration.yaml')
run_manager.run_run(RegisterTest, run_conf={'test_pixel': False})
error_msg = 'Global register test failed. '
try:
error_msg += str(run_manager.current_run.err_queue.get(timeout=1)[1])
except Empty:
pass
ok = (run_manager.current_run._run_status == 'FINISHED')
self.assertTrue(ok, msg=error_msg)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestInterface)
unittest.TextTestRunner(verbosity=2).run(suite)
|
''' Script to check the readout system interface (software + FPGA firmware).
A global register test is performed with pyBAR and a simulation of the FPGA + FE-I4.
'''
import unittest
import shutil
import mock
from Queue import Empty
import subprocess
import time
import os
from pybar.run_manager import RunManager
from pybar.fei4.register_utils import FEI4RegisterUtils
from pybar.scans.test_register import RegisterTest
def configure_pixel(self, same_mask_for_all_dc=False):
return
def send_commands(self, commands, repeat=1, wait_for_finish=True, concatenate=True, byte_padding=False, clear_memory=False, use_timeout=True):
# no timeout for simulation
use_timeout = False
# append some zeros since simulation is more slow
commands = commands.extend(self.register.get_commands("zeros", length=20))
return FEI4RegisterUtils.send_commands(self, commands=commands, repeat=repeat, wait_for_finish=wait_for_finish, concatenate=concatenate, byte_padding=byte_padding, clear_memory=clear_memory, use_timeout=use_timeout)
class TestInterface(unittest.TestCase):
@classmethod
def setUpClass(cls):
subprocess.call('unzip -o test_interface/sim_build.zip', shell=True)
subprocess.Popen(['make', '-f', '../firmware/mio/cosim/Makefile', 'sim_only'])
time.sleep(10) # some time for simulator to start
@classmethod
def tearDownClass(cls):
shutil.rmtree('test_interface/module_test', ignore_errors=True)
shutil.rmtree('./sim_build', ignore_errors=True)
try:
os.remove('./results.xml')
except OSError:
pass
# keep waveform file
# shutil.rmtree('./tb.vcd', ignore_errors=True)
@mock.patch('pybar.fei4.register_utils.FEI4RegisterUtils.configure_pixel', side_effect=lambda *args, **kwargs: configure_pixel(*args, **kwargs))
@mock.patch('pybar.fei4.register_utils.FEI4RegisterUtils.send_commands', side_effect=lambda *args, **kwargs: send_commands(*args, **kwargs))
def test_global_register(self, mock_send_commands, mock_configure_pixel):
run_manager = RunManager('test_interface/configuration.yaml')
run_manager.run_run(RegisterTest, run_conf={'test_pixel': False})
error_msg = 'Global register test failed. '
try:
error_msg += str(run_manager.current_run.err_queue.get(timeout=1)[1])
except Empty:
pass
ok = (run_manager.current_run._run_status == 'FINISHED')
self.assertTrue(ok, msg=error_msg)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestInterface)
unittest.TextTestRunner(verbosity=2).run(suite)
|
bsd-3-clause
|
Python
|
2c67dd081895d00ffb33e29d8750b3f80121dfe5
|
Change import
|
suchow/judicious,suchow/judicious,suchow/judicious
|
tests/test_judicious.py
|
tests/test_judicious.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `judicious` package."""
import pytest
import judicious
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `judicious` package."""
import pytest
from judicious import judicious
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
|
mit
|
Python
|
9a278ac9ea0c124cfd108f276bc5d74da6c5c50c
|
Update notebook test
|
GPflow/GPflow
|
tests/test_notebooks.py
|
tests/test_notebooks.py
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import sys
import traceback
import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
# blacklisted notebooks should have a unique basename
BLACKLISTED_NOTEBOOKS = []
def _nbpath():
this_dir = os.path.dirname(__file__)
return os.path.join(this_dir, '../notebooks2')
def get_notebooks():
"""
Returns all notebooks in `_nbpath` that are not blacklisted.
"""
def notebook_blacklisted(nb):
blacklisted_notebooks_basename = map(os.path.basename, BLACKLISTED_NOTEBOOKS)
return os.path.basename(nb) in blacklisted_notebooks_basename
# recursively traverse the notebook directory in search for ipython notebooks
all_notebooks = glob.iglob(os.path.join(_nbpath(), '**', '*.ipynb'), recursive=True)
notebooks_to_test = [nb for nb in all_notebooks if not notebook_blacklisted(nb)]
return notebooks_to_test
def _preproc():
pythonkernel = 'python' + str(sys.version_info[0])
return ExecutePreprocessor(timeout=300, kernel_name=pythonkernel, interrupt_on_timeout=True)
def _exec_notebook(notebook_filename):
with open(notebook_filename) as notebook_file:
nb = nbformat.read(notebook_file, as_version=nbformat.current_nbformat)
try:
meta_data = {'path': os.path.dirname(notebook_filename)}
_preproc().preprocess(nb, {'metadata': meta_data})
except CellExecutionError as cell_error:
traceback.print_exc(file=sys.stdout)
msg = 'Error executing the notebook {0}. See above for error.\nCell error: {1}'
pytest.fail(msg.format(notebook_filename, str(cell_error)))
def _exec_notebook_ts(notebook_filename):
_exec_notebook(notebook_filename)
@pytest.mark.notebooks
@pytest.mark.parametrize('notebook_file', get_notebooks())
def test_notebook(notebook_file):
_exec_notebook_ts(notebook_file)
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import sys
import time
import traceback
import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
from gpflow.test_util import session_context
# blacklisted notebooks should have a unique basename
BLACKLISTED_NOTEBOOKS = []
def _nbpath():
this_dir = os.path.dirname(__file__)
return os.path.join(this_dir, '../notebooks2')
def get_notebooks():
"""
Returns all notebooks in `_nbpath` that are not blacklisted.
"""
def notebook_blacklisted(nb):
blacklisted_notebooks_basename = map(os.path.basename, BLACKLISTED_NOTEBOOKS)
return os.path.basename(nb) in blacklisted_notebooks_basename
# recursively traverse the notebook directory in search for ipython notebooks
all_notebooks = glob.iglob(os.path.join(_nbpath(), '**', '*.ipynb'), recursive=True)
notebooks_to_test = [nb for nb in all_notebooks if not notebook_blacklisted(nb)]
return notebooks_to_test
def _preproc():
pythonkernel = 'python' + str(sys.version_info[0])
return ExecutePreprocessor(timeout=300, kernel_name=pythonkernel, interrupt_on_timeout=True)
def _exec_notebook(notebook_filename):
with open(notebook_filename) as notebook_file:
nb = nbformat.read(notebook_file, as_version=nbformat.current_nbformat)
try:
meta_data = {'path': os.path.dirname(notebook_filename)}
_preproc().preprocess(nb, {'metadata': meta_data})
except CellExecutionError as cell_error:
traceback.print_exc(file=sys.stdout)
msg = 'Error executing the notebook {0}. See above for error.\nCell error: {1}'
pytest.fail(msg.format(notebook_filename, str(cell_error)))
def _exec_notebook_ts(notebook_filename):
with session_context():
ts = time.time()
_exec_notebook(notebook_filename)
elapsed = time.time() - ts
print(notebook_filename, 'took {0} seconds.'.format(elapsed))
@pytest.mark.notebooks
@pytest.mark.parametrize('notebook_file', get_notebooks())
def test_notebook(notebook_file):
_exec_notebook_ts(notebook_file)
|
apache-2.0
|
Python
|
be412947c0fe30dd659298cd7641b5a701310f7d
|
add auth option
|
dmahugh/gitdata
|
gitdata.py
|
gitdata.py
|
"""GitHub query CLI.
cli() --------------> Handle command-line arguments.
"""
import os
import click
from click.testing import CliRunner
#------------------------------------------------------------------------------
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS, options_metavar='<options>')
@click.version_option(version='1.0', prog_name='Photerino')
def cli():
"""\b
---------------
| ? | ? | ? | /// gitdata
---------------
| ? | ? | ? | Retrieve data via GitHub REST API.
---------------
"""
click.echo('/// NOT IMPLEMENTED')
#------------------------------------------------------------------------------
@cli.command()
@click.option('-a', '--auth', default='', help='GitHub username', metavar='<str>')
def members(auth):
click.echo('/// members subcommand')
#------------------------------------------------------------------------------
@cli.command()
@click.option('-a', '--auth', default='', help='GitHub username', metavar='<str>')
def repos(auth):
click.echo('/// repos subcommand')
# code to execute when running standalone: -------------------------------------
if __name__ == '__main__':
print('/// need to implement tests here')
|
"""GitHub query CLI.
cli() --------------> Handle command-line arguments.
"""
import os
import click
from click.testing import CliRunner
#------------------------------------------------------------------------------
@click.group()
@click.version_option(version='1.0', prog_name='Photerino')
def cli():
"""\b
---------------
| ? | ? | ? | /// gitdata
---------------
| ? | ? | ? | Retrieve data via GitHub REST API.
---------------
"""
hexdump(filename=file, offset=offset, totbytes=nbytes)
#------------------------------------------------------------------------------
@cli.command()
def members():
click.echo('/// members subcommand')
#------------------------------------------------------------------------------
@cli.command()
def repos():
click.echo('/// repos subcommand')
# code to execute when running standalone: -------------------------------------
if __name__ == '__main__':
print('/// need to implement tests here')
|
mit
|
Python
|
049d734486627224b87cba72c575450515060c55
|
fix split
|
tlevine/vlermv
|
vlermv/_s3.py
|
vlermv/_s3.py
|
import tempfile
import boto
from ._abstract import AbstractVlermv
def split(x):
return tuple(x.split('/'))
class S3Vlermv(AbstractVlermv):
def __init__(self, bucketname, *args, connect_s3 = boto.connect_s3, **kwargs):
super(S3Vlermv, self).__init__(**kwargs)
self.bucket = connect_s3().create_bucket(bucketname)
def __repr__(self):
return 'S3Vlermv(%s)' % repr(self.bucket.name)
def __setitem__(self, index, obj):
keyname = self.filename(index)
key = self.bucket.new_key(keyname)
with tempfile.NamedTemporaryFile('w+' + self._b()) as tmp:
self.serializer.dump(obj, tmp.file)
tmp.file.close()
key.set_contents_from_filename(tmp.name, replace = True)
def __contains__(self, keyname):
return self.bucket.get_key(keyname) != None
def __getitem__(self, keyname):
key = self.bucket.get_key(keyname)
if key:
with tempfile.NamedTemporaryFile('w+' + self._b()) as tmp:
key.get_contents_to_filename(tmp.name)
tmp.file.seek(0)
value = self.serializer.load(tmp.file)
return value
else:
raise KeyError(keyname)
def keys(self, **kwargs):
for k in self.bucket.list(**kwargs):
yield self.transformer.from_path(split(k.name))
def __delitem__(self, index):
super(S3Vlermv, self).__delitem__(index)
raise NotImplementedError
def __len__(self):
return sum(1 for _ in self.keys())
|
import tempfile
import boto
from ._abstract import AbstractVlermv
def split(x):
return x.split('/')
class S3Vlermv(AbstractVlermv):
def __init__(self, bucketname, *args, connect_s3 = boto.connect_s3, **kwargs):
super(S3Vlermv, self).__init__(**kwargs)
self.bucket = connect_s3().create_bucket(bucketname)
def __repr__(self):
return 'S3Vlermv(%s)' % repr(self.bucket.name)
def __setitem__(self, index, obj):
keyname = self.filename(index)
key = self.bucket.new_key(keyname)
with tempfile.NamedTemporaryFile('w+' + self._b()) as tmp:
self.serializer.dump(obj, tmp.file)
tmp.file.close()
key.set_contents_from_filename(tmp.name, replace = True)
def __contains__(self, keyname):
return self.bucket.get_key(keyname) != None
def __getitem__(self, keyname):
key = self.bucket.get_key(keyname)
if key:
with tempfile.NamedTemporaryFile('w+' + self._b()) as tmp:
key.get_contents_to_filename(tmp.name)
tmp.file.seek(0)
value = self.serializer.load(tmp.file)
return value
else:
raise KeyError(keyname)
def keys(self, **kwargs):
for k in self.bucket.list(**kwargs):
yield self.transformer.from_path(split(k.name))
def __delitem__(self, index):
super(S3Vlermv, self).__delitem__(index)
raise NotImplementedError
def __len__(self):
return sum(1 for _ in self.keys())
|
agpl-3.0
|
Python
|
394473b6d8fb9898a19bb7e3bd2141a59572adec
|
更新 modules users/apps.py, 修正 PEP8 警告
|
yrchen/CommonRepo,yrchen/CommonRepo,yrchen/CommonRepo,yrchen/CommonRepo
|
commonrepo/users/apps.py
|
commonrepo/users/apps.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from actstream import registry
class UsersAppConfig(AppConfig):
name = 'commonrepo.users'
def ready(self):
registry.register(self.get_model('User'))
import commonrepo.users.signals
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from actstream import registry
class UsersAppConfig(AppConfig):
name = 'commonrepo.users'
def ready(self):
registry.register(self.get_model('User'))
import commonrepo.users.signals
|
apache-2.0
|
Python
|
dfa39db42cc5ce2c29da2ec0c388865ec7f41030
|
Add allow field to form
|
trbs/django-oauth-toolkit,DeskConnect/django-oauth-toolkit,jensadne/django-oauth-toolkit,vmalavolta/django-oauth-toolkit,JensTimmerman/django-oauth-toolkit,JensTimmerman/django-oauth-toolkit,Gr1N/django-oauth-toolkit,andrefsp/django-oauth-toolkit,jensadne/django-oauth-toolkit,drgarcia1986/django-oauth-toolkit,bleib1dj/django-oauth-toolkit,CloudNcodeInc/django-oauth-toolkit,lzen/django-oauth-toolkit,Gr1N/django-oauth-toolkit,DeskConnect/django-oauth-toolkit,drgarcia1986/django-oauth-toolkit,vmalavolta/django-oauth-toolkit,mjrulesamrat/django-oauth-toolkit,andrefsp/django-oauth-toolkit,StepicOrg/django-oauth-toolkit,bleib1dj/django-oauth-toolkit,trbs/django-oauth-toolkit,CloudNcodeInc/django-oauth-toolkit,mjrulesamrat/django-oauth-toolkit,cheif/django-oauth-toolkit,Knotis/django-oauth-toolkit,lzen/django-oauth-toolkit,svetlyak40wt/django-oauth-toolkit,natgeo/django-oauth-toolkit,cheif/django-oauth-toolkit,Knotis/django-oauth-toolkit,StepicOrg/django-oauth-toolkit,Natgeoed/django-oauth-toolkit
|
oauth2_provider/forms.py
|
oauth2_provider/forms.py
|
from django import forms
class AllowForm(forms.Form):
allow = forms.BooleanField(required=False)
redirect_uri = forms.URLField(widget=forms.HiddenInput())
scopes = forms.CharField(required=False, widget=forms.HiddenInput())
client_id = forms.CharField(widget=forms.HiddenInput())
state = forms.CharField(required=False, widget=forms.HiddenInput())
response_type = forms.CharField(widget=forms.HiddenInput())
|
from django import forms
class AllowForm(forms.Form):
redirect_uri = forms.URLField(widget=forms.HiddenInput())
scopes = forms.CharField(required=False, widget=forms.HiddenInput())
client_id = forms.CharField(widget=forms.HiddenInput())
state = forms.CharField(required=False, widget=forms.HiddenInput())
response_type = forms.CharField(widget=forms.HiddenInput())
|
bsd-2-clause
|
Python
|
b0ff934a2e20916f9e777874b795c9d0942a48e4
|
use app.cfg from its proper place
|
CottageLabs/OpenArticleGauge,CottageLabs/OpenArticleGauge,CottageLabs/OpenArticleGauge
|
openarticlegauge/core.py
|
openarticlegauge/core.py
|
import os, requests, json, redis
from flask import Flask
from openarticlegauge import config, licenses
from flask.ext.login import LoginManager, current_user
login_manager = LoginManager()
def create_app():
app = Flask(__name__)
configure_app(app)
if app.config['INITIALISE_INDEX']: initialise_index(app)
prep_redis(app)
setup_error_email(app)
login_manager.setup_app(app)
return app
def configure_app(app):
app.config.from_object(config)
# parent directory
here = os.path.dirname(os.path.abspath( __file__ ))
config_path = os.path.join(os.path.dirname(here), '..', 'app.cfg') # this file will be in the package dir, app.cfg is at the root of the repo
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
def prep_redis(app):
# wipe the redis temp cache (not the non-temp one)
client = redis.StrictRedis(host=app.config['REDIS_CACHE_HOST'], port=app.config['REDIS_CACHE_PORT'], db=app.config['REDIS_CACHE_DB'])
client.flushdb()
def initialise_index(app):
mappings = app.config["MAPPINGS"]
i = str(app.config['ELASTIC_SEARCH_HOST']).rstrip('/')
i += '/' + app.config['ELASTIC_SEARCH_DB']
for key, mapping in mappings.iteritems():
im = i + '/' + key + '/_mapping'
exists = requests.get(im)
if exists.status_code != 200:
ri = requests.post(i)
r = requests.put(im, json.dumps(mapping))
print key, r.status_code
# put the currently available licences into the licence index
for l in licenses.LICENSES:
r = requests.post(i + '/license/' + l, json.dumps(licenses.LICENSES[l]))
def setup_error_email(app):
ADMINS = app.config.get('ADMINS', '')
if not app.debug and ADMINS:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1',
'[email protected]',
ADMINS, 'error')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
app = create_app()
|
import os, requests, json, redis
from flask import Flask
from openarticlegauge import config, licenses
from flask.ext.login import LoginManager, current_user
login_manager = LoginManager()
def create_app():
app = Flask(__name__)
configure_app(app)
if app.config['INITIALISE_INDEX']: initialise_index(app)
prep_redis(app)
setup_error_email(app)
login_manager.setup_app(app)
return app
def configure_app(app):
app.config.from_object(config)
# parent directory
here = os.path.dirname(os.path.abspath( __file__ ))
config_path = os.path.join(os.path.dirname(here), 'app.cfg')
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
def prep_redis(app):
# wipe the redis temp cache (not the non-temp one)
client = redis.StrictRedis(host=app.config['REDIS_CACHE_HOST'], port=app.config['REDIS_CACHE_PORT'], db=app.config['REDIS_CACHE_DB'])
client.flushdb()
def initialise_index(app):
mappings = app.config["MAPPINGS"]
i = str(app.config['ELASTIC_SEARCH_HOST']).rstrip('/')
i += '/' + app.config['ELASTIC_SEARCH_DB']
for key, mapping in mappings.iteritems():
im = i + '/' + key + '/_mapping'
exists = requests.get(im)
if exists.status_code != 200:
ri = requests.post(i)
r = requests.put(im, json.dumps(mapping))
print key, r.status_code
# put the currently available licences into the licence index
for l in licenses.LICENSES:
r = requests.post(i + '/license/' + l, json.dumps(licenses.LICENSES[l]))
def setup_error_email(app):
ADMINS = app.config.get('ADMINS', '')
if not app.debug and ADMINS:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1',
'[email protected]',
ADMINS, 'error')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
app = create_app()
|
bsd-3-clause
|
Python
|
da1dbddaa47e087b19dbeb1b256b337a3e77ed73
|
Fix os not defined
|
amolenaar/gaphor,amolenaar/gaphor
|
gaphor/services/tests/test_properties.py
|
gaphor/services/tests/test_properties.py
|
import os
import tempfile
from unittest import TestCase
from gaphor.services.properties import FileBackend, Properties
class MockEventManager(list):
def handle(self, event):
self.append(event)
class TestProperties(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
backend = FileBackend(self.tmpdir)
self.events = MockEventManager()
self.properties = Properties(self.events, backend)
def shutDown(self):
self.properties.shutdown()
os.remove(os.path.join(self.tmpdir, FileBackend.RESOURCE_FILE))
os.rmdir(self.tmpdir)
def test_properties(self):
prop = self.properties
prop.set("test1", 2)
assert len(self.events) == 1, self.events
event = self.events[0]
assert "test1" == event.key
assert None is event.old_value
assert 2 == event.new_value
assert 2 == prop("test1")
prop.set("test1", 2)
assert len(self.events) == 1
prop.set("test1", "foo")
assert len(self.events) == 2
event = self.events[1]
assert "test1" == event.key
assert 2 == event.old_value
assert "foo" == event.new_value
assert "foo" == prop("test1")
assert 3 == prop("test2", 3)
assert 3 == prop("test2", 4)
|
import tempfile
from unittest import TestCase
from gaphor.services.properties import FileBackend, Properties
class MockEventManager(list):
def handle(self, event):
self.append(event)
class TestProperties(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
backend = FileBackend(self.tmpdir)
self.events = MockEventManager()
self.properties = Properties(self.events, backend)
def shutDown(self):
self.properties.shutdown()
os.remove(os.path.join(self.tmpdir, FileBackend.RESOURCE_FILE))
os.rmdir(self.tmpdir)
def test_properties(self):
prop = self.properties
prop.set("test1", 2)
assert len(self.events) == 1, self.events
event = self.events[0]
assert "test1" == event.key
assert None is event.old_value
assert 2 == event.new_value
assert 2 == prop("test1")
prop.set("test1", 2)
assert len(self.events) == 1
prop.set("test1", "foo")
assert len(self.events) == 2
event = self.events[1]
assert "test1" == event.key
assert 2 == event.old_value
assert "foo" == event.new_value
assert "foo" == prop("test1")
assert 3 == prop("test2", 3)
assert 3 == prop("test2", 4)
|
lgpl-2.1
|
Python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.