Dataset schema:

    commit          stringlengths   40 .. 40
    subject         stringlengths   4 .. 1.73k
    repos           stringlengths   5 .. 127k
    old_file        stringlengths   2 .. 751
    new_file        stringlengths   2 .. 751
    new_contents    stringlengths   1 .. 8.98k
    old_contents    stringlengths   0 .. 6.59k
    license         stringclasses   13 values
    lang            stringclasses   23 values
e67dc6d0cb46bcce0750246f99c6f99cd3e1ccbc
fix tempita.py for distutils build
matthew-brett/scipy,matthew-brett/scipy,matthew-brett/scipy,matthew-brett/scipy,matthew-brett/scipy
scipy/_build_utils/tempita.py
scipy/_build_utils/tempita.py
import sys
import os
import argparse

from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.


def process_tempita(fromfile, outfile=None):
    """Process tempita templated file and write out the result.

    The template file is expected to end in `.c.in` or `.pyx.in`:
    E.g. processing `template.c.in` generates `template.c`.
    """
    if outfile is None:
        # We're dealing with a distutils build here, write in-place
        outfile = os.path.splitext(fromfile)[0]

    from_filename = tempita.Template.from_filename
    template = from_filename(fromfile,
                             encoding=sys.getdefaultencoding())
    content = template.substitute()

    with open(outfile, 'w') as f:
        f.write(content)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str,
                        help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str,
                        help="Path to the output directory")
    parser.add_argument("-i", "--ignore", type=str,
                        help="An ignored input - may be useful to add a "
                             "dependency between custom targets")
    args = parser.parse_args()

    if not args.infile.endswith('.in'):
        raise ValueError(f"Unexpected extension: {args.infile}")

    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(outdir_abs,
                           os.path.splitext(os.path.split(args.infile)[1])[0])

    process_tempita(args.infile, outfile)


if __name__ == "__main__":
    main()
import sys
import os
import argparse

from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.


def process_tempita(fromfile, outfile=None):
    """Process tempita templated file and write out the result.

    The template file is expected to end in `.c.in` or `.pyx.in`:
    E.g. processing `template.c.in` generates `template.c`.
    """
    if outfile is None:
        # We're dealing with a distitutils build here, write in-place
        outfile = os.path.join(os.path.dirname(__file__),
                               os.path.splitext(os.path.split(fromfile)[1])[0])

    from_filename = tempita.Template.from_filename
    template = from_filename(fromfile,
                             encoding=sys.getdefaultencoding())
    content = template.substitute()

    with open(outfile, 'w') as f:
        f.write(content)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", type=str,
                        help="Path to the input file")
    parser.add_argument("-o", "--outdir", type=str,
                        help="Path to the output directory")
    parser.add_argument("-i", "--ignore", type=str,
                        help="An ignored input - may be useful to add a "
                             "dependency between custom targets")
    args = parser.parse_args()

    if not args.infile.endswith('.in'):
        raise ValueError(f"Unexpected extension: {args.infile}")

    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(outdir_abs,
                           os.path.splitext(os.path.split(args.infile)[1])[0])

    process_tempita(args.infile, outfile)


if __name__ == "__main__":
    main()
bsd-3-clause
Python
c5bf1d02c9db4adb00dad2d8bd37c1303f846837
Reduce import time (pythainlp/augment.word2vec.bpemb_wv)
PyThaiNLP/pythainlp
pythainlp/augment/word2vec/bpemb_wv.py
pythainlp/augment/word2vec/bpemb_wv.py
# -*- coding: utf-8 -*-
from pythainlp.augment.word2vec.core import Word2VecAug
from typing import List, Tuple


class BPEmbAug:
    """
    Thai Text Augment using word2vec from BPEmb

    BPEmb:
    `github.com/bheinzerling/bpemb <https://github.com/bheinzerling/bpemb>`_
    """

    def __init__(self, lang: str = "th", vs: int = 100000, dim: int = 300):
        from bpemb import BPEmb
        self.bpemb_temp = BPEmb(lang=lang, dim=dim, vs=vs)
        self.model = self.bpemb_temp.emb
        self.load_w2v()

    def tokenizer(self, text: str) -> List[str]:
        """
        :param str text: thai text
        :rtype: List[str]
        """
        return self.bpemb_temp.encode(text)

    def load_w2v(self):
        """
        Load BPEmb model
        """
        self.aug = Word2VecAug(
            self.model, tokenize=self.tokenizer, type="model"
        )

    def augment(
        self, sentence: str, n_sent: int = 1, p: float = 0.7
    ) -> List[Tuple[str]]:
        """
        Text Augment using word2vec from BPEmb

        :param str sentence: thai sentence
        :param int n_sent: number sentence
        :param float p: Probability of word

        :return: list of synonyms
        :rtype: List[str]

        :Example:
        ::

            from pythainlp.augment.word2vec.bpemb_wv import BPEmbAug

            aug = BPEmbAug()
            aug.augment("ผมเรียน", n_sent=2, p=0.5)
            # output: ['ผมสอน', 'ผมเข้าเรียน']
        """
        self.sentence = sentence.replace(" ", "▁")
        self.temp = self.aug.augment(self.sentence, n_sent, p=p)
        self.temp_new = []

        for i in self.temp:
            self.t = ""
            for j in i:
                self.t += j.replace('▁', '')
            self.temp_new.append(self.t)

        return self.temp_new
# -*- coding: utf-8 -*-
from pythainlp.augment.word2vec.core import Word2VecAug
from bpemb import BPEmb
from typing import List, Tuple


class BPEmbAug:
    """
    Thai Text Augment using word2vec from BPEmb

    BPEmb:
    `github.com/bheinzerling/bpemb <https://github.com/bheinzerling/bpemb>`_
    """

    def __init__(self, lang: str = "th", vs: int = 100000, dim: int = 300):
        self.bpemb_temp = BPEmb(lang=lang, dim=dim, vs=vs)
        self.model = self.bpemb_temp.emb
        self.load_w2v()

    def tokenizer(self, text: str) -> List[str]:
        """
        :param str text: thai text
        :rtype: List[str]
        """
        return self.bpemb_temp.encode(text)

    def load_w2v(self):
        """
        Load BPEmb model
        """
        self.aug = Word2VecAug(
            self.model, tokenize=self.tokenizer, type="model"
        )

    def augment(
        self, sentence: str, n_sent: int = 1, p: float = 0.7
    ) -> List[Tuple[str]]:
        """
        Text Augment using word2vec from BPEmb

        :param str sentence: thai sentence
        :param int n_sent: number sentence
        :param float p: Probability of word

        :return: list of synonyms
        :rtype: List[str]

        :Example:
        ::

            from pythainlp.augment.word2vec.bpemb_wv import BPEmbAug

            aug = BPEmbAug()
            aug.augment("ผมเรียน", n_sent=2, p=0.5)
            # output: ['ผมสอน', 'ผมเข้าเรียน']
        """
        self.sentence = sentence.replace(" ", "▁")
        self.temp = self.aug.augment(self.sentence, n_sent, p=p)
        self.temp_new = []

        for i in self.temp:
            self.t = ""
            for j in i:
                self.t += j.replace('▁', '')
            self.temp_new.append(self.t)

        return self.temp_new
apache-2.0
Python
c489157661b5738f37c5213f64736d7e88a0700b
Update application_filter.py
Tlinne2/Basic-Python-Projects-
Data-Science-Tools/application_filter.py
Data-Science-Tools/application_filter.py
'''
Decision Tree V.1
Simple tool for a company to screen applications in the hiring process
Coded By: Tyler Linne
Date: 4/27/16
'''

#Import required packages
import numpy as np
import pandas as pd
import pydot
from sklearn import tree
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.ensemble import RandomForestClassifier

#Import a csv file holding the data the tree needs
input_file = "c:/UserName/user/documents/hire_data.csv"
df = pd.read_csv(input_file, header = 0)

#Set text values in csv to numerical values
d = {'Y': 1, 'N': 0}
df['Hired'] = df['Hired'].map(d)
df['Currently Employed?'] = df['Currently Employed?'].map(d)
df['Private School'] = df['Private School'].map(d)
df['State School'] = df['State School'].map(d)
df['Top-tier school'] = df['Top-tier school'].map(d)
df['Internship'] = df['Internship'].map(d)
d = {
    'AS': 0,
    'BS': 1,
    'MS': 2,
    'PhD': 3}
df['Level of Education'] = df['Level of Education'].map(d)
df.head()

#Filter out headers that hold various canidate data
features = list(df.columns[:6])

#Create the tree using the desired header and the seperated headers list
y = df["Hired"]
X = df[features]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X,y)

#Displaying the tree in a readable format
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data, feature_names=features)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())

#The code below this point would be ran in a seperate file, with this one imported to it. It is placed in this file for display

#Test the validity of the tree using the "Random Forest" methood with a factor of 25 seperate tests
clf = RandomForestClassifier(n_estimators=25)
clf = clf.fit(X, y)

#Predict employment of an employed 10-year veteran with an AS Degree
print clf.predict([[10, 1, 4, 0, 0, 0, 0]])
#...and an unemployed 10-year veteran with an AS Degree
print clf.predict([[10, 0, 4, 0, 0, 0, 0]])
'''
Decision Tree V.1
Simple tool for a company to screen applications in the hiring process
Coded By: Tyler Linne
Date: 4/27/16
'''

#Import required packages
import numpy as np
import pandas as pd
import pydot
from sklearn import tree
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.ensemble import RandomForestClassifier

#Import a csv file holding the data the tree needs
input_file = "c:/UserName/user/documents/hire_data.csv"
df = pd.read_csv(input_file, header = 0)

#Set text values in csv to numerical values
d = {'Y': 1, 'N': 0}
df['Hired'] = df['Hired'].map(d)
df['Currently Employed?'] = df['Currently Employed?'].map(d)
df['Private School'] = df['Private School'].map(d)
df['State School'] = df['State School'].map(d)
df['Top-tier school'] = df['Top-tier school'].map(d)
df['Internship'] = df['Internship'].map(d)
d = {
    'AS': 0,
    'BS': 1,
    'MS': 2,
    'PhD': 3}
df['Level of Education'] = df['Level of Education'].map(d)
df.head()

#Filter out headers that hold various canidate data
features = list(df.columns[:7])

#Create the tree using the desired header and the seperated headers list
y = df["Hired"]
X = df[features]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X,y)

#Displaying the tree in a readable format
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data, feature_names=features)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())

#The code below this point would be ran in a seperate file, with this one imported to it. It is placed in this file for display

#Test the validity of the tree using the "Random Forest" methood with a factor of 25 seperate tests
clf = RandomForestClassifier(n_estimators=25)
clf = clf.fit(X, y)

#Predict employment of an employed 10-year veteran with an AS Degree
print clf.predict([[10, 1, 4, 0, 0, 0, 0]])
#...and an unemployed 10-year veteran with an AS Degree
print clf.predict([[10, 0, 4, 0, 0, 0, 0]])
mit
Python
bfa1a36d1c731f730cabbc914d52c51d5ddf61dc
Add token auth and session
digitalhealthhack/evas,digitalhealthhack/evas
api/evas_api/settings.py
api/evas_api/settings.py
import os

import dj_database_url

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

SECRET_KEY = '-@y4mtyhs=i-uc7q9b==ur(zhs%gu628wtku%q8d8$-vp1qbza'

DEBUG = bool(os.environ.get('DEBUG', False))
TEMPLATE_DEBUG = bool(os.environ.get('TEMPLATE_DEBUG', False))

DATABASES = {'default': dj_database_url.config(default='postgres://localhost/evas_api')}

AUTH_USER_MODEL = 'users.User'
ALLOWED_HOSTS = []
STATIC_URL = '/static/'
ROOT_URLCONF = 'evas_api.urls'
WSGI_APPLICATION = 'evas_api.wsgi.application'

INSTALLED_APPS = (
    # local
    'users',

    # third party apps
    'rest_framework',
    'south',
    'user_management.api',

    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    )
}

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
import os

import dj_database_url

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

SECRET_KEY = '-@y4mtyhs=i-uc7q9b==ur(zhs%gu628wtku%q8d8$-vp1qbza'

DEBUG = bool(os.environ.get('DEBUG', False))
TEMPLATE_DEBUG = bool(os.environ.get('TEMPLATE_DEBUG', False))

DATABASES = {'default': dj_database_url.config(default='postgres://localhost/evas_api')}

AUTH_USER_MODEL = 'users.User'
ALLOWED_HOSTS = []
STATIC_URL = '/static/'
ROOT_URLCONF = 'evas_api.urls'
WSGI_APPLICATION = 'evas_api.wsgi.application'

INSTALLED_APPS = (
    # local
    'users',

    # third party apps
    'rest_framework',
    'south',
    'user_management.api',

    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
mit
Python
130ce365a0d7c709004da5dedf0e0e35ff6ad3d0
Fix reference error and add script for packing pypi package (#1172)
intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL,intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL
python/dllib/src/bigdl/utils/engine.py
python/dllib/src/bigdl/utils/engine.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import os
import glob


def __prepare_spark_env():
    modules = sys.modules
    if "pyspark" not in modules or "py4j" not in modules:
        spark_home = os.environ.get('SPARK_HOME', None)
        if not spark_home:
            raise ValueError(
                """Could not find Spark. Pls make sure SPARK_HOME env is set:
                   export SPARK_HOME=path to your spark home directory""")
        print("Using %s" % spark_home)
        py4j = glob.glob(os.path.join(spark_home, 'python/lib', 'py4j-*.zip'))[0]
        pyspark = glob.glob(os.path.join(spark_home, 'python/lib', 'pyspark*.zip'))[0]
        sys.path.insert(0, py4j)
        sys.path.insert(0, pyspark)


def __prepare_bigdl_env():
    jar_dir = os.path.abspath(__file__ + "/../../")
    jar_paths = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
    conf_paths = glob.glob(os.path.join(jar_dir, "share/conf/*.conf"))

    def append_path(env_var_name, path):
        try:
            print("Adding %s to %s" % (jar_paths[0], env_var_name))
            os.environ[env_var_name] = path + ":" + os.environ[
                env_var_name]  # noqa
        except KeyError:
            os.environ[env_var_name] = path

    if conf_paths and conf_paths:
        assert len(conf_paths) == 1, "Expecting one jar: %s" % len(jar_paths)
        assert len(conf_paths) == 1, "Expecting one conf: %s" % len(conf_paths)
        append_path("SPARK_CLASSPATH", jar_paths[0])
        print("Prepending %s to sys.path" % conf_paths[0])
        sys.path.insert(0, conf_paths[0])


def prepare_env():
    __prepare_spark_env()
    __prepare_bigdl_env()
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import os
import glob


def __prepare_spark_env():
    modules = sys.modules
    if "pyspark" not in modules or "py4j" not in modules:
        spark_home = os.environ.get('SPARK_HOME', None)
        if not spark_home:
            raise ValueError(
                """Could not find Spark. Pls make sure SPARK_HOME env is set:
                   export SPARK_HOME=path to your spark home directory""")
        py4j = glob.glob(os.path.join(spark_home, 'python/lib', 'py4j-*.zip'))[0]
        pyspark = glob.glob(os.path.join(spark_home, 'python/lib', 'pyspark*.zip'))[0]
        sys.path.insert(0, py4j)
        sys.path.insert(0, pyspark)


def __prepare_bigdl_env():
    import bigdl.nn.layer
    jar_dir = os.path.abspath(bigdl.nn.layer.__file__ + "/../../")
    jar_paths = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
    conf_paths = glob.glob(os.path.join(jar_dir, "share/conf/*.conf"))

    def append_path(env_var_name, path):
        try:
            os.environ[env_var_name] = path + ":" + os.environ[
                env_var_name]  # noqa
        except KeyError:
            os.environ[env_var_name] = path

    if conf_paths and conf_paths:
        assert len(conf_paths) == 1, "Expecting one jar: %s" % len(jar_paths)
        assert len(conf_paths) == 1, "Expecting one conf: %s" % len(conf_paths)
        print("Adding %s to spark.driver.extraClassPath" % jar_paths[0])
        print("Adding %s to spark.executor.extraClassPath" % jar_paths[0])
        append_path("spark.driver.extraClassPath", jar_paths[0])
        append_path("spark.executor.extraClassPath", jar_paths[0])
        append_path("SPARK_CLASSPATH", jar_paths[0])
        print("Prepending %s to sys.path" % conf_paths[0])
        sys.path.insert(0, conf_paths[0])


def prepare_env():
    __prepare_spark_env()
    __prepare_bigdl_env()
apache-2.0
Python
b0a948e2036ff232c2026e742a42ab9e9c4fbc07
Drop unused IndexTypeVar
ecmwf/cfgrib
cfgrib/abc.py
cfgrib/abc.py
"""Abstract Base Classes for GRIB messsages and containers""" import abc import typing as T MessageIdTypeVar = T.TypeVar("MessageIdTypeVar") MessageTypeVar = T.TypeVar("MessageTypeVar", bound="Message") Message = T.Mapping[str, T.Any] MutableMessage = T.MutableMapping[str, T.Any] Container = T.Mapping[MessageIdTypeVar, MessageTypeVar] class Index(T.Mapping[str, T.List[T.Any]], T.Generic[MessageIdTypeVar, MessageTypeVar]): container: Container[MessageIdTypeVar, MessageTypeVar] index_keys: T.List[str] message_id_index: T.List[T.Tuple[T.Tuple[T.Any, ...], T.List[MessageIdTypeVar]]] filter_by_keys: T.Dict[str, T.Any] = {} @abc.abstractmethod def subindex( self, filter_by_keys: T.Mapping[str, T.Any] = {}, **query: T.Any ) -> "Index[MessageIdTypeVar, MessageTypeVar]": pass @abc.abstractmethod def getone(self, item: str) -> T.Any: pass @abc.abstractmethod def first(self) -> MessageTypeVar: pass
"""Abstract Base Classes for GRIB messsages and containers""" import abc import typing as T MessageIdTypeVar = T.TypeVar("MessageIdTypeVar") MessageTypeVar = T.TypeVar("MessageTypeVar", bound="Message") IndexTypeVar = T.TypeVar("IndexTypeVar", bound="Index") # type: ignore Message = T.Mapping[str, T.Any] MutableMessage = T.MutableMapping[str, T.Any] Container = T.Mapping[MessageIdTypeVar, MessageTypeVar] class Index(T.Mapping[str, T.List[T.Any]], T.Generic[MessageIdTypeVar, MessageTypeVar]): container: Container[MessageIdTypeVar, MessageTypeVar] index_keys: T.List[str] message_id_index: T.List[T.Tuple[T.Tuple[T.Any, ...], T.List[MessageIdTypeVar]]] filter_by_keys: T.Dict[str, T.Any] = {} @abc.abstractmethod def subindex( self, filter_by_keys: T.Mapping[str, T.Any] = {}, **query: T.Any ) -> "Index[MessageIdTypeVar, MessageTypeVar]": pass @abc.abstractmethod def getone(self, item: str) -> T.Any: pass @abc.abstractmethod def first(self) -> MessageTypeVar: pass
apache-2.0
Python
54f978c3ed960997b8a5fa9f0e443b17e702ec85
Update build_flags to point to proper libdir
scanner-research/scanner,scanner-research/scanner,scanner-research/scanner,scanner-research/scanner
python/scannerpy/stdlib/build_flags.py
python/scannerpy/stdlib/build_flags.py
import os.path
import sys

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))


def get_include():
    return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'include'))


def print_include():
    sys.stdout.write(get_include())


def get_lib():
    return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'lib'))


def print_lib():
    sys.stdout.write(get_lib())


def get_cmake():
    return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'cmake', 'Op.cmake'))


def print_cmake():
    sys.stdout.write(get_cmake())


def get_flags():
    return (
        '-std=c++11 -I{include} -L{libdir} -lscanner'.format(
            include=get_include(), libdir=get_lib()))


def print_flags():
    sys.stdout.write(get_flags())
import os.path
import sys

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))


def get_include():
    return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'include'))


def print_include():
    sys.stdout.write(get_include())


def get_lib():
    return os.path.abspath(os.path.join(SCRIPT_DIR, '..'))


def print_lib():
    sys.stdout.write(get_lib())


def get_cmake():
    return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'cmake', 'Op.cmake'))


def print_cmake():
    sys.stdout.write(get_cmake())


def get_flags():
    return (
        '-std=c++11 -I{include} -L{libdir} -lscanner'.format(
            include=get_include(), libdir=get_lib()))


def print_flags():
    sys.stdout.write(get_flags())
apache-2.0
Python
2027d3347a839ca30abbcb778b34b3f261b266ef
Update Naive Bayes classifier
rushter/MLAlgorithms
mla/naive_bayes.py
mla/naive_bayes.py
import numpy as np

from mla.base import BaseEstimator
from mla.neuralnet.activations import softmax


class NaiveBayesClassifier(BaseEstimator):
    """Gaussian Naive Bayes."""

    # Binary problem.
    n_classes = 2

    def fit(self, X, y=None):
        self._setup_input(X, y)
        # Check target labels
        assert list(np.unique(y)) == [0, 1]

        self._mean = np.zeros((self.n_classes, self.n_features), dtype=np.float64)
        self._var = np.zeros((self.n_classes, self.n_features), dtype=np.float64)
        self._priors = np.zeros(self.n_classes, dtype=np.float64)

        for c in range(self.n_classes):
            # Filter features by class
            X_c = X[y == c]

            # Calculate mean, variance, prior for each class
            self._mean[c, :] = X_c.mean(axis=0)
            self._var[c, :] = X_c.var(axis=0)
            self._priors[c] = X_c.shape[0] / float(X.shape[0])

    def _predict(self, X=None):
        # Apply _predict_proba for each row
        predictions = np.apply_along_axis(self._predict_proba, 1, X)

        # Normalize probabilities
        return softmax(predictions)

    def _predict_proba(self, x):
        """Predict log likelihood for given row."""
        output = []
        for y in range(self.n_classes):
            prior = np.log(self._priors[y])
            posterior = self._pdf(y, x).sum()
            prediction = prior + posterior
            output.append(prediction)
        return output

    def _pdf(self, n_class, x):
        """Calculate Gaussian PDF for each feature."""
        # Take specific values
        mean = self._mean[n_class]
        var = self._var[n_class]

        numerator = np.exp(-(x - mean) ** 2 / (2 * var))
        denominator = np.sqrt(2 * np.pi * var)
        return numerator / denominator
import numpy as np

from mla.base import BaseEstimator
from mla.neuralnet.activations import softmax


class NaiveBayesClassifier(BaseEstimator):
    """Gaussian Naive Bayes."""

    # Binary problem.
    n_classes = 2

    def fit(self, X, y=None):
        self._setup_input(X, y)
        # Check target labels
        assert list(np.unique(y)) == [0, 1]

        self._mean = np.zeros((self.n_classes, self.n_features), dtype=np.float64)
        self._var = np.zeros((self.n_classes, self.n_features), dtype=np.float64)
        self._priors = np.zeros(self.n_classes, dtype=np.float64)

        for c in range(self.n_classes):
            # Filter features by class
            X_c = X[y == c]

            # Calculate mean, variance, prior for each class
            self._mean[c, :] = X_c.mean(axis=0)
            self._var[c, :] = X_c.var(axis=0)
            self._priors[c] = X_c.shape[0] / float(X.shape[0])

    def _predict(self, X=None):
        # Apply _predict_proba for each row
        predictions = np.apply_along_axis(self._predict_proba, 1, X)

        # Normalize probabilities
        return softmax(predictions)

    def _predict_proba(self, x):
        """Predict log likelihood for given row."""
        output = []
        for y in range(self.n_classes):
            prior = np.log(self._priors[y])
            posterior = np.sum([self._pdf(y, d, x) for d in range(self.n_features)])
            prediction = prior + posterior
            output.append(prediction)
        return output

    def _pdf(self, n_class, n_feature, x):
        """Calculate probability density function for normal distribution."""
        # Take specific values
        mean = self._mean[n_class, n_feature]
        var = self._var[n_class, n_feature]
        x = x[n_feature]

        # Avoid division by zero
        if var < 1e-15:
            return 0.0

        numerator = np.exp(-(float(x) - float(mean)) ** 2 / (2 * var))
        denominator = np.sqrt(2 * np.pi * var)
        return numerator / denominator
mit
Python
fd5c84a1272ded54afaa6323fc10c637474096a3
Increment version to 0.2.5
tdpreece/ApprovalTests.Python,approvals/ApprovalTests.Python,approvals/ApprovalTests.Python,approvals/ApprovalTests.Python
approvaltests/version.py
approvaltests/version.py
version_number = "0.2.5"
version_number = "0.2.4"
apache-2.0
Python
fee8e6371c2884ba8cdc587ecae06093e5f6e4de
restructure python 2 to use a generator
lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler,lunixbochs/project-euler
01-50/02/2.py
01-50/02/2.py
def fib():
    a, b = 0, 1
    while True:
        yield b
        a, b = b, a + b

total = 0
i = 0
for i in fib():
    if i >= 4000000:
        break
    if i % 2 == 0:
        total += i

print total
a, b = 0, 1
total = 0
while b < 4000000:
    a, b = b, a + b
    if b % 2 == 0:
        total += b

print total
mit
Python
51fa99659a0b975175a0a33ace21021de7de4b45
Add load_json
Mause/statistical_atlas_of_au
saau/sections/image_provider.py
saau/sections/image_provider.py
import json
import inspect
from os.path import join, exists


def not_implemented():
    frame_info = inspect.currentframe().f_back

    msg = ''
    if 'self' in frame_info.f_locals:
        self = frame_info.f_locals['self']
        try:
            msg += self.__name__ + '#'  # for static/class methods
        except AttributeError:
            msg += self.__class__.__name__ + '.'
    msg += frame_info.f_code.co_name + '()'

    return NotImplementedError(msg)


class RequiresData:
    def __init__(self, data_dir):
        self.data_dir = data_dir

    def has_required_data(self):
        raise not_implemented()

    def obtain_data(self):
        raise not_implemented()

    def data_dir_exists(self, name):
        return exists(self.data_dir_join(name))

    def data_dir_join(self, name):
        return join(self.data_dir, name)

    def save_json(self, name, data):
        with open(self.data_dir_join(name), 'w') as fh:
            json.dump(data, fh, indent=4)
        return True

    def load_json(self, name):
        with open(self.data_dir_join(name)) as fh:
            return json.load(fh)


class ImageProvider(RequiresData):
    def build_image(self, output_filename):
        raise not_implemented()
import json
import inspect
from os.path import join, exists


def not_implemented():
    frame_info = inspect.currentframe().f_back

    msg = ''
    if 'self' in frame_info.f_locals:
        self = frame_info.f_locals['self']
        try:
            msg += self.__name__ + '#'  # for static/class methods
        except AttributeError:
            msg += self.__class__.__name__ + '.'
    msg += frame_info.f_code.co_name + '()'

    return NotImplementedError(msg)


class RequiresData:
    def __init__(self, data_dir):
        self.data_dir = data_dir

    def has_required_data(self):
        raise not_implemented()

    def obtain_data(self):
        raise not_implemented()

    def data_dir_exists(self, name):
        return exists(self.data_dir_join(name))

    def data_dir_join(self, name):
        return join(self.data_dir, name)

    def save_json(self, name, data):
        with open(self.data_dir_join(name), 'w') as fh:
            json.dump(data, fh, indent=4)
        return True


class ImageProvider(RequiresData):
    def build_image(self, output_filename):
        raise not_implemented()
mit
Python
ee50bea2810676ef655e7ea57565070f7e715741
Validate quotation is non-empty before allowing conversion to template.
OpusVL/odoo-sale-extras
sale_template_quotation/sale.py
sale_template_quotation/sale.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    Template Quotations
#    Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, exceptions


class TemplateQuotation(models.Model):
    _inherit = "sale.order"

    is_template = fields.Boolean(default=False)
    quotation_ref = fields.Char()

    @api.one
    def convert_to_template(self):
        if not (self.quotation_ref and self.quotation_ref.strip()):
            raise exceptions.Warning('Quotation Ref is blank.\nYou must set a Quotation Ref before you convert the quotation to a template')
        self.is_template = True

    @api.one
    def write(self, data):
        if self.is_template and ('state' not in data or data['state'] != 'cancelled'):
            raise exceptions.Warning('You cannot edit or change state of a quotation template')
        return super(TemplateQuotation, self).write(data)

    @api.one
    def copy(self, default=None):
        new_default = default or {'is_template': False}
        return super(TemplateQuotation, self).copy(default=new_default)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# -*- coding: utf-8 -*-
##############################################################################
#
#    Template Quotations
#    Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, exceptions


class TemplateQuotation(models.Model):
    _inherit = "sale.order"

    is_template = fields.Boolean(default=False)
    quotation_ref = fields.Char()

    @api.one
    def convert_to_template(self):
        self.is_template = True

    @api.one
    def write(self, data):
        if self.is_template and ('state' not in data or data['state'] != 'cancelled'):
            raise exceptions.Warning('You cannot edit or change state of a quotation template')
        return super(TemplateQuotation, self).write(data)

    @api.one
    def copy(self, default=None):
        new_default = default or {'is_template': False}
        return super(TemplateQuotation, self).copy(default=new_default)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
Python
a537f049bfb61488a056333d362d9983e8e9f88d
Fix minor issues in 2020.10.1 file
foxscotch/advent-of-code,foxscotch/advent-of-code
2020/10/p1.py
2020/10/p1.py
# Python 3.8.3


def get_input():
    with open('input.txt', 'r') as f:
        return set(int(i) for i in f.read().split())


def main():
    puzzle = get_input()

    last_joltage = 0
    one_jolt = 0
    three_jolts = 1

    while len(puzzle) != 0:
        if last_joltage + 1 in puzzle:
            last_joltage = last_joltage + 1
            one_jolt += 1
        elif last_joltage + 2 in puzzle:
            last_joltage = last_joltage + 2
        elif last_joltage + 3 in puzzle:
            last_joltage = last_joltage + 3
            three_jolts += 1
        puzzle.remove(last_joltage)

    print(one_jolt, three_jolts)
    return one_jolt * three_jolts


if __name__ == '__main__':
    import time
    start = time.perf_counter()
    print(main())
    print(time.perf_counter() - start)
# Python 3.8.3


def get_input():
    with open('input.txt', 'r') as f:
        return set(int(i) for i in f.read().split())


def main():
    puzzle = get_input()

    last_joltage = 0
    one_jolt = 0
    three_jolts = 1

    # this is bad lmao
    while len(puzzle) != 0:
        if last_joltage + 1 in puzzle:
            last_joltage = last_joltage + 1
            one_jolt += 1
        elif last_joltage + 2 in puzzle:
            last_joltage = last_joltage + 2
        elif last_joltage + 3 in puzzle:
            last_joltage = last_joltage + 3
            three_jolts += 1
        puzzle.remove(last_joltage)

    print(one_jolt, three_jolts)
    return one_jolt * three_jolts


if __name__ == '__main__':
    import time
    start = time.perf_counter()
    print(main())
    print(time.perf_counter() - start)
mit
Python
b51e51dc8b1ce66815980bf2e8424f6fe282af66
test type declare with resolve
Evgenus/metaconfig
metaconfig/tests/test_simple.py
metaconfig/tests/test_simple.py
from nose.tools import *

from io import StringIO
from textwrap import dedent

from metaconfig import Config


def test_declare_empty():
    source = """
    --- !declare {}
    ...
    """
    config = Config()
    with StringIO(dedent(source)) as stream:
        config.load(stream)


def test_declare_resolve():
    source = """
    --- !declare
    type:
        type: !resolve builtins.type
        load: !resolve metaconfig.construct_from_sequence
    ...
    --- !let
    integer: !type
    - 0
    string: !type
    - ""
    float: !type
    - 1.0
    "null": !type
    - ~
    ...
    """
    config = Config()
    with StringIO(dedent(source)) as stream:
        config.load(stream)

    eq_(int, config.get("integer"))
    eq_(str, config.get("string"))
    eq_(float, config.get("float"))
    eq_(type(None), config.get("null"))
from nose.tools import *

from io import StringIO
from textwrap import dedent

from metaconfig import Config


def test_declare_empty():
    source = """
    --- !declare {}
    ...
    """
    config = Config()
    with StringIO(dedent(source)) as stream:
        config.load(stream)
mit
Python
60b4fc88617f800208f00f24468db6798369fe2e
Add user-supplied arguments in log_handler
gkotton/vmware-nsx,gkotton/vmware-nsx
neutron/openstack/common/log_handler.py
neutron/openstack/common/log_handler.py
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo.config import cfg

from neutron.openstack.common import notifier


class PublishErrorsHandler(logging.Handler):
    def emit(self, record):
        if ('neutron.openstack.common.notifier.log_notifier' in
                cfg.CONF.notification_driver):
            return
        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            dict(error=record.getMessage()))
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo.config import cfg

from neutron.openstack.common import notifier


class PublishErrorsHandler(logging.Handler):
    def emit(self, record):
        if ('neutron.openstack.common.notifier.log_notifier' in
                cfg.CONF.notification_driver):
            return
        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            dict(error=record.msg))
apache-2.0
Python
80176ae73fc9843f63aca68306523da822d0b258
add "group" property to devices
Sirs0ri/PersonalAssistant
samantha/plugins/plugin.py
samantha/plugins/plugin.py
"""Contains a baseclass for plugins.""" ############################################################################### # # TODO: [ ] default methods # ############################################################################### # standard library imports import logging # related third party imports # application specific imports # pylint: disable=import-error from core import subscribe_to # pylint: enable=import-error __version__ = "1.4.1" # Initialize the logger LOGGER = logging.getLogger(__name__) class Plugin(object): """Baseclass, that holds the mandatory methods a plugin must support.""" def __init__(self, name="Plugin", active=False, logger=None, file_path=None, plugin_type="s"): """Set the plugin's attributes, if they're not set already.""" self.name = name self.uid = "NO_UID" self.is_active = active if logger: self.logger = logger else: self.logger = LOGGER if file_path: self.path = file_path else: self.path = __file__ self.plugin_type = plugin_type self.logger.info("Initialisation of the plugin complete.") def __str__(self): """Return a simple string representation of the plugin.""" return "{} '{}', UID {}".format( ("Device" if self.plugin_type == "d" else "Plugin"), self.name, self.uid) def __repr__(self): """Return a verbose string representation of the plugin.""" return "{type}\t{name:10}\tUID {uid}\tLoaded from {path}".format( type=("Device" if self.plugin_type == "d" else "Plugin"), name=self.name, uid=self.uid, path=self.path) class Device(Plugin): """Baseclass, that holds the mandatory methods a device must support.""" def __init__(self, name="Device", active=False, logger=None, file_path=None, group=None): """Set the plugin's attributes, if they're not set already.""" super(Device, self).__init__(name, active, logger, file_path, "d") self.name = name self.is_available = None self.group = group self.logger.info("Initialisation complete") def turn_on(self, func): @subscribe_to(self.name + "power.on") def function(*args, **kwargs): return func(*args, **kwargs) return function
"""Contains a baseclass for plugins.""" ############################################################################### # # TODO: [ ] default methods # ############################################################################### # standard library imports import logging # related third party imports # application specific imports # pylint: disable=import-error from core import subscribe_to # pylint: enable=import-error __version__ = "1.4.0" # Initialize the logger LOGGER = logging.getLogger(__name__) class Plugin(object): """Baseclass, that holds the mandatory methods a plugin must support.""" def __init__(self, name="Plugin", active=False, logger=None, file_path=None, plugin_type="s"): """Set the plugin's attributes, if they're not set already.""" self.name = name self.uid = "NO_UID" self.is_active = active if logger: self.logger = logger else: self.logger = LOGGER if file_path: self.path = file_path else: self.path = __file__ self.plugin_type = plugin_type self.logger.info("Initialisation of the plugin complete.") def __str__(self): """Return a simple string representation of the plugin.""" return "{} '{}', UID {}".format( ("Device" if self.plugin_type == "d" else "Plugin"), self.name, self.uid) def __repr__(self): """Return a verbose string representation of the plugin.""" return "{type}\t{name:10}\tUID {uid}\tLoaded from {path}".format( type=("Device" if self.plugin_type == "d" else "Plugin"), name=self.name, uid=self.uid, path=self.path) class Device(Plugin): """Baseclass, that holds the mandatory methods a device must support.""" def __init__(self, name="Device", active=False, logger=None, file_path=None): """Set the plugin's attributes, if they're not set already.""" super(Device, self).__init__(name, active, logger, file_path, "d") self.name = name self.is_available = None self.logger.info("Initialisation complete") def turn_on(self, func): @subscribe_to(self.name + "power.on") def function(*args, **kwargs): return func(*args, **kwargs) return function
mit
Python
586d5f34fc508d4a3eaa93bd39c5dc2b41e4878d
Migrate api7 to api8
ClearCorp/odoo-clearcorp,sysadminmatmoz/odoo-clearcorp,ClearCorp-dev/odoo-clearcorp,ClearCorp-dev/odoo-clearcorp,ClearCorp/odoo-clearcorp,sysadminmatmoz/odoo-clearcorp,ClearCorp-dev/odoo-clearcorp,sysadminmatmoz/odoo-clearcorp,ClearCorp-dev/odoo-clearcorp,ClearCorp/odoo-clearcorp,sysadminmatmoz/odoo-clearcorp,ClearCorp/odoo-clearcorp
project_task_state/project_task_state.py
project_task_state/project_task_state.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Addons modules by CLEARCORP S.A.
#    Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields, api, _

_TASK_STATE = [('draft', 'New'), ('open', 'In Progress'),
               ('pending', 'Pending'), ('ready', 'Ready'),
               ('done', 'Done'), ('cancelled', 'Cancelled')]


class project_task_type(models.Model):
    _inherit = 'project.task.type'

    state = fields.Selection(_TASK_STATE, 'Related Status', required=True,
                             default='open')
    task_type = fields.Many2one('task.type', string='Task Type')

    @api.multi
    def mark_done(self):
        values = {
            'state': 'done',
            'name': _('Done'),
            'readonly': 'True',
        }
        self.write(values)
        return True

    _defaults = {
        'fold': False,
        'case_default': False
    }


class task(models.Model):
    _inherit = 'project.task'

    @api.one
    @api.depends('stage_id')
    def _compute_state(self):
        if self.stage_id:
            self.state = self.stage_id.state
        else:
            self.state = 'draft'

    state = fields.Selection(
        _TASK_STATE, string="Status", readonly=True, store=True,
        compute='_compute_state')
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Addons modules by CLEARCORP S.A.
#    Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import osv, fields
from dateutil.relativedelta import relativedelta
from datetime import datetime

_TASK_STATE = [('draft', 'New'), ('open', 'In Progress'),
               ('pending', 'Pending'), ('ready', 'Ready'),
               ('done', 'Done'), ('cancelled', 'Cancelled')]


class project_task_type(osv.Model):
    _inherit = 'project.task.type'

    _columns = {
        'state': fields.selection(_TASK_STATE, 'Related Status', required=True),
        'task_type': fields.many2one('task.type', string='Task Type'),
    }

    def mark_done(self, cr, uid, ids, context=None):
        values = {
            'state': 'done',
            'name': _('Done'),
            'readonly': 'True',
        }
        self.write(cr, uid, ids, values, context=context)
        return True

    _defaults = {
        'state': 'open',
        'fold': False,
        'case_default': False,
    }


class task(osv.Model):
    _inherit = 'project.task'

    _columns = {
        'state': fields.related('stage_id', 'state', type="selection",
                                store=True, selection=_TASK_STATE,
                                string="Status", readonly=True, select=True),
    }

    def set_kanban_state_blocked(self, cr, uid, ids, context=None):
        return self.write(cr, uid, ids, {'kanban_state': 'blocked'}, context=context)

    def set_kanban_state_normal(self, cr, uid, ids, context=None):
        return self.write(cr, uid, ids, {'kanban_state': 'normal'}, context=context)

    def set_kanban_state_done(self, cr, uid, ids, context=None):
        self.write(cr, uid, ids, {'kanban_state': 'done'}, context=context)
        return False
agpl-3.0
Python
c3d7e7fdcbea0fc34bfa6d9d517efc4d54dc0b15
add file extension
timothydmorton/qa_explorer,timothydmorton/qa_explorer
scripts/generateQANotebook.py
scripts/generateQANotebook.py
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('repo', help='data repository')
parser.add_argument('--tract', type=int)
parser.add_argument('--filt', type=str)
parser.add_argument('--output', '-o', default='QA', help='output folder')
args = parser.parse_args()

from explorer.notebook import Coadd_QANotebook

coadd_nb = Coadd_QANotebook(args.repo, args.tract, args.filt)

if not os.path.exists(args.output):
    os.makedirs(args.output)

coadd_nb.write(os.path.join(args.output, 'coadd_{}_{}.ipynb'.format(args.tract, args.filt)))
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('repo', help='data repository')
parser.add_argument('--tract', type=int)
parser.add_argument('--filt', type=str)
parser.add_argument('--output', '-o', default='QA', help='output folder')
args = parser.parse_args()

from explorer.notebook import Coadd_QANotebook

coadd_nb = Coadd_QANotebook(args.repo, args.tract, args.filt)

if not os.path.exists(args.output):
    os.makedirs(args.output)

coadd_nb.write(os.path.join(args.output, 'coadd_{}_{}'.format(args.tract, args.filt)))
mit
Python
1f5bdf4ce98d55339bee0aad16f40439d8a99a33
Upgrade to the most recent version of buildkit.
kgaughan/zoicon-issues
buildkit.py
buildkit.py
""" Common code used in my setup.py files. """ from __future__ import with_statement import os.path import sys def read(*filenames): """Read files relative to the executable.""" files = [] for filename in filenames: full_path = os.path.join(os.path.dirname(sys.argv[0]), filename) with open(full_path, 'r') as fh: files.append(fh.read()) return "\n\n".join(files) def read_requirements(requirements_path): """Read a requirements file, stripping out the detritus.""" requirements = [] to_ignore = ('#', 'svn+', 'git+', 'bzr+', 'hg+') with open(requirements_path, 'r') as fh: for line in fh: line = line.strip() if line == '' or line.startswith(to_ignore): continue if line.startswith('-r '): requirements += read_requirements( os.path.realpath( os.path.join( os.path.dirname(requirements_path), line.split(' ', 1)[1].lstrip()))) else: requirements.append(line) return requirements
""" Common code used in my setup.py files. """ from __future__ import with_statement import re import os.path import sys def read(filename): """Read files relative to this file.""" full_path = os.path.join(os.path.dirname(sys.argv[0]), filename) with open(full_path, 'r') as fh: return fh.read() def get_metadata(module_path): """Extract the metadata from a module file.""" matches = re.finditer( r"^__(\w+?)__ *= *(['\"])(.*?)\2$", read(module_path), re.MULTILINE) return dict( (match.group(1), match.group(3).decode('unicode_escape')) for match in matches) def read_requirements(requirements_path): """Read a requirements file, stripping out the detritus.""" requirements = [] to_ignore = ('#', 'svn+', 'git+', 'bzr+', 'hg+') with open(requirements_path, 'r') as fh: for line in fh: line = line.strip() if line != '' and not line.startswith(to_ignore): requirements.append(line) return requirements
mit
Python
0ba69643331f769116fbda0660377f08b0291e6a
Update searchAndStoreTweets.py
MichaelCurrin/twitterverse,MichaelCurrin/twitterverse
app/utils/insert/searchAndStoreTweets.py
app/utils/insert/searchAndStoreTweets.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Search and Store Tweets utility.

Search for tweets in the Twitter API for given input terms, then store the
tweet and the tweet's author data locally, updating or adding objects as
required.

Send search terms as arguments to the command-line tool to search for them.
See the usage instructions.

The persist value is set based on an answer here:
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
import argparse
import os
import sys

# Allow imports to be done when executing this file directly.
appDir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                      os.path.pardir, os.path.pardir))
sys.path.insert(0, appDir)

from lib import tweets
from lib.twitter import auth, search


# Setup global API connection object, which needs to be set using a function
# on auth.
API_CONN = None


def searchAndStore(searchQuery, totalCount=200, persist=True):
    """
    Search Twitter API for tweets matching input search terms.

    @param searchQuery: search query as a string containing terms.
    @param totalCount: total count of tweets to attempt to get for the
        search query, as an integer. Defaults to 200, which is the max
        count of tweets received on a single page from the Twitter API.
    @param persist. Default True. If set to False, does not store data
        in the database and only prints to stdout.
    """
    assert API_CONN, ("Authenticate with Twitter API before doing"
                      " a search for tweets.")

    searchRes = search.fetchTweetsPaging(API_CONN, searchQuery=searchQuery,
                                         itemLimit=totalCount)
    for t in searchRes:
        if persist:
            # Add/update tweet author.
            profileRec = tweets.insertOrUpdateProfile(t.author)
            # Add/update the tweet.
            tweetData = tweets.insertOrUpdateTweet(t, profileRec.id)
        else:
            text = t.full_text.replace('\n', ' ').replace('\r', ' ')
            print u'@{0}: {1}'.format(t.author.screen_name, text)


def main():
    """
    Handle command-line arguments then search for and store tweets.
    """
    global API_CONN

    parser = argparse.ArgumentParser(
        description="Utility to search for tweets, then store tweet"
                    " and store profile data locally.",
        formatter_class=argparse.RawTextHelpFormatter
    )

    searchQueryHelp = """\
Search for tweets on Twitter API which match the rule containing one or
more terms. See the Twitter API search documentation. Examples:
* contains all terms, in any order
    * wordA wordB
* contains at least one of the terms
    * wordA OR wordB
    * @handleA OR wordB
* contains the term #abc or the phrase "My Quote"
    * \\#abc OR \\"My Quote\\"
    * '#abc' OR '"My Quote"'"""

    parser.add_argument('terms', metavar='TERM', nargs='+',
                        help=searchQueryHelp)
    parser.add_argument('-c', '--count', metavar='N', type=int, default=200,
                        help="Default 200. Max count of tweets to get for the"
                             " search query.")
    parser.add_argument('--persist', dest='persist', action='store_true',
                        help="Store tweets and profiles in database."
                             " Defaults to on.")
    parser.add_argument('--no-persist', dest='persist', action='store_false',
                        help="Print tweet and profile data without storing.")
    parser.set_defaults(persist=True)

    args = parser.parse_args()

    # Combine trimmed list of strings into single string.
    searchQuery = u' '.join(args.terms)
    print u'Search query: {0}'.format(searchQuery)

    # Use app auth for up to 450 search requests per window, rather than 180.
    API_CONN = auth.getAppOnlyConnection()
    searchAndStore(args.terms, totalCount=args.count, persist=args.persist)


if __name__ == '__main__':
    main()
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
import os
import sys
#import time

# Allow imports to be done when executing this file directly.
appDir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                      os.path.pardir, os.path.pardir))
sys.path.insert(0, appDir)

from lib import tweets
from lib.twitter import auth, search


#def test():
api = auth.getAppOnlyConnection()

# Match terms which may start with # or @ or neither. Quoting a phrase
# is useful for exact match of a phrase, but then it has to be at start of
# query string, which is a known bug on Twitter API.
festivalList = ['"MamaCity Improv Fest"', 'MCIF', 'MamaCityImprovFest',
                'MamaCityIF']
festivalQuery = ' OR '.join(festivalList)

searchQuery = festivalQuery
#searchQuery = 'Trump'

totalCount = 100
searchRes = search.fetchTweetsPaging(api, searchQuery=searchQuery,
                                     itemLimit=totalCount)

if False:
    for t in searchRes:
        print t.id  #, t.author.screen_name

if True:
    for t in searchRes:
        # Add/update tweet author.
        profileRec = tweets.insertOrUpdateProfile(t.author)

        # Add/update the tweet.
        tweetData = tweets.insertOrUpdateTweet(t, profileRec.id)

#if __name__ == '__main__':
#    test()
mit
Python
28adc3fbce76a562e729aef3ae19bdefd3379586
add taxonomy information to loaded sequences
ctSkennerton/BioSQL-Extensions
scripts/load_into_database.py
scripts/load_into_database.py
#!/usr/bin/env python
import sys
import argparse
from BioSQL import BioSeqDatabase
from Bio import SeqIO


def add_taxid(inIter, taxid):
    inIter.annotations['ncbi_taxid'] = taxid
    yield inIter


def load_gff(db, gff_file, fasta_file, fetch_taxonomy=False, taxid=None):
    from BCBio.GFF import GFFParser
    with open(fasta_file) as seq_handle:
        seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, "fasta"))

    parser = GFFParser()
    recs = parser.parse(gff_file, seq_dict )#, limit_info=limit_info)
    db.load(add_taxid(recs, taxid), fetch_NCBI_taxonomy=fetch_taxonomy)


def load_genbank(db, genbank_file, fetch_taxonomy=False, taxid=None):
    with open(genbank_file) as fp:
        db.load(add_taxid(SeqIO.parse(genbank_file, 'genbank'), taxid),
                fetch_NCBI_taxonomy=fetch_taxonomy)


def main(args):
    server = BioSeqDatabase.open_database(driver="sqlite3", db=args.database)
    if args.database_name not in server.keys():
        server.new_database(args.database_name)

    db = server[args.database_name]
    try:
        if args.gff is not None and args.fasta is not None:
            load_gff(db, args.gff, args.fasta, args.tax_lookup)
            server.adaptor.commit()
        elif args.genbank is not None:
            load_genbank(db, args.genbank, args.tax_lookup)
            server.adaptor.commit()
    except:
        server.adaptor.rollback()
        raise


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--database', help='name of premade biosql database')
    parser.add_argument('-D', '--database-name', help='namespace of the database that you want to add into', dest='database_name', default='metagenomic_database')
    parser.add_argument('-f', '--fasta', help='fasta file to add into the database')
    parser.add_argument('-g', '--gff', help='gff file of reatures to add into the database. Must be paired with a fasta file')
    parser.add_argument('-G', '--genbank', help='genbank file to add into the database')
    parser.add_argument('-t', '--lookup-taxonomy', dest='tax_lookup', help='access taxonomy information on NCBI servers', action="store_true", default=False)
    parser.add_argument('-T', '--taxid', help='supply a ncbi taxonomy id that will be applied to all sequences in the file')
    args = parser.parse_args()

    main(args)
#!/usr/bin/env python
import sys
import argparse
from BioSQL import BioSeqDatabase
from Bio import SeqIO


def load_gff(db, gff_file, fasta_file):
    from BCBio.GFF import GFFParser
    with open(fasta_file) as seq_handle:
        seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, "fasta"))

    parser = GFFParser()
    recs = parser.parse(gff_file, seq_dict )#, limit_info=limit_info)
    db.load(recs)


def load_genbank(db, genbank_file):
    with open(genbank_file) as fp:
        db.load(SeqIO.parse(fp, 'genbank'))


def main(args):
    server = BioSeqDatabase.open_database(driver="sqlite3", db=args.database)
    if args.database_name not in server.keys():
        server.new_database(args.database_name)

    db = server[args.database_name]
    try:
        if args.gff is not None and args.fasta is not None:
            load_gff(db, args.gff, args.fasta)
            server.adaptor.commit()
        elif args.genbank is not None:
            load_genbank(db, args.genbank)
            server.adaptor.commit()
    except:
        server.adaptor.rollback()
        raise


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--database', help='name of premade biosql database')
    parser.add_argument('-D', '--database-name', help='namespace of the database that you want to add into', dest='database_name', default='metagenomic_database')
    parser.add_argument('-f', '--fasta', help='fasta file to add into the database')
    parser.add_argument('-g', '--gff', help='gff file of reatures to add into the database. Must be paired with a fasta file')
    parser.add_argument('-G', '--genbank', help='genbank file to add into the database')
    args = parser.parse_args()

    main(args)
mit
Python
93b52ac533086bfa1747c2f3e10ca98d9e666197
Stop sending stepFinished updates from the tryserver.
eunchong/build,eunchong/build,eunchong/build,eunchong/build
scripts/master/status_push.py
scripts/master/status_push.py
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import urlparse

from buildbot.status import status_push

CR_PASSWORD_FILE = '.code_review_password'

class TryServerHttpStatusPush(status_push.HttpStatusPush):
    """Status push used by try server.

    Rietveld listens to buildStarted and (step|build)Finished to know if a try
    job succeeded or not.
    """

    def __init__(self, serverUrl, *args, **kwargs):
        # Appends the status listener to the base url.
        serverUrl = urlparse.urljoin(serverUrl, 'status_listener')
        blackList = [
            'buildETAUpdate',
            #'buildFinished',
            #'buildStarted',
            'buildedRemoved',
            'builderAdded',
            'builderChangedState',
            'buildsetSubmitted',
            'changeAdded',
            'logFinished',
            'logStarted',
            'requestCancelled',
            'requestSubmitted',
            'shutdown',
            'slaveConnected',
            'slaveDisconnected',
            'start',
            'stepETAUpdate',
            'stepFinished',
            'stepStarted',
            'stepText2Changed',
            'stepTextChanged',
        ]
        if not os.path.isfile(CR_PASSWORD_FILE):
            logging.warn("The file %s does not exist. "
                         "Connections to rietveld may not work." % CR_PASSWORD_FILE)
            pwd = ''
        else:
            with open(CR_PASSWORD_FILE, 'rb') as f:
                pwd = f.readline().strip()
        extra_post_params = {
            'password': pwd
        }
        status_push.HttpStatusPush.__init__(
            self, *args,
            serverUrl=serverUrl,
            blackList=blackList,
            extra_post_params=extra_post_params,
            **kwargs)

    def setServiceParent(self, parent):
        """Adds the base_url property, it's not available to Rietveld otherwise."""
        self.extra_post_params['base_url'] = parent.buildbotURL
        status_push.HttpStatusPush.setServiceParent(self, parent)
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import urlparse

from buildbot.status import status_push

CR_PASSWORD_FILE = '.code_review_password'

class TryServerHttpStatusPush(status_push.HttpStatusPush):
    """Status push used by try server.

    Rietveld listens to buildStarted and (step|build)Finished to know if a try
    job succeeded or not.
    """

    def __init__(self, serverUrl, *args, **kwargs):
        # Appends the status listener to the base url.
        serverUrl = urlparse.urljoin(serverUrl, 'status_listener')
        blackList = [
            'buildETAUpdate',
            #'buildFinished',
            #'buildStarted',
            'buildedRemoved',
            'builderAdded',
            'builderChangedState',
            'buildsetSubmitted',
            'changeAdded',
            'logFinished',
            'logStarted',
            'requestCancelled',
            'requestSubmitted',
            'shutdown',
            'slaveConnected',
            'slaveDisconnected',
            'start',
            'stepETAUpdate',
            #'stepFinished',
            'stepStarted',
            'stepText2Changed',
            'stepTextChanged',
        ]
        if not os.path.isfile(CR_PASSWORD_FILE):
            logging.warn("The file %s does not exist. "
                         "Connections to rietveld may not work." % CR_PASSWORD_FILE)
            pwd = ''
        else:
            with open(CR_PASSWORD_FILE, 'rb') as f:
                pwd = f.readline().strip()
        extra_post_params = {
            'password': pwd
        }
        status_push.HttpStatusPush.__init__(
            self, *args,
            serverUrl=serverUrl,
            blackList=blackList,
            extra_post_params=extra_post_params,
            **kwargs)

    def setServiceParent(self, parent):
        """Adds the base_url property, it's not available to Rietveld otherwise."""
        self.extra_post_params['base_url'] = parent.buildbotURL
        status_push.HttpStatusPush.setServiceParent(self, parent)
bsd-3-clause
Python
4748a68b6bde662245b85ecf2a52fbedffc4ffcb
Remove exception handling.
ibmibmibm/beets,beetbox/beets,shamangeorge/beets,jackwilsdon/beets,sampsyo/beets,shamangeorge/beets,beetbox/beets,jackwilsdon/beets,SusannaMaria/beets,shamangeorge/beets,ibmibmibm/beets,ibmibmibm/beets,jackwilsdon/beets,SusannaMaria/beets,sampsyo/beets,shamangeorge/beets,SusannaMaria/beets,sampsyo/beets,jackwilsdon/beets,beetbox/beets,beetbox/beets,sampsyo/beets,ibmibmibm/beets,SusannaMaria/beets
beetsplug/sonosupdate.py
beetsplug/sonosupdate.py
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""

from __future__ import division, absolute_import, print_function

from beets.plugins import BeetsPlugin
import six
import soco

class SonosUpdate(BeetsPlugin):
    def __init__(self):
        super(SonosUpdate, self).__init__()
        self.register_listener('database_change', self.listen_for_db_change)

    def listen_for_db_change(self, lib, model):
        """Listens for beets db change and register the update"""
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When the client exists try to send refresh request to a Sonos
        controler.
        """
        self._log.info(u'Requesting a Sonos library update...')

        device = soco.discovery.any_soco()
        if device:
            device.music_library.start_library_update()
        else:
            self._log.warning(u'Could not find a Sonos device.')
            return

        self._log.info(u'Sonos update triggered')
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""

from __future__ import division, absolute_import, print_function

from beets.plugins import BeetsPlugin
import six
import soco

class SonosUpdate(BeetsPlugin):
    def __init__(self):
        super(SonosUpdate, self).__init__()
        self.register_listener('database_change', self.listen_for_db_change)

    def listen_for_db_change(self, lib, model):
        """Listens for beets db change and register the update"""
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When the client exists try to send refresh request to a Sonos
        controler.
        """
        self._log.info(u'Requesting a Sonos library update...')

        # Try to send update request.
        try:
            device = soco.discovery.any_soco()
            device.music_library.start_library_update()
        except NoneType:
            self._log.warning(u'Could not find a Sonos device.')
            return

        self._log.info(u'Sonos update triggered')
mit
Python
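Note on the fix above: soco.discovery.any_soco() signals "no device found" by returning None rather than raising, so the removed `except NoneType:` clause could never fire (NoneType is not a built-in name, and a None return is a value, not an exception). A minimal sketch of the pattern, using a hypothetical stand-in for the discovery call:

def any_device():
    # Stand-in for soco.discovery.any_soco(): returns a device or None.
    return None

device = any_device()
if device is None:
    # A None return must be tested as a value; no except clause can catch it.
    print('Could not find a device.')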
77ddff664ad1e10037a43c3ffabd816387c35e42
Use a comprehension instead of a lambda function
CubicComet/exercism-python-solutions
rotational-cipher/rotational_cipher.py
rotational-cipher/rotational_cipher.py
import string

UPPER = string.ascii_uppercase
LOWER = string.ascii_lowercase

def rotate(s, n):
    rules = shift_rules(n)
    return "".join(rules.get(ch, ch) for ch in s)

def shift_rules(n):
    shifted = UPPER[n:] + UPPER[:n] + LOWER[n:] + LOWER[:n]
    return {k:v for k,v in zip(UPPER+LOWER, shifted)}
import string

UPPER = string.ascii_uppercase
LOWER = string.ascii_lowercase

def rotate(s, n):
    rules = shift_rules(n)
    return "".join(map(lambda k: rules.get(k, k), s))

def shift_rules(n):
    shifted = UPPER[n:] + UPPER[:n] + LOWER[n:] + LOWER[:n]
    return {k:v for k,v in zip(UPPER+LOWER, shifted)}
agpl-3.0
Python
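For illustration, the two spellings in the diff above are equivalent; the generator expression simply avoids one lambda call per character. A self-contained check (toy rules dict assumed):

rules = {'a': 'b'}
text = 'abc'

via_map = "".join(map(lambda ch: rules.get(ch, ch), text))
via_genexp = "".join(rules.get(ch, ch) for ch in text)
assert via_map == via_genexp == 'bbc'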
25beb8ce9f21d5ef5255304700a76ed2d7aaa425
Add initial solution
CubicComet/exercism-python-solutions
rotational-cipher/rotational_cipher.py
rotational-cipher/rotational_cipher.py
import string

UPPER = string.ascii_uppercase
LOWER = string.ascii_lowercase

def rotate(s, n):
    return "".join(rot_gen(s,n))

def shift_rules(n):
    shifted = UPPER[n:] + UPPER[:n] + LOWER[n:] + LOWER[:n]
    return {k:v for k,v in zip(UPPER+LOWER, shifted)}

def rot_gen(s, n):
    rules = shift_rules(n)
    for ch in s:
        try:
            yield rules[ch]
        except KeyError:
            yield ch
def rotate():
    pass
agpl-3.0
Python
644f66a39fd59b1983eee6f127e13e1585a598cd
Fix breakages from girder-client v2.0
Kitware/candela,Kitware/candela,Kitware/candela,Kitware/candela,Kitware/candela
script/upload-test-images.py
script/upload-test-images.py
import girder_client
import os
import sys

def main():
    # Use the API key to authenticate.
    key = os.environ.get("GIRDER_API_KEY")
    if key is None:
        print >>sys.stderr, "Environment variable GIRDER_API_KEY is blank. Cannot upload images."
        return 1

    gc = girder_client.GirderClient(host="data.kitware.com", scheme="https")
    gc.authenticate(apiKey=key)

    # Retrieve the target folder, which should be at ~/Public/Travis\ Candela
    user = gc.get("user/me")
    if user is None:
        print >>sys.stderr, "No user logged in; API key may be bad."
        return 1

    travis_build_number = os.environ.get("TRAVIS_BUILD_NUMBER")
    travis_job_number = os.environ.get("TRAVIS_JOB_NUMBER")

    folder = gc.loadOrCreateFolder("Public", user["_id"], "user")
    folder = gc.loadOrCreateFolder("Travis Candela", folder["_id"], "folder")
    folder = gc.loadOrCreateFolder(travis_build_number, folder["_id"], "folder")
    folder = gc.loadOrCreateFolder(travis_job_number, folder["_id"], "folder")

    # Upload the files specified on the command line, creating (or loading) a
    # folder for each.
    for imageFile in sys.argv[1:]:
        (dirname, filename) = os.path.split(imageFile)
        compName = dirname.split(os.path.sep)[-2]

        compFolder = gc.loadOrCreateFolder(compName, folder["_id"], "folder")

        size = os.stat(imageFile).st_size
        with open(imageFile, "rb") as fd:
            gc.uploadFile(
                parentId=compFolder["_id"],
                stream=fd,
                name=filename,
                size=size,
                parentType="folder")

if __name__ == "__main__":
    sys.exit(main())
import girder_client
import os
import sys

def main():
    # Use the API key to authenticate.
    key = os.environ.get("GIRDER_API_KEY")
    if key is None:
        print >>sys.stderr, "Environment variable GIRDER_API_KEY is blank. Cannot upload images."
        return 1

    gc = girder_client.GirderClient(host="data.kitware.com", port=443, scheme="https")
    gc.authenticate(apiKey=key)

    # Retrieve the target folder, which should be at ~/Public/Travis\ Candela
    user = gc.get("user/me")
    if user is None:
        print >>sys.stderr, "No user logged in; API key may be bad."
        return 1

    travis_build_number = os.environ.get("TRAVIS_BUILD_NUMBER")
    travis_job_number = os.environ.get("TRAVIS_JOB_NUMBER")

    folder = gc.load_or_create_folder("Public", user["_id"], "user")
    folder = gc.load_or_create_folder("Travis Candela", folder["_id"], "folder")
    folder = gc.load_or_create_folder(travis_build_number, folder["_id"], "folder")
    folder = gc.load_or_create_folder(travis_job_number, folder["_id"], "folder")

    # Upload the files specified on the command line, creating (or loading) a
    # folder for each.
    for imageFile in sys.argv[1:]:
        (dirname, filename) = os.path.split(imageFile)
        compName = dirname.split(os.path.sep)[-2]

        compFolder = gc.load_or_create_folder(compName, folder["_id"], "folder")

        gc._upload_as_item(filename, compFolder["_id"], imageFile)

if __name__ == "__main__":
    sys.exit(main())
apache-2.0
Python
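Judging only from the diff above, girder-client 2.0 renamed load_or_create_folder to loadOrCreateFolder and replaced the private _upload_as_item helper with the public uploadFile, which takes an open stream plus an explicit size. A hedged sketch of the new-style upload; host, folder id and file name are placeholders, and authentication is elided:

import os
import girder_client

gc = girder_client.GirderClient(host="example.com", scheme="https")  # placeholder host
# gc.authenticate(apiKey=...)  # a real API key would be needed here

path = "image.png"            # placeholder file
size = os.stat(path).st_size  # uploadFile needs the size up front
with open(path, "rb") as fd:
    gc.uploadFile(parentId="<folder-id>", stream=fd, name=path,
                  size=size, parentType="folder")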
9b0612b0a4d5c483013311d7c7814cc268609cb0
Fix URL in setup.py
byxorna/collins,tumblr/collins,discordianfish/collins,tumblr/collins,tumblr/collins,discordianfish/collins,funzoneq/collins,byxorna/collins,byxorna/collins,box/collins,box/collins,discordianfish/collins,byxorna/collins,tumblr/collins,funzoneq/collins,funzoneq/collins,tumblr/collins,defect/collins,defect/collins,byxorna/collins,defect/collins,funzoneq/collins,discordianfish/collins,discordianfish/collins,funzoneq/collins,funzoneq/collins,defect/collins,box/collins,box/collins,tumblr/collins,defect/collins,defect/collins,discordianfish/collins,byxorna/collins
support/python/collins_client/setup.py
support/python/collins_client/setup.py
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(name="collins_client",
      version="0.1.0",
      description="The python interface to the collins api.",
      author="John Bunting, Nick Thuesen, Nick Sauro, Will Richard",
      author_email="[email protected]",
      url="https://github.com/tumblr/collins/tree/master/support/python/collins_client",
      packages=find_packages(),
      keywords='collins infastructure managment',
      classifiers= [
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 2 :: Only',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Topic :: System :: Systems Administration'
      ],
      install_requires= [
          'grequests==0.2.0',
      ]
)
#!/usr/bin/env python

from setuptools import setup, find_packages

setup(name="collins_client",
      version="0.1.0",
      description="The python interface to the collins api.",
      author="John Bunting, Nick Thuesen, Nick Sauro, Will Richard",
      author_email="[email protected]",
      url="https://github.com/tumblr/collins/tree/master/support/python/collins-client",
      packages=find_packages(),
      keywords='collins infastructure managment',
      classifiers= [
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 2 :: Only',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Topic :: System :: Systems Administration'
      ],
      install_requires= [
          'grequests==0.2.0',
      ]
)
apache-2.0
Python
e69f45ecc3ee23be203136be02e9648a4930a3e8
Make test.py use difflib instead of filecmp.
mitni455/psdump,mitni455/psdump,alco/psdump,alco/psdump,eric-seekas/psdump,mitni455/psdump,alco/psdump,eric-seekas/psdump,alco/psdump,eric-seekas/psdump,mitni455/psdump,eric-seekas/psdump
test/test.py
test/test.py
# -*- coding: utf-8 -*-

import os
import sys
import filecmp
import difflib
from subprocess import call

def run_test(fmt, sample, arg):
    psdump = "../build/psdump -o temp -f {0} {1}".format(fmt, arg)
    call(psdump.split())

    with open("temp", "r") as temp_file:
        temp_lines = temp_file.readlines()
    with open(sample, "r") as sample_file:
        sample_lines = sample_file.readlines()

    # Compare corresponding lines from 'temp' and 'sample' files
    diff = difflib.ndiff(temp_lines, sample_lines)
    if filter(lambda x: not x.startswith(' '), diff):
        print "{0} test failed.".format(fmt.upper())
        return 1
    return 0

if __name__ == '__main__':
    dirname = os.path.dirname(sys.argv[0])
    os.chdir(dirname)

    psdfile = "sample.psd"
    psdfiles = "sample.psd sample.psd"

    print "### Single input file test"
    status = 0
    status += run_test("text", "sample.txt", psdfile)
    status += run_test("xml", "sample.xml", psdfile)
    status += run_test("json", "sample.json", psdfile)
    if status == 0:
        print "### OK."

    print "### Multiple input files test"
    status = 0
    status += run_test("text", "sample_multi.txt", psdfiles)
    status += run_test("xml", "sample_multi.xml", psdfiles)
    status += run_test("json", "sample_multi.json", psdfiles)
    if status == 0:
        print "### OK."
# -*- coding: utf-8 -*-

import os
import sys
import filecmp
import tempfile
from subprocess import call

def run_test(fmt, sample, arg):
    temp = tempfile.NamedTemporaryFile()
    psdump = "../build/psdump -o {0} -f {1} {2}".format(temp.name, fmt, arg)
    call(psdump.split())
    if not filecmp.cmp(temp.name, sample):
        print "{0} test failed.".format(fmt.upper())
        return 1
    return 0

if __name__ == '__main__':
    dirname = os.path.dirname(sys.argv[0])
    os.chdir(dirname)

    psdfile = "sample.psd"
    psdfiles = "sample.psd sample.psd"

    print "### Single input file test"
    status = 0
    status += run_test("text", "sample.txt", psdfile)
    status += run_test("xml", "sample.xml", psdfile)
    status += run_test("json", "sample.json", psdfile)
    if status == 0:
        print "### OK."

    print "### Multiple input files test"
    status = 0
    status += run_test("text", "sample_multi.txt", psdfiles)
    status += run_test("xml", "sample_multi.xml", psdfiles)
    status += run_test("json", "sample_multi.json", psdfiles)
    if status == 0:
        print "### OK."
mit
Python
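For context on the change above: filecmp.cmp only answers whether two files match, while difflib can report which lines differ. A minimal sketch of the check used in run_test, written for Python 3 (the original file is Python 2):

import difflib

a = ["same\n", "old\n"]
b = ["same\n", "new\n"]

# ndiff prefixes unchanged lines with two spaces; '-', '+' and '?' lines
# mark differences, so any line not starting with a space means a mismatch.
changed = [line for line in difflib.ndiff(a, b) if not line.startswith(' ')]
assert changed  # the two line lists differ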
80c5f94f3c2ed02c8603d3eecea23cdb4711ae79
Use hanging indent
antoinearnoud/openfisca-france,SophieIPP/openfisca-france,benjello/openfisca-france,SophieIPP/openfisca-france,adrienpacifico/openfisca-france,adrienpacifico/openfisca-france,benjello/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france
openfisca_france/tests/test_tax_rates.py
openfisca_france/tests/test_tax_rates.py
# -*- coding: utf-8 -*-

# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from openfisca_core import periods
from openfisca_core.simulations import average_tax_rate, marginal_tax_rate

import openfisca_france

TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()

def test_average_tax_rate():
    year = 2013
    simulation = tax_benefit_system.new_scenario().init_single_entity(
        axes = [
            dict(
                count = 100,
                name = 'sali',
                max = 24000,
                min = 0,
                ),
            ],
        period = periods.period('year', year),
        parent1 = dict(agem = 40 * 12 + 6),
        ).new_simulation(debug = True)
    assert (average_tax_rate(
        simulation, target_column_name = 'revdisp', varying_column_name = 'revdisp') == 0).all()

def test_marginal_tax_rate():
    year = 2013
    simulation = tax_benefit_system.new_scenario().init_single_entity(
        axes = [
            dict(
                count = 10000,
                name = 'sali',
                max = 1000000,
                min = 0,
                ),
            ],
        period = periods.period('year', year),
        parent1 = dict(agem = 40 * 12 + 6),
        ).new_simulation(debug = True)
    assert (marginal_tax_rate(
        simulation, target_column_name = 'revdisp', varying_column_name = 'revdisp') == 0).all()

if __name__ == '__main__':
    import logging
    import sys

    logging.basicConfig(level = logging.ERROR, stream = sys.stdout)
    test_marginal_tax_rate()
    test_average_tax_rate()
# -*- coding: utf-8 -*-

# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from openfisca_core import periods
from openfisca_core.simulations import average_tax_rate, marginal_tax_rate

import openfisca_france

TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()

def test_average_tax_rate():
    year = 2013
    simulation = tax_benefit_system.new_scenario().init_single_entity(
        axes = [
            dict(
                count = 100,
                name = 'sali',
                max = 24000,
                min = 0,
                ),
            ],
        period = periods.period('year', year),
        parent1 = dict(agem = 40 * 12 + 6),
        ).new_simulation(debug = True)
    assert (average_tax_rate(simulation, target_column_name = 'revdisp',
                             varying_column_name = 'revdisp') == 0).all()

def test_marginal_tax_rate():
    year = 2013
    simulation = tax_benefit_system.new_scenario().init_single_entity(
        axes = [
            dict(
                count = 10000,
                name = 'sali',
                max = 1000000,
                min = 0,
                ),
            ],
        period = periods.period('year', year),
        parent1 = dict(agem = 40 * 12 + 6),
        ).new_simulation(debug = True)
    assert (marginal_tax_rate(simulation, target_column_name = 'revdisp',
                              varying_column_name = 'revdisp') == 0).all()

if __name__ == '__main__':
    import logging
    import sys

    logging.basicConfig(level = logging.ERROR, stream = sys.stdout)
    test_marginal_tax_rate()
    test_average_tax_rate()
agpl-3.0
Python
20e7453d143223ae1c95ad32ee49660ceeadf3f7
Prepare for 1.1.0
iffy/crudset
crudset/version.py
crudset/version.py
version = "1.1.0-dev"
version = "1.0.0"
apache-2.0
Python
4510db1e8f2fe3298de395a9d8b1e0783f92c758
update revision
patochectp/navitia,CanalTP/navitia,CanalTP/navitia,Tisseo/navitia,patochectp/navitia,pbougue/navitia,kinnou02/navitia,Tisseo/navitia,kinnou02/navitia,pbougue/navitia,CanalTP/navitia,kinnou02/navitia,CanalTP/navitia,CanalTP/navitia,Tisseo/navitia,xlqian/navitia,pbougue/navitia,xlqian/navitia,xlqian/navitia,xlqian/navitia,kinnou02/navitia,Tisseo/navitia,patochectp/navitia,Tisseo/navitia,xlqian/navitia,patochectp/navitia,pbougue/navitia
source/tyr/migrations/versions/1b59ffb421e4_change_max_nb_crowfly_by_mode_type.py
source/tyr/migrations/versions/1b59ffb421e4_change_max_nb_crowfly_by_mode_type.py
"""change max_nb_crowfly_by_mode to JSONB and set server_default Revision ID: 1b59ffb421e4 Revises: 483639f1f00 Create Date: 2018-08-30 12:42:21.089095 """ # revision identifiers, used by Alembic. revision = '1b59ffb421e4' down_revision = '105823db902c' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB from navitiacommon import default_values import json def upgrade(): op.drop_column('instance', 'max_nb_crowfly_by_mode') op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', JSONB, server_default=json.dumps(default_values.max_nb_crowfly_by_mode))) def downgrade(): op.drop_column('instance', 'max_nb_crowfly_by_mode') op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', sa.PickleType(pickler=json), nullable=True))
"""change max_nb_crowfly_by_mode to JSONB and set server_default Revision ID: 1b59ffb421e4 Revises: 483639f1f00 Create Date: 2018-08-30 12:42:21.089095 """ # revision identifiers, used by Alembic. revision = '1b59ffb421e4' down_revision = '483639f1f00' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB from navitiacommon import default_values import json def upgrade(): op.drop_column('instance', 'max_nb_crowfly_by_mode') op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', JSONB, server_default=json.dumps(default_values.max_nb_crowfly_by_mode))) def downgrade(): op.drop_column('instance', 'max_nb_crowfly_by_mode') op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', sa.PickleType(pickler=json), nullable=True))
agpl-3.0
Python
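For context on the one-line fix above: Alembic orders migrations by following each script's down_revision pointer, so inserting a migration into the history means repointing down_revision at the new predecessor. A schematic sketch using the revision ids from this record (file names are illustrative):

# a_first_migration.py
revision = '483639f1f00'
down_revision = None

# b_inserted_migration.py
revision = '105823db902c'
down_revision = '483639f1f00'

# c_this_migration.py -- must now point at the inserted step, not at
# '483639f1f00', or Alembic sees two heads and a broken chain.
revision = '1b59ffb421e4'
down_revision = '105823db902c'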
0e1c2dad600da595403ee893b787a29bdc38e215
Bump to 2.1.1
iffy/crudset
crudset/version.py
crudset/version.py
version = "2.1.1"
version = "0.2.0-dev"
apache-2.0
Python
513c7a2f5c5fb5a8c47b3173a8d5854755f7928f
Use factories instead of creating instance from model
python-dirbtuves/website,python-dirbtuves/website,python-dirbtuves/website
pylab/website/tests/test_about_page.py
pylab/website/tests/test_about_page.py
import datetime

from django_webtest import WebTest

from pylab.core.models import Event
from pylab.core.factories import EventFactory

class AboutPageTests(WebTest):

    def test_no_events_on_about_page(self):
        resp = self.app.get('/about/')
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(b'No events yet.' in resp.content)

    def test_event_list_on_about_page(self):
        EventFactory(
            event_type=Event.WEEKLY_MEETING,
            title='Summer Python workshop',
            slug='python-workshop',
            starts=datetime.datetime(2015, 7, 30, 18, 0),
            ends=datetime.datetime(2015, 7, 30, 20, 0),
        )

        resp = self.app.get('/about/')
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(b'Summer Python workshop' in resp.content)
import datetime

from django_webtest import WebTest
from django.contrib.auth.models import User

from pylab.core.models import Event

class AboutPageTests(WebTest):

    def setUp(self):
        self.user = User.objects.create(username='u1')

    def test_no_events_on_about_page(self):
        resp = self.app.get('/about/')
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(b'No events yet.' in resp.content)

    def test_event_list_on_about_page(self):
        Event.objects.create(
            author=self.user,
            starts=datetime.datetime(2015, 9, 3),
            ends=datetime.datetime(2015, 9, 3),
            title='Test title',
            osm_map_link='http://openstreetmap.org/',
            description='Test description',
        )

        resp = self.app.get('/about/')
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(b'Test title' in resp.content)
agpl-3.0
Python
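The test above relies on the project's EventFactory; factories like that are typically built with factory_boy. A hedged sketch of what such a factory could look like (the field defaults and the UserFactory reference are assumptions for illustration, not the project's actual definition):

import datetime

import factory

from pylab.core.models import Event

class EventFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Event

    # Assumed defaults; tests override them as keyword arguments,
    # as test_event_list_on_about_page does above.
    author = factory.SubFactory('pylab.core.factories.UserFactory')  # hypothetical
    title = factory.Sequence(lambda n: 'Event %d' % n)
    starts = factory.LazyFunction(datetime.datetime.now)
    ends = factory.LazyFunction(datetime.datetime.now)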
c9ba9b8796de0802f7b941245cc41eb7d59ce7c8
Normalize output path to cp.py
ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,jaruba/chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,littlstar/chromium.src,mogoweb/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,littlstar/chromium.src,jaruba/chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,littlstar/chromium.src,mogoweb/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,littlstar/chromium.src,mogoweb/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,ltilve/chromium,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,ondra-novak/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,markYoungH/chromium.src,hujiajie/pa-chromium,Just-D/chromium-1,Chilledheart/chromium,dushu1203/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,jaruba/chromium.src,dushu1203/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,jaruba/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,dednal/chromium.src,jaruba/chromium.src,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,M4sse/chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk,hujiajie/pa-chromium,dednal/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,patrickm/chromium.src,hujiajie/pa-chromium,markYoungH/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,dednal/chromium.src,pozdnyakov/chromium-crosswalk,Just-D/chromium-1,dednal/chromium.src,ondra-novak/chromium.src,ltilve/chromium,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,Chilledheart/chromium,mogoweb/chromium-crosswalk
build/cp.py
build/cp.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Copy a file.

This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""

import os
import shutil
import sys

def Main(src, dst):
  # Use copy instead of copyfile to ensure the executable bit is copied.
  return shutil.copy(src, os.path.normpath(dst))

if __name__ == '__main__':
  sys.exit(Main(sys.argv[1], sys.argv[2]))
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Copy a file.

This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""

import shutil
import sys

def Main(src, dst):
  # Use copy instead of copyfile to ensure the executable bit is copied.
  return shutil.copy(src, dst)

if __name__ == '__main__':
  sys.exit(Main(sys.argv[1], sys.argv[2]))
bsd-3-clause
Python
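The one-line fix above works because os.path.normpath collapses redundant separators and up-level references before the copy. A quick self-contained illustration:

import os.path

assert os.path.normpath('out//gen/./foo') == os.path.join('out', 'gen', 'foo')
assert os.path.normpath('out/gen/../foo') == os.path.join('out', 'foo')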
55c00fd6684d6fb378326026475945aea59bfa0b
Fix iterator to list
OPpuolitaival/pyosmo,OPpuolitaival/pyosmo
pyosmo/end_conditions/step_coverage.py
pyosmo/end_conditions/step_coverage.py
from pyosmo.end_conditions.base import OsmoEndCondition

class StepCoverage(OsmoEndCondition):
    """ Stops testing when count is filled """

    def __init__(self, coverage_percent):
        if coverage_percent > 100 or coverage_percent < 0:
            raise Exception(f"Coverage is {coverage_percent} and it need to be >0 and <1")
        self.coverage = coverage_percent / 100

    def end_test(self, history, model):
        """ Stops test case when defined number of test steps are executed """
        all_steps = list(model.all_steps)
        steps_used = 0
        for step in all_steps:
            if history.current_test_case.get_step_count(step) > 0:
                steps_used += 1
        current_coverage = steps_used / len(all_steps)
        return current_coverage >= self.coverage

    def end_suite(self, history, model):
        """ Stops test suite when defined number of test cases are executed """
        all_steps = model.all_steps
        steps_used = 0
        for step in all_steps:
            if history.get_step_count(step) > 0:
                steps_used += 1
        current_coverage = steps_used / len(all_steps)
        return current_coverage >= self.coverage
from pyosmo.end_conditions.base import OsmoEndCondition

class StepCoverage(OsmoEndCondition):
    """ Stops testing when count is filled """

    def __init__(self, coverage_percent):
        if coverage_percent > 100 or coverage_percent < 0:
            raise Exception(f"Coverage is {coverage_percent} and it need to be >0 and <1")
        self.coverage = coverage_percent / 100

    def end_test(self, history, model):
        """ Stops test case when defined number of test steps are executed """
        all_steps = model.all_steps
        steps_used = 0
        for step in all_steps:
            if history.current_test_case.get_step_count(step) > 0:
                steps_used += 1
        current_coverage = steps_used / len(all_steps)
        return current_coverage >= self.coverage

    def end_suite(self, history, model):
        """ Stops test suite when defined number of test cases are executed """
        all_steps = model.all_steps
        steps_used = 0
        for step in all_steps:
            if history.get_step_count(step) > 0:
                steps_used += 1
        current_coverage = steps_used / len(all_steps)
        return current_coverage >= self.coverage
mit
Python
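The fix above matters when model.all_steps is a lazy iterator: a generator supports neither len() nor a second pass, and end_test needs both. A minimal illustration:

def steps():
    yield 'a'
    yield 'b'

gen = steps()
assert sum(1 for _ in gen) == 2  # counting consumes the generator...
assert list(gen) == []           # ...so a second pass sees nothing

all_steps = list(steps())        # materialise once instead
assert len(all_steps) == 2       # len() also works on the list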
544782d2da7fa918133c70ea4e9d0ffe918dcdb4
Fix documentation for eval and change default display to False
jakelever/kindred,jakelever/kindred
kindred/evalFunctions.py
kindred/evalFunctions.py
import kindred

from collections import Counter

def evaluate(goldCorpus,testCorpus,metric='f1score',display=False):
    """
    Compares the gold corpus with the test corpus and calculate appropriate metrics.

    :param goldCorpus: The gold standard set of data
    :param testCorpus: The test set for comparison
    :param metric: Which metric to use (precision/recall/f1score). 'all' will provide all three as a tuple
    :param display: Whether to print (to stdout) specific statistics for each relation type
    :type goldCorpus: kindred.Corpus
    :type testCorpus: kindred.Corpus
    :type metric: str
    :type display: bool
    :return: The value of the corresponding metric (or metrics)
    :rtype: float (or tuple of floats)
    """

    assert isinstance(goldCorpus,kindred.Corpus)
    assert isinstance(testCorpus,kindred.Corpus)

    TPs,FPs,FNs = Counter(),Counter(),Counter()

    #goldTuples = [ ]
    #for doc in goldCorpus.documents:
    #    relTuples = [ (r.relationType,tuple(r.entityIDs)) for r in doc.getRelations() ]
    #    goldTuples += relTuples

    goldTuples = [ (r.relationType,tuple(r.entityIDs)) for r in goldCorpus.getRelations() ]
    testTuples = [ (r.relationType,tuple(r.entityIDs)) for r in testCorpus.getRelations() ]

    totalSet = set(goldTuples + testTuples)
    for relation in totalSet:
        inGold = relation in goldTuples
        inTest = relation in testTuples

        relType = relation[0]

        if inGold and inTest:
            TPs[relType] += 1
        elif inGold:
            FNs[relType] += 1
        elif inTest:
            FPs[relType] += 1

    sortedRelTypes = sorted( list(set( [relation[0] for relation in totalSet] )))

    maxLen = max( [len(rt) for rt in sortedRelTypes ] )
    formatString = '%-' + str(maxLen) + 's\tTP:%d FP:%d FN:%d\tP:%f R:%f F1:%f'

    for relType in sortedRelTypes:
        TP,FP,FN = TPs[relType],FPs[relType],FNs[relType]
        precision = 0.0 if (TP+FP) == 0 else TP / float(TP+FP)
        recall = 0.0 if (TP+FN) == 0 else TP / float(TP+FN)
        f1score = 0.0 if precision==0 or recall == 0 else 2 * (precision*recall) / (precision+recall)
        if display:
            print(formatString % (relType,TP,FP,FN,precision,recall,f1score))

    TP,FP,FN = sum(TPs.values()),sum(FPs.values()),sum(FNs.values())
    precision = 0.0 if (TP+FP) == 0 else TP / float(TP+FP)
    recall = 0.0 if (TP+FN) == 0 else TP / float(TP+FN)
    f1score = 0.0 if precision==0 or recall == 0 else 2 * (precision*recall) / (precision+recall)

    if display:
        print("-"*50)
        print(formatString % ("All",TP,FP,FN,precision,recall,f1score))

    if metric == 'f1score':
        return f1score
    elif metric == 'precision':
        return precision
    elif metric == 'recall':
        return recall
    elif metric == 'all':
        return precision,recall,f1score
    else:
        raise RuntimeError('Unknown metric: %s' % metric)
import kindred

from collections import Counter

def evaluate(goldCorpus,testCorpus,metric='f1score',display=True):
    """
    Compares the gold corpus with the test corpus and calculate appropriate metrics.

    :param goldCorpus: The gold standard set of data
    :type goldCorpus: kindred.Corpus
    :param testCorpus: The test set for comparison
    :type testCorpus: kindred.Corpus
    :param metric: Which metric to use (precision/recall/f1score)
    :type metric: str
    :return: The value of the corresponding metric
    :rtype: float
    """

    assert isinstance(goldCorpus,kindred.Corpus)
    assert isinstance(testCorpus,kindred.Corpus)

    TPs,FPs,FNs = Counter(),Counter(),Counter()

    #goldTuples = [ ]
    #for doc in goldCorpus.documents:
    #    relTuples = [ (r.relationType,tuple(r.entityIDs)) for r in doc.getRelations() ]
    #    goldTuples += relTuples

    goldTuples = [ (r.relationType,tuple(r.entityIDs)) for r in goldCorpus.getRelations() ]
    testTuples = [ (r.relationType,tuple(r.entityIDs)) for r in testCorpus.getRelations() ]

    totalSet = set(goldTuples + testTuples)
    for relation in totalSet:
        inGold = relation in goldTuples
        inTest = relation in testTuples

        relType = relation[0]

        if inGold and inTest:
            TPs[relType] += 1
        elif inGold:
            FNs[relType] += 1
        elif inTest:
            FPs[relType] += 1

    sortedRelTypes = sorted( list(set( [relation[0] for relation in totalSet] )))

    maxLen = max( [len(rt) for rt in sortedRelTypes ] )
    formatString = '%-' + str(maxLen) + 's\tTP:%d FP:%d FN:%d\tP:%f R:%f F1:%f'

    for relType in sortedRelTypes:
        TP,FP,FN = TPs[relType],FPs[relType],FNs[relType]
        precision = 0.0 if (TP+FP) == 0 else TP / float(TP+FP)
        recall = 0.0 if (TP+FN) == 0 else TP / float(TP+FN)
        f1score = 0.0 if precision==0 or recall == 0 else 2 * (precision*recall) / (precision+recall)
        if display:
            print(formatString % (relType,TP,FP,FN,precision,recall,f1score))

    TP,FP,FN = sum(TPs.values()),sum(FPs.values()),sum(FNs.values())
    precision = 0.0 if (TP+FP) == 0 else TP / float(TP+FP)
    recall = 0.0 if (TP+FN) == 0 else TP / float(TP+FN)
    f1score = 0.0 if precision==0 or recall == 0 else 2 * (precision*recall) / (precision+recall)

    if display:
        print("-"*50)
        print(formatString % ("All",TP,FP,FN,precision,recall,f1score))

    if metric == 'f1score':
        return f1score
    elif metric == 'precision':
        return precision
    elif metric == 'recall':
        return recall
    elif metric == 'all':
        return precision,recall,f1score
    else:
        raise RuntimeError('Unknown metric: %s' % metric)
mit
Python
95768a09d0bf7f6f3576fc28568c3b7897467541
Add license
tensorflow/cloud,tensorflow/cloud
src/python/tensorflow_cloud/core/tests/examples/multi_file_example/scale_model.py
src/python/tensorflow_cloud/core/tests/examples/multi_file_example/scale_model.py
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tensorflow_cloud as tfc

tfc.run(
    entry_point="train_model.py",
    requirements_txt="requirements.txt",
    stream_logs=True,
)
import tensorflow_cloud as tfc

tfc.run(
    entry_point="train_model.py",
    requirements_txt="requirements.txt",
    stream_logs=True,
)
apache-2.0
Python
6c2e00084fb11a6d37d55fed247d2e7b6a373823
Fix dependencies.
OCA/partner-contact,BT-rmartin/partner-contact,OCA/partner-contact,BT-rmartin/partner-contact
partner_contact_nationality/__openerp__.py
partner_contact_nationality/__openerp__.py
# -*- encoding: utf-8 -*-

# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{
    "name": "Contact's nationality",
    "version": "1.0.0",
    "author": "Odoo Community Association (OCA)",
    "category": "Customer Relationship Management",
    "website": "https://odoo-community.org/",
    "depends": [
        "partner_contact_base",
    ],
    "data": [
        "views/res_partner.xml",
    ],
}
# -*- encoding: utf-8 -*-

# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{
    "name": "Contact's nationality",
    "version": "1.0.0",
    "author": "Odoo Community Association (OCA)",
    "category": "Customer Relationship Management",
    "website": "https://odoo-community.org/",
    "depends": [
        "base"
    ],
    "data": [
        "views/res_partner.xml",
    ],
}
agpl-3.0
Python
86d604f69ac0e42fb05ec84c3b20da03c7d7d109
Fix lint error
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
scripts/download_oai_harvest.py
scripts/download_oai_harvest.py
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Standalone script for downloading the OAI-PMH for Calm.

The final output is dumped into a JSON file ``calm_records.json``,
which can be useful for doing bulk analysis of the Calm data.
"""

import collections
import json
import re
from urllib.parse import unquote

import requests

OAI_URL = 'http://archives.wellcomelibrary.org/oai/OAI.aspx'

RESUMPTION_TOKEN_RE = re.compile(
    r'<resumptionToken[^>]*>(?P<token>[^<]+)</resumptionToken>'
)

STREAM_PARSER_RE = re.compile(
    r'<(?P<name>[A-Za-z0-9]+) urlencoded=\"(?P<value>[^\"]*)\"/?>'
)

def fetch_calm_records():
    params = {
        'verb': 'ListRecords',
        'metadataPrefix': 'calm_xml'
    }

    while True:
        r = requests.get(OAI_URL, params=params)

        # We can't parse the Calm "XML" with an XML parser, because it isn't
        # actually valid XML.  Instead the values are URL-encoded as an
        # attribute on an XML-like tag, so we unpick those with a regex
        # and store the values that way.
        records = r.text.split('</record>')
        records.pop()
        for rec in records:
            d = collections.defaultdict(list)
            for m in STREAM_PARSER_RE.finditer(rec):
                d[m.group('name')].append(unquote(m.group('value')))
            yield dict(d)

        # Results from the OAI harvests are paginated, to prevent records
        # changing order under our feet.  The presence of a `resumptionToken`
        # tells us how to access the next page.
        try:
            params['resumptionToken'] = RESUMPTION_TOKEN_RE.search(r.text).group('token')
        except Exception:
            raise StopIteration
        if 'resumptionToken' in params and 'metadataPrefix' in params:
            del params['metadataPrefix']

all_records = []
for r in fetch_calm_records():
    all_records.append(r)
    if len(all_records) % 1000 == 0:
        print(f'{len(all_records)}...')

json.dump(
    all_records,
    open('calm_records.json', 'w'),
    indent=2,
    sort_keys=True
)
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Standalone script for downloading the OAI-PMH for Calm.

The final output is dumped into a JSON file ``calm_records.json``,
which can be useful for doing bulk analysis of the Calm data.
"""

import collections
import json
import re
from urllib.parse import unquote

import requests

OAI_URL = 'http://archives.wellcomelibrary.org/oai/OAI.aspx'

RESUMPTION_TOKEN_RE = re.compile(
    r'<resumptionToken[^>]*>(?P<token>[^<]+)</resumptionToken>'
)

STREAM_PARSER_RE = re.compile(
    r'<(?P<name>[A-Za-z0-9]+) urlencoded=\"(?P<value>[^\"]*)\"/?>'
)

def fetch_calm_records():
    params = {
        'verb': 'ListRecords',
        'metadataPrefix': 'calm_xml'
    }

    while True:
        r = requests.get(OAI_URL, params=params)

        # We can't parse the Calm "XML" with an XML parser, because it isn't
        # actually valid XML.  Instead the values are URL-encoded as an
        # attribute on an XML-like tag, so we unpick those with a regex
        # and store the values that way.
        records = r.text.split('</record>')
        records.pop()
        for rec in records:
            d = collections.defaultdict(list)
            for m in STREAM_PARSER_RE.finditer(rec):
                d[m.group('name')].append(unquote(m.group('value')))
            yield dict(d)

        # Results from the OAI harvests are paginated, to prevent records
        # changing order under our feet.  The presence of a `resumptionToken`
        # tells us how to access the next page.
        try:
            params['resumptionToken'] = RESUMPTION_TOKEN_RE.search(r.text).group('token')
        except Exception as e:
            raise StopIteration
        if 'resumptionToken' in params and 'metadataPrefix' in params:
            del params['metadataPrefix']

all_records = []
for r in fetch_calm_records():
    all_records.append(r)
    if len(all_records) % 1000 == 0:
        print(f'{len(d)}...')

json.dump(
    all_records,
    open('calm_records.json', 'w'),
    indent=2,
    sort_keys=True
)
mit
Python
96b3904e64617d6c04b0e44506482cd264e28132
use common method for permission checking
geometalab/osmaxx,geometalab/drf-utm-zone-info,geometalab/drf-utm-zone-info,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx
osmaxx-py/osmaxx/contrib/auth/frontend_permissions.py
osmaxx-py/osmaxx/contrib/auth/frontend_permissions.py
from django.conf import settings
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator

from rest_framework import permissions

FRONTEND_USER_GROUP = settings.OSMAXX_FRONTEND_USER_GROUP

def _may_user_access_osmaxx_frontend(user):
    """
    Actual test to check if the user is in the frontend user group,
    to give access or deny it.

    Note: Admins have superpowers.
    """
    return user.has_perm('excerptexport.add_extractionorder')

def _may_user_access_this_excerpt(user, excerpt):
    return excerpt.is_public or excerpt.owner == user

def frontend_access_required(function=None):
    """
    Decorator for views that checks that the user has the correct access rights,
    redirecting to the information page if necessary.
    """
    access_denied_info_url = reverse_lazy('excerptexport:access_denied')
    actual_decorator = user_passes_test(
        _may_user_access_osmaxx_frontend,
        login_url=access_denied_info_url
    )
    if function:
        return actual_decorator(function)
    return actual_decorator

class LoginRequiredMixin(object):
    """
    Login required Mixin for Class Based Views.
    """
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)

class FrontendAccessRequiredMixin(object):
    """
    Frontend Access Check Mixin for Class Based Views.
    """
    @method_decorator(frontend_access_required)
    def dispatch(self, *args, **kwargs):
        return super(FrontendAccessRequiredMixin, self).dispatch(*args, **kwargs)

class AuthenticatedAndAccessPermission(permissions.BasePermission):
    """
    Allows access only to authenticated users with frontend permissions.
    """

    def has_permission(self, request, view):
        return request.user.is_authenticated() and _may_user_access_osmaxx_frontend(request.user)

class HasBBoxAccessPermission(permissions.BasePermission):
    message = 'Accessing this bounding box is not allowed.'

    def has_object_permission(self, request, view, obj):
        return _may_user_access_this_excerpt(request.user, obj.excerpt)

class HasExcerptAccessPermission(permissions.BasePermission):
    message = 'Accessing this excerpt is not allowed.'

    def has_object_permission(self, request, view, obj):
        return _may_user_access_this_excerpt(request.user, obj)
from django.conf import settings
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator

from rest_framework import permissions

FRONTEND_USER_GROUP = settings.OSMAXX_FRONTEND_USER_GROUP

def frontend_access_required(function=None):
    """
    Decorator for views that checks that the user has the correct access rights,
    redirecting to the information page if necessary.
    """
    access_denied_info_url = reverse_lazy('excerptexport:access_denied')
    actual_decorator = user_passes_test(
        _may_user_access_osmaxx_frontend,
        login_url=access_denied_info_url
    )
    if function:
        return actual_decorator(function)
    return actual_decorator

def _may_user_access_osmaxx_frontend(user):
    """
    Actual test to check if the user is in the frontend user group,
    to give access or deny it.

    Note: Admins have superpowers.
    """
    return user.has_perm('excerptexport.add_extractionorder')

class LoginRequiredMixin(object):
    """
    Login required Mixin for Class Based Views.
    """
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)

class FrontendAccessRequiredMixin(object):
    """
    Frontend Access Check Mixin for Class Based Views.
    """
    @method_decorator(frontend_access_required)
    def dispatch(self, *args, **kwargs):
        return super(FrontendAccessRequiredMixin, self).dispatch(*args, **kwargs)

class AuthenticatedAndAccessPermission(permissions.BasePermission):
    """
    Allows access only to authenticated users with frontend permissions.
    """

    def has_permission(self, request, view):
        return request.user.is_authenticated() and _may_user_access_osmaxx_frontend(request.user)

class HasBBoxAccessPermission(permissions.BasePermission):
    message = 'Accessing this bounding box is not allowed.'

    def has_object_permission(self, request, view, obj):
        return obj.excerpt.is_public or obj.excerpt.owner == request.user

class HasExcerptAccessPermission(permissions.BasePermission):
    message = 'Accessing this excerpt is not allowed.'

    def has_object_permission(self, request, view, obj):
        return obj.is_public or obj.owner == request.user
mit
Python
e38e0b61b74316a171d49fa9390ecc736408694d
Simplify nanomsg sample
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
samples/nanomsg/hello_world_asyncio.py
samples/nanomsg/hello_world_asyncio.py
import asyncio
import sys

import nanomsg as nn
from nanomsg.asyncio import Socket

async def ping(url, ack):
    with Socket(protocol=nn.NN_PUSH) as sock, sock.connect(url):
        await sock.send(b'Hello, World!')
        # Shutdown the endpoint after the other side ack'ed; otherwise
        # the message could be lost.
        await ack.wait()

async def pong(url, ack):
    with Socket(protocol=nn.NN_PULL) as sock, sock.bind(url):
        message = await sock.recv()
        print(bytes(message.as_memoryview()).decode('ascii'))
        ack.set()

def main():
    url = 'inproc://test'
    print('Play asynchronous ping-pong on %s' % url)

    loop = asyncio.get_event_loop()
    ack = asyncio.Event()
    loop.run_until_complete(asyncio.wait([
        asyncio.ensure_future(ping(url, ack)),
        asyncio.ensure_future(pong(url, ack)),
    ]))
    loop.close()

    return 0

if __name__ == '__main__':
    sys.exit(main())
import asyncio
import sys

import nanomsg as nn
from nanomsg.asyncio import Socket

async def ping(url, barrier):
    with Socket(protocol=nn.NN_PUSH) as sock, sock.connect(url):
        await sock.send(b'Hello, World!')
        # Shutdown the endpoint after the other side ack'ed; otherwise
        # the message could be lost.
        await barrier.wait()

async def pong(url, barrier):
    with Socket(protocol=nn.NN_PULL) as sock, sock.bind(url):
        message = await sock.recv()
        print(bytes(message.as_memoryview()).decode('ascii'))
        await barrier.wait()

async def close_loop(barrier):
    await barrier.wait()
    asyncio.get_event_loop().stop()

class Barrier:

    def __init__(self, parties, *, loop=None):
        self.parties = parties
        self._cond = asyncio.Condition(loop=loop)

    async def wait(self):
        await self._cond.acquire()
        try:
            assert self.parties > 0
            self.parties -= 1
            if self.parties > 0:
                await self._cond.wait()
            else:
                self._cond.notify_all()
            assert self.parties == 0
        finally:
            self._cond.release()

def main():
    barrier = Barrier(3)

    url = 'inproc://test'
    print('Play asynchronous ping-pong on %s' % url)

    asyncio.ensure_future(ping(url, barrier))
    asyncio.ensure_future(pong(url, barrier))
    asyncio.ensure_future(close_loop(barrier))

    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    finally:
        loop.close()

    return 0

if __name__ == '__main__':
    sys.exit(main())
mit
Python
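The simplification above replaces the hand-rolled Barrier with asyncio.Event: the receiver calls set() once, every waiter resumes, and run_until_complete removes the need for a task that stops the loop. A standard-library-only sketch of that handshake, in the same pre-3.10 event-loop style as the sample:

import asyncio

async def sender(ack):
    print('sent')
    await ack.wait()  # block until the receiver acknowledges

async def receiver(ack):
    print('received')
    ack.set()         # wake every coroutine waiting on the event

loop = asyncio.get_event_loop()
ack = asyncio.Event()
loop.run_until_complete(asyncio.wait([
    asyncio.ensure_future(sender(ack)),
    asyncio.ensure_future(receiver(ack)),
]))
loop.close()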
f3b6771c43042c599e57d3a26fa678518e12455d
Update jupyterlab/tests/mock_packages/interop/consumer/setup.py
jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab
jupyterlab/tests/mock_packages/interop/consumer/setup.py
jupyterlab/tests/mock_packages/interop/consumer/setup.py
import json
from glob import glob
import os.path as osp

name = 'jlab_mock_consumer'

HERE = osp.abspath(osp.dirname(__file__))

with open(osp.join(HERE, 'package.json')) as fid:
    data = json.load(fid)

from setuptools import setup

js_name = data['name']

setup(name=name,
      version=data['version'],
      py_modules = [name],
      data_files = [
          (f'share/jupyter/labextensions/{js_name}', glob('static/package.json')),
          (f'share/jupyter/labextensions/{js_name}', glob('static/static/*'))
      ])
import json
from glob import glob
import os.path as osp

name = 'jlab_mock_consumer'

HERE = osp.abspath(osp.dirname(__file__))

with open(osp.join(HERE, 'package.json')) as fid:
    data = json.load(fid)

from setuptools import setup

js_name = data['name']

setup(name=name,
      version=data['version'],
      py_modules = [name],
      data_files = [
          (f'share/jupyter/labextensions/{js_name}', glob('static/package.json')),
          (f'share/jupyter/labextensions/{js_name}', glob('static/static/*'))
      ])
bsd-3-clause
Python
7c3d2f8afbc5c6d1dc7c719f97ca93ffb908d6ce
Add tests of tangential velocity.
harpolea/r3d2
test_SR1d.py
test_SR1d.py
import eos_defns import SR1d from numpy.testing import assert_allclose def test_standard_sod(): """ Relativistic Sod test. Numbers are taken from the General Matlab code, so accuracy isn't perfect. """ eos = eos_defns.eos_gamma_law(5.0/3.0) w_left = SR1d.State(1.0, 0.0, 0.0, 1.5, eos, label="L") w_right = SR1d.State(0.125, 0.0, 0.0, 1.2, eos, label="R") rp = SR1d.RP(w_left, w_right) p_star_matlab = 0.308909954203586 assert_allclose(rp.p_star, p_star_matlab, rtol=1e-6) rarefaction_speeds_matlab = [-0.690065559342354, -0.277995552140227] assert_allclose(rp.waves[0].wave_speed, rarefaction_speeds_matlab, rtol=1e-6) shock_speed_matlab = 0.818591417744604 assert_allclose(rp.waves[2].wave_speed, shock_speed_matlab, rtol=1e-6) def test_bench_3(): """ Test Bench problem 3. Take from Marti & Muller's Living Review (section 6.3). See http://computastrophys.livingreviews.org/Articles/lrca-2015-3 Left and right states have been flipped so it complements the above Sod test. """ eos = eos_defns.eos_gamma_law(5.0/3.0) w_left = SR1d.State(1.0, 0.0, 0.99, 0.015, eos, label="L") w_right = SR1d.State(1.0, 0.0, 0.0, 15000, eos, label="R") rp = SR1d.RP(w_left, w_right) v_shock_ref = 0.927006 v_contact_ref = 0.766706 assert_allclose(rp.waves[0].wave_speed, v_shock_ref, rtol=1e-6) assert_allclose(rp.waves[1].wave_speed, v_contact_ref, rtol=1e-6) def test_bench_4(): """ Test Bench problem 4. Take from Marti & Muller's Living Review (section 6.3). See http://computastrophys.livingreviews.org/Articles/lrca-2015-3 Left and right states have been flipped so it complements the above Sod test. """ eos = eos_defns.eos_gamma_law(5.0/3.0) w_left = SR1d.State(1.0, 0.0, 0.9, 0.015, eos, label="L") w_right = SR1d.State(1.0, 0.0, 0.9, 15000, eos, label="R") rp = SR1d.RP(w_left, w_right) v_shock_ref = 0.445008 v_contact_ref = 0.319371 assert_allclose(rp.waves[0].wave_speed, v_shock_ref, rtol=1e-6) assert_allclose(rp.waves[1].wave_speed, v_contact_ref, rtol=1e-6)
import eos_defns import SR1d from numpy.testing import assert_allclose def test_standard_sod(): """ Relativistic Sod test. Numbers are taken from the General Matlab code, so accuracy isn't perfect. """ eos = eos_defns.eos_gamma_law(5.0/3.0) w_left = SR1d.State(1.0, 0.0, 0.0, 1.5, eos, label="L") w_right = SR1d.State(0.125, 0.0, 0.0, 1.2, eos, label="R") rp = SR1d.RP(w_left, w_right) p_star_matlab = 0.308909954203586 assert_allclose(rp.p_star, p_star_matlab, rtol=1e-6) rarefaction_speeds_matlab = [-0.690065559342354, -0.277995552140227] assert_allclose(rp.waves[0].wave_speed, rarefaction_speeds_matlab, rtol=1e-6) shock_speed_matlab = 0.818591417744604 assert_allclose(rp.waves[2].wave_speed, shock_speed_matlab, rtol=1e-6)
mit
Python
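The new tests follow the same pattern as the existing Sod test: compare solver output against externally sourced reference numbers using numpy.testing.assert_allclose with a loose rtol. A self-contained sketch of that pattern, with made-up reference values in place of the Matlab and Living Review numbers:

```python
import numpy as np
from numpy.testing import assert_allclose

# Made-up reference values standing in for the published numbers above.
reference_speeds = np.array([-0.690065559, -0.277995552])
computed_speeds = reference_speeds * (1 + 1e-8)  # pretend solver output

# rtol=1e-6 tolerates the limited precision of external references.
assert_allclose(computed_speeds, reference_speeds, rtol=1e-6)
print("regression check passed")
```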
1786702388abc4fe737ee73d64ef5864f42f0c3d
Fix missing offset for Query
SkygearIO/chat,SkygearIO/chat
chat/query.py
chat/query.py
# Copyright 2017 Oursky Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .predicate import Predicate class Query: def __init__(self, record_type, predicate=None, count=False, limit=50, offset=None, include=[]): self.record_type = record_type if predicate is None: predicate = Predicate() self.predicate = predicate self.count = count self.sort = [] self.limit = limit self.offset = offset self.include = include def add_order(self, key, order): self.sort.append([{'$type': 'keypath', '$val': key}, order]) return self
# Copyright 2017 Oursky Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .predicate import Predicate class Query: def __init__(self, record_type, predicate=None, count=False, limit=50, offset=None, include=[]): self.record_type = record_type if predicate is None: predicate = Predicate() self.predicate = predicate self.count = count self.sort = [] self.limit = limit self.offset = None self.include = include def add_order(self, key, order): self.sort.append([{'$type': 'keypath', '$val': key}, order]) return self
apache-2.0
Python
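The bug fixed here is easy to reintroduce: the constructor accepted offset but assigned self.offset = None. A stripped-down stand-in (not the real chat.query.Query) showing the corrected assignment, plus a guard against the shared mutable default that the original include=[] signature still carries:

```python
class Query:
    """Stripped-down stand-in for the real class, showing the fix."""

    def __init__(self, offset=None, include=None):
        self.offset = offset  # the fix: store the argument, not a constant None
        # Avoid a shared mutable default; the original signature uses include=[]
        self.include = include if include is not None else []

q = Query(offset=10)
assert q.offset == 10
assert Query().include is not Query().include  # each instance gets its own list
print("offset stored, defaults isolated")
```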
c637eb216e9dc148a588019d22bc96db3565b3fc
Correct breakpoints
haricot/djangocms-bs4forcascade,haricot/djangocms-bs4forcascade
cmsplugin_bs4forcascade/bootstrap4/settings.py
cmsplugin_bs4forcascade/bootstrap4/settings.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import os from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.translation import ugettext_lazy as _ from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig CASCADE_PLUGINS = getattr(settings, 'BS4_CASCADE_PLUGINS', ['buttons', 'carousel', 'accordion', 'container', 'image', 'picture','card', 'tabs', 'gallery', 'jumbotron'],) if 'cmsplugin_bs4forcascade' in settings.INSTALLED_APPS: CASCADE_PLUGINS.append('secondary_menu') def set_defaults(config): config.setdefault('bootstrap4', {}) config['bootstrap4'].setdefault( 'breakpoints', ( ('xs', (575, 'mobile', _("mobile phones"), 560, 575)), ('sm', (576, 'phablet', _("phablets"), 576, 767)), ('md', (768, 'tablet', _("tablets"), 768, 991)), ('lg', (992, 'laptop', _("laptops"), 992, 1199)), ('xl', (1200, 'desktop', _("large desktops"), 1200, 1980)),)) for tpl in config['bootstrap4']['breakpoints']: if len(tpl[1]) != 5: msg = "The configuration directive CMSPLUGIN_CASCADE['bootstrap4']['bootstrap4']['{}'] requires 5 parameters" raise ImproperlyConfigured(msg.format(tpl[0])) config['bootstrap4'].setdefault('gutter', 30) config['plugins_with_extra_fields'].setdefault('Bootstrap4ButtonPlugin', PluginExtraFieldsConfig()) config['plugins_with_extra_fields'].setdefault('Bootstrap4RowPlugin', PluginExtraFieldsConfig()) config['plugins_with_extra_fields'].setdefault('BootstrapJumbotronPlugin', PluginExtraFieldsConfig( inline_styles={ 'extra_fields:Paddings': ['margin-top', 'margin-bottom', 'padding-top', 'padding-bottom'], 'extra_units:Paddings': 'px,em' } )) config['plugins_with_extra_render_templates'].setdefault('BootstrapSecondaryMenuPlugin', ( ('cascade/bootstrap4/secmenu-list-group.html', _("List Group")), ('cascade/bootstrap4/secmenu-unstyled-list.html', _("Unstyled List")),)) if os.getenv('DJANGO_CLIENT_FRAMEWORK', '').startswith('angular'): config['bootstrap4']['template_basedir'] = 'angular-ui'
# -*- coding: utf-8 -*- from __future__ import unicode_literals import os from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.translation import ugettext_lazy as _ from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig CASCADE_PLUGINS = getattr(settings, 'BS4_CASCADE_PLUGINS', ['buttons', 'carousel', 'accordion', 'container', 'image', 'picture','card', 'tabs', 'gallery', 'jumbotron'],) if 'cmsplugin_bs4forcascade' in settings.INSTALLED_APPS: CASCADE_PLUGINS.append('secondary_menu') def set_defaults(config): config.setdefault('bootstrap4', {}) config['bootstrap4'].setdefault( 'breakpoints', ( ('xs', (0, 'mobile', _("mobile phones"), 0, 542)), ('sm', (576, 'phablet', _("phablets"), 544, 767)), ('md', (768, 'tablet', _("tablets"), 768, 991)), ('lg', (992, 'laptop', _("laptops"), 992, 1199)), ('xl', (1200, 'desktop', _("large desktops"), 1200, 1980)),)) for tpl in config['bootstrap4']['breakpoints']: if len(tpl[1]) != 5: msg = "The configuration directive CMSPLUGIN_CASCADE['bootstrap4']['bootstrap4']['{}'] requires 5 parameters" raise ImproperlyConfigured(msg.format(tpl[0])) config['bootstrap4'].setdefault('gutter', 30) config['plugins_with_extra_fields'].setdefault('Bootstrap4ButtonPlugin', PluginExtraFieldsConfig()) config['plugins_with_extra_fields'].setdefault('Bootstrap4RowPlugin', PluginExtraFieldsConfig()) config['plugins_with_extra_fields'].setdefault('BootstrapJumbotronPlugin', PluginExtraFieldsConfig( inline_styles={ 'extra_fields:Paddings': ['margin-top', 'margin-bottom', 'padding-top', 'padding-bottom'], 'extra_units:Paddings': 'px,em' } )) config['plugins_with_extra_render_templates'].setdefault('BootstrapSecondaryMenuPlugin', ( ('cascade/bootstrap4/secmenu-list-group.html', _("List Group")), ('cascade/bootstrap4/secmenu-unstyled-list.html', _("Unstyled List")),)) if os.getenv('DJANGO_CLIENT_FRAMEWORK', '').startswith('angular'): config['bootstrap4']['template_basedir'] = 'angular-ui'
mit
Python
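The pattern in set_defaults is setdefault-based configuration with an explicit shape check that fails early. A minimal sketch outside Django, raising a plain ValueError where the plugin raises ImproperlyConfigured; the breakpoint tuples here are abbreviated stand-ins:

```python
config = {}
config.setdefault('bootstrap4', {})
config['bootstrap4'].setdefault('breakpoints', (
    ('xs', (575, 'mobile', "mobile phones", 560, 575)),
    ('sm', (576, 'phablet', "phablets", 576, 767)),
))

for code, params in config['bootstrap4']['breakpoints']:
    # Same 5-parameter shape check the plugin enforces, minus Django.
    if len(params) != 5:
        raise ValueError(
            "breakpoint {!r} requires 5 parameters, got {}".format(code, len(params)))
print("breakpoint config validated")
```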
d11478f1ad2d6caf16aeff087f2399297eec83d2
Improve qrcode generation, add proper error message to generate_uri assertion about secret length
keybar/keybar
src/keybar/utils/totp.py
src/keybar/utils/totp.py
import io import urllib import time from base64 import b32encode from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.twofactor.totp import TOTP from cryptography.hazmat.primitives.hashes import SHA1 from django.http import HttpResponse from django.utils.encoding import force_bytes from qrcode import QRCode from qrcode.constants import ERROR_CORRECT_H BASE_URI = 'otpauth://{key_type}/{issuer}:{user}?secret={secret}&issuer={issuer}' def generate_qr_code_response(request): user = request.user qrcode = QRCode( error_correction=ERROR_CORRECT_H, box_size=6, border=4 ) uri = generate_uri('totp', bytes(user.totp_secret), user.email, 'keybar') qrcode.add_data(uri) qrcode.make(fit=True) img = qrcode.make_image() stream = io.BytesIO() img.save(stream) return HttpResponse(stream.getvalue(), content_type='image/png') def generate_uri(key_type, secret, user, issuer): """Generate a URI suitable for Google Authenticator. See: https://code.google.com/p/google-authenticator/wiki/KeyUriFormat """ # Google Authenticator breaks if the b32 encoded string contains a padding # thus force the key to be divisible by 5 octets so that we don't have any # padding markers. assert len(secret) % 5 == 0, 'secret not divisible by 5' return BASE_URI.format(**{ 'key_type': urllib.parse.quote(key_type), 'issuer': urllib.parse.quote(issuer), 'user': urllib.parse.quote(user), 'secret': urllib.parse.quote(b32encode(secret)), }) def verify_totp_code(user, code): totp = TOTP(bytes(user.totp_secret), 6, SHA1(), 30, backend=default_backend()) return totp.verify(force_bytes(code), time.time())
import io import urllib import time from base64 import b32encode from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.twofactor.totp import TOTP from cryptography.hazmat.primitives.hashes import SHA1 from django.http import HttpResponse from django.utils.encoding import force_bytes from qrcode import QRCode BASE_URI = 'otpauth://{key_type}/{issuer}:{user}?secret={secret}&issuer={issuer}' def generate_qr_code_response(request): user = request.user qrcode = QRCode() uri = generate_uri('totp', bytes(user.totp_secret), user.email, 'keybar') print(uri) qrcode.add_data(uri) qrcode.make(fit=True) img = qrcode.make_image() stream = io.BytesIO() img.save(stream) return HttpResponse(stream.getvalue(), content_type='image/png') def generate_uri(key_type, secret, user, issuer): """Generate a URI suitable for Google Authenticator. See: https://code.google.com/p/google-authenticator/wiki/KeyUriFormat """ # Google Authenticator breaks if the b32 encoded string contains a padding # thus force the key to be divisible by 5 octets so that we don't have any # padding markers. assert len(secret) % 5 == 0 return BASE_URI.format(**{ 'key_type': urllib.parse.quote(key_type), 'issuer': urllib.parse.quote(issuer), 'user': urllib.parse.quote(user), 'secret': urllib.parse.quote(b32encode(secret)), }) def verify_totp_code(user, code): totp = TOTP(bytes(user.totp_secret), 6, SHA1(), 30, backend=default_backend()) return totp.verify(force_bytes(code), time.time())
bsd-3-clause
Python
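The assertion message added here guards the base32 property the code comment describes: every 5 input bytes encode to exactly 8 base32 characters, so secrets whose length is a multiple of 5 produce no '=' padding for Google Authenticator to choke on. A quick demonstration:

```python
from base64 import b32encode

# 5 input bytes encode to exactly 8 base32 characters, so lengths that are
# multiples of 5 yield no '=' padding; other lengths do.
for n in (8, 10, 20):
    encoded = b32encode(b'\x00' * n)
    print(n, encoded, 'padded' if b'=' in encoded else 'no padding')
```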
0a1700b64a2e496217dd0531ebe8326410fd6cdc
Update yamldumper.py
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
salt/utils/yamldumper.py
salt/utils/yamldumper.py
# -*- coding: utf-8 -*- ''' salt.utils.yamldumper ~~~~~~~~~~~~~~~~~~~~~ ''' from __future__ import absolute_import try: from yaml import CDumper as Dumper except ImportError: from yaml import Dumper from salt.utils.odict import OrderedDict class OrderedDumper(Dumper): ''' A YAML dumper that represents python OrderedDict as simple YAML map. ''' pass def represent_ordereddict(dumper, data): return dumper.represent_dict(data.items()) OrderedDumper.add_representer(OrderedDict, represent_ordereddict)
# -*- coding: utf-8 -*- ''' salt.utils.yamldumper ~~~~~~~~~~~~~~~~~~~~~ ''' from __future__ import absolute_import try: from yaml import CDumper as Dumper except ImportError: from yaml import CDumper as Dumper from salt.utils.odict import OrderedDict class OrderedDumper(Dumper): ''' A YAML dumper that represents python OrderedDict as simple YAML map. ''' pass def represent_ordereddict(dumper, data): return dumper.represent_dict(data.items()) OrderedDumper.add_representer(OrderedDict, represent_ordereddict)
apache-2.0
Python
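The fix restores the usual optional C-extension import: try the accelerated CDumper, fall back to the pure-Python Dumper (the old except block re-imported CDumper, which would simply raise again). The same idiom in isolation, assuming PyYAML is installed:

```python
try:
    from yaml import CDumper as Dumper   # C-accelerated, if libyaml is present
except ImportError:
    from yaml import Dumper              # pure-Python fallback

import yaml
print(yaml.dump({'a': 1}, Dumper=Dumper))
```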
91f54451fd149506abe57e31f45bc841f9e031ca
Fix unstoppable streaming
benbroce3/PiCamServer,benbroce3/PiCamServer,benbroce3/PiCamServer,benbroce3/PiCamServer
camerav4.py
camerav4.py
import picamera from picamera import PiCamera import time from datetime import datetime import os.path from subprocess32 import Popen print "\nSecurity Camera Logger v4 | Ben Broce & William Hampton\n" print "Streams video to rtsp://pi-ip:8554/ | Captures to pics/[timestamp].jpg" print "Ctrl-C quits.\n" stream = raw_input("Should I stream video or take pictures (v/p)? ") preview = raw_input("Should I display video preview on Pi (y/n)? ") print "Running..." #http://www.raspberry-projects.com/pi/pi-hardware/raspberry-pi-camera/streaming-video-using-vlc-player #http://www.diveintopython.net/scripts_and_streams/stdin_stdout_stderr.html #Ouput video (record) => stream => stdout => | => cvlc livestream => browser if (stream == "v" or stream == "V"): try: Popen(["./livestream.sh"]) finally: print "\n\nExiting..." Popen.terminate() elif (stream == "p" or stream == "P"): length = float(raw_input("How long should I run (in minutes): "))*60 interval = float(raw_input("How often should I take a picture (in seconds): ")) camera = PiCamera() camera.annotate_background = picamera.Color('black') camera.rotation = 180 camera.resolution = (640, 480) counter = 0 try: if (preview == "y" or preview == "Y"): camera.start_preview() while (counter <= length): timestamp = datetime.now().strftime("%m-%d-%Y_%H:%M:%S") camera.annotate_text = timestamp path = 'pics/' + timestamp + '.jpg' camera.capture(path, use_video_port=True) time.sleep(interval) counter += interval finally: print "Exiting..." if (preview == "y" or preview == "Y"): camera.stop_preview() else: print "Invalid input!"
import picamera from picamera import PiCamera import time from datetime import datetime import os.path from subprocess32 import Popen print "\nSecurity Camera Logger v4 | Ben Broce & William Hampton\n" print "Streams video to rtsp://pi-ip:8554/ | Captures to pics/[timestamp].jpg" print "Ctrl-C quits.\n" stream = raw_input("Should I stream video or take pictures (v/p)? ") print "Running..." #http://www.raspberry-projects.com/pi/pi-hardware/raspberry-pi-camera/streaming-video-using-vlc-player #http://www.diveintopython.net/scripts_and_streams/stdin_stdout_stderr.html #Ouput video (record) => stream => stdout => | => cvlc livestream => browser if (stream == "v" or stream == "V"): Popen(["./livestream.sh"]) elif (stream == "p" or stream == "P"): length = float(raw_input("How long should I run (in minutes): "))*60 interval = float(raw_input("How often should I take a picture (in seconds): ")) camera = PiCamera() camera.annotate_background = picamera.Color('black') camera.rotation = 180 camera.resolution = (640, 480) counter = 0 try: camera.start_preview() while (counter <= length): timestamp = datetime.now().strftime("%m-%d-%Y_%H:%M:%S") camera.annotate_text = timestamp path = 'pics/' + timestamp + '.jpg' camera.capture(path, use_video_port=True) time.sleep(interval) counter += interval finally: print "Exiting..." camera.stop_preview() else: print "Invalid input!"
mit
Python
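One hazard remains in the new contents: the finally block calls Popen.terminate() on the class rather than on a process instance, which raises a TypeError instead of stopping the stream. A sketch of instance-based cleanup, using the stdlib subprocess module in place of subprocess32:

```python
import sys
from subprocess import Popen  # stdlib stand-in for subprocess32

# Keep a reference to the process so cleanup acts on the instance.
proc = Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
try:
    pass  # the streaming work would happen here
finally:
    proc.terminate()  # instance method, unlike the bare Popen.terminate()
    proc.wait()
print("stream process terminated cleanly")
```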
643b8e034f6bdcc2d863f0dda99fa91b1eecb54c
Update __init__.py
dfm/corner.py
corner/__init__.py
corner/__init__.py
# -*- coding: utf-8 -*- __version__ = "2.0.2.dev0" __author__ = "Dan Foreman-Mackey ([email protected])" __copyright__ = "Copyright 2013-2016 Daniel Foreman-Mackey and contributors" __contributors__ = [ # Alphabetical by first name. "Adrian Price-Whelan @adrn", "Brendon Brewer @eggplantbren", "Brigitta Sipocz @bsipocz", "Ekta Patel @ekta1224", "Emily Rice @emilurice", "Geoff Ryan @geoffryan", "Guillaume @ceyzeriat", "Gregory Ashton @ga7g08", "Hanno Rein @hannorein", "Jeremy Heyl @jsheyl", "Kelle Cruz @kelle", "Kyle Barbary @kbarbary", "Marco Tazzari @mtazzari", "Matt Pitkin @mattpitkin", "Phil Marshall @drphilmarshall", "Pierre Gratier @pirg", "Stephan Hoyer @shoyer", "Víctor Zabalza @zblz", "Will Vousden @willvousden", "Wolfgang Kerzendorf @wkerzendorf", ] __bibtex__ = """@article{corner, Author = {Daniel Foreman-Mackey}, Doi = {10.21105/joss.00024}, Title = {corner.py: Scatterplot matrices in Python}, Journal = {The Journal of Open Source Software}, Year = 2016, Volume = 24, Url = {http://dx.doi.org/10.5281/zenodo.45906} }""" try: __CORNER_SETUP__ except NameError: __CORNER_SETUP__ = False if not __CORNER_SETUP__: __all__ = ["corner", "hist2d", "quantile"] from .corner import corner, hist2d, quantile
# -*- coding: utf-8 -*- __version__ = "2.0.2.dev0" __author__ = "Dan Foreman-Mackey ([email protected])" __copyright__ = "Copyright 2013-2016 Daniel Foreman-Mackey and contributors" __contributors__ = [ # Alphabetical by first name. "Adrian Price-Whelan @adrn", "Brendon Brewer @eggplantbren", "Brigitta Sipocz @bsipocz", "Ekta Patel @ekta1224", "Emily Rice @emilurice", "Geoff Ryan @geoffryan", "Guillaume @ceyzeriat", "Gregory Ashton @ga7g08", "Hanno Rein @hannorein", "Kelle Cruz @kelle", "Kyle Barbary @kbarbary", "Marco Tazzari @mtazzari", "Matt Pitkin @mattpitkin", "Phil Marshall @drphilmarshall", "Pierre Gratier @pirg", "Stephan Hoyer @shoyer", "Víctor Zabalza @zblz", "Will Vousden @willvousden", "Wolfgang Kerzendorf @wkerzendorf", ] __bibtex__ = """@article{corner, Author = {Daniel Foreman-Mackey}, Doi = {10.21105/joss.00024}, Title = {corner.py: Scatterplot matrices in Python}, Journal = {The Journal of Open Source Software}, Year = 2016, Volume = 24, Url = {http://dx.doi.org/10.5281/zenodo.45906} }""" try: __CORNER_SETUP__ except NameError: __CORNER_SETUP__ = False if not __CORNER_SETUP__: __all__ = ["corner", "hist2d", "quantile"] from .corner import corner, hist2d, quantile
bsd-2-clause
Python
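Beyond the contributor-list edit, the file shows the setup-guard idiom (the same shape as numpy's __NUMPY_SETUP__): setup.py defines a flag before importing the package, so metadata can be read without pulling in heavy submodules. A runnable sketch with a hypothetical __PKG_SETUP__ flag:

```python
try:
    __PKG_SETUP__              # hypothetical flag injected by setup.py
except NameError:
    __PKG_SETUP__ = False

if not __PKG_SETUP__:
    import math                # stand-in for the package's real submodules
    print("full import:", math.pi)
else:
    print("setup-time import: submodules skipped")
```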
14efcc349a3b524345808eaf925399bede34c7c6
make file pep8 compliant
Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client
binstar_client/errors.py
binstar_client/errors.py
from clyent.errors import ClyentError class BinstarError(ClyentError): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) if not hasattr(self, 'message'): self.message = args[0] if args else None class Unauthorized(BinstarError): pass class Conflict(BinstarError): pass class NotFound(BinstarError, IndexError): def __init__(self, *args, **kwargs): BinstarError.__init__(self, *args, **kwargs) IndexError.__init__(self, *args, **kwargs) self.message = args[0] class UserError(BinstarError): pass class ServerError(BinstarError): pass class ShowHelp(BinstarError): pass class NoMetadataError(BinstarError): pass class NotebookNotExist(BinstarError): def __init__(self, notebook): msg = "{} does not exist.".format(notebook) self.notebook = notebook super(BinstarError, self).__init__(msg)
from clyent.errors import ClyentError class BinstarError(ClyentError): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) if not hasattr(self, 'message'): self.message = args[0] if args else None class Unauthorized(BinstarError): pass class Conflict(BinstarError): pass class NotFound(BinstarError, IndexError): def __init__(self, *args, **kwargs): BinstarError.__init__(self, *args, **kwargs) IndexError.__init__(self, *args, **kwargs) self.message = args[0] class UserError(BinstarError): pass class ServerError(BinstarError): pass class ShowHelp(BinstarError): pass class NoMetadataError(BinstarError): pass class NotebookNotExist(BinstarError): def __init__(self, notebook): msg = "{} does not exist.".format(notebook) self.notebook = notebook super(BinstarError, self).__init__(msg)
bsd-3-clause
Python
600fbdaff54206aaed93e775011b5dcfb054b83c
use url() for /apps urls so we can link to them
davehunt/bedrock,bensternthal/bedrock,mermi/bedrock,CSCI-462-01-2017/bedrock,alexgibson/bedrock,jpetto/bedrock,pmclanahan/bedrock,TheJJ100100/bedrock,craigcook/bedrock,hoosteeno/bedrock,marcoscaceres/bedrock,schalkneethling/bedrock,CSCI-462-01-2017/bedrock,jacshfr/mozilla-bedrock,hoosteeno/bedrock,Jobava/bedrock,pascalchevrel/bedrock,pascalchevrel/bedrock,alexgibson/bedrock,glogiotatidis/bedrock,rishiloyola/bedrock,jgmize/bedrock,rishiloyola/bedrock,gerv/bedrock,amjadm61/bedrock,sgarrity/bedrock,TheJJ100100/bedrock,elin-moco/bedrock,yglazko/bedrock,jacshfr/mozilla-bedrock,MichaelKohler/bedrock,mermi/bedrock,kyoshino/bedrock,mkmelin/bedrock,gauthierm/bedrock,yglazko/bedrock,elin-moco/bedrock,MichaelKohler/bedrock,flodolo/bedrock,marcoscaceres/bedrock,sylvestre/bedrock,kyoshino/bedrock,hoosteeno/bedrock,mkmelin/bedrock,gerv/bedrock,chirilo/bedrock,Sancus/bedrock,bensternthal/bedrock,pmclanahan/bedrock,Jobava/bedrock,amjadm61/bedrock,mmmavis/bedrock,mozilla/bedrock,chirilo/bedrock,pmclanahan/bedrock,Jobava/bedrock,pascalchevrel/bedrock,alexgibson/bedrock,mmmavis/bedrock,gauthierm/bedrock,elin-moco/bedrock,mmmavis/bedrock,SujaySKumar/bedrock,mozilla/mwc,dudepare/bedrock,analytics-pros/mozilla-bedrock,dudepare/bedrock,mmmavis/bedrock,gauthierm/bedrock,TheJJ100100/bedrock,jgmize/bedrock,Sancus/bedrock,ericawright/bedrock,l-hedgehog/bedrock,SujaySKumar/bedrock,mozilla/bedrock,mozilla/mwc,ckprice/bedrock,rishiloyola/bedrock,mmmavis/lightbeam-bedrock-website,amjadm61/bedrock,glogiotatidis/bedrock,chirilo/bedrock,dudepare/bedrock,mermi/bedrock,craigcook/bedrock,jacshfr/mozilla-bedrock,mmmavis/lightbeam-bedrock-website,SujaySKumar/bedrock,mkmelin/bedrock,glogiotatidis/bedrock,andreadelrio/bedrock,SujaySKumar/bedrock,TheoChevalier/bedrock,sylvestre/bedrock,mozilla/bedrock,jacshfr/mozilla-bedrock,mahinthjoe/bedrock,mermi/bedrock,kyoshino/bedrock,mahinthjoe/bedrock,flodolo/bedrock,davehunt/bedrock,gauthierm/bedrock,TheoChevalier/bedrock,pmclanahan/bedrock,davehunt/bedrock,kyoshino/bedrock,mozilla/mwc,kyoshino/bedrock,sylvestre/bedrock,l-hedgehog/bedrock,analytics-pros/mozilla-bedrock,TheoChevalier/bedrock,MichaelKohler/bedrock,bensternthal/bedrock,ericawright/bedrock,amjadm61/bedrock,dudepare/bedrock,mahinthjoe/bedrock,bensternthal/bedrock,CSCI-462-01-2017/bedrock,yglazko/bedrock,ckprice/bedrock,mmmavis/lightbeam-bedrock-website,andreadelrio/bedrock,yglazko/bedrock,malena/bedrock,jgmize/bedrock,craigcook/bedrock,alexgibson/bedrock,MichaelKohler/bedrock,jpetto/bedrock,gerv/bedrock,pascalchevrel/bedrock,sgarrity/bedrock,malena/bedrock,hoosteeno/bedrock,sgarrity/bedrock,rishiloyola/bedrock,mermi/bedrock,petabyte/bedrock,pascalchevrel/bedrock,chirilo/bedrock,CSCI-462-01-2017/bedrock,schalkneethling/bedrock,TheoChevalier/bedrock,jpetto/bedrock,alexgibson/bedrock,mkmelin/bedrock,sylvestre/bedrock,ckprice/bedrock,gauthierm/bedrock,davidwboswell/documentation_autoresponse,analytics-pros/mozilla-bedrock,marcoscaceres/bedrock,Sancus/bedrock
apps/marketplace/urls.py
apps/marketplace/urls.py
from django.conf.urls.defaults import * import views urlpatterns = patterns('', url(r'^$', views.marketplace, name='marketplace'), url(r'^partners/$', views.partners, name='partners') )
from django.conf.urls.defaults import * from views import marketplace, partners urlpatterns = patterns('', (r'^$', marketplace), (r'^partners/$', partners), )
mpl-2.0
Python
38db6404a7f40bc86585c614fc8cbe6691eafb89
update doc
bird-house/birdy
birdy/native/__init__.py
birdy/native/__init__.py
""" The :func:`import_wps` instantiates a class whose methods call WPS processes. The methods are generated at runtime based on the process description provided by the WPS server. Calling a function sends an `execute` request to the server, which returns a response. The response is parsed to convert the outputs in native python whenever possible. `LiteralOutput` objects (string, float, integer, boolean) are automatically converted to their native format. For `ComplexOutput`, the module can either return a link to the output files stored on the server (default), or try to convert the outputs to a python object based on their mime type. So for example, if the mime type is 'application/json', the module would read the remote output file and `json.loads` it to return a `dict`. The behavior of the module can be configured using the :class:`config`, see its docstring for more information. Example ------- If a WPS server with a simple `hello` process is running on the local host on port 5000:: >>> from birdy import import_wps >>> emu = import_wps('http://localhost:5000/') >>> emu.hello <function birdy.native.hello(name)> >>> print(emu.hello.__doc__) "" Just says a friendly Hello. Returns a literal string output with Hello plus the inputed name. Parameters ---------- name : string Please enter your name. Returns ------- output : string A friendly Hello from us. "" # Call the function >>> emu.hello('stranger') 'Hello stranger' """ from .client import BirdyClient, import_wps
""" The :func:`import_wps` function *imports* on the fly a python module whose functions call WPS processes. The module is generated at runtime based on the process description provided by the WPS server. Calling a function sends an `execute` request to the server, which returns a response. The response is parsed to convert the outputs in native python whenever possible. `LiteralOutput` objects (string, float, integer, boolean) are automatically converted to their native format. For `ComplexOutput`, the module can either return a link to the output files stored on the server (default), or try to convert the outputs to a python object based on their mime type. So for example, if the mime type is 'application/json', the module would read the remote output file and `json.loads` it to return a `dict`. The behavior of the module can be configured using the :class:`config`, see its docstring for more information. Example ------- If a WPS server with a simple `hello` process is running on the local host on port 5000:: >>> from birdy import import_wps >>> emu = import_wps('http://localhost:5000/') >>> emu.hello <function birdy.native.hello(name)> >>> print(emu.hello.__doc__) "" Just says a friendly Hello. Returns a literal string output with Hello plus the inputed name. Parameters ---------- name : string Please enter your name. Returns ------- output : string A friendly Hello from us. "" # Call the function >>> emu.hello('stranger') 'Hello stranger' """ from .client import BirdyClient, import_wps
apache-2.0
Python
270812e89e8e0870bfea01367cf645cf5194a806
Add sql constraint, indentation fixed
jorgescalona/openacademy-project
openacademy/model/openacademy_course.py
openacademy/model/openacademy_course.py
# -*- coding: utf-8 -*- from openerp import models, fields, api ''' This module create model of Course ''' class Course(models.Model): '''This class create model of Course''' _name = 'openacademy.course' # Model odoo name name = fields.Char(string='Title', required=True) # field reserved to identified rec_name description = fields.Text(string='Description') responsible_id = fields.Many2one('res.users', ondelete='set null', string="Responsible", index=True) session_ids = fields.One2many('openacademy.session', 'course_id', string="Sessions") @api.multi def copy(self, default=None): default = dict(default or {}) copied_count = self.search_count( [('name', '=like', u"Copy of {}%".format(self.name))]) if not copied_count: new_name = u"Copy of {}".format(self.name) else: new_name = u"Copy of {} ({})".format(self.name, copied_count) default['name'] = new_name return super(Course, self).copy(default) _sql_constraints = [ ('name_description_check', 'CHECK(name != description)', "The title of the course should not be the description"), ('name_unique', 'UNIQUE(name)', "The course title must be unique"), ]
# -*- coding: utf-8 -*- from openerp import models, fields, api ''' This module create model of Course ''' class Course(models.Model): '''This class create model of Course''' _name = 'openacademy.course' # Model odoo name name = fields.Char(string='Title', required=True) # field reserved to identified rec_name description = fields.Text(string='Description') responsible_id = fields.Many2one('res.users', ondelete='set null', string="Responsible", index=True) session_ids = fields.One2many('openacademy.session', 'course_id', string="Sessions") @api.multi def copy(self, default=None): default = dict(default or {}) copied_count = self.search_count( [('name', '=like', u"Copy of {}%".format(self.name))]) if not copied_count: new_name = u"Copy of {}".format(self.name) else: new_name = u"Copy of {} ({})".format(self.name, copied_count) default['name'] = new_name return super(Course, self).copy(default) _sql_constraints = [ ('name_description_check', 'CHECK(name != description)', "The title of the course should not be the description"), ('name_unique', 'UNIQUE(name)', "The course title must be unique"), ]
apache-2.0
Python
658d37fff628a3efac1e7202416ac7495960d4ad
Add translator in script
phihag/adhocracy,DanielNeugebauer/adhocracy,liqd/adhocracy,alkadis/vcv,phihag/adhocracy,liqd/adhocracy,liqd/adhocracy,alkadis/vcv,alkadis/vcv,DanielNeugebauer/adhocracy,phihag/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,DanielNeugebauer/adhocracy,alkadis/vcv,alkadis/vcv,phihag/adhocracy,liqd/adhocracy,DanielNeugebauer/adhocracy
scripts/replay_notifications.py
scripts/replay_notifications.py
#!/usr/bin/env python """ Replay all events in order to create Notification entries to the database which do not exist yet. """ # boilerplate code. copy that import os import sys from argparse import ArgumentParser sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) # /end boilerplate code from paste.deploy import appconfig import pylons from pylons.i18n.translation import _get_translator from adhocracy.config.environment import load_environment from adhocracy.lib.event.notification import notify from adhocracy.model import meta, Event def load_config(filename): conf = appconfig('config:' + os.path.abspath(filename) + '#content') load_environment(conf.global_conf, conf.local_conf) translator = _get_translator(pylons.config.get('lang')) pylons.translator._push_object(translator) def parse_args(): parser = ArgumentParser(description=__doc__) parser.add_argument("conf_file", help="configuration to use") return parser.parse_args() def main(): args = parse_args() load_config(args.conf_file) all_events = meta.Session.query(Event).all() for event in all_events: notify(event, database_only=True) meta.Session.commit() if __name__ == '__main__': sys.exit(main())
#!/usr/bin/env python """ Replay all events in order to create Notification entries to the database which do not exist yet. """ # boilerplate code. copy that import os import sys from argparse import ArgumentParser sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) # /end boilerplate code from paste.deploy import appconfig from adhocracy.config.environment import load_environment from adhocracy.lib.event.notification import notify from adhocracy.model import meta, Event def load_config(filename): conf = appconfig('config:' + os.path.abspath(filename) + '#content') load_environment(conf.global_conf, conf.local_conf) def parse_args(): parser = ArgumentParser(description=__doc__) parser.add_argument("conf_file", help="configuration to use") return parser.parse_args() def main(): args = parse_args() load_config(args.conf_file) all_events = meta.Session.query(Event).all() for event in all_events: notify(event, database_only=True) meta.Session.commit() if __name__ == '__main__': sys.exit(main())
agpl-3.0
Python
054e2d98a450b75427a7b06c4549373c2f4bc7a3
Remove default id resolver from open tracing
mociepka/saleor,mociepka/saleor,mociepka/saleor
saleor/core/tracing.py
saleor/core/tracing.py
from functools import partial from graphene.relay import GlobalID from graphene.types.resolver import default_resolver from graphql import ResolveInfo def should_trace(info: ResolveInfo) -> bool: if info.field_name not in info.parent_type.fields: return False resolver = info.parent_type.fields[info.field_name].resolver return not ( resolver is None or is_default_resolver(resolver) or is_introspection_field(info) ) def is_introspection_field(info: ResolveInfo): if info.path is not None: for path in info.path: if isinstance(path, str) and path.startswith("__"): return True return False def is_default_resolver(resolver): default_resolvers = [default_resolver, GlobalID.id_resolver] while isinstance(resolver, partial): resolver = resolver.func if resolver in default_resolvers: return True return resolver in default_resolvers
from functools import partial from graphene.types.resolver import default_resolver from graphql import ResolveInfo def should_trace(info: ResolveInfo) -> bool: if info.field_name not in info.parent_type.fields: return False resolver = info.parent_type.fields[info.field_name].resolver return not ( resolver is None or is_default_resolver(resolver) or is_introspection_field(info) ) def is_introspection_field(info: ResolveInfo): if info.path is not None: for path in info.path: if isinstance(path, str) and path.startswith("__"): return True return False def is_default_resolver(resolver): while isinstance(resolver, partial): resolver = resolver.func if resolver is default_resolver: return True return resolver is default_resolver
bsd-3-clause
Python
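The corrected helper has to unwrap functools.partial layers before comparing resolver identity, because graphene wraps resolvers in partials. The unwrap-then-compare loop in isolation, with trivial stand-in resolvers rather than graphene's real ones:

```python
from functools import partial

def default_resolver(*args, **kwargs):
    return None

def custom_resolver(*args, **kwargs):
    return "traced"

def is_default(resolver, defaults=(default_resolver,)):
    # Peel off functools.partial layers, then compare identity.
    while isinstance(resolver, partial):
        resolver = resolver.func
    return resolver in defaults

print(is_default(partial(partial(default_resolver), 1)))  # True
print(is_default(custom_resolver))                        # False
```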
91449465489ccc71e4d0b5527f0b4b54526b3c02
update comment
DennyZhang/devops_public,DennyZhang/devops_public,DennyZhang/devops_public,DennyZhang/devops_public
python/parameters_tool/strip_comments.py
python/parameters_tool/strip_comments.py
#!/usr/bin/python ## File : strip_comments.py ## Created : <2017-08-03> ## Updated: Time-stamp: <2017-08-03 18:12:22> ## Description : ## For a block of string, remove useless stuff ## 1. Remove leading whitespace ## 2. Remove tailing whitespace ## 3. Remove any lines start with # ## ## Sample: ## export server_list="# server ip ## ## ## APP ## 138.68.52.73:22 ## ## loadbalancer ## #138.68.254.56:2711 ## #138.68.254.215:2712" ## server_list=$(echo "$server_list" | python ./strip_comments.py) ## server_list: "138.68.52.73:22" ##------------------------------------------------------------------- import os, sys def strip_comment(string): string_list = [] for line in string.split("\n"): line = line.strip() if line.startswith("#") or line == "": continue string_list.append(line) return "\n".join(string_list) if __name__ == '__main__': string = sys.stdin.read() print(strip_comment(string)) ## File : strip_comments.py ends
#!/usr/bin/python ## File : strip_comments.py ## Created : <2017-08-03> ## Updated: Time-stamp: <2017-08-03 18:09:41> ## Description : ## For a block of string, remove useless stuff ## 1. Remove leading whitespace ## 2. Remove tailing whitespace ## 3. Remove any lines start with # ## ## Sample: ## server_list=$(echo "$server_list" | python ./strip_comments.py) ##------------------------------------------------------------------- import os, sys def strip_comment(string): string_list = [] for line in string.split("\n"): line = line.strip() if line.startswith("#") or line == "": continue string_list.append(line) return "\n".join(string_list) if __name__ == '__main__': string = sys.stdin.read() print(strip_comment(string)) ## File : strip_comments.py ends
mit
Python
df6b13a70241b616f49d4dcc25073084c371f5b1
Swap out license with rights
CenterForOpenScience/SHARE,aaxelb/SHARE,zamattiac/SHARE,zamattiac/SHARE,CenterForOpenScience/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,zamattiac/SHARE,laurenbarker/SHARE,aaxelb/SHARE,laurenbarker/SHARE,laurenbarker/SHARE
share/models/creative/base.py
share/models/creative/base.py
from django.db import models from share.models.base import ShareObject from share.models.people import Person from share.models.base import TypedShareObjectMeta from share.models.creative.meta import Venue, Institution, Funder, Award, Tag from share.models.fields import ShareForeignKey, ShareManyToManyField class AbstractCreativeWork(ShareObject, metaclass=TypedShareObjectMeta): title = models.TextField() description = models.TextField() contributors = ShareManyToManyField(Person, through='Contributor') institutions = ShareManyToManyField(Institution, through='ThroughInstitutions') venues = ShareManyToManyField(Venue, through='ThroughVenues') funders = ShareManyToManyField(Funder, through='ThroughFunders') awards = ShareManyToManyField(Award, through='ThroughAwards') subject = ShareForeignKey(Tag, related_name='subjected_%(class)s', null=True) # Note: Null allows inserting of None but returns it as an empty string tags = ShareManyToManyField(Tag, related_name='tagged_%(class)s', through='ThroughTags') created = models.DateTimeField(null=True) published = models.DateTimeField(null=True) free_to_read_type = models.URLField(blank=True) free_to_read_date = models.DateTimeField(null=True) rights = models.TextField(blank=True, null=True) language = models.TextField(blank=True, null=True) class CreativeWork(AbstractCreativeWork): pass
from django.db import models from share.models.base import ShareObject from share.models.people import Person from share.models.base import TypedShareObjectMeta from share.models.creative.meta import Venue, Institution, Funder, Award, Tag from share.models.fields import ShareForeignKey, ShareManyToManyField class AbstractCreativeWork(ShareObject, metaclass=TypedShareObjectMeta): title = models.TextField() description = models.TextField() contributors = ShareManyToManyField(Person, through='Contributor') institutions = ShareManyToManyField(Institution, through='ThroughInstitutions') venues = ShareManyToManyField(Venue, through='ThroughVenues') funders = ShareManyToManyField(Funder, through='ThroughFunders') awards = ShareManyToManyField(Award, through='ThroughAwards') subject = ShareForeignKey(Tag, related_name='subjected_%(class)s', null=True) # Note: Null allows inserting of None but returns it as an empty string tags = ShareManyToManyField(Tag, related_name='tagged_%(class)s', through='ThroughTags') created = models.DateTimeField(null=True) published = models.DateTimeField(null=True) free_to_read_type = models.URLField(blank=True) free_to_read_date = models.DateTimeField(null=True) rights = models.TextField() language = models.TextField() class CreativeWork(AbstractCreativeWork): pass
apache-2.0
Python
a2849e7d016c812317fc503dc15f8f3dfec7da0a
use apply_async instead of delay
SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci
mrbelvedereci/build/handlers.py
mrbelvedereci/build/handlers.py
from django.db.models.signals import post_save from django.dispatch import receiver from mrbelvedereci.build.models import Build from mrbelvedereci.build.tasks import run_build @receiver(post_save, sender=Build) def create_repo_webhooks(sender, **kwargs): build = kwargs['instance'] created = kwargs['created'] if not created: return # Queue the background job with a 1 second delay to allow the transaction to commit run_build.apply_async((build.id), countdown=1)
from django.db.models.signals import post_save from django.dispatch import receiver from mrbelvedereci.build.models import Build from mrbelvedereci.build.tasks import run_build @receiver(post_save, sender=Build) def create_repo_webhooks(sender, **kwargs): build = kwargs['instance'] created = kwargs['created'] if not created: return # Queue the background job with a 1 second delay to allow the transaction to commit run_build.delay(build.id, countdown=1)
bsd-3-clause
Python
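A caution about the new call site: apply_async((build.id), countdown=1) does not pass a one-element args tuple, because (x) is just x; (build.id,) with the trailing comma would be. A plain-Python demonstration with a hypothetical stand-in for Celery's apply_async:

```python
build_id = 42
assert (build_id) == 42      # parentheses alone do nothing
assert (build_id,) == (42,)  # the trailing comma makes the tuple

def fake_apply_async(args, countdown=0):
    # Hypothetical stand-in for Celery's Task.apply_async(args, ...).
    return list(args), countdown

print(fake_apply_async((build_id,), countdown=1))  # ([42], 1)
```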
1bb90728d9ef6c08452d2094e9930b6aa916389e
Remove use of girder.events in queue adapter
Kitware/cumulus,Kitware/cumulus
cumulus/queue/__init__.py
cumulus/queue/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2015 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### from jsonpath_rw import parse from . import sge from . import pbs from . import slurm from . import newt from cumulus.constants import QueueType from cumulus.constants import ClusterType type_to_adapter = { QueueType.SGE: sge.SgeQueueAdapter, QueueType.PBS: pbs.PbsQueueAdapter, QueueType.SLURM: slurm.SlurmQueueAdapter, QueueType.NEWT: newt.NewtQueueAdapter } def get_queue_adapter(cluster, cluster_connection=None): global type_to_adapter # Special case for nersc clusters. They use SLURM ( at the moment ) but the # submission is done using the NEWT REST API. So the scheduler is set the # SLURM but we want to use the NEWT adapter. if cluster['type'] == ClusterType.NEWT: system = QueueType.NEWT else: system = parse('config.scheduler.type').find(cluster) if system: system = system[0].value # Default to SGE else: system = QueueType.SGE if system not in type_to_adapter: raise Exception('Unsupported queuing system: %s' % system) else: cls = type_to_adapter[system] return cls(cluster, cluster_connection) def is_valid_type(type): """ Return True if type is a valid (supported) queueing system, False otherwise. :param The queue type ( 'sge', 'slurm' ...) :returns """ return type in type_to_adapter
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2015 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### from jsonpath_rw import parse from girder import events from . import sge from . import pbs from . import slurm from . import newt from cumulus.constants import QueueType from cumulus.constants import ClusterType type_to_adapter = { QueueType.SGE: sge.SgeQueueAdapter, QueueType.PBS: pbs.PbsQueueAdapter, QueueType.SLURM: slurm.SlurmQueueAdapter, QueueType.NEWT: newt.NewtQueueAdapter } def get_queue_adapter(cluster, cluster_connection=None): global type_to_adapter # Special case for nersc clusters. They use SLURM ( at the moment ) but the # submission is done using the NEWT REST API. So the scheduler is set the # SLURM but we want to use the NEWT adapter. if cluster['type'] == ClusterType.NEWT: system = QueueType.NEWT else: system = parse('config.scheduler.type').find(cluster) if system: system = system[0].value # Default to SGE else: system = QueueType.SGE if system not in type_to_adapter: e = events.trigger('queue.adapter.get', system) if len(e.responses) > 0: cls = e.responses[-1] else: raise Exception('Unsupported queuing system: %s' % system) else: cls = type_to_adapter[system] return cls(cluster, cluster_connection) def is_valid_type(type): """ Return True if type is a valid (supported) queueing system, False otherwise. :param The queue type ( 'sge', 'slurm' ...) :returns """ valid = False if type in type_to_adapter: valid = True else: # See if this type is supported by a plugin e = events.trigger('queue.adapter.get', type) if len(e.responses) > 0: valid = True return valid
apache-2.0
Python
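With the girder.events indirection gone, dispatch is a plain dict from scheduler name to adapter class. The registry pattern in miniature, with stand-in adapter classes rather than the real SGE/SLURM ones:

```python
class SgeAdapter:
    def __init__(self, cluster):
        self.cluster = cluster

class SlurmAdapter(SgeAdapter):
    pass

TYPE_TO_ADAPTER = {'sge': SgeAdapter, 'slurm': SlurmAdapter}

def get_queue_adapter(cluster):
    system = cluster.get('scheduler', 'sge')  # default to SGE, as above
    if system not in TYPE_TO_ADAPTER:
        raise Exception('Unsupported queuing system: %s' % system)
    return TYPE_TO_ADAPTER[system](cluster)

def is_valid_type(type_):
    return type_ in TYPE_TO_ADAPTER

print(type(get_queue_adapter({'scheduler': 'slurm'})).__name__)  # SlurmAdapter
print(is_valid_type('pbs'))                                      # False
```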
707c4c801a0c35a1503575a6bd8c82fed6c589b6
Update tv example to use data module. Rewrap some text.
chintak/scikit-image,pratapvardhan/scikit-image,dpshelio/scikit-image,paalge/scikit-image,newville/scikit-image,SamHames/scikit-image,keflavich/scikit-image,GaZ3ll3/scikit-image,paalge/scikit-image,ofgulban/scikit-image,ofgulban/scikit-image,blink1073/scikit-image,warmspringwinds/scikit-image,Britefury/scikit-image,oew1v07/scikit-image,emon10005/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,ajaybhat/scikit-image,Hiyorimi/scikit-image,SamHames/scikit-image,GaZ3ll3/scikit-image,almarklein/scikit-image,paalge/scikit-image,emmanuelle/scikits.image,SamHames/scikit-image,juliusbierk/scikit-image,bsipocz/scikit-image,vighneshbirodkar/scikit-image,oew1v07/scikit-image,blink1073/scikit-image,chriscrosscutler/scikit-image,Britefury/scikit-image,jwiggins/scikit-image,bennlich/scikit-image,Hiyorimi/scikit-image,chintak/scikit-image,Midafi/scikit-image,robintw/scikit-image,vighneshbirodkar/scikit-image,rjeli/scikit-image,emmanuelle/scikits.image,almarklein/scikit-image,vighneshbirodkar/scikit-image,chintak/scikit-image,rjeli/scikit-image,SamHames/scikit-image,rjeli/scikit-image,ClinicalGraphics/scikit-image,almarklein/scikit-image,bsipocz/scikit-image,bennlich/scikit-image,emmanuelle/scikits.image,keflavich/scikit-image,ClinicalGraphics/scikit-image,robintw/scikit-image,almarklein/scikit-image,emmanuelle/scikits.image,pratapvardhan/scikit-image,Midafi/scikit-image,michaelaye/scikit-image,youprofit/scikit-image,dpshelio/scikit-image,michaelpacer/scikit-image,warmspringwinds/scikit-image,emon10005/scikit-image,michaelpacer/scikit-image,juliusbierk/scikit-image,chriscrosscutler/scikit-image,WarrenWeckesser/scikits-image,ajaybhat/scikit-image,ofgulban/scikit-image,youprofit/scikit-image,jwiggins/scikit-image,newville/scikit-image,michaelaye/scikit-image
doc/examples/plot_lena_tv_denoise.py
doc/examples/plot_lena_tv_denoise.py
""" ==================================================== Denoising the picture of Lena using total variation ==================================================== In this example, we denoise a noisy version of the picture of Lena using the total variation denoising filter. The result of this filter is an image that has a minimal total variation norm, while being as close to the initial image as possible. The total variation is the L1 norm of the gradient of the image, and minimizing the total variation typically produces "posterized" images with flat domains separated by sharp edges. It is possible to change the degree of posterization by controlling the tradeoff between denoising and faithfulness to the original image. """ import numpy as np import matplotlib.pyplot as plt from scikits.image import data from scikits.image.filter import tv_denoise l = data.lena() l = l[230:290, 220:320] noisy = l + 0.4*l.std()*np.random.random(l.shape) tv_denoised = tv_denoise(noisy, weight=10) plt.figure(figsize=(12,2.8)) plt.subplot(131) plt.imshow(noisy, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('noisy', fontsize=20) plt.subplot(132) plt.imshow(tv_denoised, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('TV denoising', fontsize=20) tv_denoised = tv_denoise(noisy, weight=50) plt.subplot(133) plt.imshow(tv_denoised, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('(more) TV denoising', fontsize=20) plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0, left=0, right=1) plt.show()
""" ==================================================== Denoising the picture of Lena using total variation ==================================================== In this example, we denoise a noisy version of the picture of Lena using the total variation denoising filter. The result of this filter is an image that has a minimal total variation norm, while being as close to the initial image as possible. The total variation is the L1 norm of the gradient of the image, and minimizing the total variation typically produces "posterized" images with flat domains separated by sharp edges. It is possible to change the degree of posterization by controlling the tradeoff between denoising and faithfulness to the original image. """ import numpy as np import scipy from scipy import ndimage import matplotlib.pyplot as plt from scikits.image.filter import tv_denoise l = scipy.misc.lena() l = l[230:290, 220:320] noisy = l + 0.4*l.std()*np.random.random(l.shape) tv_denoised = tv_denoise(noisy, weight=10) plt.figure(figsize=(12,2.8)) plt.subplot(131) plt.imshow(noisy, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('noisy', fontsize=20) plt.subplot(132) plt.imshow(tv_denoised, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('TV denoising', fontsize=20) tv_denoised = tv_denoise(noisy, weight=50) plt.subplot(133) plt.imshow(tv_denoised, cmap=plt.cm.gray, vmin=40, vmax=220) plt.axis('off') plt.title('(more) TV denoising', fontsize=20) plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0, left=0, right=1) plt.show()
bsd-3-clause
Python
507cdda01f9208127f8ce5f1ecadc6d5d521fe4d
fix for flake8
cupy/cupy,niboshi/chainer,okuta/chainer,ktnyt/chainer,kiyukuta/chainer,anaruse/chainer,wkentaro/chainer,niboshi/chainer,okuta/chainer,hvy/chainer,wkentaro/chainer,chainer/chainer,jnishi/chainer,jnishi/chainer,kashif/chainer,chainer/chainer,jnishi/chainer,chainer/chainer,hvy/chainer,tkerola/chainer,okuta/chainer,cupy/cupy,ronekko/chainer,hvy/chainer,keisuke-umezawa/chainer,niboshi/chainer,hvy/chainer,jnishi/chainer,ktnyt/chainer,cupy/cupy,niboshi/chainer,ktnyt/chainer,pfnet/chainer,wkentaro/chainer,delta2323/chainer,keisuke-umezawa/chainer,okuta/chainer,ysekky/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,ktnyt/chainer,wkentaro/chainer,aonotas/chainer,cupy/cupy,rezoo/chainer,chainer/chainer
cupy/manipulation/kind.py
cupy/manipulation/kind.py
from cupy import core # TODO(okuta): Implement asfarray def asfortranarray(a, dtype=None): """Return an array laid out in Fortran order in memory. Args: a (~cupy.ndarray): The input array. dtype (str or dtype object, optional): By default, the data-type is inferred from the input data. Returns: ~cupy.ndarray: The input `a` in Fortran, or column-major, order. .. seealso:: :func:`numpy.asfortranarray` """ return core.asfortranarray(a, dtype) # TODO(okuta): Implement asarray_chkfinite # TODO(okuta): Implement asscalar # TODO(okuta): Implement require
import numpy import cupy from cupy import core # TODO(okuta): Implement asfarray def asfortranarray(a, dtype=None): """Return an array laid out in Fortran order in memory. Args: a (~cupy.ndarray): The input array. dtype (str or dtype object, optional): By default, the data-type is inferred from the input data. Returns: ~cupy.ndarray: The input `a` in Fortran, or column-major, order. .. seealso:: :func:`numpy.asfortranarray` """ return core.asfortranarray(a, dtype) # TODO(okuta): Implement asarray_chkfinite # TODO(okuta): Implement asscalar # TODO(okuta): Implement require
mit
Python
e157cfbf85bab3373ef7b4e5e76da20bd572bebb
modify method name: get_by_name_or_all to get_artist_by_name, refactoring methods
EunJung-Seo/art_archive
art_archive_api/utils.py
art_archive_api/utils.py
from flask import abort def get_artist_by_name(model, name): objects = [] if name: objects = model.query.filter_by(name=name) else: objects = model.query objects_count = objects.count() return objects, objects_count def slice_query_set(offset, count, objects_count, objects): if offset >= 0 and objects_count > offset: if count: count += offset else: count = objects_count objects = objects[offset:count] return objects def serialize_artist(artist, images_detail): json_data = {} if images_detail: json_data = artist.serialize_with_images() else: json_data = artist.serialize() return json_data def get_or_abort(model, object_id, code=422): """ get an object with his given id or an abort error (422 is the default) """ result = model.query.get(object_id) return result or abort(code)
from flask import abort def get_by_name_or_all(model, name): objects = [] objects_count = 0 if name: objects = model.query.filter_by(name=name) objects_count = objects.count() else: objects = model.query.all() objects_count = model.query.count() return objects, objects_count def slice_query_set(offset, count, objects_count, objects): if offset >= 0 and objects_count > offset: if count: count += offset else: count = objects_count objects = objects[offset:count] def serialize_artist(artist, images_detail): json_data = {} if images_detail: json_data = artist.serialize_with_images() else: json_data = artist.serialize() return json_data def get_or_abort(model, object_id, code=422): """ get an object with his given id or an abort error (422 is the default) """ result = model.query.get(object_id) return result or abort(code)
mit
Python
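The slice_query_set fix is a classic missing return: the old version sliced into a local name and discarded the result, so callers silently kept the unsliced objects. A minimal reproduction of the corrected flow:

```python
def slice_query_set(offset, count, objects_count, objects):
    if offset >= 0 and objects_count > offset:
        count = count + offset if count else objects_count
        objects = objects[offset:count]
    return objects  # the fix: return the sliced sequence to the caller

items = list(range(10))
print(slice_query_set(2, 3, len(items), items))  # [2, 3, 4]
```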
9f44888c00d29bd1d1a53eb09ab90b61f33c5e05
Update existing settings migration with minor field change.
snahelou/awx,wwitzel3/awx,snahelou/awx,wwitzel3/awx,snahelou/awx,wwitzel3/awx,snahelou/awx,wwitzel3/awx
awx/main/migrations/0002_v300_changes.py
awx/main/migrations/0002_v300_changes.py
# -*- coding: utf-8 -*- # Copyright (c) 2016 Ansible, Inc. # All Rights Reserved. from __future__ import unicode_literals from django.db import migrations, models from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('main', '0001_initial'), ] operations = [ migrations.CreateModel( name='TowerSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', models.DateTimeField(default=None, editable=False)), ('modified', models.DateTimeField(default=None, editable=False)), ('key', models.CharField(unique=True, max_length=255)), ('description', models.TextField()), ('category', models.CharField(max_length=128)), ('value', models.TextField(blank=True)), ('value_type', models.CharField(max_length=12, choices=[(b'string', 'String'), (b'int', 'Integer'), (b'float', 'Decimal'), (b'json', 'JSON'), (b'bool', 'Boolean'), (b'password', 'Password'), (b'list', 'List')])), ('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), ], ), ]
# -*- coding: utf-8 -*- # Copyright (c) 2016 Ansible, Inc. # All Rights Reserved. from __future__ import unicode_literals from django.db import migrations, models from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('main', '0001_initial'), ] operations = [ migrations.CreateModel( name='TowerSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', models.DateTimeField(default=None, editable=False)), ('modified', models.DateTimeField(default=None, editable=False)), ('key', models.CharField(unique=True, max_length=255)), ('description', models.TextField()), ('category', models.CharField(max_length=128)), ('value', models.TextField()), ('value_type', models.CharField(max_length=12, choices=[(b'string', 'String'), (b'int', 'Integer'), (b'float', 'Decimal'), (b'json', 'JSON'), (b'bool', 'Boolean'), (b'password', 'Password'), (b'list', 'List')])), ('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), ], ), ]
apache-2.0
Python
e225f443a68b5c25bc55ecdec3a162f11c230fa6
Update tests
platoai/platoai-python,platoai/platoai
tests/run.py
tests/run.py
from __future__ import print_function import datetime from pprint import pprint import voxjar if __name__ == "__main__": metadata = { "identifier": "test_call_identifier", "timestamp": datetime.datetime.now(), # 'company': { # 'id': 'b87cc8ea-6820-11e7-891e-4f389aefc782' # }, "type": { "identifier": "test_call_type_identifier", "name": "test_call_type_name", }, "agents": [ { "identifier": "test_agent_identifier", "name": "test_agent_name", "phoneNumber": 1234567890, } ], "customers": [ { "identifier": "test_customer_identifier", "name": "test_customer_name", "phoneNumber": 9876543210, } ], "direction": "OUTGOING", "options": {"processAudio": True}, } with open("./test.mp4", "rb") as f: client = voxjar.Client() try: pprint(client.push(metadata, audio=f)) except RuntimeError as e: print(e)
from __future__ import print_function import datetime from pprint import pprint # import json import voxjar if __name__ == '__main__': now = datetime.datetime.now() metadata = { 'identifier': 'test_call_identifier', 'timestamp': now, 'company': { 'id': 'b87cc8ea-6820-11e7-891e-4f389aefc782' }, 'type': { 'identifier': 'test_call_type_identifier', 'name': 'test_call_type_name' }, 'agents': [{ 'identifier': 'test_agent_identifier', 'name': 'test_agent_name', 'phoneNumber': 1234567890 }], 'customers': [{ 'identifier': 'test_customer_identifier', 'name': 'test_customer_name', 'phoneNumber': 9876543210 }], 'direction': 'OUTGOING', 'options': { 'processAudio': False }, } with open('./test.wav', 'rb') as f: client = voxjar.Client() print(client) try: pprint(client.push(metadata, audio=f)) except RuntimeError as e: print(e)
apache-2.0
Python
7893695348a23472835e6d6c2d57b8ac4dea2dc3
Document test intention.
zlargon/mosquitto,zlargon/mosquitto,zlargon/mosquitto,zlargon/mosquitto,zlargon/mosquitto
test/broker/03-publish-timeout-qos2.py
test/broker/03-publish-timeout-qos2.py
#!/usr/bin/python # Test whether a PUBLISH to a topic with QoS 2 results in the correct packet # flow. This test introduces delays into the flow in order to force the broker # to send duplicate PUBREC and PUBCOMP messages. import subprocess import socket import time from struct import * rc = 0 keepalive = 600 connect_packet = pack('!BBH6sBBHH21s', 16, 12+2+21,6,"MQIsdp",3,2,keepalive,21,"pub-qos2-timeout-test") connack_packet = pack('!BBBB', 32, 2, 0, 0); mid = 1926 publish_packet = pack('!BBH13sH15s', 48+4, 2+13+2+15, 13, "pub/qos2/test", mid, "timeout-message") pubrec_packet = pack('!BBH', 80, 2, mid) pubrec_dup_packet = pack('!BBH', 80+8, 2, mid) pubrel_packet = pack('!BBH', 96, 2, mid) pubcomp_packet = pack('!BBH', 112, 2, mid) broker = subprocess.Popen(['../../src/mosquitto', '-c', '03-publish-timeout-qos2.conf']) try: time.sleep(0.1) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(8) # 8 seconds timeout is longer than 5 seconds message retry. sock.connect(("localhost", 1888)) sock.send(connect_packet) connack_recvd = sock.recv(256) if connack_recvd != connack_packet: print "FAIL: Connect failed." rc = 1 else: sock.send(publish_packet) pubrec_recvd = sock.recv(256) if pubrec_recvd != pubrec_packet: (cmd, rl, mid_recvd) = unpack('!BBH', pubrec_recvd) print "FAIL: Expected 80,2," + str(mid) + " got " + str(cmd) + "," + str(rl) + "," + str(mid_recvd) rc = 1 else: # Timeout is 8 seconds which means the broker should repeat the PUBREC. pubrec_recvd = sock.recv(256) if pubrec_recvd != pubrec_dup_packet: (cmd, rl, mid_recvd) = unpack('!BBH', pubrec_recvd) print "FAIL: Expected 88,2," + str(mid) + " got " + str(cmd) + "," + str(rl) + "," + str(mid_recvd) rc = 1 else: sock.send(pubrel_packet) pubcomp_recvd = sock.recv(256) if pubcomp_recvd != pubcomp_packet: (cmd, rl, mid_recvd) = unpack('!BBH', pubcomp_recvd) print "FAIL: Expected 112,2," + str(mid) + " got " + str(cmd) + "," + str(rl) + "," + str(mid_recvd) rc = 1 sock.close() finally: broker.terminate() exit(rc)
#!/usr/bin/python # Test whether a PUBLISH to a topic with QoS 2 results in the correct packet # flow. This test introduces delays into the flow in order to force the broker # to send duplicate PUBREC and PUBCOMP messages. import subprocess import socket import time from struct import * rc = 0 keepalive = 600 connect_packet = pack('!BBH6sBBHH21s', 16, 12+2+21,6,"MQIsdp",3,2,keepalive,21,"pub-qos2-timeout-test") connack_packet = pack('!BBBB', 32, 2, 0, 0); mid = 1926 publish_packet = pack('!BBH13sH15s', 48+4, 2+13+2+15, 13, "pub/qos2/test", mid, "timeout-message") pubrec_packet = pack('!BBH', 80, 2, mid) pubrec_dup_packet = pack('!BBH', 80+8, 2, mid) pubrel_packet = pack('!BBH', 96, 2, mid) pubcomp_packet = pack('!BBH', 112, 2, mid) broker = subprocess.Popen(['../../src/mosquitto', '-c', '03-publish-timeout-qos2.conf']) try: time.sleep(0.1) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(8) # 8 seconds timeout is longer than 5 seconds message retry. sock.connect(("localhost", 1888)) sock.send(connect_packet) connack_recvd = sock.recv(256) if connack_recvd != connack_packet: print "FAIL: Connect failed." rc = 1 else: sock.send(publish_packet) pubrec_recvd = sock.recv(256) if pubrec_recvd != pubrec_packet: (cmd, rl, mid_recvd) = unpack('!BBH', pubrec_recvd) print "FAIL: Expected 80,2," + str(mid) + " got " + str(cmd) + "," + str(rl) + "," + str(mid_recvd) rc = 1 else: pubrec_recvd = sock.recv(256) if pubrec_recvd != pubrec_dup_packet: (cmd, rl, mid_recvd) = unpack('!BBH', pubrec_recvd) print "FAIL: Expected 88,2," + str(mid) + " got " + str(cmd) + "," + str(rl) + "," + str(mid_recvd) rc = 1 else: sock.send(pubrel_packet) pubcomp_recvd = sock.recv(256) if pubcomp_recvd != pubcomp_packet: (cmd, rl, mid_recvd) = unpack('!BBH', pubcomp_recvd) print "FAIL: Expected 112,2," + str(mid) + " got " + str(cmd) + "," + str(rl) + "," + str(mid_recvd) rc = 1 sock.close() finally: broker.terminate() exit(rc)
bsd-3-clause
Python
c546192a83dce300ad46193e351229a5969e979d
Remove warming up from TestBase._test_jitted() (#571)
IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat
sdc/tests/tests_perf/test_perf_base.py
sdc/tests/tests_perf/test_perf_base.py
import os import unittest import numba from sdc.tests.tests_perf.test_perf_utils import * class TestBase(unittest.TestCase): iter_number = 5 results_class = TestResults @classmethod def create_test_results(cls): drivers = [] if is_true(os.environ.get('SDC_TEST_PERF_EXCEL', True)): drivers.append(ExcelResultsDriver('perf_results.xlsx')) if is_true(os.environ.get('SDC_TEST_PERF_CSV', False)): drivers.append(CSVResultsDriver('perf_results.csv')) results = cls.results_class(drivers) if is_true(os.environ.get('LOAD_PREV_RESULTS')): results.load() return results @classmethod def setUpClass(cls): cls.test_results = cls.create_test_results() cls.total_data_length = [] cls.num_threads = int(os.environ.get('NUMBA_NUM_THREADS', config.NUMBA_NUM_THREADS)) cls.threading_layer = os.environ.get('NUMBA_THREADING_LAYER', config.THREADING_LAYER) @classmethod def tearDownClass(cls): # TODO: https://jira.devtools.intel.com/browse/SAT-2371 cls.test_results.print() cls.test_results.dump() def _test_jitted(self, pyfunc, record, *args, **kwargs): # compilation time record["compile_results"] = calc_compilation(pyfunc, *args, **kwargs) cfunc = numba.njit(pyfunc) # execution and boxing time record["test_results"], record["boxing_results"] = \ get_times(cfunc, *args, **kwargs) def _test_python(self, pyfunc, record, *args, **kwargs): record["test_results"], _ = \ get_times(pyfunc, *args, **kwargs) def _test_jit(self, pyfunc, base, *args): record = base.copy() record["test_type"] = 'SDC' self._test_jitted(pyfunc, record, *args) self.test_results.add(**record) def _test_py(self, pyfunc, base, *args): record = base.copy() record["test_type"] = 'Python' self._test_python(pyfunc, record, *args) self.test_results.add(**record)
import os import unittest import numba from sdc.tests.tests_perf.test_perf_utils import * class TestBase(unittest.TestCase): iter_number = 5 results_class = TestResults @classmethod def create_test_results(cls): drivers = [] if is_true(os.environ.get('SDC_TEST_PERF_EXCEL', True)): drivers.append(ExcelResultsDriver('perf_results.xlsx')) if is_true(os.environ.get('SDC_TEST_PERF_CSV', False)): drivers.append(CSVResultsDriver('perf_results.csv')) results = cls.results_class(drivers) if is_true(os.environ.get('LOAD_PREV_RESULTS')): results.load() return results @classmethod def setUpClass(cls): cls.test_results = cls.create_test_results() cls.total_data_length = [] cls.num_threads = int(os.environ.get('NUMBA_NUM_THREADS', config.NUMBA_NUM_THREADS)) cls.threading_layer = os.environ.get('NUMBA_THREADING_LAYER', config.THREADING_LAYER) @classmethod def tearDownClass(cls): # TODO: https://jira.devtools.intel.com/browse/SAT-2371 cls.test_results.print() cls.test_results.dump() def _test_jitted(self, pyfunc, record, *args, **kwargs): # compilation time record["compile_results"] = calc_compilation(pyfunc, *args, **kwargs) cfunc = numba.njit(pyfunc) # Warming up cfunc(*args, **kwargs) # execution and boxing time record["test_results"], record["boxing_results"] = \ get_times(cfunc, *args, **kwargs) def _test_python(self, pyfunc, record, *args, **kwargs): record["test_results"], _ = \ get_times(pyfunc, *args, **kwargs) def _test_jit(self, pyfunc, base, *args): record = base.copy() record["test_type"] = 'SDC' self._test_jitted(pyfunc, record, *args) self.test_results.add(**record) def _test_py(self, pyfunc, base, *args): record = base.copy() record["test_type"] = 'Python' self._test_python(pyfunc, record, *args) self.test_results.add(**record)
bsd-2-clause
Python
d6cfc95c436b7eb4be372795948a8f9097d60015
Remove unused import
dpshelio/astropy-helpers,Cadair/astropy-helpers,larrybradley/astropy-helpers,bsipocz/astropy-helpers,dpshelio/astropy-helpers,larrybradley/astropy-helpers,astropy/astropy-helpers,Cadair/astropy-helpers,bsipocz/astropy-helpers,larrybradley/astropy-helpers,astropy/astropy-helpers,bsipocz/astropy-helpers
astropy_helpers/sphinx/ext/__init__.py
astropy_helpers/sphinx/ext/__init__.py
from __future__ import division, absolute_import, print_function
from __future__ import division, absolute_import, print_function from .numpydoc import setup
bsd-3-clause
Python
bd45223f8606948936d2c0fa1c104a0c2f13d630
Update 8x8 generator
thaynewalker/hog2,thaynewalker/hog2,thaynewalker/hog2,thaynewalker/hog2,thaynewalker/hog2
test/environments/instances/8x8/gen.py
test/environments/instances/8x8/gen.py
#!/usr/bin/python import random import os import errno for i in range(100): s=set() g=set() while len(s) < 20: s.add((random.randint(0,7),random.randint(0,7))) while len(g) < 20: g.add((random.randint(0,7),random.randint(0,7))) start=list(s) goal=list(g) for size in range(1,21): if not os.path.exists("./%d"%size): try: os.makedirs("./%d"%size) except OSError as exc: if exc.errno != errno.EEXIST: raise with open("./%d/%d.csv"%(size,i), "w") as f: for j in range(size): f.write("%d,%d %d,%d\n"%(start[j][0],start[j][1],goal[j][0],goal[j][1]))
#!/usr/bin/python import random import os import errno for i in range(100): s=set() g=set() while len(s) < 20: s.add((random.randint(0,7),random.randint(0,7))) while len(g) < 20: g.add((random.randint(0,7),random.randint(0,7))) start=list(s) goal=list(g) for size in range(2,22,2): if not os.path.exists("./%d"%size): try: os.makedirs("./%d"%size) except OSError as exc: if exc.errno != errno.EEXIST: raise with open("./%d/%d.csv"%(size,i), "w") as f: for j in range(size): f.write("%d,%d %d,%d\n"%(start[j][0],start[j][1],goal[j][0],goal[j][1]))
mit
Python
1a8c06e655b622e7504a615c902ddb9b278f6470
add urdu mapping [skip ci]
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
custom/icds/translations/integrations/const.py
custom/icds/translations/integrations/const.py
from __future__ import absolute_import from __future__ import unicode_literals API_USER = "api" SOURCE_LANGUAGE_MAPPING = { # 'hq_code' : 'transifex_code' 'hin': 'hi', # hindi 'ori': 'or', # oriya 'tam': 'ta', # tamil 'pan': 'pa', # punjabi 'asm': 'as', # assamese 'ben': 'bn', # bengali 'guj': 'gu', # gujarati 'mal': 'ml', # malayalam 'mar': 'mr', # marathi 'snd': 'sd', # sindhi for test 'mri': 'mi', # maori 'khm': 'km', # khmer 'lug': 'lg', # ganda 'tel': 'te', # telugu 'urd': 'ur', # urdu }
from __future__ import absolute_import from __future__ import unicode_literals API_USER = "api" SOURCE_LANGUAGE_MAPPING = { # 'hq_code' : 'transifex_code' 'hin': 'hi', # hindi 'ori': 'or', # oriya 'tam': 'ta', # tamil 'pan': 'pa', # punjabi 'asm': 'as', # assamese 'ben': 'bn', # bengali 'guj': 'gu', # gujarati 'mal': 'ml', # malayalam 'mar': 'mr', # marathi 'snd': 'sd', # sindhi for test 'mri': 'mi', # maori 'khm': 'km', # khmer 'lug': 'lg', # ganda 'tel': 'te', # telugu }
bsd-3-clause
Python
d9304cd7c19e29fc24ba474a5c7983ce3bb88a2b
Fix benchmark name
stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib,stdlib-js/stdlib
lib/node_modules/@stdlib/types/ndarray/ind2sub/benchmark/python/numpy/benchmark.py
lib/node_modules/@stdlib/types/ndarray/ind2sub/benchmark/python/numpy/benchmark.py
#!/usr/bin/env python """Benchmark numpy.unravel_index.""" from __future__ import print_function import timeit NAME = "ind2sub" REPEATS = 3 ITERATIONS = 1000000 def print_version(): """Print the TAP version.""" print("TAP version 13") def print_summary(total, passing): """Print the benchmark summary. # Arguments * `total`: total number of tests * `passing`: number of passing tests """ print("#") print("1.." + str(total)) # TAP plan print("# total " + str(total)) print("# pass " + str(passing)) print("#") print("# ok") def print_results(elapsed): """Print benchmark results. # Arguments * `elapsed`: elapsed time (in seconds) # Examples ``` python python> print_results(0.131009101868) ``` """ rate = ITERATIONS / elapsed print(" ---") print(" iterations: " + str(ITERATIONS)) print(" elapsed: " + str(elapsed)) print(" rate: " + str(rate)) print(" ...") def benchmark(): """Run the benchmark and print benchmark results.""" setup = "import numpy as np; from random import random;" stmt = "y = np.unravel_index(int(random()*1000.0), (10,10,10))" t = timeit.Timer(stmt, setup=setup) print_version() for i in xrange(REPEATS): print("# python::numpy::" + NAME) elapsed = t.timeit(number=ITERATIONS) print_results(elapsed) print("ok " + str(i+1) + " benchmark finished") print_summary(REPEATS, REPEATS) def main(): """Run the benchmark.""" benchmark() if __name__ == "__main__": main()
#!/usr/bin/env python """Benchmark numpy.unravel_index.""" from __future__ import print_function import timeit NAME = "unravel_index" REPEATS = 3 ITERATIONS = 1000000 def print_version(): """Print the TAP version.""" print("TAP version 13") def print_summary(total, passing): """Print the benchmark summary. # Arguments * `total`: total number of tests * `passing`: number of passing tests """ print("#") print("1.." + str(total)) # TAP plan print("# total " + str(total)) print("# pass " + str(passing)) print("#") print("# ok") def print_results(elapsed): """Print benchmark results. # Arguments * `elapsed`: elapsed time (in seconds) # Examples ``` python python> print_results(0.131009101868) ``` """ rate = ITERATIONS / elapsed print(" ---") print(" iterations: " + str(ITERATIONS)) print(" elapsed: " + str(elapsed)) print(" rate: " + str(rate)) print(" ...") def benchmark(): """Run the benchmark and print benchmark results.""" setup = "import numpy as np; from random import random;" stmt = "y = np.unravel_index(int(random()*1000.0), (10,10,10))" t = timeit.Timer(stmt, setup=setup) print_version() for i in xrange(REPEATS): print("# python::numpy::" + NAME) elapsed = t.timeit(number=ITERATIONS) print_results(elapsed) print("ok " + str(i+1) + " benchmark finished") print_summary(REPEATS, REPEATS) def main(): """Run the benchmark.""" benchmark() if __name__ == "__main__": main()
apache-2.0
Python
deb749252a83f59c0bfee3b14abafc5582fb3986
fix 500 - closes #20
letsmeet-click/letsmeet.click,letsmeet-click/letsmeet.click,letsmeet-click/letsmeet.click,letsmeet-click/letsmeet.click
letsmeet/events/views.py
letsmeet/events/views.py
from rules.contrib.views import PermissionRequiredMixin from django.shortcuts import redirect, get_object_or_404 from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ( CreateView, DetailView, UpdateView, ) from .models import Event, EventRSVP, EventComment from .forms import EventUpdateForm, EventCommentCreateForm class CommunityEventMixin: def get_object(self, queryset=None): return get_object_or_404( Event, slug=self.kwargs.get('slug'), community__slug=self.kwargs.get('community_slug')) class EventUpdateView(LoginRequiredMixin, PermissionRequiredMixin, CommunityEventMixin, UpdateView): model = Event template_name = 'events/event_update.html' permission_required = 'event.can_edit' form_class = EventUpdateForm class EventDetailView(CommunityEventMixin, DetailView): model = Event def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['comment_form'] = EventCommentCreateForm() return context class EventRSVPView(LoginRequiredMixin, PermissionRequiredMixin, CommunityEventMixin, DetailView): model = Event template_name = 'events/event_rsvp.html' permission_required = 'event.can_rsvp' allowed_methods = ['post'] def post(self, request, *args, **kwargs): event = self.get_object() answer = self.kwargs.get('answer') if answer == 'reset': try: EventRSVP.objects.get(event=event, user=request.user).delete() except EventRSVP.DoesNotExist: pass else: EventRSVP.objects.get_or_create( event=event, user=request.user, defaults={ 'coming': True if answer == 'yes' else False } ) return redirect(event) class EventCommentCreateView(LoginRequiredMixin, PermissionRequiredMixin, CommunityEventMixin, CreateView): model = EventComment form_class = EventCommentCreateForm template_name = 'events/eventcomment_create.html' permission_required = 'event.can_create_comment' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['event'] = self.get_object() return context def form_valid(self, form): comment = form.save(commit=False) comment.event = self.get_object() comment.user = self.request.user comment.save() return redirect(comment.event)
from rules.contrib.views import PermissionRequiredMixin from django.shortcuts import redirect from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ( CreateView, DetailView, UpdateView, ) from .models import Event, EventRSVP, EventComment from .forms import EventUpdateForm, EventCommentCreateForm class CommunityEventMixin: def get_object(self, queryset=None): obj = Event.objects.get( slug=self.kwargs.get('slug'), community__slug=self.kwargs.get('community_slug')) return obj class EventUpdateView(LoginRequiredMixin, PermissionRequiredMixin, CommunityEventMixin, UpdateView): model = Event template_name = 'events/event_update.html' permission_required = 'event.can_edit' form_class = EventUpdateForm class EventDetailView(CommunityEventMixin, DetailView): model = Event def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['comment_form'] = EventCommentCreateForm() return context class EventRSVPView(LoginRequiredMixin, PermissionRequiredMixin, CommunityEventMixin, DetailView): model = Event template_name = 'events/event_rsvp.html' permission_required = 'event.can_rsvp' allowed_methods = ['post'] def post(self, request, *args, **kwargs): event = self.get_object() answer = self.kwargs.get('answer') if answer == 'reset': try: EventRSVP.objects.get(event=event, user=request.user).delete() except EventRSVP.DoesNotExist: pass else: EventRSVP.objects.get_or_create( event=event, user=request.user, defaults={ 'coming': True if answer == 'yes' else False } ) return redirect(event) class EventCommentCreateView(LoginRequiredMixin, PermissionRequiredMixin, CommunityEventMixin, CreateView): model = EventComment form_class = EventCommentCreateForm template_name = 'events/eventcomment_create.html' permission_required = 'event.can_create_comment' def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['event'] = self.get_object() return context def form_valid(self, form): comment = form.save(commit=False) comment.event = self.get_object() comment.user = self.request.user comment.save() return redirect(comment.event)
mit
Python
c4ea2d360ebf0fc9b9d9494ed43e540eaf2282d8
add compatibility import
cpcloud/dask,vikhyat/dask,hainm/dask,mraspaud/dask,jakirkham/dask,freeman-lab/dask,jayhetee/dask,mrocklin/dask,vikhyat/dask,simudream/dask,clarkfitzg/dask,hainm/dask,marianotepper/dask,PhE/dask,jcrist/dask,PhE/dask,PeterDSteinberg/dask,wiso/dask,mikegraham/dask,esc/dask,jayhetee/dask,cowlicks/dask,mraspaud/dask,chrisbarber/dask,jakirkham/dask,minrk/dask,blaze/dask,minrk/dask,blaze/dask,jcrist/dask,mrocklin/dask,marianotepper/dask,freeman-lab/dask,ssanderson/dask,pombredanne/dask,wiso/dask,pombredanne/dask,ContinuumIO/dask,ContinuumIO/dask,gameduell/dask,dask/dask,ssanderson/dask,esc/dask,clarkfitzg/dask,dask/dask,simudream/dask
dask/array/into.py
dask/array/into.py
from __future__ import absolute_import, division, print_function import numpy as np from toolz import merge, accumulate from into import discover, convert, append, into from datashape.dispatch import dispatch from datashape import DataShape from operator import add import itertools from .core import rec_concatenate, Array, getem, get, names, from_array from ..core import flatten from ..compatibility import long @discover.register(Array) def discover_dask_array(a, **kwargs): block = a._get_block(*([0] * a.ndim)) return DataShape(*(a.shape + (discover(block).measure,))) arrays = [np.ndarray] try: import h5py arrays.append(h5py.Dataset) @dispatch(h5py.Dataset, (int, long)) def resize(x, size): s = list(x.shape) s[0] = size return resize(x, tuple(s)) @dispatch(h5py.Dataset, tuple) def resize(x, shape): return x.resize(shape) except ImportError: pass try: import bcolz arrays.append(bcolz.carray) @dispatch(bcolz.carray, (int, long)) def resize(x, size): return x.resize(size) except ImportError: pass @convert.register(Array, tuple(arrays), cost=0.01) def array_to_dask(x, name=None, blockshape=None, **kwargs): return from_array(x, blockshape=blockshape, name=name, **kwargs) @convert.register(np.ndarray, Array, cost=0.5) def dask_to_numpy(x, **kwargs): return rec_concatenate(get(x.dask, x._keys(), **kwargs)) @convert.register(float, Array, cost=0.5) def dask_to_float(x, **kwargs): return x.compute() @append.register(tuple(arrays), Array) def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs): if not inplace: # Resize output dataset to accept new data assert out.shape[1:] == arr.shape[1:] resize(out, out.shape[0] + arr.shape[0]) # elongate return arr.store(out)
from __future__ import absolute_import, division, print_function import numpy as np from toolz import merge, accumulate from into import discover, convert, append, into from datashape.dispatch import dispatch from datashape import DataShape from operator import add import itertools from .core import rec_concatenate, Array, getem, get, names, from_array from ..core import flatten @discover.register(Array) def discover_dask_array(a, **kwargs): block = a._get_block(*([0] * a.ndim)) return DataShape(*(a.shape + (discover(block).measure,))) arrays = [np.ndarray] try: import h5py arrays.append(h5py.Dataset) @dispatch(h5py.Dataset, (int, long)) def resize(x, size): s = list(x.shape) s[0] = size return resize(x, tuple(s)) @dispatch(h5py.Dataset, tuple) def resize(x, shape): return x.resize(shape) except ImportError: pass try: import bcolz arrays.append(bcolz.carray) @dispatch(bcolz.carray, (int, long)) def resize(x, size): return x.resize(size) except ImportError: pass @convert.register(Array, tuple(arrays), cost=0.01) def array_to_dask(x, name=None, blockshape=None, **kwargs): return from_array(x, blockshape=blockshape, name=name, **kwargs) @convert.register(np.ndarray, Array, cost=0.5) def dask_to_numpy(x, **kwargs): return rec_concatenate(get(x.dask, x._keys(), **kwargs)) @convert.register(float, Array, cost=0.5) def dask_to_float(x, **kwargs): return x.compute() @append.register(tuple(arrays), Array) def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs): if not inplace: # Resize output dataset to accept new data assert out.shape[1:] == arr.shape[1:] resize(out, out.shape[0] + arr.shape[0]) # elongate return arr.store(out)
bsd-3-clause
Python
c1d73206436389f27187f3b52ff0daf6e106918c
Fix serialization of Marathon Constraints
burakbostancioglu/marathon-python,elyast/marathon-python,mesosphere/marathon-python,Carles-Figuerola/marathon-python,thefactory/marathon-python,mattrobenolt/marathon-python,mesosphere/marathon-python,burakbostancioglu/marathon-python,Yelp/marathon-python,drewrobb/marathon-python,Rob-Johnson/marathon-python,Carles-Figuerola/marathon-python,drewrobb/marathon-python,elyast/marathon-python,Rob-Johnson/marathon-python,vitan/marathon-python,thefactory/marathon-python,mattrobenolt/marathon-python,Yelp/marathon-python,fengyehong/marathon-python,fengyehong/marathon-python
marathon/models/constraint.py
marathon/models/constraint.py
from ..exceptions import InvalidOperatorError from .base import MarathonObject class MarathonConstraint(MarathonObject): """Marathon placement constraint. See https://mesosphere.github.io/marathon/docs/constraints.html :param str field: constraint operator target :param str operator: must be one of [UNIQUE, CLUSTER, GROUP_BY] :param value: [optional] if `operator` is CLUSTER, constrain tasks to servers where `field` == `value`. If `operator` is GROUP_BY, place at most `value` tasks per group :type value: str, int, or None """ OPERATORS = ['UNIQUE', 'CLUSTER', 'GROUP_BY'] """Valid operators""" def __init__(self, field, operator, value=None): if not operator in self.OPERATORS: raise InvalidOperatorError(operator) self.field = field self.operator = operator self.value = value def __repr__(self): if self.value: template = "MarathonConstraint::{field}:{operator}:{value}" else: template = "MarathonConstraint::{field}:{operator}" return template.format(**self.__dict__) def json_repr(self): """Construct a JSON-friendly representation of the object. :rtype: list """ if self.value: return [self.field, self.operator, self.value] else: return [self.field, self.operator] @classmethod def from_json(cls, obj): """Construct a MarathonConstraint from a parsed response. :param dict attributes: object attributes from parsed response :rtype: :class:`MarathonConstraint` """ if len(obj) == 2: (field, operator) = obj return cls(field, operator) if len(obj) > 2: (field, operator, value) = obj return cls(field, operator, value)
from ..exceptions import InvalidOperatorError from .base import MarathonObject class MarathonConstraint(MarathonObject): """Marathon placement constraint. See https://mesosphere.github.io/marathon/docs/constraints.html :param str field: constraint operator target :param str operator: must be one of [UNIQUE, CLUSTER, GROUP_BY] :param value: [optional] if `operator` is CLUSTER, constrain tasks to servers where `field` == `value`. If `operator` is GROUP_BY, place at most `value` tasks per group :type value: str, int, or None """ OPERATORS = ['UNIQUE', 'CLUSTER', 'GROUP_BY'] """Valid operators""" def __init__(self, field, operator, value=None): if not operator in self.OPERATORS: raise InvalidOperatorError(operator) self.field = field self.operator = operator self.value = value def __repr__(self): if self.value: template = "MarathonConstraint::{field}:{operator}:{value}" else: template = "MarathonConstraint::{field}:{operator}" return template.format(**self.__dict__) @classmethod def json_decode(cls, obj): """Construct a MarathonConstraint from a parsed response. :param dict attributes: object attributes from parsed response :rtype: :class:`MarathonConstraint` """ if len(obj) == 2: (field, operator) = obj return cls(field, operator) if len(obj) > 2: (field, operator, value) = obj return cls(field, operator, value) def json_encode(self): """Construct a JSON-friendly representation of the object. :rtype: dict """ if self.value: return [self.field, self.operator, self.value] else: return [self.field, self.operator]
mit
Python
381dc5a1f92916d8ce66c7eef95e2237ff20b044
fix tests
larrybradley/astropy,joergdietrich/astropy,astropy/astropy,kelle/astropy,mhvk/astropy,lpsinger/astropy,tbabej/astropy,joergdietrich/astropy,AustereCuriosity/astropy,astropy/astropy,larrybradley/astropy,pllim/astropy,lpsinger/astropy,kelle/astropy,larrybradley/astropy,dhomeier/astropy,AustereCuriosity/astropy,AustereCuriosity/astropy,bsipocz/astropy,lpsinger/astropy,pllim/astropy,MSeifert04/astropy,MSeifert04/astropy,stargaser/astropy,astropy/astropy,StuartLittlefair/astropy,lpsinger/astropy,StuartLittlefair/astropy,bsipocz/astropy,larrybradley/astropy,mhvk/astropy,aleksandr-bakanov/astropy,DougBurke/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,StuartLittlefair/astropy,StuartLittlefair/astropy,joergdietrich/astropy,AustereCuriosity/astropy,aleksandr-bakanov/astropy,astropy/astropy,DougBurke/astropy,DougBurke/astropy,dhomeier/astropy,saimn/astropy,funbaker/astropy,pllim/astropy,pllim/astropy,stargaser/astropy,joergdietrich/astropy,stargaser/astropy,saimn/astropy,aleksandr-bakanov/astropy,larrybradley/astropy,tbabej/astropy,saimn/astropy,funbaker/astropy,mhvk/astropy,bsipocz/astropy,dhomeier/astropy,funbaker/astropy,DougBurke/astropy,saimn/astropy,dhomeier/astropy,bsipocz/astropy,tbabej/astropy,tbabej/astropy,astropy/astropy,mhvk/astropy,tbabej/astropy,dhomeier/astropy,funbaker/astropy,lpsinger/astropy,kelle/astropy,MSeifert04/astropy,kelle/astropy,mhvk/astropy,saimn/astropy,pllim/astropy,kelle/astropy,MSeifert04/astropy,stargaser/astropy,joergdietrich/astropy,AustereCuriosity/astropy
astropy/coordinates/tests/test_sites.py
astropy/coordinates/tests/test_sites.py
from __future__ import (absolute_import, division, print_function, unicode_literals) from ...tests.helper import pytest, assert_quantity_allclose from ... import units as u from .. import Latitude, Longitude, EarthLocation, get_site, add_site, remove_site def test_get_site(): # Compare to the IRAF observatory list available at: # http://tdc-www.harvard.edu/iraf/rvsao/bcvcorr/obsdb.html keck = get_site('keck') lon, lat, el = keck.to_geodetic() assert_quantity_allclose(lon, -1*Longitude('155:28.7', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(el, 4160*u.m, atol=1*u.m) keck = get_site('ctio') lon, lat, el = keck.to_geodetic() assert_quantity_allclose(lon, -1*Longitude('70.815', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(lat, Latitude('-30.16527778', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(el, 2215*u.m, atol=1*u.m) def test_add_remove_site(): from ..sites import _site_db #needed for comparison below initlen = len(_site_db) # Test observatory can be added and retrieved new_site_name = 'University of Washington' new_site_location = EarthLocation(-122.3080*u.deg, 47.6550*u.deg, 0*u.m) add_site(new_site_name, new_site_location) retrieved_location = get_site(new_site_name) assert retrieved_location == new_site_location assert len(_site_db) == (initlen + 1) #now see if it can be removed remove_site(new_site_name) assert len(_site_db) == initlen #now check that alias removals works too new_site_names = [new_site_name, 'UW'] add_site(new_site_names, new_site_location) assert len(_site_db) == (initlen + 2) remove_site(new_site_name) assert len(_site_db) == initlen add_site(new_site_names, new_site_location) assert len(_site_db) == (initlen + 2) remove_site(new_site_names[1]) assert len(_site_db) == initlen def test_bad_site(): with pytest.raises(KeyError): get_site('nonexistent site')
from __future__ import (absolute_import, division, print_function, unicode_literals) from ...tests.helper import pytest, assert_quantity_allclose from ... import units as u from .. import Latitude, Longitude, EarthLocation, get_site, add_site, remove_site def test_get_site(): # Compare to the IRAF observatory list available at: # http://tdc-www.harvard.edu/iraf/rvsao/bcvcorr/obsdb.html keck = get_site('keck') lon, lat, el = keck.to_geodetic() assert_quantity_allclose(lon, -1*Longitude('155:28.7', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(el, 4160*u.m, atol=1*u.m) keck = get_site('ctio') lon, lat, el = keck.to_geodetic() assert_quantity_allclose(lon, -1*Longitude('70.815', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(lat, Latitude('-30.16527778', unit=u.deg), atol=0.001*u.deg) assert_quantity_allclose(el, 2215*u.m, atol=1*u.m) def test_add_remove_site(): from ..sites import _site_db #needed for comparison below initlen = len(_site_db) # Test observatory can be added and retrieved new_site_name = 'University of Washington' new_site_location = EarthLocation(-122.3080*u.deg, 47.6550*u.deg, 0*u.m) add_site(new_site_name, new_site_location) retrieved_location = get_site(new_site_name) assert retrieved_location == new_site_location assert len(_site_db) == (initlen + 1) #now see if it can be removed remove_site(new_site_name) assert len(_site_db) == initlen #now try add/remove with aliases new_site_names = [new_site_name, 'UW'] add_site(new_site_names, new_site_location) assert len(_site_db) == (initlen + 2) remove_site(new_site_name, remove_aliases=True) assert len(_site_db) == initlen add_site(new_site_names, new_site_location) assert len(_site_db) == (initlen + 2) remove_site(new_site_names[1], remove_aliases=True) assert len(_site_db) == initlen def test_bad_site(): with pytest.raises(KeyError): get_site('nonexistent site')
bsd-3-clause
Python
bdb8d48e0030474a616ec2e7e6d5f19132bb18e7
Fix account init
vuolter/pyload,vuolter/pyload,vuolter/pyload
module/plugins/accounts/XFileSharingPro.py
module/plugins/accounts/XFileSharingPro.py
# -*- coding: utf-8 -*- from module.plugins.internal.XFSPAccount import XFSPAccount class XFileSharingPro(XFSPAccount): __name__ = "XFileSharingPro" __type__ = "account" __version__ = "0.04" __description__ = """XFileSharingPro multi-purpose account plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "[email protected]")] HOSTER_NAME = None def init(self): if self.HOSTER_NAME: return super(XFileSharingPro, self).init() def loadAccountInfo(self, user, req): return super(XFileSharingPro if self.HOSTER_NAME else XFSPAccount, self).loadAccountInfo(user, req) def login(self, user, data, req): if self.HOSTER_NAME: return super(XFileSharingPro, self).login(user, data, req)
# -*- coding: utf-8 -*- import re from module.plugins.internal.XFSPAccount import XFSPAccount class XFileSharingPro(XFSPAccount): __name__ = "XFileSharingPro" __type__ = "account" __version__ = "0.03" __description__ = """XFileSharingPro multi-purpose account plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "[email protected]")] HOSTER_NAME = None def loadAccountInfo(self, user, req): return super(XFileSharingPro if self.HOSTER_NAME else XFSPAccount, self).loadAccountInfo(user, req) def login(self, user, data, req): if self.HOSTER_NAME: return super(XFileSharingPro, self).login(user, data, req)
agpl-3.0
Python
ee931a528a1483bedc2951dd202f369460c0fec4
Update version
Outernet-Project/bottle-utils-i18n
bottle_utils/__init__.py
bottle_utils/__init__.py
__version__ = '0.3.5' __author__ = 'Outernet Inc <[email protected]>'
__version__ = '0.3.4' __author__ = 'Outernet Inc <[email protected]>'
bsd-2-clause
Python
a806272275fa0071abf038ceed913995c5e99bb5
add support of IAM roles
PressLabs/z3,PressLabs/z3
z3/get.py
z3/get.py
import argparse
import sys, re
import boto3
import botocore
from boto3.s3.transfer import TransferConfig

from z3.config import get_config

MB = 1024 ** 2

def main():
    cfg = get_config()
    parser = argparse.ArgumentParser(
        description='Read a key from s3 and write the content to stdout',
    )
    parser.add_argument('name', help='name of S3 key')
    args = parser.parse_args()
    config = TransferConfig(max_concurrency=int(cfg['CONCURRENCY']),
                            multipart_chunksize=int(re.sub('M', '', cfg['CHUNK_SIZE'])) * MB)
    if 'S3_KEY_ID' in cfg:
        # Use explicit credentials when they are present in the config...
        s3 = boto3.client('s3',
                          aws_access_key_id=cfg['S3_KEY_ID'],
                          aws_secret_access_key=cfg['S3_SECRET'])
    else:
        # ...otherwise fall back to boto3's default credential chain (env vars, IAM role, etc.).
        s3 = boto3.client('s3')

    try:
        s3.download_fileobj(cfg['BUCKET'], args.name, sys.stdout, Config=config)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise

if __name__ == '__main__':
    main()
import argparse import sys import boto.s3 from z3.config import get_config def download(bucket, name): key = bucket.get_key(name) key.get_contents_to_file(sys.stdout) def main(): cfg = get_config() parser = argparse.ArgumentParser( description='Read a key from s3 and write the content to stdout', ) parser.add_argument('name', help='name of S3 key') args = parser.parse_args() bucket = boto.connect_s3( cfg['S3_KEY_ID'], cfg['S3_SECRET']).get_bucket(cfg['BUCKET']) download(bucket, args.name) if __name__ == '__main__': main()
apache-2.0
Python
3a177a48b1e8f51eb9f13b874879b7fa47216897
Add comments to multimission-simulation
gbrammer/grizli
grizli/version.py
grizli/version.py
# Should be one commit behind latest __version__ = "0.1.1-29-gac73d73"
# Should be one commit behind latest __version__ = "0.1.1-25-g3109f16"
mit
Python
6934ba49ff6f594910843d951606f80db67d9b4b
return to older return
markomanninen/hyml
hyml/ext.py
hyml/ext.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Marko Manninen <[email protected]>, 2017
import hy, hy.importer as hyi
from jinja2.ext import extract_from_ast
import itertools

def extract_from_ast(source, keywords):
    d = None
    def filter_hy(e):
        # basically we are searching for babel keyword expressions here
        # and when one is found, it is returned along with:
        # 0 linenumber, keyword itself, and message string
        global d
        if isinstance(e, hy.HyExpression) or isinstance(e, list):
            if isinstance(e, hy.HyExpression):
                # this could be the keyword we are searching for
                d = e[0]
            # flatten list, maybe could be done later...
            x = list(itertools.chain(*filter(None, map(filter_hy, e))))
            # reset keyword
            d = None
            return x
        elif not isinstance(e, hy.HySymbol) and isinstance(e, hy.HyString) and d in keywords:
            # no comments available, thus only three items are returned
            # TODO: message context and plural message support
            return 0, str(d), {"context": str(e), "singular": str(e), "plural": str(e)}
    return filter_hy(source)

def chunks(long_list, n):
    # split list into chunks of size n
    if long_list:
        for i in range(0, len(long_list), n):
            t = long_list[i:i + n]
            # add empty keyword list to the tuple for babel
            yield tuple(t[:2]+[t[2]["singular"]]+[[]])

def babel_extract(fileobj, *args, **kw):
    byte = fileobj.read()
    # unfortunately line breaks (line numbers) are lost at this point...
    source = "".join(map(chr, byte))
    node = hyi.import_buffer_to_hst(source)
    # map keywords to hy symbols for later comparison
    if len(args[0]) > 0:
        keywords = map(hy.HySymbol, args[0])
    else:
        keywords = map(hy.HySymbol, ['ngettext', 'pgettext', 'ungettext', 'dngettext', 'dgettext', 'ugettext', 'gettext', '_', 'N_', 'npgettext'])
    ast = extract_from_ast(node, keywords)
    return chunks(ast, 3)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Marko Manninen <[email protected]>, 2017
import hy, hy.importer as hyi
from jinja2.ext import extract_from_ast
import itertools

def extract_from_ast(source, keywords):
    d = None
    def filter_hy(e):
        # basically we are searching for babel keyword expressions here
        # and when one is found, it is returned along with:
        # 0 linenumber, keyword itself, and message string
        global d
        if isinstance(e, hy.HyExpression) or isinstance(e, list):
            if isinstance(e, hy.HyExpression):
                # this could be the keyword we are searching for
                d = e[0]
            # flatten list, maybe could be done later...
            x = list(itertools.chain(*filter(None, map(filter_hy, e))))
            # reset keyword
            d = None
            return x
        elif not isinstance(e, hy.HySymbol) and isinstance(e, hy.HyString) and d in keywords:
            # no comments available, thus only three items are returned
            # TODO: message context and plural message support
            return 0, str(d), {"context": str(e), "singular": str(e), "plural": str(e)}
    return filter_hy(source)

def chunks(long_list, n):
    # split list into chunks of size n
    for i in range(0, len(long_list), n):
        t = long_list[i:i + n]
        # add empty keyword list to the tuple for babel
        yield tuple(t[:2]+[t[2]["singular"]]+[[]])

def babel_extract(fileobj, *args, **kw):
    byte = fileobj.read()
    # unfortunately line breaks (line numbers) are lost at this point...
    source = "".join(map(chr, byte))
    if source:
        node = hyi.import_buffer_to_hst(source)
        if node:
            # map keywords to hy symbols for later comparison
            if len(args[0]) > 0:
                keywords = map(hy.HySymbol, args[0])
            else:
                keywords = map(hy.HySymbol, ['ngettext', 'pgettext', 'ungettext', 'dngettext', 'dgettext', 'ugettext', 'gettext', '_', 'N_', 'npgettext'])
            ast = extract_from_ast(node, keywords)
            if ast:
                return chunks(ast, 3)
mit
Python
2a63c3cc4a795e23ff00d7c2273ee40939ec3dea
mark string literal as regex to avoid runtime warning in python 3
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
custom/aaa/urls.py
custom/aaa/urls.py
from __future__ import absolute_import from __future__ import unicode_literals from django.conf.urls import url, include from custom.aaa.views import ( AggregationScriptPage, LocationFilterAPI, ProgramOverviewReport, ProgramOverviewReportAPI, UnifiedBeneficiaryReport, UnifiedBeneficiaryReportAPI, UnifiedBeneficiaryDetailsReport, UnifiedBeneficiaryDetailsReportAPI, ) dashboardurls = [ url('^program_overview/', ProgramOverviewReport.as_view(), name='program_overview'), url('^unified_beneficiary/$', UnifiedBeneficiaryReport.as_view(), name='unified_beneficiary'), url( r'^unified_beneficiary/(?P<details_type>[\w-]+)/(?P<beneficiary_id>[\w-]+)/$', UnifiedBeneficiaryDetailsReport.as_view(), name='unified_beneficiary_details' ), ] dataurls = [ url('^program_overview/', ProgramOverviewReportAPI.as_view(), name='program_overview_api'), url('^unified_beneficiary/', UnifiedBeneficiaryReportAPI.as_view(), name='unified_beneficiary_api'), url( '^unified_beneficiary_details/', UnifiedBeneficiaryDetailsReportAPI.as_view(), name='unified_beneficiary_details_api' ), url('^location_api/', LocationFilterAPI.as_view(), name='location_api'), url(r'^aggregate/', AggregationScriptPage.as_view(), name=AggregationScriptPage.urlname), ] urlpatterns = [ url(r'^aaa_dashboard/', include(dashboardurls)), url(r'^aaa_dashboard_data/', include(dataurls)), ]
from __future__ import absolute_import from __future__ import unicode_literals from django.conf.urls import url, include from custom.aaa.views import ( AggregationScriptPage, LocationFilterAPI, ProgramOverviewReport, ProgramOverviewReportAPI, UnifiedBeneficiaryReport, UnifiedBeneficiaryReportAPI, UnifiedBeneficiaryDetailsReport, UnifiedBeneficiaryDetailsReportAPI, ) dashboardurls = [ url('^program_overview/', ProgramOverviewReport.as_view(), name='program_overview'), url('^unified_beneficiary/$', UnifiedBeneficiaryReport.as_view(), name='unified_beneficiary'), url( '^unified_beneficiary/(?P<details_type>[\w-]+)/(?P<beneficiary_id>[\w-]+)/$', UnifiedBeneficiaryDetailsReport.as_view(), name='unified_beneficiary_details' ), ] dataurls = [ url('^program_overview/', ProgramOverviewReportAPI.as_view(), name='program_overview_api'), url('^unified_beneficiary/', UnifiedBeneficiaryReportAPI.as_view(), name='unified_beneficiary_api'), url( '^unified_beneficiary_details/', UnifiedBeneficiaryDetailsReportAPI.as_view(), name='unified_beneficiary_details_api' ), url('^location_api/', LocationFilterAPI.as_view(), name='location_api'), url(r'^aggregate/', AggregationScriptPage.as_view(), name=AggregationScriptPage.urlname), ] urlpatterns = [ url(r'^aaa_dashboard/', include(dashboardurls)), url(r'^aaa_dashboard_data/', include(dataurls)), ]
bsd-3-clause
Python
e7ae8140beb50e3091c0bd7ad1db4535540c95df
remove copyright
smlbiobot/SML-Cogs,smlbiobot/SML-Cogs
cwready/cwready.py
cwready/cwready.py
""" Clan War Readiness """ import argparse import itertools import os from collections import defaultdict from random import choice import discord from cogs.utils import checks from cogs.utils.chat_formatting import box from cogs.utils.chat_formatting import pagify from cogs.utils.dataIO import dataIO from discord.ext import commands from discord.ext.commands import Context PATH = os.path.join("data", "cwready") JSON = os.path.join(PATH, "settings.json") def nested_dict(): """Recursively nested defaultdict.""" return defaultdict(nested_dict) class CWReadiness: """Clan War Readinesx""" def __init__(self, bot): """Init.""" self.bot = bot self.settings = nested_dict() self.settings.update(dataIO.load_json(JSON)) def check_folder(): """Check folder.""" os.makedirs(PATH, exist_ok=True) def check_file(): """Check files.""" if not dataIO.is_valid_json(JSON): dataIO.save_json(JSON, {}) def setup(bot): """Setup.""" check_folder() check_file() n = CWReadiness(bot) bot.add_cog(n)
""" Clan War Readiness """ # -*- coding: utf-8 -*- """ The MIT License (MIT) Copyright (c) 2017 SML Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import argparse import itertools import os from collections import defaultdict from random import choice import discord from cogs.utils import checks from cogs.utils.chat_formatting import box from cogs.utils.chat_formatting import pagify from cogs.utils.dataIO import dataIO from discord.ext import commands from discord.ext.commands import Context PATH = os.path.join("data", "cwready") JSON = os.path.join(PATH, "settings.json") def nested_dict(): """Recursively nested defaultdict.""" return defaultdict(nested_dict) class CWReadiness: """Clan War Readinesx""" def __init__(self, bot): """Init.""" self.bot = bot self.settings = nested_dict() self.settings.update(dataIO.load_json(JSON)) def check_folder(): """Check folder.""" os.makedirs(PATH, exist_ok=True) def check_file(): """Check files.""" if not dataIO.is_valid_json(JSON): dataIO.save_json(JSON, {}) def setup(bot): """Setup.""" check_folder() check_file() n = CWReadiness(bot) bot.add_cog(n)
mit
Python
0e13bf2b23df3584109fcf9b62710efbb81b2226
exit with help if no subcommand is supplied.
dattasaurabh82/internetarchive,JesseWeinstein/internetarchive,wumpus/internetarchive,brycedrennan/internetarchive,jjjake/internetarchive
iacli/ia.py
iacli/ia.py
#!/usr/bin/env python """A command line interface for Archive.org. usage: ia [--debug | --help | --version] [<command>] [<args>...] options: -h, --help -v, --version -d, --debug [default: True] commands: help Retrieve help for subcommands. configure Configure `ia`. metadata Retrieve and modify metadata for items on Archive.org. upload Upload items to Archive.org. download Download files from Archive.org. delete Delete files from Archive.org. search Search Archive.org. mine Download item metadata from Archive.org concurrently. catalog Retrieve information about your Archive.org catalog tasks. list List files in a given item. See 'ia help <command>' for more information on a specific command. """ from sys import stderr, exit from subprocess import call from docopt import docopt from internetarchive import __version__ # main() #_________________________________________________________________________________________ def main(): """This script is the CLI driver for ia-wrapper. It dynamically imports and calls the subcommand specified on the command line. It depends on the ``internetarchive`` and ``iacli`` packages. Subcommands can be arbitrarily added to the ``iacli`` package as modules, and can be dynamically executed via this script, ``ia``. """ args = docopt(__doc__, version=__version__, options_first=True) # Get subcommand. cmd = args['<command>'] aliases = dict( md = 'metadata', up = 'upload', do = 'download', rm = 'delete', se = 'search', mi = 'mine', ca = 'catalog', ls = 'list', ) if cmd in aliases: cmd = aliases[cmd] argv = [cmd] + args['<args>'] if cmd == 'help' or not cmd: if not args['<args>']: call(['ia', '--help']) else: call(['ia', args['<args>'][0], '--help']) exit(0) # Dynamically import and call subcommand module specified on the # command line. module = 'iacli.ia_{0}'.format(cmd) try: globals()['ia_module'] = __import__(module, fromlist=['iacli']) except ImportError: stderr.write('error: "{0}" is not an `ia` command!\n'.format(cmd)) exit(1) try: ia_module.main(argv) except KeyboardInterrupt: exit(1) if __name__ == '__main__': main()
#!/usr/bin/env python
"""A command line interface for Archive.org.

usage:
    ia [--debug] <command> [<args>...]
    ia --help
    ia --version

options:
    -h, --help
    -v, --version
    -d, --debug  [default: True]

commands:
    help      Retrieve help for subcommands.
    configure Configure `ia`.
    metadata  Retrieve and modify metadata for items on Archive.org.
    upload    Upload items to Archive.org.
    download  Download files from Archive.org.
    delete    Delete files from Archive.org.
    search    Search Archive.org.
    mine      Download item metadata from Archive.org concurrently.
    catalog   Retrieve information about your Archive.org catalog tasks.
    list      List files in a given item.

See 'ia help <command>' for more information on a specific command.

"""
from sys import stderr, exit
from subprocess import call

from docopt import docopt

from internetarchive import __version__


# main()
#_________________________________________________________________________________________
def main():
    """This script is the CLI driver for ia-wrapper. It dynamically
    imports and calls the subcommand specified on the command line. It
    depends on the ``internetarchive`` and ``iacli`` packages.

    Subcommands can be arbitrarily added to the ``iacli`` package as
    modules, and can be dynamically executed via this script, ``ia``.

    """
    args = docopt(__doc__, version=__version__, options_first=True)

    # Get subcommand.
    cmd = args['<command>']
    aliases = dict(
        md = 'metadata',
        up = 'upload',
        do = 'download',
        rm = 'delete',
        se = 'search',
        mi = 'mine',
        ca = 'catalog',
        ls = 'list',
    )
    if cmd in aliases:
        cmd = aliases[cmd]

    argv = [cmd] + args['<args>']

    if cmd == 'help':
        if not args['<args>']:
            call(['ia', '--help'])
        else:
            call(['ia', args['<args>'][0], '--help'])
        exit(0)

    # Dynamically import and call subcommand module specified on the
    # command line.
    module = 'iacli.ia_{0}'.format(cmd)
    try:
        globals()['ia_module'] = __import__(module, fromlist=['iacli'])
    except ImportError:
        stderr.write('error: "{0}" is not an `ia` command!\n'.format(cmd))
        exit(1)
    try:
        ia_module.main(argv)
    except KeyboardInterrupt:
        exit(1)


if __name__ == '__main__':
    main()
agpl-3.0
Python
f46731c1bfd3be6e7d66b4a1078ca09460d25af5
Add OCA as author of OCA addons
ddico/account-financial-tools,ddico/account-financial-tools
account_partner_required/__openerp__.py
account_partner_required/__openerp__.py
# -*- encoding: utf-8 -*- ############################################################################## # # Account partner required module for OpenERP # Copyright (C) 2014 Acsone (http://acsone.eu). # @author Stéphane Bidoul <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Account partner required', 'version': '0.1', 'category': 'Generic Modules/Accounting', 'license': 'AGPL-3', 'description': """This module adds an option "partner policy" on account types. You have the choice between 3 policies : optional (the default), always (require a partner), and never (forbid a partner). This module is useful to enforce a partner on account move lines on customer and supplier accounts. Module developed by Stéphane Bidoul <[email protected]>, inspired by Alexis de Lattre <[email protected]>'s account_analytic_required module. """, 'author': "ACSONE SA/NV,Odoo Community Association (OCA)", 'website': 'http://acsone.eu/', 'depends': ['account'], 'data': ['account_view.xml'], 'installable': True, }
# -*- encoding: utf-8 -*- ############################################################################## # # Account partner required module for OpenERP # Copyright (C) 2014 Acsone (http://acsone.eu). # @author Stéphane Bidoul <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Account partner required', 'version': '0.1', 'category': 'Generic Modules/Accounting', 'license': 'AGPL-3', 'description': """This module adds an option "partner policy" on account types. You have the choice between 3 policies : optional (the default), always (require a partner), and never (forbid a partner). This module is useful to enforce a partner on account move lines on customer and supplier accounts. Module developed by Stéphane Bidoul <[email protected]>, inspired by Alexis de Lattre <[email protected]>'s account_analytic_required module. """, 'author': 'ACSONE SA/NV', 'website': 'http://acsone.eu/', 'depends': ['account'], 'data': ['account_view.xml'], 'installable': True, }
agpl-3.0
Python
15785b306557bb81643270415944ca1bda3ae0a8
Remove database_project_name from api
globocom/dbaas-zabbix,globocom/dbaas-zabbix
dbaas_zabbix/dbaas_api.py
dbaas_zabbix/dbaas_api.py
# -*- coding: utf-8 -*- class DatabaseAsAServiceApi(object): def __init__(self, databaseinfra, credentials): self.databaseinfra = databaseinfra self.credentials = credentials @property def user(self): return self.credentials.user @property def password(self): return self.credentials.password @property def endpoint(self): return self.credentials.endpoint @property def main_clientgroup(self): return self.credentials.get_parameter_by_name("main_clientgroup") @property def extra_clientgroup(self): return self.credentials.get_parameter_by_name("extra_clientgroup") def extra_parameters(self, group): return self.credentials.get_parameters_by_group(group) @property def alarm_notes(self): return self.credentials.get_parameter_by_name("alarm_notes") @property def instances(self): return self.databaseinfra.instances.all() @property def driver(self): return self.databaseinfra.get_driver() @property def database_instances(self): return self.driver.get_database_instances() @property def non_database_instances(self): return self.driver.get_non_database_instances() @property def hosts(self): return list(set([instance.hostname for instance in self.instances])) @property def databaseifra_name(self): return self.databaseinfra.name @property def secondary_ips(self): return self.databaseinfra.cs_dbinfra_attributes.all() @property def is_ha(self): return self.databaseinfra.plan.is_ha @property def engine_name(self): return self.databaseinfra.engine.engine_type.name
# -*- coding: utf-8 -*- class DatabaseAsAServiceApi(object): def __init__(self, databaseinfra, credentials): self.databaseinfra = databaseinfra self.credentials = credentials @property def user(self): return self.credentials.user @property def password(self): return self.credentials.password @property def endpoint(self): return self.credentials.endpoint @property def main_clientgroup(self): return self.credentials.get_parameter_by_name("main_clientgroup") @property def extra_clientgroup(self): return self.credentials.get_parameter_by_name("extra_clientgroup") def extra_parameters(self, group): return self.credentials.get_parameters_by_group(group) @property def alarm_notes(self): return self.credentials.get_parameter_by_name("alarm_notes") @property def instances(self): return self.databaseinfra.instances.all() @property def driver(self): return self.databaseinfra.get_driver() @property def database_instances(self): return self.driver.get_database_instances() @property def non_database_instances(self): return self.driver.get_non_database_instances() @property def hosts(self): return list(set([instance.hostname for instance in self.instances])) @property def databaseifra_name(self): return self.databaseinfra.name @property def secondary_ips(self): return self.databaseinfra.cs_dbinfra_attributes.all() @property def is_ha(self): return self.databaseinfra.plan.is_ha @property def engine_name(self): return self.databaseinfra.engine.engine_type.name @property def database_project_name(self): return self.databaseinfra.databases.get().project.name
bsd-3-clause
Python
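The property removed here called `self.databaseinfra.databases.get()`, and Django's `QuerySet.get()` raises unless exactly one row matches, so the call broke for infras with zero or several databases, a plausible motive for the removal. A minimal plain-Python sketch of that failure mode (no Django required; all names illustrative):

class MultipleObjectsReturned(Exception):
    """Mimics django.core.exceptions.MultipleObjectsReturned."""

class DoesNotExist(Exception):
    """Mimics Model.DoesNotExist."""

def get_single(rows):
    # QuerySet.get() semantics: exactly one match or an exception.
    if not rows:
        raise DoesNotExist("no rows matched")
    if len(rows) > 1:
        raise MultipleObjectsReturned("expected 1 row, got %d" % len(rows))
    return rows[0]

print(get_single(["analytics_db"]))  # analytics_db
# get_single([]) and get_single(["a", "b"]) both raise.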
085cee90d03c69d3664de69831fbc18a1412a162
Update scadabr_database_to_kairosdb.py
paladini/ScadaBR_to_KairosDB,paladini/ScadaBR_to_KairosDB
bin/scadabr_database_to_kairosdb.py
bin/scadabr_database_to_kairosdb.py
#!/usr/bin/env python3 import requests import gzip import json import sys import pymysql as mariadb # Creating the connection with MySQL/MariaDB database. # # Attributes: # sys.argv[1] = database name, that was sent by the caller script. # sys.argv[2] = username of the MySQL/MariaDB server. # sys.argv[3] = if has a sys.argv[3], then use it. Otherwise the password is blank/empty. # passwd = '' if (len(sys.argv) <= 3) else sys.argv[3] mariadb_connection = mariadb.connect(user=sys.argv[2],passwd=passwd, database=sys.argv[1]) cursor = mariadb_connection.cursor() # Execute a SQL query on the MySQL/MariaDB database. def execute_query(query): try: cursor.execute(query) return cursor except mariadb.Error as error: print("[ERROR] {}".format(error)) print("For comparison purposes, we've waited for about 15 minutes to export 1.7 million point values in a low-end notebook.") # Querying all data (format: <sensor name>, <dataType>, <measured value>, <timestamp in UNIX format> ) dataPoints = execute_query("SELECT * FROM dataPoints;").fetchall() print("Number of data points detected: %u.\n" % len(dataPoints)) for dp in dataPoints: # We should connect for every data point because this way we avoid a lot of errors/exceptions of timeout. cursor = mariadb.connect(user=sys.argv[2],passwd=passwd, database=sys.argv[1]).cursor() # Creating structured data in KairosDB format data = [] send = { "name": dp[1], "tags": { "sensor_name": dp[1] } } # Parsing point values in KairosDB format ( [[<timestamp1>, <value1>], [<timestamp2>, <value2>], ... [<timestampN>, <valueN>]] ) pointValues = execute_query("SELECT dataType, pointValue, ts FROM pointValues WHERE dataPointId='%u';" % dp[0]) for pv in pointValues: data.append([pv[2], pv[1]]) send["datapoints"] = data print("\t[STATUS] Exporting %u point values from %s." % (pointValues.rowcount, dp[1])) print("\t[STATUS] Sending data to KairosDB. This may take some minutes...") # Gzipping json before send. gzipped = gzip.compress(bytes(json.dumps(send), 'UTF-8')) # Sending gzipped data to KairosDB/Cassandra. headers = {'content-type': 'application/gzip'} requests.post("http://localhost:8080/api/v1/datapoints", data=gzipped, headers=headers) print("\t[STATUS] %u values from data point %s has sent to KairosDB." % (pointValues.rowcount, dp[1])) print("\n[STATUS] Finished!\n")
#!/usr/bin/env python3 import requests import gzip import json import sys import mysql.connector as mariadb # Creating the connection with MySQL/MariaDB database. # # Attributes: # sys.argv[1] = database name, that was sent by the caller script. # sys.argv[2] = username of the MySQL/MariaDB server. # sys.argv[3] = if has a sys.argv[3], then use it. Otherwise the password is blank/empty. # passwd = '' if (len(sys.argv) <= 3) else sys.argv[3] mariadb_connection = mariadb.connect(user=sys.argv[2],password=passwd, database=sys.argv[1]) cursor = mariadb_connection.cursor() # Execute a SQL query on the MySQL/MariaDB database. def execute_query(query): try: cursor.execute(query) return cursor except mariadb.Error as error: print("[ERROR] {}".format(error)) # Querying all data (format: <sensor name>, <dataType>, <measured value>, <timestamp in UNIX format> ) query = execute_query("SELECT dataPoints.xid, pointValues.dataType, \ pointValues.pointValue, pointValues.ts from pointValues \ INNER JOIN dataPoints on \ pointValues.dataPointId=dataPoints.id") # Parsing structured data to KairosDB format send_to_kairos = [] for row in query: temp = {} temp["name"] = row[0] temp["tags"] = { "sensor_name": row[0] } temp["timestamp"] = row[3] temp["value"] = row[2] send_to_kairos.append(temp) print("[STATUS] Data to be exported: " + str(len(send_to_kairos)) + " point values.") print("[STATUS] Sending data to KairosDB...\n\nDepending on how many data you have sent, this may take some minutes.") print("\n For comparison purposes, we've waited for about 15 minutes to export\n 1.7 million point values in a low-end notebook.") # Gzipping json before send. gzipped = gzip.compress(bytes(json.dumps(send_to_kairos), 'UTF-8')) # Sending gzipped data to KairosDB/Cassandra. headers = {'content-type': 'application/gzip'} requests.post("http://localhost:8080/api/v1/datapoints", data=gzipped, headers=headers) print("\n[STATUS] Finished!\n")
mit
Python
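Both versions end the same way: serialise to JSON, gzip, and POST with a Content-Type of application/gzip. A standalone sketch of the payload shape KairosDB's /api/v1/datapoints endpoint accepts (metric name and readings are invented), stopping short of the actual network call:

import gzip
import json

payload = [{
    "name": "sensor_temperature",                  # hypothetical metric
    "tags": {"sensor_name": "sensor_temperature"},
    "datapoints": [[1420070400000, 21.5],          # [timestamp_ms, value]
                   [1420070460000, 21.7]],
}]
gzipped = gzip.compress(json.dumps(payload).encode("utf-8"))
print("compressed body:", len(gzipped), "bytes")
# requests.post(url, data=gzipped, headers={"content-type": "application/gzip"})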
3b89f8df57fd345e873d87df7f342fe6a01b49ce
Fix config import from __init__
Neurita/darwin
darwin/__init__.py
darwin/__init__.py
from .utils.logger import setup_logging setup_logging()
from .logger import setup_logging setup_logging()
bsd-3-clause
Python
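The fix only repoints a relative import after the logger helper moved into a `utils` subpackage (`from .logger import ...` becomes `from .utils.logger import ...`). The helper's body is not part of the commit; a plausible stand-in for what `setup_logging` might do, purely illustrative:

import logging

def setup_logging(level=logging.INFO):
    """Hypothetical stand-in for darwin.utils.logger.setup_logging."""
    logging.basicConfig(
        level=level,
        format="%(asctime)s %(name)s %(levelname)s: %(message)s",
    )

setup_logging()
logging.getLogger("darwin").info("logging configured at package import")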
e972a2436807ff0f5af4282a7842451e07807e5e
bump to 0.0.8
alfredodeza/chacractl,ceph/chacractl
chacractl/__init__.py
chacractl/__init__.py
config = {'verbosity': 'info'} __version__ = '0.0.8'
config = {'verbosity': 'info'} __version__ = '0.0.7'
mit
Python
d88dcaa6e1256452715aa5071cbe326233f03195
format model args
aschn/drf-tracking
rest_framework_tracking/base_models.py
rest_framework_tracking/base_models.py
from django.db import models from django.conf import settings from django.utils.six import python_2_unicode_compatible from .managers import PrefetchUserManager @python_2_unicode_compatible class BaseAPIRequestLog(models.Model): """ Logs Django rest framework API requests """ user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True, ) requested_at = models.DateTimeField(db_index=True) response_ms = models.PositiveIntegerField(default=0) path = models.CharField( max_length=getattr(settings, 'DRF_TRACKING_PATH_LENGTH', 200), db_index=True, ) view = models.CharField( max_length=getattr(settings, 'DRF_TRACKING_VIEW_LENGTH', 200), null=True, blank=True, db_index=True, ) view_method = models.CharField( max_length=getattr(settings, 'DRF_TRACKING_VIEW_METHOD_LENGTH', 27), null=True, blank=True, db_index=True, ) remote_addr = models.GenericIPAddressField() host = models.URLField() method = models.CharField(max_length=10) query_params = models.TextField(null=True, blank=True) data = models.TextField(null=True, blank=True) response = models.TextField(null=True, blank=True) errors = models.TextField(null=True, blank=True) status_code = models.PositiveIntegerField(null=True, blank=True) objects = PrefetchUserManager() class Meta: abstract = True verbose_name = 'API Request Log' def __str__(self): return '{} {}'.format(self.method, self.path)
from django.db import models from django.conf import settings from django.utils.six import python_2_unicode_compatible from .managers import PrefetchUserManager @python_2_unicode_compatible class BaseAPIRequestLog(models.Model): """ Logs Django rest framework API requests """ user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True, ) requested_at = models.DateTimeField(db_index=True) response_ms = models.PositiveIntegerField(default=0) path = models.CharField( max_length=getattr(settings, 'DRF_TRACKING_PATH_LENGTH', 200), db_index=True, ) view = models.CharField( max_length=getattr(settings, 'DRF_TRACKING_VIEW_LENGTH', 200), null=True, blank=True, db_index=True, ) view_method = models.CharField( max_length=getattr(settings, 'DRF_TRACKING_VIEW_METHOD_LENGTH', 27), null=True, blank=True, db_index=True, ) remote_addr = models.GenericIPAddressField() host = models.URLField() method = models.CharField(max_length=10) query_params = models.TextField(null=True, blank=True) data = models.TextField(null=True, blank=True) response = models.TextField(null=True, blank=True) errors = models.TextField(null=True, blank=True) status_code = models.PositiveIntegerField(null=True, blank=True) objects = PrefetchUserManager() class Meta: abstract = True verbose_name = 'API Request Log' def __str__(self): return '{} {}'.format(self.method, self.path)
isc
Python
1b0b96d78d03af813b10359e1ee7d7dd47045307
Correct URL to include entire base for API client
bowlofstew/changes,wfxiang08/changes,wfxiang08/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,dropbox/changes
changes/api/client.py
changes/api/client.py
import json from flask import current_app class APIError(Exception): pass class APIClient(object): """ An internal API client. >>> client = APIClient(version=0) >>> response = client.get('/projects/') >>> print response """ def __init__(self, version): self.version = version def dispatch(self, url, method, data=None): url = '%s/api/%d/%s' % (current_app.config['BASE_URI'], self.version, url.lstrip('/')) with current_app.test_client() as client: response = client.open(path=url, method=method, data=data) if not (200 <= response.status_code < 300): raise APIError('Request returned invalid status code: %d' % (response.status_code,)) if response.headers['Content-Type'] != 'application/json': raise APIError('Request returned invalid content type: %s' % (response.headers['Content-Type'],)) # TODO(dcramer): ideally we wouldn't encode + decode this return json.loads(response.data) def delete(self, *args, **kwargs): return self.dispatch(method='DELETE', *args, **kwargs) def get(self, *args, **kwargs): return self.dispatch(method='GET', *args, **kwargs) def head(self, *args, **kwargs): return self.dispatch(method='HEAD', *args, **kwargs) def options(self, *args, **kwargs): return self.dispatch(method='OPTIONS', *args, **kwargs) def patch(self, *args, **kwargs): return self.dispatch(method='PATCH', *args, **kwargs) def post(self, *args, **kwargs): return self.dispatch(method='POST', *args, **kwargs) def put(self, *args, **kwargs): return self.dispatch(method='PUT', *args, **kwargs) api_client = APIClient(version=0)
import json from flask import current_app class APIError(Exception): pass class APIClient(object): """ An internal API client. >>> client = APIClient(version=0) >>> response = client.get('/projects/') >>> print response """ def __init__(self, version): self.version = version def dispatch(self, url, method, data=None): url = '/api/%d/%s' % (self.version, url.lstrip('/')) client = current_app.test_client() response = client.open(url, method, data) if not (200 <= response.status_code < 300): raise APIError('Request returned invalid status code: %d' % (response.status_code,)) if response.headers['Content-Type'] != 'application/json': raise APIError('Request returned invalid content type: %s' % (response.headers['Content-Type'],)) # TODO(dcramer): ideally we wouldn't encode + decode this return json.loads(response.data) def delete(self, *args, **kwargs): return self.dispatch(method='DELETE', *args, **kwargs) def get(self, *args, **kwargs): return self.dispatch(method='GET', *args, **kwargs) def head(self, *args, **kwargs): return self.dispatch(method='HEAD', *args, **kwargs) def options(self, *args, **kwargs): return self.dispatch(method='OPTIONS', *args, **kwargs) def patch(self, *args, **kwargs): return self.dispatch(method='PATCH', *args, **kwargs) def post(self, *args, **kwargs): return self.dispatch(method='POST', *args, **kwargs) def put(self, *args, **kwargs): return self.dispatch(method='PUT', *args, **kwargs) api_client = APIClient(version=0)
apache-2.0
Python
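The substance of the fix is prepending the application's base URI so the Flask test client receives an absolute URL (it also switches `test_client()` to a context manager and passes `open` its arguments by keyword). The URL construction in isolation, with a made-up base URI:

BASE_URI = "http://changes.example.com"  # hypothetical config value
version = 0

def build_url(url):
    return "%s/api/%d/%s" % (BASE_URI, version, url.lstrip("/"))

print(build_url("/projects/"))  # http://changes.example.com/api/0/projects/
print(build_url("projects/"))   # same result; lstrip('/') normalises both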
2ab57fd58150d9416986eb089430fcd090c9cc31
Fix ipynb tests
Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client,Anaconda-Platform/anaconda-client
binstar_client/inspect_package/ipynb.py
binstar_client/inspect_package/ipynb.py
import os import re import time from ..utils.notebook.inflection import parameterize from ..utils.notebook.data_uri import data_uri_from class IPythonNotebook(object): _name = None _version = None thumbnail_file = None def __init__(self, filename, fileobj, *args, **kwargs): self.filename = filename self.thumbnail_file = kwargs.get('thumbnail_file', None) @property def basename(self): return os.path.basename(self.filename) @property def name(self): if self._name is None: return re.sub('\-ipynb$', '', parameterize(os.path.basename(self.filename))) return self._name @property def version(self): if self._version is None: self._version = time.strftime('%Y.%m.%d-%H%M') return self._version @property def thumbnail(self): if self.thumbnail_file is None: return None return data_uri_from(self.thumbnail_file) def get_package_data(self): if self.thumbnail_file is None: return { 'name': self.name, 'summary': 'IPython notebook' } else: return { 'name': self.name, 'summary': 'IPython notebook', 'thumbnail': self.thumbnail } def inspect_ipynb_package(filename, fileobj, *args, **kwargs): if 'parser_args' in kwargs: thumbnail_file = kwargs['parser_args'].thumbnail ipython_notebook = IPythonNotebook(filename, fileobj, thumbnail_file=thumbnail_file) else: ipython_notebook = IPythonNotebook(filename, fileobj) package_data = ipython_notebook.get_package_data() release_data = { 'version': ipython_notebook.version, 'description': '' } file_data = { 'basename': ipython_notebook.basename, 'attrs': {} } return package_data, release_data, file_data
import os import re import time from ..utils.notebook.inflection import parameterize from ..utils.notebook.data_uri import data_uri_from class IPythonNotebook(object): _name = None _version = None thumbnail_file = None def __init__(self, filename, fileobj, *args, **kwargs): self.filename = filename self.thumbnail_file = kwargs.get('thumbnail_file', None) @property def basename(self): return os.path.basename(self.filename) @property def name(self): if self._name is None: return re.sub('\-ipynb$', '', parameterize(os.path.basename(self.filename))) return self._name @property def version(self): if self._version is None: self._version = time.strftime('%Y.%m.%d-%H%M') return self._version @property def thumbnail(self): if self.thumbnail_file is None: return None return data_uri_from(self.thumbnail_file) def get_package_data(self): if self.thumbnail_file is None: return { 'name': self.name, 'summary': 'IPython notebook' } else: return { 'name': self.name, 'summary': 'IPython notebook', 'thumbnail': self.thumbnail } def inspect_ipynb_package(filename, fileobj, *args, **kwargs): if 'parser_args' in kwargs: thumbnail_file = kwargs['parser_args'].thumbnail ipython_notebook = IPythonNotebook(filename, fileobj, thumbnail_file=thumbnail_file) package_data = ipython_notebook.get_package_data() release_data = { 'version': ipython_notebook.version, 'description': '' } file_data = { 'basename': ipython_notebook.basename, 'attrs': {} } return package_data, release_data, file_data
bsd-3-clause
Python
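The bug is a classic one: `ipython_notebook` was only bound inside the `if 'parser_args' in kwargs` branch, so every call without `parser_args` raised `UnboundLocalError` on the line after it, which is presumably what the tests caught. A stripped-down reproduction of the before/after behaviour:

def build_inspector(kwargs):
    if "parser_args" in kwargs:
        inspector = "notebook-with-thumbnail"
    else:  # the branch this commit adds
        inspector = "notebook"
    return inspector  # without the else: UnboundLocalError on {} input

print(build_inspector({}))                     # notebook
print(build_inspector({"parser_args": True}))  # notebook-with-thumbnail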
acc0cf7a9e44ca11384d7d2b0dcd743af6e99ef9
Update version to 1.0.3
vkosuri/ChatterBot,gunthercox/ChatterBot
chatterbot/__init__.py
chatterbot/__init__.py
""" ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __version__ = '1.0.3' __author__ = 'Gunther Cox' __email__ = '[email protected]' __url__ = 'https://github.com/gunthercox/ChatterBot' __all__ = ( 'ChatBot', )
""" ChatterBot is a machine learning, conversational dialog engine. """ from .chatterbot import ChatBot __version__ = '1.0.2' __author__ = 'Gunther Cox' __email__ = '[email protected]' __url__ = 'https://github.com/gunthercox/ChatterBot' __all__ = ( 'ChatBot', )
bsd-3-clause
Python
287b7b57056201d1d68c6d3cc963f1b2af2f7e83
Update scripts/alerts/validate_alerts_format.py
GoogleCloudPlatform/monitoring-dashboard-samples,GoogleCloudPlatform/monitoring-dashboard-samples
scripts/alerts/validate_alerts_format.py
scripts/alerts/validate_alerts_format.py
import json import sys import yaml from google.cloud import monitoring_v3 def check_json_in_metadata(path, file_id, file_version): metadata_path = "/".join(path.split("/")[:-1]) + "/metadata.yaml" check_metadata_entries(metadata_path) f = open(metadata_path) data = yaml.safe_load(f) templates_metadata = data.get("alert_policy_templates") for template_metadata in templates_metadata: if template_metadata.get("id") == file_id and template_metadata.get("version") == int(file_version[1]): return raise Exception("{} does not have an entry in {}".format(path, metadata_path)) def check_metadata_entries(path): f = open(path) data = yaml.safe_load(f) templates_metadata = data.get("alert_policy_templates") if not templates_metadata: raise Exception("alert_policy_templates not defined in {}".format(path)) required_fields = ["id", "version", "display_name", "description"] for template_metadata in templates_metadata: for field in required_fields: if field not in template_metadata.keys(): raise Exception("{} missing {}".format(path, field)) def check_json_file_name(path, file_name_parts): if len(file_name_parts) != 3: raise Exception("{} file name not in <name>.<version>.json format".format(path)) file_version = file_name_parts[1] if file_version[0] != "v": raise Exception("{} version does not start with 'v'".format(path)) if not file_version[1].isnumeric(): raise Exception("{} 'v' is not followed by numeric version number".format(path)) def check_is_alert_policy_json(path): f = open(path) try: policy_json = json.dumps(json.load(f)) except: raise Exception("{} content could not be loaded".format(path)) monitoring_v3.AlertPolicy.from_json(policy_json) def main(): path = sys.argv[1] # only run validation script on files added/changed in # alert_templates folder if path.split("/")[0] != "alerts": sys.exit() file_name = path.split("/")[-1] file_name_parts = file_name.split(".") # metadata file added/changed would be checked for expected fields if file_name == "metadata.yaml": check_metadata_entries(path) # all json files added to alerts folder are implictly taken as alert policy jsons # and must follow expected file hierarchy and naming if path.split(".")[-1] == "json": # checking if json file name is in the correct format check_json_file_name(path, file_name_parts) # check if file has entry in metadata.yaml check_json_in_metadata(path, file_name_parts[0], file_name_parts[1]) # checking if json content is indeed an alert policy check_is_alert_policy_json(path) if __name__ == '__main__': main()
import sys import yaml import json from google.cloud import monitoring_v3 def check_json_in_metadata(path, file_id, file_version): metadata_path = "/".join(path.split("/")[:-1]) + "/metadata.yaml" check_metadata_entries(metadata_path) f = open(metadata_path) data = yaml.safe_load(f) templates_metadata = data.get("alert_policy_templates") for template_metadata in templates_metadata: if template_metadata.get("id") == file_id and template_metadata.get("version") == int(file_version[1]): return raise Exception("{} does not have an entry in {}".format(path, metadata_path)) def check_metadata_entries(path): f = open(path) data = yaml.safe_load(f) templates_metadata = data.get("alert_policy_templates") if not templates_metadata: raise Exception("alert_policy_templates not defined in {}".format(path)) required_fields = ["id", "version", "display_name", "description"] for template_metadata in templates_metadata: for field in required_fields: if field not in template_metadata.keys(): raise Exception("{} missing {}".format(path, field)) def check_json_file_name(path, file_name_parts): if len(file_name_parts) != 3: raise Exception("{} file name not in <name>.<version>.json format".format(path)) file_version = file_name_parts[1] if file_version[0] != "v": raise Exception("{} version does not start with 'v'".format(path)) if not file_version[1].isnumeric(): raise Exception("{} 'v' is not followed by numeric version number".format(path)) def check_is_alert_policy_json(path): f = open(path) try: policy_json = json.dumps(json.load(f)) except: raise Exception("{} content could not be loaded".format(path)) monitoring_v3.AlertPolicy.from_json(policy_json) def main(): path = sys.argv[1] # only run validation script on files added/changed in # alert_templates folder if path.split("/")[0] != "alerts": sys.exit() file_name = path.split("/")[-1] file_name_parts = file_name.split(".") # metadata file added/changed would be checked for expected fields if file_name == "metadata.yaml": check_metadata_entries(path) # all json files added to alerts folder are implictly taken as alert policy jsons # and must follow expected file hierarchy and naming if path.split(".")[-1] == "json": # checking if json file name is in the correct format check_json_file_name(path, file_name_parts) # check if file has entry in metadata.yaml check_json_in_metadata(path, file_name_parts[0], file_name_parts[1]) # checking if json content is indeed an alert policy check_is_alert_policy_json(path) if __name__ == '__main__': main()
apache-2.0
Python
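The diff itself only regroups the imports (`json` joins the other imports at the top). The `<name>.<version>.json` convention the script enforces is easy to exercise standalone; note the original only validates the first character after the `v`, so this sketch is deliberately a little stricter:

def split_alert_filename(file_name):
    parts = file_name.split(".")
    if len(parts) != 3 or parts[2] != "json":
        raise ValueError("expected <name>.<version>.json")
    name, version = parts[0], parts[1]
    if not (version.startswith("v") and version[1:].isdigit()):
        raise ValueError("version must look like v1, v2, ...")
    return name, int(version[1:])

print(split_alert_filename("cpu_usage.v2.json"))  # ('cpu_usage', 2)
# split_alert_filename("cpu_usage.json") raises ValueError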
fac1e1bbe26e0fb7b82d65e48619cacc742ea747
Update default path
davidgasquez/kaggle-airbnb
notebooks/utils/data_loading.py
notebooks/utils/data_loading.py
"""Wrappers to simplify data loading.""" import pandas as pd # Set default path DEFAULT_PATH = '../data/raw/' def load_users_data(path=DEFAULT_PATH, preprocessed=False): """Load users data into train and test users. Parameters ---------- path: str Path of the folder containing the data. Returns ------- train_users, test_users: DataFrame, DataFrame Loaded DataFrames. """ if not preprocessed: train_users = pd.read_csv(path + 'train_users.csv') test_users = pd.read_csv(path + 'test_users.csv') else: path = '../datasets/processed/' train_users = pd.read_csv(path + 'preprocessed_train_users.csv') test_users = pd.read_csv(path + 'preprocessed_test_users.csv') return train_users, test_users def load_sessions_data(path=DEFAULT_PATH): """Load the users sessions data. Parameters ---------- path: str Path of the folder containing the data. Returns ------- sessions: DataFrame Loaded DataFrame. """ return pd.read_csv(path + 'sessions.csv')
"""Wrappers to simplify data loading.""" import pandas as pd # Set default path DEFAULT_PATH = '../datasets/raw/' def load_users_data(path=DEFAULT_PATH, preprocessed=False): """Load users data into train and test users. Parameters ---------- path: str Path of the folder containing the data. Returns ------- train_users, test_users: DataFrame, DataFrame Loaded DataFrames. """ if not preprocessed: train_users = pd.read_csv(path + 'train_users.csv') test_users = pd.read_csv(path + 'test_users.csv') else: path = '../datasets/processed/' train_users = pd.read_csv(path + 'preprocessed_train_users.csv') test_users = pd.read_csv(path + 'preprocessed_test_users.csv') return train_users, test_users def load_sessions_data(path=DEFAULT_PATH): """Load the users sessions data. Parameters ---------- path: str Path of the folder containing the data. Returns ------- sessions: DataFrame Loaded DataFrame. """ return pd.read_csv(path + 'sessions.csv')
mit
Python
d03571b523ba125be94d68bc50cda74a9a934d6f
fix documents
okuta/chainer,chainer/chainer,wkentaro/chainer,ronekko/chainer,ktnyt/chainer,chainer/chainer,niboshi/chainer,ysekky/chainer,cupy/cupy,jnishi/chainer,anaruse/chainer,tkerola/chainer,okuta/chainer,keisuke-umezawa/chainer,ktnyt/chainer,rezoo/chainer,niboshi/chainer,cupy/cupy,kashif/chainer,pfnet/chainer,keisuke-umezawa/chainer,hvy/chainer,jnishi/chainer,hvy/chainer,wkentaro/chainer,ktnyt/chainer,niboshi/chainer,jnishi/chainer,keisuke-umezawa/chainer,chainer/chainer,ktnyt/chainer,wkentaro/chainer,niboshi/chainer,delta2323/chainer,chainer/chainer,jnishi/chainer,cupy/cupy,aonotas/chainer,wkentaro/chainer,okuta/chainer,keisuke-umezawa/chainer,cupy/cupy,hvy/chainer,hvy/chainer,okuta/chainer,kiyukuta/chainer
chainer/functions/evaluation/r2_score.py
chainer/functions/evaluation/r2_score.py
from chainer import cuda from chainer import function from chainer.utils import type_check class R2_score(function.Function): def __init__(self, sample_weight, multioutput): if sample_weight is not None: raise NotImplementedError() if multioutput in ['uniform_average', 'raw_values']: self.multioutput = multioutput else: raise ValueError("invalid multioutput argument") def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) pred_type, true_type = in_types type_check.expect( pred_type.dtype.kind == 'f', true_type.dtype.kind == 'f' ) type_check.expect( pred_type.ndim >= true_type.ndim, pred_type.shape == true_type.shape, ) def forward(self, inputs): xp = cuda.get_array_module(*inputs) pred, true = inputs SS_res = xp.sum((pred-true)**2, axis=0) SS_tot = xp.sum((true-xp.mean(true, axis=0))**2, axis=0) if self.multioutput == 'uniform_average': return xp.asarray((1 - SS_res / SS_tot).mean(), dtype=pred.dtype), elif self.multioutput == 'raw_values': return xp.asarray((1 - SS_res / SS_tot), dtype=pred.dtype), def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'): """Computes R^2(coefficient of determination) regression score function. Args: pred(Variable): Variable holding a vector or matrix of estimated \ target values. true(Variable): Variable holding a vector or matrix of correct target \ values. sample_weight: None. multioutput(string): ['uniform_average', 'raw_values']. if \ 'uniform_average', this function return an average of R^2\ score of multiple output. If 'raw_average', this function \ return a set of R^2 score of multiple output. Returns: Variable: A Variable holding a scalar array of the R^2 score if \ 'multioutput' is 'uniform_average' or a vector of R^2 \ scores if 'multioutput' is 'raw_values'. .. note:: This function is non-differentiable. """ return R2_score(sample_weight=sample_weight, multioutput=multioutput)\ (pred, true)
from chainer import cuda from chainer import function from chainer.utils import type_check class R2_score(function.Function): def __init__(self, sample_weight, multioutput): if sample_weight is not None: raise NotImplementedError() if multioutput in ['uniform_average', 'raw_values']: self.multioutput = multioutput else: raise ValueError("invalid multioutput argument") def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) pred_type, true_type = in_types type_check.expect( pred_type.dtype.kind == 'f', true_type.dtype.kind == 'f' ) type_check.expect( pred_type.ndim >= true_type.ndim, pred_type.shape == true_type.shape, ) def forward(self, inputs): xp = cuda.get_array_module(*inputs) pred, true = inputs SS_res = xp.sum((pred-true)**2, axis=0) SS_tot = xp.sum((true-xp.mean(true, axis=0))**2, axis=0) if self.multioutput == 'uniform_average': return xp.asarray((1 - SS_res / SS_tot).mean(), dtype=pred.dtype), elif self.multioutput == 'raw_values': return xp.asarray((1 - SS_res / SS_tot), dtype=pred.dtype), def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'): """Computes R^2(coefficient of determination) regression score function. Args: pred(Variable): Variable holding a vector or matrix of estimated \ target values true(Variable): Variable holding a vector or matrix of correct target \ values sample_weight: NotImplemented multioutput(string): ['uniform_average', 'raw_values']. if \ 'uniform_average', this function return an average of R^2\ score of multiple output. If 'raw_average', this function \ return a set of R^2 score of multiple output. Returns: Variable: A Variable holding a scalar array of the R^2 score if \ 'multioutput' is 'uniform_average' or a vector of R^2 \ scores if 'multioutput' is 'raw_values'. .. note:: This function is non-differentiable """ return R2_score(sample_weight=sample_weight, multioutput=multioutput)\ (pred, true)
mit
Python
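The commit only polishes the docstring (full stops, `sample_weight` wording). The score computed in `forward` is the textbook R² = 1 - SS_res/SS_tot; a quick NumPy check on the classic example values from the scikit-learn docs:

import numpy as np

pred = np.array([2.5, 0.0, 2.0, 8.0])
true = np.array([3.0, -0.5, 2.0, 7.0])

ss_res = np.sum((pred - true) ** 2, axis=0)
ss_tot = np.sum((true - true.mean(axis=0)) ** 2, axis=0)
print(1.0 - ss_res / ss_tot)  # ~0.9486, matching sklearn.metrics.r2_score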
d6c20476bebed1265ccd0ac46e3020fdb3804bdd
Add type to command serialization
bowlofstew/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,wfxiang08/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,wfxiang08/changes
changes/api/serializer/models/command.py
changes/api/serializer/models/command.py
from changes.api.serializer import Serializer, register from changes.models import Command @register(Command) class CommandSerializer(Serializer): def serialize(self, instance, attrs): return { 'id': instance.id.hex, 'name': instance.label, 'status': instance.status, 'script': instance.script, 'returnCode': instance.return_code, 'env': dict(instance.env or {}), 'cwd': instance.cwd, 'type': instance.type, 'artifacts': instance.artifacts or [], 'duration': instance.duration, 'dateCreated': instance.date_created, 'dateStarted': instance.date_started, 'dateFinished': instance.date_finished, }
from changes.api.serializer import Serializer, register from changes.models import Command @register(Command) class CommandSerializer(Serializer): def serialize(self, instance, attrs): return { 'id': instance.id.hex, 'name': instance.label, 'status': instance.status, 'script': instance.script, 'returnCode': instance.return_code, 'env': dict(instance.env or {}), 'cwd': instance.cwd, 'artifacts': instance.artifacts or [], 'duration': instance.duration, 'dateCreated': instance.date_created, 'dateStarted': instance.date_started, 'dateFinished': instance.date_finished, }
apache-2.0
Python
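The serializer gains a single `type` key alongside the existing fields. A dict-level sketch of how the new key surfaces in the payload (values invented):

def serialize_command(cmd):
    return {
        "id": cmd["id"],
        "name": cmd["label"],
        "type": cmd["type"],  # the key this commit adds
        "status": cmd["status"],
    }

print(serialize_command(
    {"id": "ab12", "label": "setup", "type": 1, "status": "finished"}))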
76e1f2db2fe3763e1b8638c9044afa341e4d39bf
Fix fileno method in ReactorTransport.
kirkeby/sheared
sheared/reactor/transport.py
sheared/reactor/transport.py
# vim:nowrap:textwidth=0 import random, os, types class StringTransport: def __init__(self): self.input = '' self.output = '' self.closed = 0 def read(self, cnt=4096): cnt = min(cnt, 1 + int(random.random() * (len(self.input) - 1))) data = self.input[:cnt] self.input = self.input[cnt:] return data def write(self, data): if self.closed: raise IOError, 'cannot write to a closed Transport' self.output = self.output + data return len(data) def sendfile(self, file): d = file.read() while not d == '': self.output = self.output + d d = file.read() def close(self): if self.closed: raise IOError, 'already closed' self.closed = 1 def appendInput(self, data): self.input = self.input + data def getOutput(self): return self.output class FileTransport: def __init__(self, reactor, file, other): self.file = file if isinstance(file, types.IntType): self.fileno = file else: self.fileno = file.fileno() def read(self, max=4096): return os.read(self.fileno, max) def write(self, data): while data: cnt = os.write(self.fileno, data) data = data[cnt:] def close(self): os.close(self.fileno) class ReactorTransport: def __init__(self, reactor, file, other): self.reactor = reactor #self.file = self.reactor.prepareFile(file) self.file = file self.other = other self.closed = 0 def read(self, max=4096): return self.reactor.read(self.file, max) def write(self, data): self.reactor.write(self.file, data) def sendfile(self, file): self.reactor.sendfile(file, self.file) def fileno(self): if type(self.file) is types.IntType: return self.file else: return self.file.fileno() def close(self): self.reactor.close(self.file) self.closed = 1
# vim:nowrap:textwidth=0 import random, os, types class StringTransport: def __init__(self): self.input = '' self.output = '' self.closed = 0 def read(self, cnt=4096): cnt = min(cnt, 1 + int(random.random() * (len(self.input) - 1))) data = self.input[:cnt] self.input = self.input[cnt:] return data def write(self, data): if self.closed: raise IOError, 'cannot write to a closed Transport' self.output = self.output + data return len(data) def sendfile(self, file): d = file.read() while not d == '': self.output = self.output + d d = file.read() def close(self): if self.closed: raise IOError, 'already closed' self.closed = 1 def appendInput(self, data): self.input = self.input + data def getOutput(self): return self.output class FileTransport: def __init__(self, reactor, file, other): self.file = file if isinstance(file, types.IntType): self.fileno = file else: self.fileno = file.fileno() def read(self, max=4096): return os.read(self.fileno, max) def write(self, data): while data: cnt = os.write(self.fileno, data) data = data[cnt:] def close(self): os.close(self.fileno) class ReactorTransport: def __init__(self, reactor, file, other): self.reactor = reactor #self.file = self.reactor.prepareFile(file) self.file = file self.other = other self.closed = 0 def read(self, max=4096): return self.reactor.read(self.file, max) def write(self, data): self.reactor.write(self.file, data) def sendfile(self, file): self.reactor.sendfile(file, self.file) def fileno(self): return self.reactor.getfd(self.file) def close(self): self.reactor.close(self.file) self.closed = 1
mit
Python
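The fix inlines the descriptor lookup instead of delegating to a `reactor.getfd()` that evidently was not available on every reactor; `self.file` may be either a raw descriptor or a file-like object (the `types.IntType` spelling marks this as Python 2 code). The same accept-either pattern in modern Python:

import sys

def fileno_of(file_or_fd):
    # Mirrors the fixed ReactorTransport.fileno: take a raw int
    # descriptor as-is, otherwise ask the object for its fileno().
    if isinstance(file_or_fd, int):
        return file_or_fd
    return file_or_fd.fileno()

print(fileno_of(7))           # 7
print(fileno_of(sys.stdout))  # usually 1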
67ff8c30d07f54c89d8072f480ded59e144d8463
Update range test after reporting change
pombredanne/numba,gmarkall/numba,seibert/numba,sklam/numba,jriehl/numba,cpcloud/numba,sklam/numba,stonebig/numba,shiquanwang/numba,numba/numba,cpcloud/numba,pombredanne/numba,pitrou/numba,gdementen/numba,shiquanwang/numba,gmarkall/numba,sklam/numba,jriehl/numba,jriehl/numba,GaZ3ll3/numba,gmarkall/numba,seibert/numba,cpcloud/numba,stuartarchibald/numba,numba/numba,gdementen/numba,pitrou/numba,stefanseefeld/numba,gdementen/numba,GaZ3ll3/numba,shiquanwang/numba,stuartarchibald/numba,stuartarchibald/numba,pombredanne/numba,stonebig/numba,IntelLabs/numba,stonebig/numba,pitrou/numba,numba/numba,stefanseefeld/numba,gdementen/numba,cpcloud/numba,stonebig/numba,sklam/numba,stefanseefeld/numba,seibert/numba,stefanseefeld/numba,pombredanne/numba,IntelLabs/numba,gdementen/numba,gmarkall/numba,IntelLabs/numba,stefanseefeld/numba,ssarangi/numba,ssarangi/numba,ssarangi/numba,GaZ3ll3/numba,sklam/numba,pitrou/numba,cpcloud/numba,stuartarchibald/numba,jriehl/numba,numba/numba,pombredanne/numba,pitrou/numba,GaZ3ll3/numba,seibert/numba,jriehl/numba,gmarkall/numba,numba/numba,GaZ3ll3/numba,stonebig/numba,stuartarchibald/numba,ssarangi/numba,seibert/numba,ssarangi/numba,IntelLabs/numba,IntelLabs/numba
numba/tests/builtins/test_builtin_range.py
numba/tests/builtins/test_builtin_range.py
""" >>> range_ret1() [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> range_ret2() [1, 2, 3, 4] >>> range_ret3() [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4] >>> forward1() 0 1 2 3 4 5 6 7 8 9 done >>> forward2() 1 2 3 4 done >>> forward3() 5 8 11 14 done >>> backward1() 10 7 4 done >>> backward2() done >>> backward3() -5 -8 -11 -14 done >>> empty_assign() 14 >>> last_value() --------------------- Numba Encountered Errors or Warnings --------------------- <BLANKLINE> Warning 96:10: local variable 'i' might be referenced before assignment <BLANKLINE> -------------------------------------------------------------------------------- 9 """ from numba import * @autojit def range_ret1(): return range(10) @autojit def range_ret2(): return range(1, 5) @autojit def range_ret3(): return range(10, -5, -1) @autojit def forward1(): for i in range(10): print i, print "done" @autojit def forward2(): for i in range(1, 5): print i, print "done" @autojit def forward3(): for i in range(5, 15, 3): print i, print "done" @autojit def backward1(): for i in range(10, 2, -3): print i, print "done" @autojit def backward2(): for i in range(1, 5, -1): print i, print "done" @autojit def backward3(): for i in range(-5, -15, -3): print i, print "done" @autojit def empty_assign(): i = 14 for i in range(10, 4): pass print i @autojit def last_value(): for i in range(10): pass print i if __name__ == '__main__': # backward3() import doctest doctest.testmod()
""" >>> range_ret1() [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> range_ret2() [1, 2, 3, 4] >>> range_ret3() [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4] >>> forward1() 0 1 2 3 4 5 6 7 8 9 done >>> forward2() 1 2 3 4 done >>> forward3() 5 8 11 14 done >>> backward1() 10 7 4 done >>> backward2() done >>> backward3() -5 -8 -11 -14 done >>> empty_assign() 14 >>> last_value() Warning 92:10: local variable 'i' might be referenced before assignment 9 """ from numba import * @autojit def range_ret1(): return range(10) @autojit def range_ret2(): return range(1, 5) @autojit def range_ret3(): return range(10, -5, -1) @autojit def forward1(): for i in range(10): print i, print "done" @autojit def forward2(): for i in range(1, 5): print i, print "done" @autojit def forward3(): for i in range(5, 15, 3): print i, print "done" @autojit def backward1(): for i in range(10, 2, -3): print i, print "done" @autojit def backward2(): for i in range(1, 5, -1): print i, print "done" @autojit def backward3(): for i in range(-5, -15, -3): print i, print "done" @autojit def empty_assign(): i = 14 for i in range(10, 4): pass print i @autojit def last_value(): for i in range(10): pass print i if __name__ == '__main__': backward3() # import doctest # doctest.testmod()
bsd-2-clause
Python
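The expected doctest output gains the warning banner plus blank lines, and doctest can only match empty output lines through the `<BLANKLINE>` marker, hence the markers in the updated test. The mechanism in miniature:

import doctest

def warn_banner():
    """
    >>> warn_banner()
    ---- Warnings ----
    <BLANKLINE>
    something happened
    """
    print("---- Warnings ----\n\nsomething happened")

print(doctest.testmod())  # TestResults(failed=0, attempted=1)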
5826f791e824b7aa0b38b76570212b7de6e5d1e2
Index descriptor_data as Text field in ES
genialis/resolwe,jberci/resolwe,genialis/resolwe,jberci/resolwe
resolwe/flow/elastic_indexes/collection.py
resolwe/flow/elastic_indexes/collection.py
"""Elastic Search indexes for Collection model.""" import elasticsearch_dsl as dsl from resolwe.elastic.indices import BaseIndex from resolwe.flow.utils import dict_dot, iterate_schema from ..models import Collection from .base import BaseDocument, BaseIndexMixin class CollectionDocument(BaseDocument): """Document for collection search.""" # Data values extracted from the descriptor. descriptor_data = dsl.Text(multi=True) tags = dsl.Keyword(multi=True) class Meta: """Meta class for collection search document.""" index = 'collection' class CollectionIndexMixin: """Mixin for indices for collection objects used in ``CollectionDocument``.""" def extract_descriptor(self, obj): """Extract data from the descriptor.""" if not obj.descriptor_schema: return [] descriptor = [] for _, _, path in iterate_schema(obj.descriptor, obj.descriptor_schema.schema): try: value = dict_dot(obj.descriptor, path) except KeyError: continue if not isinstance(value, list): value = [value] for item in value: if not isinstance(item, (int, bool, float, str)): continue descriptor.append('{}'.format(item)) return descriptor def get_descriptor_data_value(self, obj): """Extract data from the descriptors.""" return self.extract_descriptor(obj) class CollectionIndex(BaseIndexMixin, CollectionIndexMixin, BaseIndex): """Index for collection objects used in ``CollectionDocument``.""" queryset = Collection.objects.all().prefetch_related( 'descriptor_schema', 'contributor' ) object_type = Collection document_class = CollectionDocument
"""Elastic Search indexes for Collection model.""" import elasticsearch_dsl as dsl from resolwe.elastic.fields import Name from resolwe.elastic.indices import BaseIndex from resolwe.flow.utils import dict_dot, iterate_schema from ..models import Collection from .base import BaseDocument, BaseIndexMixin class CollectionDocument(BaseDocument): """Document for collection search.""" # Data values extracted from the descriptor. descriptor_data = Name(multi=True) tags = dsl.Keyword(multi=True) class Meta: """Meta class for collection search document.""" index = 'collection' class CollectionIndexMixin: """Mixin for indices for collection objects used in ``CollectionDocument``.""" def extract_descriptor(self, obj): """Extract data from the descriptor.""" if not obj.descriptor_schema: return [] descriptor = [] for _, _, path in iterate_schema(obj.descriptor, obj.descriptor_schema.schema): try: value = dict_dot(obj.descriptor, path) except KeyError: continue if not isinstance(value, list): value = [value] for item in value: if not isinstance(item, (int, bool, float, str)): continue descriptor.append('{}'.format(item)) return descriptor def get_descriptor_data_value(self, obj): """Extract data from the descriptors.""" return self.extract_descriptor(obj) class CollectionIndex(BaseIndexMixin, CollectionIndexMixin, BaseIndex): """Index for collection objects used in ``CollectionDocument``.""" queryset = Collection.objects.all().prefetch_related( 'descriptor_schema', 'contributor' ) object_type = Collection document_class = CollectionDocument
apache-2.0
Python
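Moving `descriptor_data` from the project's custom `Name` field to `dsl.Text` makes descriptor values analyzed (tokenised and lowercased), so free-text queries can hit individual words, whereas the `Keyword`-typed `tags` only match exact stored strings. Assuming a reasonably recent `elasticsearch_dsl`, the mapping difference in miniature:

import elasticsearch_dsl as dsl

descriptor_data = dsl.Text(multi=True)   # analyzed full-text field
tags = dsl.Keyword(multi=True)           # exact-value field

print(descriptor_data.to_dict())  # {'type': 'text'}
print(tags.to_dict())             # {'type': 'keyword'}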
7381f177f392b699eed3d93f2e36b7fa39d33ad0
remove unused import
geometalab/osmaxx,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx-frontend,geometalab/osmaxx,geometalab/osmaxx,geometalab/osmaxx-frontend,geometalab/drf-utm-zone-info,geometalab/drf-utm-zone-info
build_and_push_images.py
build_and_push_images.py
#!/usr/bin/env python IMAGES = [ dict(image_name='geometalab/osmaxx-mediator', dockerfile='Dockerfile.mediator'), dict(image_name='geometalab/osmaxx-worker', dockerfile='Dockerfile.worker'), dict(image_name='geometalab/osmaxx-frontend', dockerfile='Dockerfile.frontend'), ] def docker_build(dockerfile, image_name, release, location='.'): subprocess.check_call(['docker', 'build', '-f', dockerfile, '-t', '{}:{}'.format(image_name, release), location]) def docker_push(release, image_name, *args, **kwargs): subprocess.check_call(['docker', 'push', '{}:{}'.format(image_name, release)]) if __name__ == '__main__': import subprocess release = subprocess.check_output(["git", "describe"]).strip().decode() for image in IMAGES: docker_build(release=release, **image) for image in IMAGES: docker_push(release=release, **image) print(release, ' has been pushed, you can now use that in your deployment!')
#!/usr/bin/env python import argparse IMAGES = [ dict(image_name='geometalab/osmaxx-mediator', dockerfile='Dockerfile.mediator'), dict(image_name='geometalab/osmaxx-worker', dockerfile='Dockerfile.worker'), dict(image_name='geometalab/osmaxx-frontend', dockerfile='Dockerfile.frontend'), ] def docker_build(dockerfile, image_name, release, location='.'): subprocess.check_call(['docker', 'build', '-f', dockerfile, '-t', '{}:{}'.format(image_name, release), location]) def docker_push(release, image_name, *args, **kwargs): subprocess.check_call(['docker', 'push', '{}:{}'.format(image_name, release)]) if __name__ == '__main__': import subprocess release = subprocess.check_output(["git", "describe"]).strip().decode() for image in IMAGES: docker_build(release=release, **image) for image in IMAGES: docker_push(release=release, **image) print(release, ' has been pushed, you can now use that in your deployment!')
mit
Python
7ca3308ced87a51ac073e50229d15b0784f5aed7
Update chainerx/_docs/device.py
niboshi/chainer,tkerola/chainer,jnishi/chainer,wkentaro/chainer,okuta/chainer,chainer/chainer,wkentaro/chainer,chainer/chainer,keisuke-umezawa/chainer,hvy/chainer,keisuke-umezawa/chainer,niboshi/chainer,niboshi/chainer,hvy/chainer,jnishi/chainer,hvy/chainer,jnishi/chainer,wkentaro/chainer,hvy/chainer,okuta/chainer,ktnyt/chainer,wkentaro/chainer,keisuke-umezawa/chainer,chainer/chainer,okuta/chainer,ktnyt/chainer,okuta/chainer,keisuke-umezawa/chainer,pfnet/chainer,jnishi/chainer,ktnyt/chainer,niboshi/chainer,ktnyt/chainer,chainer/chainer
chainerx/_docs/device.py
chainerx/_docs/device.py
import chainerx from chainerx import _docs def _set_docs_device(): Device = chainerx.Device _docs.set_doc( Device, """Represents a physical computing unit. """) _docs.set_doc( Device.synchronize, """Synchronizes the device. """) _docs.set_doc( Device.name, """Device name. Returns: str: Device name. """) _docs.set_doc( Device.backend, """Backend to which this device belongs. Returns: ~chainerx.Backend: Backend object. """) _docs.set_doc( Device.context, """Context to which this device belongs. Returns: ~chainerx.Context: Context object. """) _docs.set_doc( Device.index, """Index of this device. Returns: int: Index of this device. """) def set_docs(): _set_docs_device() _docs.set_doc( chainerx.get_device, """get_device(*device) Returns a device specified by the arguments. If the argument is a :class:`~chainerx.Device` instance, it's simply returned. Otherwise, there are three ways to specify a device: .. testcode:: # Specify backend name and device index separately. chainerx.get_device('native', 0) # Specify backend name and device index in a single string. chainerx.get_device('native:0') # Specify only backend name. In this case device index 0 is chosen. chainerx.get_device('native') Returns: ~chainerx.Device: Device object. """) _docs.set_doc( chainerx.get_default_device, """get_default_device() Returns the default device associated with the current thread. Returns: ~chainerx.Device: The default device. .. seealso:: * :func:`chainerx.set_default_device` * :func:`chainerx.device_scope` """) _docs.set_doc( chainerx.set_default_device, """set_default_device(device) Sets a given device as the default device of the current thread. Args: device (~chainerx.Device or str): Device object or device name to set as the default device. .. seealso:: * :func:`chainerx.get_default_device` * :func:`chainerx.device_scope` """) _docs.set_doc( chainerx.device_scope, """device_scope(device) Creates a context manager to temporarily set the default device. Args: device (~chainerx.Device or str): Device object or device name to set as the default device during the context. .. seealso:: * :func:`chainerx.get_default_device` * :func:`chainerx.set_default_device` """)
import chainerx from chainerx import _docs def _set_docs_device(): Device = chainerx.Device _docs.set_doc( Device, """Represents a physical computing unit. """) _docs.set_doc( Device.synchronize, """Synchronizes the device. """) _docs.set_doc( Device.name, """Device name. Returns: str: Device name. """) _docs.set_doc( Device.backend, """Backend to which this device belongs. Returns: ~chainerx.Backend: Backend object. """) _docs.set_doc( Device.context, """Context to which this device belongs. Returns: ~chainerx.Context: Context object. """) _docs.set_doc( Device.index, """Index of this device. Returns: int: Index of this device. """) def set_docs(): _set_docs_device() _docs.set_doc( chainerx.get_device, """get_device(*device) Returns a device specified by the argument. If the argument is a :class:`~chainerx.Device` instance, it's simply returned. Otherwise, there are three ways to specify a device: .. testcode:: # Specify backend name and device index separately. chainerx.get_device('native', 0) # Specify backend name and device index in a single string. chainerx.get_device('native:0') # Specify only backend name. In this case device index 0 is chosen. chainerx.get_device('native') Returns: ~chainerx.Device: Device object. """) _docs.set_doc( chainerx.get_default_device, """get_default_device() Returns the default device associated with the current thread. Returns: ~chainerx.Device: The default device. .. seealso:: * :func:`chainerx.set_default_device` * :func:`chainerx.device_scope` """) _docs.set_doc( chainerx.set_default_device, """set_default_device(device) Sets a given device as the default device of the current thread. Args: device (~chainerx.Device or str): Device object or device name to set as the default device. .. seealso:: * :func:`chainerx.get_default_device` * :func:`chainerx.device_scope` """) _docs.set_doc( chainerx.device_scope, """device_scope(device) Creates a context manager to temporarily set the default device. Args: device (~chainerx.Device or str): Device object or device name to set as the default device during the context. .. seealso:: * :func:`chainerx.get_default_device` * :func:`chainerx.set_default_device` """)
mit
Python
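The change is a one-word docstring fix ("argument" to "arguments"). The docstring itself already shows the three accepted call forms; a quick standalone check that they resolve to the same device name (requires a chainerx build with the native backend):

import chainerx

a = chainerx.get_device('native', 0)
b = chainerx.get_device('native:0')
c = chainerx.get_device('native')
print(a.name, b.name, c.name)  # native:0 native:0 native:0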