commit (stringlengths 40-40) | subject (stringlengths 4-1.73k) | repos (stringlengths 5-127k) | old_file (stringlengths 2-751) | new_file (stringlengths 2-751) | new_contents (stringlengths 1-8.98k) | old_contents (stringlengths 0-6.59k) | license (stringclasses 13 values) | lang (stringclasses 23 values)
---|---|---|---|---|---|---|---|---|
3f43a5358bb58269846e21207bd570046b6aa711 | Create main_queue_thread.py | Python-IoT/Smart-IoT-Planting-System,Python-IoT/Smart-IoT-Planting-System | gateway/src/main_queue_thread.py | gateway/src/main_queue_thread.py | #!/usr/bin/env python
import threading, time
import queue
q = queue.Queue()
def Producer():
    n = 0
    while n < 1000:
        n += 1
        q.put(n)
        # print('Producer has created %s' % n)
        # time.sleep(0.1)
def Consumer():
    count = 0
    while count < 1000:
        count += 1
        data = q.get()
        # print('Consumer has used %s' % data)
        # time.sleep(0.2)
p = threading.Thread(target = Producer, name='')
c = threading.Thread(target = Consumer, name='')
import serial
import time
import json
import threading
from time import ctime,sleep
import queue
q = queue.Queue()
#ser = serial.Serial("/dev/ttyS0", 9600)
ser = serial.Serial("/dev/ttyS0", 9600, timeout=0.2)
recv = ''
def Lora(func):
    global recv
    while True:
        # Waiting for a LoRa module message from the uart port.
        count = ser.inWaiting()
        if count != 0:
            recv = ser.readline()  # readline() needs a timeout set, otherwise it blocks
            ser.flushInput()
            q.put(recv.decode())
            print(recv.decode())
        sleep(0.1)
def Lora_json(func):
    global recv
    while True:
        if q.empty():
            pass
        else:
            print(q.qsize())
            data = q.get()
            # json_lora = json.loads(bytes.decode(recv))
            json_lora = json.loads(data)
            # Parse JSON
            #print(json_lora.get("ID"))
            #print(json_lora["ID"])
            #if json_lora.get("ID") == '1':  # Device ID-1 exists in the gateway database
            if int(json_lora.get("ID")) == 1:  # Device ID-1 exists in the gateway database
                if json_lora.get("CMD") == 'Online':
                    response = '{"ID":"1", "CMD":"Online", "TYPE":"Light2", "VALUE":"On"}'
                    print(response.encode())
                elif json_lora.get("CMD") == 'Env':
                    if json_lora.get("TYPE") == 'moisture':
                        if int(json_lora.get("VALUE")) < 2000:  # soil moisture is lower than the standard
                            response = '{"ID":"1", "CMD":"irrigate", "TYPE":"Open", "VALUE":"100"}'
                            ser.write(str.encode(response))
            else:
                print('init_device')
                #init_device()  # Create sqlite table for device 1.
            recv = ''
        #print("This is %s. %s" % (func, ctime()))
        sleep(1)
def gateway_init():
    print('gateway init')
    print('check gateway database existed or not')
    print('gateway database does not exist')
    print('read gateway ID from gateway.inf')
    print('send ID to server to check gateway database backup on server or not')
    #requests.post('http://www.sips.com/gateway', data=json.dumps({'ID': '123456'}))
    print('if ID backup on server, download it, otherwise init it')
    #url = 'http://www.sips.com/gateway/123456/sips.db'
    #r = requests.get(url)
    #with open("sips.db", "wb") as code:
    #    code.write(r.content)
    print('init database......')
threads = []
t1 = threading.Thread(target=Lora,args=('Lora Thread',))
threads.append(t1)
t2 = threading.Thread(target=Lora_json,args=('Lora_json_parse Thread',))
threads.append(t2)
if __name__ == '__main__':
    gateway_init()
    for t in threads:
        # t.setDaemon(True)
        t.start()
    while True:
        #print("\nThis is the main thread!")
        sleep(2)
| mit | Python |
|
2b2ff2a528f6effd219bd13cd754c33b55e82e61 | add __init__.py, initialized bootstrap extension | michaelgichia/Twezana,michaelgichia/Twezana | app/__init__.py | app/__init__.py | from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from config import config

bootstrap = Bootstrap()
moment = Moment()


def create_app(config_name):
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    bootstrap.init_app(app)
    moment.init_app(app)

    return app | mit | Python |
|
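A minimal usage sketch for the application factory above; the `app` package name is taken from the file path in this row, and the 'default' configuration key is an assumption:
# Hypothetical launcher; assumes the factory lives in the `app` package and
# that the config dict defines a 'default' entry.
from app import create_app

app = create_app('default')

if __name__ == '__main__':
    app.run(debug=True)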
387b5732c0b2231580ae04bf5088ef7ce59b0d84 | Add script to normalize the spelling in a dataset | NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts | normalize_dataset.py | normalize_dataset.py | """Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by _)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('input_dir', help='the name of the directory '
                        'containing text files that should be normalized.')
    parser.add_argument('output_dir', help='the directory where the '
                        'normalized data files should be saved.')
    args = parser.parse_args()

    input_dir = args.input_dir
    output_dir = args.output_dir

    # load hist2modern dictionary
    with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
        full_dict = json.load(f, 'utf-8')

    # create simple historic word -> modern word mapping
    # (full_dict may contain multiple alternatives for a word)
    hist2modern = {}
    for w in full_dict.keys():
        if w not in full_dict[w]:
            c = Counter(full_dict[w])
            hist2modern[w] = c.most_common()[0][0]
    print '#words in dict: {}'.format(len(hist2modern))

    text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
    for text_file in text_files:
        print text_file
        in_file = os.path.join(input_dir, text_file)
        out_file = os.path.join(output_dir, text_file)

        with codecs.open(in_file, 'rb', 'utf-8') as f:
            lines = f.readlines()

        with codecs.open(out_file, 'wb', 'utf-8') as f:
            for line in lines:
                parts = line.split('\t')
                words = parts[1].split(' ')
                new_words = []
                for w in words:
                    wo = w.lower()
                    if wo in hist2modern.keys():
                        new_words.append(hist2modern[wo])
                    else:
                        new_words.append(w)
                f.write(u'{}\t{}\t{}'.format(parts[0],
                                             ' '.join(new_words),
                                             parts[2]))
| apache-2.0 | Python |
|
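A toy illustration of the per-line substitution the script performs; the one-entry historic-to-modern mapping below is invented, the real one is loaded from hist2modern_bwnt.json:
# Invented single-entry mapping, only to show the word-by-word replacement.
hist2modern = {'seer': 'zeer'}

line = u'42\tick ben seer blij\tEmotie_Liefde\n'
sent_id, sentence, labels = line.split('\t')
new_words = [hist2modern.get(w.lower(), w) for w in sentence.split(' ')]
print u'{}\t{}\t{}'.format(sent_id, ' '.join(new_words), labels)
# -> 42    ick ben zeer blij    Emotie_Liefde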
dee535c8566d0e542891ed10939eec6448483a6f | read in cenque galaxy catalog | changhoonhahn/centralMS,changhoonhahn/centralMS | code/centralms.py | code/centralms.py | '''
'''
import h5py
import numpy as np
# --- local ---
import util as UT
class CentralMS(object):
    def __init__(self, cenque='default'):
        ''' This object reads in the star-forming and quenching
        galaxies generated from the CenQue project and is an object
        for those galaxies. Unlike CenQue, this object WILL NOT
        have extensive functions and will act as a data catalog.
        '''
        self.cenque = cenque
        self.mass = None
        self.sfr = None
        self.ssfr = None

    def _Read_CenQue(self):
        ''' Read in SF and Quenching galaxies generated from
        the CenQue project.
        '''
        if self.cenque == 'default':
            tf = 7
            abcrun = 'RHOssfrfq_TinkerFq_Std'
            prior = 'updated'
        else:
            raise NotImplementedError

        file = ''.join([UT.dat_dir(), 'cenque/',
                        'sfms.centrals.',
                        'tf', str(tf),
                        '.abc_', abcrun,
                        '.prior_', prior,
                        '.hdf5'])

        # read in the file and save to object
        f = h5py.File(file, 'r')
        grp = f['data']
        for col in grp.keys():
            if col == 'mass':
                # make sure to mark as SHAM mass
                setattr(self, 'M_sham', grp[col][:])
            elif col in ['sfr', 'ssfr']:
                continue
            else:
                setattr(self, col, grp[col][:])
        f.close()
        return None
def AssignSFR0(cms):
    ''' Assign initial SFRs to the cms object based on tsnap_genesis
    (time when the halo enters the catalog) and mass_genesis
    '''
    if 'tsnap_genesis' not in cms.__dict__.keys():
        # Most likely you did not read in the CenQue catalog!
        raise ValueError

    # Assign SFR to star-forming galaxies
    sfr_class[starforming] = 'star-forming'
    mu_sf_sfr = AverageLogSFR_sfms(
        mass[starforming],
        redshift[starforming],
        sfms_prop=sfms_dict)
    sigma_sf_sfr = ScatterLogSFR_sfms(
        mass[starforming],
        redshift[starforming],
        sfms_prop=sfms_dict)
    avg_sfr[starforming] = mu_sf_sfr
    delta_sfr[starforming] = sigma_sf_sfr * np.random.randn(ngal_sf)
    sfr[starforming] = mu_sf_sfr + delta_sfr[starforming]
    ssfr[starforming] = sfr[starforming] - mass[starforming]
if __name__=='__main__':
    cms = CentralMS()
    cms._Read_CenQue()
| mit | Python |
|
65e689dd66124fcaa0ce8ab9f5029b727fba18e2 | Add solution for compare version numbers | chancyWu/leetcode | src/compare_version_numbers.py | src/compare_version_numbers.py | """
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three", it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
    list1 = map(int, version1.split('.'))
    list2 = map(int, version2.split('.'))
    max_length = len(list1) if len(list1) > len(list2) else len(list2)
    for i in range(max_length):
        value1 = value2 = 0
        if i < len(list1):
            value1 = list1[i]
        if i < len(list2):
            value2 = list2[i]
        if value1 > value2:
            return 1
        elif value1 < value2:
            return -1
    return 0
if __name__ == '__main__':
    version1_list = ['0.1', '1.1', '1.2', '13.37', '1', '1.0']
    version2_list = ['1.1', '1.2', '13.37', '1', '13.37', '1.0']
    result_list = [-1, -1, -1, 1, -1, 0]
    max_length = len(version1_list)
    success = True
    for i in range(max_length):
        result = compareVersion(version1_list[i], version2_list[i])
        if result != result_list[i]:
            success = False
            print 'Input:', version1_list[i], version2_list[i]
            print 'Output:', result
            print 'Expected:', result_list[i]
    if success:
        print 'All tests are passed'
| mit | Python |
|
0da01e405849da1d5876ec5a758c378aaf70fab2 | add the canary | carlini/cleverhans,cleverhans-lab/cleverhans,openai/cleverhans,cleverhans-lab/cleverhans,cleverhans-lab/cleverhans,carlini/cleverhans | cleverhans/canary.py | cleverhans/canary.py | import numpy as np
import tensorflow as tf
from cleverhans.utils_tf import infer_devices
def run_canary():
"""
Runs some code that will crash if the GPUs / GPU driver are suffering from
a common bug. This helps to prevent contaminating results in the rest of
the library with incorrect calculations.
"""
# Note: please do not edit this function unless you have access to a machine
# with GPUs suffering from the bug and can verify that the canary still
# crashes after your edits. Due to the transient nature of the GPU bug it is
# not possible to unit test the canary in our continuous integration system.
# Try very hard not to let the canary affect the graph for the rest of the
# python process
canary_graph = tf.Graph()
with canary_graph.as_default():
devices = infer_devices()
num_devices = len(devices)
if num_devices < 3:
# We have never observed GPU failure when less than 3 GPUs were used
return
v = np.random.RandomState([2018, 10, 16]).randn(2, 2)
# Try very hard not to let this Variable end up in any collections used
# by the rest of the python process
w = tf.Variable(v, trainable=False, collections=[])
loss = tf.reduce_sum(tf.square(w))
grads = []
for device in devices:
with tf.device(device):
grad, = tf.gradients(loss, w)
grads.append(grad)
sess = tf.Session()
sess.run(tf.variables_initializer([w]))
grads = sess.run(grads)
first = grads[0]
for grad in grads[1:]:
if not np.allclose(first, grad):
# pylint can't see when we use variables via locals()
# pylint: disable=unused-variable
first_string = str(first)
grad_string = str(grad)
raise RuntimeError("Something is wrong with your GPUs or GPU driver."
"%(num_devices)d different GPUS were asked to "
"calculate the same 2x2 gradient. One returned "
"%(first_string)s and another returned "
"%(grad_string)s. This can usually be fixed by "
"rebooting the machine." % locals())
sess.close()
if __name__ == "__main__":
  run_canary()
| mit | Python |
|
c370edc980a34264f61e27d0dd288a7d6adf2d7e | Create consumer.py | kyanyoga/iot_kafka_datagen | bin/consumer.py | bin/consumer.py | # Consumer example to show the producer works: J.Oxenberg
from kafka import KafkaConsumer
consumer = KafkaConsumer(b'test',bootstrap_servers="172.17.136.43")
#wait for messages
for message in consumer:
    print(message)
| mit | Python |
|
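Since the comment above describes this as a consumer example used to check that a producer works, a matching producer sketch with kafka-python is shown here; the broker address is the one the consumer assumes:
# Minimal producer counterpart (sketch): pushes a few messages to the same
# 'test' topic so the consumer above has something to print.
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="172.17.136.43")
for i in range(3):
    producer.send('test', ('message %d' % i).encode('utf-8'))
producer.flush()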
70b312bde16a8c4fca47e4782f2293f0b96f9751 | Add test_datagen2.py | aidiary/keras_examples,aidiary/keras_examples | cnn/test_datagen2.py | cnn/test_datagen2.py | import os
import shutil
import numpy as np
from scipy.misc import toimage
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
def draw(X, filename):
    plt.figure()
    pos = 1
    for i in range(X.shape[0]):
        plt.subplot(4, 4, pos)
        img = toimage(X[i])
        plt.imshow(img)
        plt.axis('off')
        pos += 1
    plt.savefig(filename)
if __name__ == '__main__':
    img_rows, img_cols, img_channels = 32, 32, 3
    batch_size = 16
    nb_classes = 10

    # load the CIFAR-10 data
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    # scale pixel values to the 0-1 range
    X_train = X_train.astype('float32')
    X_train /= 255.0

    X_train = X_train[0:batch_size]
    y_train = y_train[0:batch_size]
    draw(X_train, 'datagen_before.png')

    # data augmentation
    datagen = ImageDataGenerator(
        rotation_range=90,
        zca_whitening=True
    )
    datagen.fit(X_train)

    g = datagen.flow(X_train, y_train, batch_size, shuffle=False)
    batch = g.next()
    print(batch[0].shape)
    print(batch[1].shape)
    draw(batch[0], 'datagen_after.png')
| mit | Python |
|
2dd5afae12dc7d58c3349f2df2694eeb77ca0298 | Test driving robot via serial input | ctn-waterloo/nengo_pushbot,ctn-waterloo/nengo_pushbot | examples/test_spinn_tracks4.py | examples/test_spinn_tracks4.py | import nengo
import nengo_pushbot
import numpy as np
model = nengo.Network()
with model:
    input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
    a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    #d = nengo.Ensemble(nengo.LIF(100), dimensions=2)

    #nengo.Connection(a, b, filter=0.01)
    #nengo.Connection(b, c, filter=0.01)
    #nengo.Connection(c, d, filter=0.01)

    #nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
    #b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
    bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
    tracks = nengo_pushbot.Tracks(bot)

    #def printout(t, x):
    #    print t, x
    #    return []

    #tracks2 = nengo.Node(printout, size_in=2)

    nengo.Connection(input, a, filter=0.01)
    #nengo.Connection(a, b, filter=0.01)
    #nengo.Connection(b, c, filter=0.01)
    #nengo.Connection(c, d, filter=0.01)
    nengo.Connection(a, tracks, filter=0.01)
    #nengo.Connection(b, tracks2, filter=0.01)
#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
| mit | Python |
|
f1826205782eb56ba6b478c70e671acae6872d35 | Read similarity graph | charanpald/APGL | exp/influence2/GraphReader2.py | exp/influence2/GraphReader2.py | try:
    import ctypes
    ctypes.cdll.LoadLibrary("/usr/local/lib/libigraph.so")
except:
    pass
import igraph
import numpy
from apgl.util.PathDefaults import PathDefaults
import logging
class GraphReader2(object):
"""
A class to read the similarity graph generated from the Arnetminer dataset
"""
def __init__(self, field):
self.field = field
self.eps = 0.1
dirName = PathDefaults.getDataDir() + "reputation/" + self.field + "/arnetminer/"
self.coauthorFilename = dirName + "coauthors.csv"
self.coauthorMatrixFilename = dirName + "coauthorSimilarity.npy"
self.trainExpertsFilename = dirName + "experts_train_matches" + ".csv"
self.testExpertsFilename = dirName + "experts_test_matches" + ".csv"
logging.debug("Publications filename: " + self.coauthorFilename)
logging.debug("Training experts filename: " + self.trainExpertsFilename)
logging.debug("Test experts filename: " + self.testExpertsFilename)
def read(self):
K = numpy.load(self.coauthorMatrixFilename)
K = K.tolist()
graph = igraph.Graph.Weighted_Adjacency(K, mode="PLUS", loops=False)
print(graph.summary())
graph.simplify(combine_edges=sum)
graph.es["invWeight"] = 1.0/numpy.array(graph.es["weight"])
return graph
def readExperts(self, train=False):
"""
Read the experts from a test file. Returns two lists: expertsList is the
list of their names, and expertsIdList is their integer ID.
"""
if not train:
logging.debug("Reading test experts list")
expertsFile = open(self.testExpertsFilename)
else:
logging.debug("Reading training experts list")
expertsFile = open(self.trainExpertsFilename)
expertsList = expertsFile.readlines()
expertsFile.close()
coauthorsFile = open(self.coauthorFilename)
coauthors = coauthorsFile.readlines()
coauthorsFile.close()
expertsIdList = []
for expert in expertsList:
if expert in coauthors:
expertsIdList.append(coauthors.index(expert))
return expertsList, expertsIdList | bsd-3-clause | Python |
|
e598608f21e30aeeec1ea9a8f452047a270fdc4d | add setup.py to build C module 'counts'; in perspective, it should setup cbclib on various systems | isrusin/cbcalc,isrusin/cbcalc | cbclib/setup.py | cbclib/setup.py | from distutils.core import setup, Extension
setup(
name="counts", version="0.1",
ext_modules=[Extension("counts", ["countsmodule.c", "countscalc.c"])]
)
| mit | Python |
|
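A sketch of building and importing the extension declared above; the exact functions exposed by `counts` depend on countsmodule.c:
# Build the C extension in place (run from the cbclib directory):
#   python setup.py build_ext --inplace
# After that the compiled module imports like any other Python module.
import counts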
22769c9d84de432034ef592f94c77b5d5111599d | Create argparser.py | ccjj/andropy | argparser.py | argparser.py | def get_args():
    import argparse
    import os
    from sys import exit

    parser = argparse.ArgumentParser(description='Automates android memory dumping')
    parser.add_argument('-n', '--samplepath', required=True, help='path of the malware sample-apk')
    parser.add_argument('-i', '--interval', required=True, type=int, help='intervals for each memory dump in seconds')
    parser.add_argument('-d', '--sdcard', type=int, required=False, help='dump will be saved on the sdcard of the android device instead of being transferred over TCP')
    parser.add_argument('-o', '--outputpath', required=False, help='path of the output directory')
    parser.add_argument('-c', '--customconfig', required=False, help='path of a custom avd config.ini')
    args = parser.parse_args()
    if not os.path.isfile(args.samplepath) or (args.customconfig is not None and not os.path.isfile(args.customconfig)):
        raise Exception("error : one or more specified paths are not pointing to a file")
    return args.samplepath, args.interval, args.sdcard, args.outputpath, args.customconfig


if __name__ == '__main__':
    get_args()
    #AVDNAME = os.path.splitext(args.samplepath)[0]
    #AVDPATH = args.samplepath
    #os.path.isfile(fname)
    #print(AVDNAME)
| mit | Python |
|
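A sketch of how the parser above is meant to be driven; the sample paths are made up:
# Hypothetical invocation from a shell:
#   python argparser.py -n /tmp/sample.apk -i 30 -o /tmp/dumps
# and the returned tuple unpacks as:
from argparser import get_args

samplepath, interval, sdcard, outputpath, customconfig = get_args()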
dbe76ab17e795540de6a53b22f90c8af0cb15dbe | Add constants example | ogroleg/svhammer | constants.example.py | constants.example.py | # coding: utf-8
from __future__ import unicode_literals
token = '123456789:dfghdfghdflugdfhg-77fwftfeyfgftre' # bot access_token
sn_stickers = ('CADAgADDwAu0BX', 'CAADA',
'CDAgADEQADfvu0Bh0Xd-rAg', 'CAADAgAADfvu0Bee9LyXSj1_fAg',) # ids
some2_stickers = ('CAADAKwADd_JnDFPYYarHAg', 'CAADAgADJmEyMU5rGAg')
allowed_stickers = sn_stickers + some2_stickers
default_probability = 0.01 # value hidden
del_symbols = '`~1234567890!@#' # symbols to ignore
quotes_dict = { # examples
(0.6, 'університет', 'университет'): """ну що тут сказати
цитата2
@, що Ви мали на увазі?""", # before sending @ will be replaced by username or name
(0.75, sn_stickers): """стікер зі мною детектед
а я непогайно тут виглядаю
цитата3"""}
| mit | Python |
|
d777a19bb804ae1a4268702da00d3138b028b386 | Add a python script to start sysmobts-remote and dump docs | osmocom/osmo-bts,telenoobie/osmo-bts,shimaore/osmo-bts,geosphere/osmo-bts,telenoobie/osmo-bts,shimaore/osmo-bts,osmocom/osmo-bts,shimaore/osmo-bts,geosphere/osmo-bts,geosphere/osmo-bts,telenoobie/osmo-bts,shimaore/osmo-bts,geosphere/osmo-bts,telenoobie/osmo-bts,osmocom/osmo-bts | contrib/dump_docs.py | contrib/dump_docs.py | #!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time,os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
    sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sck.setblocking(1)
    sck.connect(("localhost", 4241))
    sck.recv(4096)

    # Now send the command
    sck.send("show online-help\r")
    xml = ""
    while True:
        data = sck.recv(4096)
        xml = "%s%s" % (xml, data)
        if data.endswith('\r\nOsmoBTS> '):
            break

    # Now write everything until the end to the file
    out = open('doc/vty_reference.xml', 'w')
    out.write(xml[18:-11])
    out.close()
finally:
    # Clean-up
    bts_proc.kill()
    bts_proc.wait()
| agpl-3.0 | Python |
|
436119b2ef8ea12f12b69e0d22dd3441b7e187cd | add ratelimit plugin | Rj48/ircbot,Rouji/Yui | plugins/ratelimit.py | plugins/ratelimit.py | import time
buckets = {}
last_tick = time.time()
timeframe = float(yui.config_val('ratelimit', 'timeframe', default=60.0))
max_msg = float(yui.config_val('ratelimit', 'messages', default=6.0))
ignore_for = 60.0 * float(yui.config_val('ratelimit', 'ignoreMinutes', default=3.0))
@yui.event('postCmd')
def ratelimit(user, msg):
    if user not in buckets.keys():
        buckets[user] = 1.0
    else:
        buckets[user] += 1.0
    if buckets[user] > max_msg:
        yui.ignore(ignore_for, user.nick)


@yui.event('tick')
def tick():
    global last_tick
    now = time.time()
    diff = now - last_tick
    for user, n in buckets.items():
        n -= ((max_msg / timeframe) * diff)
        n = n if n > 0 else 0
        buckets[user] = n
    last_tick = now
| mit | Python |
|
83579a7e10d66e29fc65c43ba317c6681a393d3e | Add simple hub datapath | MurphyMc/pox,MurphyMc/pox,MurphyMc/pox,noxrepo/pox,noxrepo/pox,noxrepo/pox,noxrepo/pox,MurphyMc/pox,MurphyMc/pox | pox/datapaths/hub.py | pox/datapaths/hub.py | # Copyright 2017 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A simple hub datapath.
Launch it with a number of interface names, and it will pass packets
between them. Requires pxpcap to be built -- see "Building pxpcap"
in the POX manual.
Example:
./pox.py datapaths.hub --ports=eth0,eth1,eth2
"""
from pox.core import core
from Queue import Queue
import pox.lib.packet as pkt
from pox.lib.interfaceio import PCapInterface
class Hub (object):
"""
A simple hub
"""
def __init__ (self, ports=[]):
self._ports = set()
self.rx_bytes = 0
for p in ports:
self.add_port(p)
def add_port (self, port):
p = PCapInterface(port)
p.addListeners(self)
self._ports.add(p)
def _handle_RXData (self, event):
self.rx_bytes += len(event.data)
for port in self._ports:
if port is event.interface: continue
port.write(event.data)
def launch (ports):
  ports = ports.replace(","," ").split()
  l = Hub()
  core.register("hub", l)
  for p in ports:
    l.add_port(p)
| apache-2.0 | Python |
|
d753fe46507d2e829c0b6ffc3120ec8f9472c4f1 | Add Problem 59 solution. | robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles,robertdimarco/puzzles | project-euler/059.py | project-euler/059.py | '''
Problem 59
19 December 2003
Each character on a computer is assigned a unique code and the preferred standard is ASCII (American Standard Code for Information Interchange). For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, taken from a secret key. The advantage with the XOR function is that using the same encryption key on the cipher text, restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text message, and the key is made up of random bytes. The user would keep the encrypted message and the encryption key in different locations, and without both "halves", it is impossible to decrypt the message.
Unfortunately, this method is impractical for most users, so the modified method is to use a password as a key. If the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message. The balance for this method is using a sufficiently long password key for security, but short enough to be memorable.
Your task has been made easy, as the encryption key consists of three lower case characters. Using cipher1.txt, a file containing the encrypted ASCII codes, and the knowledge that the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.
'''
import collections, operator, string
file = open('059.txt', 'r')
encrypted = map(int, file.read().split(','))
length = len(encrypted)
decrypted = [0 for i in range(length)]
chars = range(97, 123)
for i in chars:
    for x in range(length)[0::3]:
        decrypted[x] = operator.xor(i, encrypted[x])
    for j in chars:
        for x in range(length)[1::3]:
            decrypted[x] = operator.xor(j, encrypted[x])
        for k in chars:
            for x in range(length)[2::3]:
                decrypted[x] = operator.xor(k, encrypted[x])

            # Spaces are the most common character in the English language, occurring
            # just less than once per every 5 chars (19.18182%), so filter by a high
            # frequency of spaces. (See http://www.data-compression.com/english.html)
            if (decrypted.count(32) > 0.15*length):
                print ''.join(map(chr, decrypted))
                print sum([char for char in decrypted])
| mit | Python |
|
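The XOR round-trip property the problem statement relies on (65 XOR 42 = 107, then 107 XOR 42 = 65), shown for a repeating three-letter key; the key and message here are made up:
# Repeating-key XOR encrypts and decrypts with the same operation.
from itertools import cycle

key = 'god'
plain = 'attack at dawn'
cipher = [ord(c) ^ ord(k) for c, k in zip(plain, cycle(key))]
recovered = ''.join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle(key)))
assert recovered == plain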
d1fcf47d62671abbb2ec8a278460dd64a4de03c2 | Create cryptoseven.py | Laserbear/Python-Scripts | cryptoseven.py | cryptoseven.py | import sys
def strxor(a, b): # xor two strings of different lengths
    if len(a) > len(b):
        return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a[:len(b)], b)])
    else:
        return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b[:len(a)])])

def printAscii(msg):
    z = [chr(ord(x)) for x in msg]
    x = "".join(z)
    print x.encode('hex')

def main():
    text = "attack at dawn"
    enc = "6c73d5240a948c86981bc294814d".decode('hex')
    key = strxor(text, enc)
    text2 = "attack at dusk"
    enc2 = strxor(text2, key)
    print enc2.encode('hex')
main()
| apache-2.0 | Python |
|
baeecbd66e1acd48aa11fdff4c65567c72d88186 | Create client.py | Poogles/ohesteebee | ohesteebee/client.py | ohesteebee/client.py | """Ohessteebee client."""
import requests
from typing import Dict
PutDict = Dict[str, str]
class Ohessteebee:
    def __init__(self, endpoint, port=4242):
        self.session = requests.Session()
        self.req_path = "http://{endpoint}:{port}".format(
            endpoint=endpoint, port=port)

    def _generate_put_dict(self, metric: str, timestamp: int, value: int, **kwargs) -> PutDict:
        if kwargs:
            tags = {**kwargs}
        else:
            tags = {}

        response = {
            "metric": metric,
            "timestamp": timestamp,
            "value": value,
            "tags": tags
        }
        return response

    def query(self, metric: str, start_date=None, end_date=None):
        """Get metric from OSTB."""
        path = "/api/query"
        api_url = self.req_path + path
        self.session.get(api_url)

    def put(self, metric: str, timestamp: int, value: int, **kwargs):
        """Put metric into OSTB."""
        path = "/api/put"
        api_url = self.req_path + path
        data = self._generate_put_dict(metric, timestamp, value, **kwargs)
        self.session.post(api_url, json=data)
| apache-2.0 | Python |
|
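A usage sketch for the client above; the host, timestamp, metric, and tag values are invented, and the default port 4242 comes from the constructor:
# Hypothetical usage of the client defined above.
from ohesteebee.client import Ohessteebee

ob = Ohessteebee("tsdb.example.com")
ob.put("sys.cpu.user", 1356998400, 42, host="web01", dc="lga")
ob.query("sys.cpu.user")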
22494a45d2bce6774bdc50409a71f259841287f5 | add initial GlimError | aacanakin/glim | glim/exception.py | glim/exception.py |
class GlimError(Exception):
    pass | mit | Python |
|
151599dd242eb0cb0da4771ca3798d66314719f0 | add queue module | dhain/greennet | greennet/queue.py | greennet/queue.py | import time
from collections import deque
from py.magic import greenlet
from greennet import get_hub
from greennet.hub import Wait
class QueueWait(Wait):
    __slots__ = ('queue',)

    def __init__(self, task, queue, expires):
        super(QueueWait, self).__init__(task, expires)
        self.queue = queue

    def timeout(self):
        getattr(self.queue, self._wait_attr).remove(self)
        super(QueueWait, self).timeout()


class PopWait(QueueWait):
    """Wait for a pop to happen."""
    __slots__ = ()
    _wait_attr = '_pop_waits'


class AppendWait(QueueWait):
    """Wait for an append to happen."""
    __slots__ = ()
    _wait_attr = '_append_waits'


class Queue(object):

    def __init__(self, maxlen=None, hub=None):
        self.queue = deque()
        self.maxlen = maxlen
        self.hub = get_hub() if hub is None else hub
        self._append_waits = deque()
        self._pop_waits = deque()

    def __len__(self):
        return len(self.queue)

    def full(self):
        if self.maxlen is None:
            return False
        return len(self.queue) >= self.maxlen

    def _wait_for_append(self, timeout):
        expires = None if timeout is None else time.time() + timeout
        wait = AppendWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        self._append_waits.append(wait)
        self.hub.run()

    def _wait_for_pop(self, timeout):
        expires = None if timeout is None else time.time() + timeout
        wait = PopWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        self._pop_waits.append(wait)
        self.hub.run()

    def _popped(self):
        if self._pop_waits:
            wait = self._pop_waits.popleft()
            if wait.expires is not None:
                self.hub._remove_timeout(wait)
            self.hub.schedule(wait.task)

    def _appended(self):
        if self._append_waits:
            wait = self._append_waits.popleft()
            if wait.expires is not None:
                self.hub._remove_timeout(wait)
            self.hub.schedule(wait.task)

    def wait_until_empty(self, timeout=None):
        if not self.queue:
            return
        expires = None if timeout is None else time.time() + timeout
        wait = PopWait(greenlet.getcurrent(), self, expires)
        if timeout is not None:
            self.hub._add_timeout(wait)
        while self.queue:
            self._pop_waits.append(wait)
            self.hub.run()
        self._popped()

    def pop(self, timeout=None):
        if not self.queue:
            self._wait_for_append(timeout)
        item = self.queue.pop()
        self._popped()
        return item

    def popleft(self, timeout=None):
        if not self.queue:
            self._wait_for_append(timeout)
        item = self.queue.popleft()
        self._popped()
        return item

    def clear(self):
        self.queue.clear()
        self._popped()

    def append(self, item, timeout=None):
        if self.full():
            self._wait_for_pop(timeout)
        self.queue.append(item)
        self._appended()

    def appendleft(self, item, timeout=None):
        if self.full():
            self._wait_for_pop(timeout)
        self.queue.appendleft(item)
        self._appended()
| mit | Python |
|
8d5059fcd672fb4f0fcd7a2b57bf41f57b6269e5 | add mongo handler | telefonicaid/orchestrator,telefonicaid/orchestrator | src/orchestrator/core/mongo.py | src/orchestrator/core/mongo.py | #
# Copyright 2018 Telefonica Espana
#
# This file is part of IoT orchestrator
#
# IoT orchestrator is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# IoT orchestrator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with IoT orchestrator. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
#
# Author: IoT team
#
import json
import logging
from orchestrator.common.util import RestOperations
import pymongo
logger = logging.getLogger('orchestrator_core')
class MongoDBOperations(object):
    '''
       IoT platform: MongoDB
    '''

    def __init__(self,
                 MONGODB_URI=None,
                 CORRELATOR_ID=None,
                 TRANSACTION_ID=None):
        self.MONGODB_URI = MONGODB_URI
        self.client = pymongo.MongoClient(self.MONGODB_URI)

    def checkMongo(self):
        try:
            self.client.list_databases()
            return True
        except Exception, e:
            logger.warn("checkMongo exception: %s" % e)
            return False

    def createIndexes(self, SERVICE_NAME):
        try:
            databaseName = 'orion-' + SERVICE_NAME
            db = self.client[databaseName]
            db.entities.create_index("_id.id")
            db.entities.create_index("_id.type")
            db.entities.create_index("_id.servicePath")
            db.entities.create_index("_id.creDate")
        except Exception, e:
            logger.warn("createIndexes exception: %s" % e)

    def removeDatabase(self, SERVICE_NAME):
        try:
            databaseName = 'orion-' + SERVICE_NAME
            self.client.drop_database(databaseName)
        except Exception, e:
            logger.warn("removeDatabase exception: %s" % e)
| agpl-3.0 | Python |
|
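A usage sketch for the helper above; the Mongo URI and service name are invented:
# Hypothetical usage; URI and service name are made up.
from orchestrator.core.mongo import MongoDBOperations

mongo = MongoDBOperations(MONGODB_URI="mongodb://localhost:27017")
if mongo.checkMongo():
    mongo.createIndexes("smartcity")     # indexes the orion-smartcity database
    # mongo.removeDatabase("smartcity")  # would drop orion-smartcity entirely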
90ec9def45bcc50047d3511943c463f57f771f00 | Bump to 3.2.0 | django-dbbackup/django-dbbackup,mjs7231/django-dbbackup,ZuluPro/django-dbbackup,mjs7231/django-dbbackup,ZuluPro/django-dbbackup,django-dbbackup/django-dbbackup | dbbackup/__init__.py | dbbackup/__init__.py | "Management commands to help backup and restore a project database and media"
VERSION = (3, 2, 0)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = '[email protected]'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
default_app_config = 'dbbackup.apps.DbbackupConfig'
| "Management commands to help backup and restore a project database and media"
VERSION = (3, 1, 3)
__version__ = '.'.join([str(i) for i in VERSION])
__author__ = 'Michael Shepanski'
__email__ = '[email protected]'
__url__ = 'https://github.com/django-dbbackup/django-dbbackup'
default_app_config = 'dbbackup.apps.DbbackupConfig'
| bsd-3-clause | Python |
2c29829bb6e0483a3dc7d98bc887ae86a3a233b7 | Fix dir name of preprocess | SaTa999/pyPanair | pyPanair/preprocess/__init__.py | pyPanair/preprocess/__init__.py | mit | Python |
||
3e7f8c5b87a85958bd45636788215db1ba4f2fd8 | Create __init__.py | AlanOndra/Waya | src/site/app/model/__init__.py | src/site/app/model/__init__.py | # -*- coding: utf-8 -*-
| bsd-3-clause | Python |
|
38c2291ab23d86d220446e594d52cce80ea4ec2a | Create Count_Inversions_Array.py | UmassJin/Leetcode | Experience/Count_Inversions_Array.py | Experience/Count_Inversions_Array.py | '''
Inversion Count for an array indicates how far (or close) the array is from being sorted. If the array is already sorted then the inversion count is 0. If the array is sorted in reverse order then the inversion count is the maximum.
Formally speaking, two elements a[i] and a[j] form an inversion if a[i] > a[j] and i < j
Example:
The sequence 2, 4, 1, 3, 5 has three inversions (2, 1), (4, 1), (4, 3).
'''
# Note: G4G Analysis (http://www.geeksforgeeks.org/counting-inversions/)
def count_inver(A):
    if not A: return A
    length = len(A)
    return merge_sort(A, 0, length-1)

def merge_sort(A, left, right):
    inver_cnt = 0
    if left < right:
        mid = (left + right)/2
        inver_cnt = merge_sort(A, left, mid)
        inver_cnt += merge_sort(A, mid+1, right)
        inver_cnt += merge(A, left, mid+1, right)
    return inver_cnt

def merge(A, left, mid, right):
    i = left; j = mid; k = left
    print "i: %d, mid: %d, j: %d, k: %d, right: %d" % (i, mid, j, k, right)
    inver_cnt = 0
    tmp = [0 for p in xrange(len(A))]
    print "tmp: ", tmp
    while (i < mid) and (j <= right):
        print "A[i]: %d, A[j]: %d" % (A[i], A[j])
        if A[i] <= A[j]:
            tmp[k] = A[i]
            i += 1
            k += 1
            print "< after: i: %d, j: %d, k: %d, right: %d" % (i, j, k, right)
        else:
            tmp[k] = A[j]
            j += 1
            k += 1
            print "> after: i: %d, j: %d, k: %d, right: %d" % (i, j, k, right)
            inver_cnt += mid - i
            print "inver_cnt: ", inver_cnt
    while i < mid:
        tmp[k] = A[i]
        i += 1
        k += 1
    while j <= right:
        tmp[k] = A[j]
        j += 1
        k += 1
    A[left:right+1] = tmp[left:right+1]
    print "after merge: A", A
    return inver_cnt
ilist = [2,4,5,1,3,5]
print count_inver(ilist)
| mit | Python |
|
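A brute-force cross-check for the merge-sort count above, useful for validating it on small inputs such as the docstring example:
# O(n^2) reference count, only for verification on short lists.
def count_inver_naive(A):
    return sum(1 for i in range(len(A))
                 for j in range(i + 1, len(A))
                 if A[i] > A[j])

print count_inver_naive([2, 4, 1, 3, 5])  # 3, matching the docstring example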
59b531e11266b2ff8184c04cda92bcc2fad71fa0 | Create core.py | thegreathippo/crispy | crispy/actions/core.py | crispy/actions/core.py | from crispy.actions.attacks import Attack, Melee, Ranged, Throw, Shoot
| mit | Python |
|
d6dc45756cbb30a8f707d683943ccd4ee0391e6b | Add an aws settings for the cms | doganov/edx-platform,nanolearningllc/edx-platform-cypress-2,4eek/edx-platform,defance/edx-platform,utecuy/edx-platform,halvertoluke/edx-platform,andyzsf/edx,IndonesiaX/edx-platform,kursitet/edx-platform,rationalAgent/edx-platform-custom,edry/edx-platform,tiagochiavericosta/edx-platform,hkawasaki/kawasaki-aio8-2,arifsetiawan/edx-platform,rationalAgent/edx-platform-custom,unicri/edx-platform,kalebhartje/schoolboost,Lektorium-LLC/edx-platform,mbareta/edx-platform-ft,zubair-arbi/edx-platform,chrisndodge/edx-platform,cecep-edu/edx-platform,doismellburning/edx-platform,TeachAtTUM/edx-platform,romain-li/edx-platform,nagyistoce/edx-platform,cognitiveclass/edx-platform,alu042/edx-platform,caesar2164/edx-platform,pabloborrego93/edx-platform,kxliugang/edx-platform,jswope00/GAI,jazkarta/edx-platform,bdero/edx-platform,xingyepei/edx-platform,shubhdev/edx-platform,louyihua/edx-platform,devs1991/test_edx_docmode,nanolearning/edx-platform,eduNEXT/edx-platform,waheedahmed/edx-platform,chudaol/edx-platform,MakeHer/edx-platform,ubc/edx-platform,ak2703/edx-platform,adoosii/edx-platform,Stanford-Online/edx-platform,EduPepperPDTesting/pepper2013-testing,arifsetiawan/edx-platform,miptliot/edx-platform,antonve/s4-project-mooc,jswope00/GAI,CourseTalk/edx-platform,kursitet/edx-platform,openfun/edx-platform,Ayub-Khan/edx-platform,OmarIthawi/edx-platform,hastexo/edx-platform,SivilTaram/edx-platform,B-MOOC/edx-platform,pabloborrego93/edx-platform,pku9104038/edx-platform,mjirayu/sit_academy,vismartltd/edx-platform,doganov/edx-platform,auferack08/edx-platform,teltek/edx-platform,knehez/edx-platform,vasyarv/edx-platform,zadgroup/edx-platform,SravanthiSinha/edx-platform,eduNEXT/edunext-platform,DefyVentures/edx-platform,EduPepperPD/pepper2013,kamalx/edx-platform,don-github/edx-platform,jjmiranda/edx-platform,mushtaqak/edx-platform,kursitet/edx-platform,polimediaupv/edx-platform,jelugbo/tundex,jruiperezv/ANALYSE,torchingloom/edx-platform,syjeon/new_edx,analyseuc3m/ANALYSE-v1,solashirai/edx-platform,10clouds/edx-platform,chauhanhardik/populo,jruiperezv/ANALYSE,shurihell/testasia,prarthitm/edxplatform,knehez/edx-platform,don-github/edx-platform,msegado/edx-platform,shubhdev/edx-platform,ubc/edx-platform,shubhdev/edxOnBaadal,MakeHer/edx-platform,deepsrijit1105/edx-platform,procangroup/edx-platform,ampax/edx-platform-backup,EduPepperPDTesting/pepper2013-testing,sudheerchintala/LearnEraPlatForm,defance/edx-platform,mushtaqak/edx-platform,AkA84/edx-platform,dsajkl/123,JioEducation/edx-platform,zofuthan/edx-platform,mcgachey/edx-platform,franosincic/edx-platform,synergeticsedx/deployment-wipro,beacloudgenius/edx-platform,zerobatu/edx-platform,JCBarahona/edX,franosincic/edx-platform,martynovp/edx-platform,solashirai/edx-platform,SravanthiSinha/edx-platform,IITBinterns13/edx-platform-dev,DNFcode/edx-platform,Unow/edx-platform,marcore/edx-platform,Lektorium-LLC/edx-platform,JCBarahona/edX,wwj718/edx-platform,mjirayu/sit_academy,beni55/edx-platform,nikolas/edx-platform,zhenzhai/edx-platform,jolyonb/edx-platform,zofuthan/edx-platform,jolyonb/edx-platform,eestay/edx-platform,cecep-edu/edx-platform,zofuthan/edx-platform,mbareta/edx-platform-ft,Shrhawk/edx-platform,shubhdev/edxOnBaadal,nanolearning/edx-platform,ovnicraft/edx-platform,simbs/edx-platform,kmoocdev/edx-platform,4eek/edx-platform,peterm-itr/edx-platform,bdero/edx-platform,auferack08/edx-platform,bigdatauniversity/edx-platform,hamzehd/edx-platform,waheedahmed/edx-platform,leansoft/edx-platform,kamalx/e
dx-platform,teltek/edx-platform,nttks/edx-platform,arbrandes/edx-platform,miptliot/edx-platform,Livit/Livit.Learn.EdX,AkA84/edx-platform,shubhdev/openedx,AkA84/edx-platform,appsembler/edx-platform,eduNEXT/edx-platform,Stanford-Online/edx-platform,benpatterson/edx-platform,mjg2203/edx-platform-seas,ESOedX/edx-platform,torchingloom/edx-platform,carsongee/edx-platform,ahmadio/edx-platform,jruiperezv/ANALYSE,chauhanhardik/populo,jonathan-beard/edx-platform,pelikanchik/edx-platform,DNFcode/edx-platform,xinjiguaike/edx-platform,EDUlib/edx-platform,edry/edx-platform,valtech-mooc/edx-platform,nttks/edx-platform,jzoldak/edx-platform,peterm-itr/edx-platform,WatanabeYasumasa/edx-platform,rue89-tech/edx-platform,yokose-ks/edx-platform,jonathan-beard/edx-platform,kalebhartje/schoolboost,morpheby/levelup-by,dsajkl/123,torchingloom/edx-platform,TeachAtTUM/edx-platform,hamzehd/edx-platform,IITBinterns13/edx-platform-dev,ZLLab-Mooc/edx-platform,arifsetiawan/edx-platform,a-parhom/edx-platform,itsjeyd/edx-platform,Unow/edx-platform,cognitiveclass/edx-platform,motion2015/a3,marcore/edx-platform,atsolakid/edx-platform,pepeportela/edx-platform,zhenzhai/edx-platform,MSOpenTech/edx-platform,beni55/edx-platform,ferabra/edx-platform,mjg2203/edx-platform-seas,louyihua/edx-platform,MakeHer/edx-platform,abdoosh00/edraak,fly19890211/edx-platform,don-github/edx-platform,mjg2203/edx-platform-seas,shashank971/edx-platform,zerobatu/edx-platform,JCBarahona/edX,mjirayu/sit_academy,cognitiveclass/edx-platform,bdero/edx-platform,mtlchun/edx,vismartltd/edx-platform,alu042/edx-platform,wwj718/edx-platform,zubair-arbi/edx-platform,xuxiao19910803/edx-platform,shubhdev/edxOnBaadal,ESOedX/edx-platform,appliedx/edx-platform,iivic/BoiseStateX,cselis86/edx-platform,edx/edx-platform,jamesblunt/edx-platform,pomegranited/edx-platform,jonathan-beard/edx-platform,zhenzhai/edx-platform,louyihua/edx-platform,nttks/jenkins-test,hastexo/edx-platform,Softmotions/edx-platform,MSOpenTech/edx-platform,syjeon/new_edx,shubhdev/edx-platform,jswope00/griffinx,shabab12/edx-platform,arbrandes/edx-platform,zubair-arbi/edx-platform,vasyarv/edx-platform,carsongee/edx-platform,ampax/edx-platform-backup,franosincic/edx-platform,eemirtekin/edx-platform,Shrhawk/edx-platform,knehez/edx-platform,y12uc231/edx-platform,bitifirefly/edx-platform,deepsrijit1105/edx-platform,jamesblunt/edx-platform,rhndg/openedx,nttks/jenkins-test,shubhdev/edxOnBaadal,cecep-edu/edx-platform,gymnasium/edx-platform,PepperPD/edx-pepper-platform,morenopc/edx-platform,angelapper/edx-platform,zadgroup/edx-platform,praveen-pal/edx-platform,UOMx/edx-platform,bigdatauniversity/edx-platform,prarthitm/edxplatform,y12uc231/edx-platform,shubhdev/openedx,pepeportela/edx-platform,philanthropy-u/edx-platform,longmen21/edx-platform,morpheby/levelup-by,mushtaqak/edx-platform,ZLLab-Mooc/edx-platform,DNFcode/edx-platform,jruiperezv/ANALYSE,lduarte1991/edx-platform,adoosii/edx-platform,ahmadio/edx-platform,LICEF/edx-platform,vikas1885/test1,y12uc231/edx-platform,Edraak/edx-platform,4eek/edx-platform,abdoosh00/edx-rtl-final,morenopc/edx-platform,eduNEXT/edx-platform,alexthered/kienhoc-platform,kmoocdev2/edx-platform,rismalrv/edx-platform,mtlchun/edx,antonve/s4-project-mooc,etzhou/edx-platform,BehavioralInsightsTeam/edx-platform,cognitiveclass/edx-platform,ovnicraft/edx-platform,valtech-mooc/edx-platform,Unow/edx-platform,OmarIthawi/edx-platform,beni55/edx-platform,abdoosh00/edraak,chauhanhardik/populo_2,cyanna/edx-platform,adoosii/edx-platform,jswope00/GAI,doismellburning/edx-platform,cselis86/edx-platform,m
segado/edx-platform,torchingloom/edx-platform,ESOedX/edx-platform,UXE/local-edx,LICEF/edx-platform,Shrhawk/edx-platform,TsinghuaX/edx-platform,ferabra/edx-platform,10clouds/edx-platform,jonathan-beard/edx-platform,hastexo/edx-platform,Edraak/circleci-edx-platform,eemirtekin/edx-platform,marcore/edx-platform,syjeon/new_edx,jazztpt/edx-platform,ahmadiga/min_edx,ovnicraft/edx-platform,UXE/local-edx,sameetb-cuelogic/edx-platform-test,atsolakid/edx-platform,nttks/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,beacloudgenius/edx-platform,chauhanhardik/populo,caesar2164/edx-platform,chand3040/cloud_that,eestay/edx-platform,Lektorium-LLC/edx-platform,kamalx/edx-platform,jbassen/edx-platform,IONISx/edx-platform,MakeHer/edx-platform,jzoldak/edx-platform,playm2mboy/edx-platform,jazkarta/edx-platform-for-isc,stvstnfrd/edx-platform,ahmadio/edx-platform,BehavioralInsightsTeam/edx-platform,LICEF/edx-platform,AkA84/edx-platform,shabab12/edx-platform,simbs/edx-platform,eduNEXT/edunext-platform,kxliugang/edx-platform,edx-solutions/edx-platform,antoviaque/edx-platform,RPI-OPENEDX/edx-platform,cyanna/edx-platform,kamalx/edx-platform,cselis86/edx-platform,Ayub-Khan/edx-platform,jswope00/griffinx,Ayub-Khan/edx-platform,valtech-mooc/edx-platform,zadgroup/edx-platform,B-MOOC/edx-platform,rationalAgent/edx-platform-custom,franosincic/edx-platform,ahmadiga/min_edx,openfun/edx-platform,jamiefolsom/edx-platform,shashank971/edx-platform,doganov/edx-platform,jruiperezv/ANALYSE,10clouds/edx-platform,raccoongang/edx-platform,DefyVentures/edx-platform,dkarakats/edx-platform,a-parhom/edx-platform,polimediaupv/edx-platform,appliedx/edx-platform,appliedx/edx-platform,motion2015/a3,wwj718/edx-platform,wwj718/ANALYSE,jazkarta/edx-platform,nttks/edx-platform,tiagochiavericosta/edx-platform,Kalyzee/edx-platform,WatanabeYasumasa/edx-platform,nanolearningllc/edx-platform-cypress,nanolearningllc/edx-platform-cypress,ak2703/edx-platform,ahmedaljazzar/edx-platform,nikolas/edx-platform,jazkarta/edx-platform,openfun/edx-platform,jswope00/griffinx,mtlchun/edx,beacloudgenius/edx-platform,xuxiao19910803/edx,olexiim/edx-platform,miptliot/edx-platform,shashank971/edx-platform,edry/edx-platform,xinjiguaike/edx-platform,eemirtekin/edx-platform,jazkarta/edx-platform-for-isc,rue89-tech/edx-platform,tanmaykm/edx-platform,eduNEXT/edunext-platform,TsinghuaX/edx-platform,IITBinterns13/edx-platform-dev,RPI-OPENEDX/edx-platform,jzoldak/edx-platform,eestay/edx-platform,RPI-OPENEDX/edx-platform,shurihell/testasia,fintech-circle/edx-platform,motion2015/edx-platform,iivic/BoiseStateX,10clouds/edx-platform,EDUlib/edx-platform,alexthered/kienhoc-platform,praveen-pal/edx-platform,morpheby/levelup-by,Edraak/edraak-platform,itsjeyd/edx-platform,arbrandes/edx-platform,jelugbo/tundex,antonve/s4-project-mooc,apigee/edx-platform,AkA84/edx-platform,cecep-edu/edx-platform,pomegranited/edx-platform,devs1991/test_edx_docmode,tanmaykm/edx-platform,romain-li/edx-platform,chudaol/edx-platform,martynovp/edx-platform,kmoocdev/edx-platform,ampax/edx-platform,olexiim/edx-platform,solashirai/edx-platform,EDUlib/edx-platform,nanolearningllc/edx-platform-cypress-2,TeachAtTUM/edx-platform,mahendra-r/edx-platform,nagyistoce/edx-platform,gsehub/edx-platform,chauhanhardik/populo_2,vasyarv/edx-platform,cpennington/edx-platform,waheedahmed/edx-platform,SravanthiSinha/edx-platform,hmcmooc/muddx-platform,ahmadiga/min_edx,zerobatu/edx-platform,hkawasaki/kawasaki-aio8-0,openfun/edx-platform,nanolearningllc/edx-platform-cypress,ahmedaljazzar/edx-platform,ovnicraft/edx-platform,dk
arakats/edx-platform,hkawasaki/kawasaki-aio8-0,pelikanchik/edx-platform,EDUlib/edx-platform,lduarte1991/edx-platform,alexthered/kienhoc-platform,alu042/edx-platform,solashirai/edx-platform,dsajkl/reqiop,pepeportela/edx-platform,gsehub/edx-platform,mjirayu/sit_academy,rismalrv/edx-platform,LearnEra/LearnEraPlaftform,xuxiao19910803/edx,WatanabeYasumasa/edx-platform,vismartltd/edx-platform,etzhou/edx-platform,rismalrv/edx-platform,iivic/BoiseStateX,playm2mboy/edx-platform,cselis86/edx-platform,shubhdev/edx-platform,philanthropy-u/edx-platform,xinjiguaike/edx-platform,MakeHer/edx-platform,peterm-itr/edx-platform,xingyepei/edx-platform,chand3040/cloud_that,xinjiguaike/edx-platform,dcosentino/edx-platform,olexiim/edx-platform,apigee/edx-platform,morenopc/edx-platform,devs1991/test_edx_docmode,alexthered/kienhoc-platform,rismalrv/edx-platform,mtlchun/edx,nanolearningllc/edx-platform-cypress-2,shabab12/edx-platform,naresh21/synergetics-edx-platform,sudheerchintala/LearnEraPlatForm,nikolas/edx-platform,benpatterson/edx-platform,pepeportela/edx-platform,dkarakats/edx-platform,IONISx/edx-platform,IONISx/edx-platform,motion2015/edx-platform,cpennington/edx-platform,kxliugang/edx-platform,romain-li/edx-platform,eestay/edx-platform,xuxiao19910803/edx,longmen21/edx-platform,IONISx/edx-platform,RPI-OPENEDX/edx-platform,ubc/edx-platform,teltek/edx-platform,hastexo/edx-platform,ahmadio/edx-platform,rhndg/openedx,CredoReference/edx-platform,hkawasaki/kawasaki-aio8-0,J861449197/edx-platform,zhenzhai/edx-platform,romain-li/edx-platform,pomegranited/edx-platform,angelapper/edx-platform,angelapper/edx-platform,Edraak/circleci-edx-platform,yokose-ks/edx-platform,amir-qayyum-khan/edx-platform,apigee/edx-platform,utecuy/edx-platform,beacloudgenius/edx-platform,inares/edx-platform,Edraak/edx-platform,nikolas/edx-platform,ampax/edx-platform,dsajkl/123,mcgachey/edx-platform,pelikanchik/edx-platform,SivilTaram/edx-platform,apigee/edx-platform,appliedx/edx-platform,PepperPD/edx-pepper-platform,dsajkl/reqiop,etzhou/edx-platform,Semi-global/edx-platform,mcgachey/edx-platform,Edraak/edraak-platform,morenopc/edx-platform,chauhanhardik/populo,shurihell/testasia,olexiim/edx-platform,abdoosh00/edraak,msegado/edx-platform,dsajkl/reqiop,wwj718/ANALYSE,Stanford-Online/edx-platform,simbs/edx-platform,leansoft/edx-platform,synergeticsedx/deployment-wipro,franosincic/edx-platform,kmoocdev/edx-platform,hkawasaki/kawasaki-aio8-2,xuxiao19910803/edx-platform,valtech-mooc/edx-platform,pdehaye/theming-edx-platform,jbzdak/edx-platform,synergeticsedx/deployment-wipro,procangroup/edx-platform,inares/edx-platform,philanthropy-u/edx-platform,kalebhartje/schoolboost,mushtaqak/edx-platform,yokose-ks/edx-platform,analyseuc3m/ANALYSE-v1,shurihell/testasia,shubhdev/openedx,BehavioralInsightsTeam/edx-platform,msegado/edx-platform,adoosii/edx-platform,playm2mboy/edx-platform,xuxiao19910803/edx-platform,nanolearningllc/edx-platform-cypress,unicri/edx-platform,jzoldak/edx-platform,ampax/edx-platform-backup,kalebhartje/schoolboost,mbareta/edx-platform-ft,JCBarahona/edX,IndonesiaX/edx-platform,caesar2164/edx-platform,ak2703/edx-platform,chrisndodge/edx-platform,IndonesiaX/edx-platform,Kalyzee/edx-platform,unicri/edx-platform,jamiefolsom/edx-platform,jbassen/edx-platform,jazkarta/edx-platform-for-isc,Semi-global/edx-platform,utecuy/edx-platform,valtech-mooc/edx-platform,jbzdak/edx-platform,PepperPD/edx-pepper-platform,jelugbo/tundex,hamzehd/edx-platform,nttks/jenkins-test,zhenzhai/edx-platform,gsehub/edx-platform,kursitet/edx-platform,shabab12/edx-platform
,nanolearning/edx-platform,edx/edx-platform,vikas1885/test1,mcgachey/edx-platform,chand3040/cloud_that,Semi-global/edx-platform,ampax/edx-platform-backup,Livit/Livit.Learn.EdX,Stanford-Online/edx-platform,gymnasium/edx-platform,procangroup/edx-platform,J861449197/edx-platform,Softmotions/edx-platform,defance/edx-platform,iivic/BoiseStateX,unicri/edx-platform,antoviaque/edx-platform,philanthropy-u/edx-platform,J861449197/edx-platform,dcosentino/edx-platform,nagyistoce/edx-platform,zerobatu/edx-platform,appsembler/edx-platform,dcosentino/edx-platform,dsajkl/123,leansoft/edx-platform,nagyistoce/edx-platform,sameetb-cuelogic/edx-platform-test,jazkarta/edx-platform-for-isc,utecuy/edx-platform,waheedahmed/edx-platform,jelugbo/tundex,a-parhom/edx-platform,abdoosh00/edraak,marcore/edx-platform,kxliugang/edx-platform,ferabra/edx-platform,OmarIthawi/edx-platform,SravanthiSinha/edx-platform,DNFcode/edx-platform,mjirayu/sit_academy,wwj718/ANALYSE,pomegranited/edx-platform,nanolearningllc/edx-platform-cypress-2,LearnEra/LearnEraPlaftform,kmoocdev2/edx-platform,utecuy/edx-platform,motion2015/edx-platform,chudaol/edx-platform,Endika/edx-platform,IndonesiaX/edx-platform,SivilTaram/edx-platform,chrisndodge/edx-platform,EduPepperPDTesting/pepper2013-testing,UOMx/edx-platform,deepsrijit1105/edx-platform,IITBinterns13/edx-platform-dev,cpennington/edx-platform,motion2015/a3,edx-solutions/edx-platform,don-github/edx-platform,edry/edx-platform,J861449197/edx-platform,fly19890211/edx-platform,mtlchun/edx,vikas1885/test1,edx-solutions/edx-platform,fly19890211/edx-platform,PepperPD/edx-pepper-platform,motion2015/edx-platform,romain-li/edx-platform,simbs/edx-platform,fly19890211/edx-platform,doismellburning/edx-platform,Lektorium-LLC/edx-platform,cyanna/edx-platform,xuxiao19910803/edx,leansoft/edx-platform,prarthitm/edxplatform,B-MOOC/edx-platform,UOMx/edx-platform,jelugbo/tundex,praveen-pal/edx-platform,CourseTalk/edx-platform,y12uc231/edx-platform,simbs/edx-platform,devs1991/test_edx_docmode,arifsetiawan/edx-platform,atsolakid/edx-platform,chudaol/edx-platform,proversity-org/edx-platform,halvertoluke/edx-platform,SravanthiSinha/edx-platform,ESOedX/edx-platform,doismellburning/edx-platform,appsembler/edx-platform,fintech-circle/edx-platform,doganov/edx-platform,analyseuc3m/ANALYSE-v1,naresh21/synergetics-edx-platform,jazztpt/edx-platform,synergeticsedx/deployment-wipro,etzhou/edx-platform,raccoongang/edx-platform,TsinghuaX/edx-platform,carsongee/edx-platform,Edraak/edx-platform,mahendra-r/edx-platform,amir-qayyum-khan/edx-platform,edx/edx-platform,MSOpenTech/edx-platform,morenopc/edx-platform,EduPepperPD/pepper2013,hkawasaki/kawasaki-aio8-0,stvstnfrd/edx-platform,gsehub/edx-platform,jbassen/edx-platform,mcgachey/edx-platform,halvertoluke/edx-platform,ubc/edx-platform,abdoosh00/edx-rtl-final,kxliugang/edx-platform,TeachAtTUM/edx-platform,cyanna/edx-platform,chauhanhardik/populo,jjmiranda/edx-platform,knehez/edx-platform,Edraak/edraak-platform,vikas1885/test1,Semi-global/edx-platform,devs1991/test_edx_docmode,dcosentino/edx-platform,edx-solutions/edx-platform,nttks/jenkins-test,rhndg/openedx,devs1991/test_edx_docmode,mitocw/edx-platform,chand3040/cloud_that,alu042/edx-platform,jazztpt/edx-platform,jbzdak/edx-platform,jamiefolsom/edx-platform,pomegranited/edx-platform,vasyarv/edx-platform,EduPepperPDTesting/pepper2013-testing,shashank971/edx-platform,sudheerchintala/LearnEraPlatForm,naresh21/synergetics-edx-platform,shubhdev/openedx,bdero/edx-platform,Softmotions/edx-platform,jamesblunt/edx-platform,kmoocdev2/edx-platf
orm,knehez/edx-platform,EduPepperPD/pepper2013,fintech-circle/edx-platform,praveen-pal/edx-platform,hkawasaki/kawasaki-aio8-1,martynovp/edx-platform,jazkarta/edx-platform,angelapper/edx-platform,teltek/edx-platform,Livit/Livit.Learn.EdX,ahmadio/edx-platform,kamalx/edx-platform,bitifirefly/edx-platform,bitifirefly/edx-platform,antonve/s4-project-mooc,rue89-tech/edx-platform,openfun/edx-platform,ahmedaljazzar/edx-platform,Edraak/circleci-edx-platform,Edraak/edraak-platform,ZLLab-Mooc/edx-platform,gymnasium/edx-platform,BehavioralInsightsTeam/edx-platform,syjeon/new_edx,IONISx/edx-platform,inares/edx-platform,louyihua/edx-platform,hamzehd/edx-platform,caesar2164/edx-platform,jswope00/griffinx,antoviaque/edx-platform,EduPepperPD/pepper2013,beni55/edx-platform,pabloborrego93/edx-platform,analyseuc3m/ANALYSE-v1,Ayub-Khan/edx-platform,dkarakats/edx-platform,auferack08/edx-platform,nanolearning/edx-platform,ahmedaljazzar/edx-platform,proversity-org/edx-platform,ampax/edx-platform-backup,jolyonb/edx-platform,CourseTalk/edx-platform,mitocw/edx-platform,vasyarv/edx-platform,eemirtekin/edx-platform,CredoReference/edx-platform,longmen21/edx-platform,procangroup/edx-platform,tiagochiavericosta/edx-platform,shashank971/edx-platform,ubc/edx-platform,jazztpt/edx-platform,auferack08/edx-platform,jbzdak/edx-platform,jjmiranda/edx-platform,LearnEra/LearnEraPlaftform,jswope00/griffinx,zadgroup/edx-platform,motion2015/edx-platform,playm2mboy/edx-platform,wwj718/edx-platform,chand3040/cloud_that,Shrhawk/edx-platform,rhndg/openedx,dsajkl/123,halvertoluke/edx-platform,kmoocdev2/edx-platform,bitifirefly/edx-platform,dsajkl/reqiop,beacloudgenius/edx-platform,polimediaupv/edx-platform,JioEducation/edx-platform,vismartltd/edx-platform,ZLLab-Mooc/edx-platform,benpatterson/edx-platform,xuxiao19910803/edx,shubhdev/edx-platform,nanolearning/edx-platform,Edraak/edx-platform,SivilTaram/edx-platform,pdehaye/theming-edx-platform,kalebhartje/schoolboost,ampax/edx-platform,atsolakid/edx-platform,xingyepei/edx-platform,naresh21/synergetics-edx-platform,LICEF/edx-platform,motion2015/a3,UOMx/edx-platform,rhndg/openedx,jazkarta/edx-platform-for-isc,amir-qayyum-khan/edx-platform,sudheerchintala/LearnEraPlatForm,jamiefolsom/edx-platform,torchingloom/edx-platform,mjg2203/edx-platform-seas,cognitiveclass/edx-platform,4eek/edx-platform,UXE/local-edx,dcosentino/edx-platform,hkawasaki/kawasaki-aio8-1,PepperPD/edx-pepper-platform,xuxiao19910803/edx-platform,zubair-arbi/edx-platform,jamesblunt/edx-platform,vismartltd/edx-platform,hkawasaki/kawasaki-aio8-1,nttks/edx-platform,halvertoluke/edx-platform,zubair-arbi/edx-platform,stvstnfrd/edx-platform,edry/edx-platform,pku9104038/edx-platform,ferabra/edx-platform,fly19890211/edx-platform,jazkarta/edx-platform,xingyepei/edx-platform,nanolearningllc/edx-platform-cypress-2,martynovp/edx-platform,fintech-circle/edx-platform,rismalrv/edx-platform,eemirtekin/edx-platform,antoviaque/edx-platform,nanolearningllc/edx-platform-cypress,deepsrijit1105/edx-platform,Ayub-Khan/edx-platform,ferabra/edx-platform,dkarakats/edx-platform,WatanabeYasumasa/edx-platform,hkawasaki/kawasaki-aio8-2,chrisndodge/edx-platform,olexiim/edx-platform,raccoongang/edx-platform,rationalAgent/edx-platform-custom,benpatterson/edx-platform,Softmotions/edx-platform,MSOpenTech/edx-platform,ak2703/edx-platform,bigdatauniversity/edx-platform,sameetb-cuelogic/edx-platform-test,bigdatauniversity/edx-platform,zofuthan/edx-platform,leansoft/edx-platform,carsongee/edx-platform,Shrhawk/edx-platform,pdehaye/theming-edx-platform,IndonesiaX/edx-pl
atform,itsjeyd/edx-platform,edx/edx-platform,bigdatauniversity/edx-platform,cecep-edu/edx-platform,shurihell/testasia,inares/edx-platform,motion2015/a3,DefyVentures/edx-platform,polimediaupv/edx-platform,vikas1885/test1,ak2703/edx-platform,rationalAgent/edx-platform-custom,Kalyzee/edx-platform,y12uc231/edx-platform,hmcmooc/muddx-platform,antonve/s4-project-mooc,ampax/edx-platform,mahendra-r/edx-platform,sameetb-cuelogic/edx-platform-test,DefyVentures/edx-platform,andyzsf/edx,hamzehd/edx-platform,cpennington/edx-platform,hkawasaki/kawasaki-aio8-1,kmoocdev/edx-platform,jolyonb/edx-platform,nttks/jenkins-test,xinjiguaike/edx-platform,solashirai/edx-platform,CredoReference/edx-platform,atsolakid/edx-platform,mitocw/edx-platform,beni55/edx-platform,proversity-org/edx-platform,zofuthan/edx-platform,martynovp/edx-platform,JCBarahona/edX,eduNEXT/edunext-platform,4eek/edx-platform,alexthered/kienhoc-platform,jonathan-beard/edx-platform,msegado/edx-platform,Endika/edx-platform,mahendra-r/edx-platform,chudaol/edx-platform,zerobatu/edx-platform,UXE/local-edx,chauhanhardik/populo_2,LearnEra/LearnEraPlaftform,JioEducation/edx-platform,jbzdak/edx-platform,kmoocdev2/edx-platform,pelikanchik/edx-platform,raccoongang/edx-platform,Livit/Livit.Learn.EdX,doganov/edx-platform,inares/edx-platform,OmarIthawi/edx-platform,devs1991/test_edx_docmode,xingyepei/edx-platform,tanmaykm/edx-platform,shubhdev/edxOnBaadal,TsinghuaX/edx-platform,proversity-org/edx-platform,EduPepperPD/pepper2013,eduNEXT/edx-platform,DefyVentures/edx-platform,wwj718/ANALYSE,devs1991/test_edx_docmode,B-MOOC/edx-platform,EduPepperPDTesting/pepper2013-testing,CredoReference/edx-platform,polimediaupv/edx-platform,ahmadiga/min_edx,wwj718/ANALYSE,appsembler/edx-platform,arbrandes/edx-platform,J861449197/edx-platform,lduarte1991/edx-platform,ZLLab-Mooc/edx-platform,nikolas/edx-platform,nagyistoce/edx-platform,mushtaqak/edx-platform,hmcmooc/muddx-platform,jbassen/edx-platform,andyzsf/edx,jswope00/GAI,benpatterson/edx-platform,pdehaye/theming-edx-platform,andyzsf/edx,gymnasium/edx-platform,abdoosh00/edx-rtl-final,amir-qayyum-khan/edx-platform,Unow/edx-platform,etzhou/edx-platform,Kalyzee/edx-platform,DNFcode/edx-platform,Edraak/circleci-edx-platform,Edraak/edx-platform,arifsetiawan/edx-platform,longmen21/edx-platform,tanmaykm/edx-platform,Edraak/circleci-edx-platform,adoosii/edx-platform,jamesblunt/edx-platform,itsjeyd/edx-platform,jbassen/edx-platform,doismellburning/edx-platform,defance/edx-platform,don-github/edx-platform,xuxiao19910803/edx-platform,ovnicraft/edx-platform,hkawasaki/kawasaki-aio8-2,MSOpenTech/edx-platform,Semi-global/edx-platform,prarthitm/edxplatform,chauhanhardik/populo_2,wwj718/edx-platform,peterm-itr/edx-platform,tiagochiavericosta/edx-platform,iivic/BoiseStateX,lduarte1991/edx-platform,morpheby/levelup-by,shubhdev/openedx,bitifirefly/edx-platform,CourseTalk/edx-platform,a-parhom/edx-platform,pku9104038/edx-platform,jazztpt/edx-platform,waheedahmed/edx-platform,pabloborrego93/edx-platform,rue89-tech/edx-platform,kursitet/edx-platform,appliedx/edx-platform,ahmadiga/min_edx,B-MOOC/edx-platform,yokose-ks/edx-platform,zadgroup/edx-platform,jamiefolsom/edx-platform,SivilTaram/edx-platform,LICEF/edx-platform,Endika/edx-platform,longmen21/edx-platform,Kalyzee/edx-platform,cyanna/edx-platform,mahendra-r/edx-platform,unicri/edx-platform,hmcmooc/muddx-platform,miptliot/edx-platform,abdoosh00/edx-rtl-final,EduPepperPDTesting/pepper2013-testing,tiagochiavericosta/edx-platform,mbareta/edx-platform-ft,RPI-OPENEDX/edx-platform,jjmiranda/edx-pla
tform,Softmotions/edx-platform,Endika/edx-platform,rue89-tech/edx-platform,JioEducation/edx-platform,yokose-ks/edx-platform,kmoocdev/edx-platform,pku9104038/edx-platform,cselis86/edx-platform,sameetb-cuelogic/edx-platform-test,playm2mboy/edx-platform,eestay/edx-platform,chauhanhardik/populo_2 | cms/envs/aws.py | cms/envs/aws.py | """
This is the default template for our main set of AWS servers.
"""
import json
from .logsettings import get_logger_config
from .common import *
############################### ALWAYS THE SAME ################################
DEBUG = False
TEMPLATE_DEBUG = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
########################### NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(ENV_ROOT / "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
SITE_NAME = ENV_TOKENS['SITE_NAME']
LOG_DIR = ENV_TOKENS['LOG_DIR']
CACHES = ENV_TOKENS['CACHES']
for feature, value in ENV_TOKENS.get('MITX_FEATURES', {}).items():
MITX_FEATURES[feature] = value
LOGGING = get_logger_config(LOG_DIR,
logging_env=ENV_TOKENS['LOGGING_ENV'],
syslog_addr=(ENV_TOKENS['SYSLOG_SERVER'], 514),
debug=False)
REPOS = ENV_TOKENS['REPOS']
############################## SECURE AUTH ITEMS ###############################
# Secret things: passwords, access keys, etc.
with open(ENV_ROOT / "auth.json") as auth_file:
AUTH_TOKENS = json.load(auth_file)
DATABASES = AUTH_TOKENS['DATABASES']
MODULESTORE = AUTH_TOKENS['MODULESTORE']
| agpl-3.0 | Python |
|
b32d659b85901a8e04c6c921928483fda3b3e6e0 | Add the storage utility for parsing the config file structure in a more readable fashion. | kalikaneko/leap_mx,kalikaneko/leap_mx-1,leapcode/leap_mx,kalikaneko/leap_mx-1,leapcode/leap_mx,micah/leap_mx,meskio/leap_mx,meskio/leap_mx,kalikaneko/leap_mx,micah/leap_mx,isislovecruft/leap_mx,isislovecruft/leap_mx | src/leap/mx/util/storage.py | src/leap/mx/util/storage.py |
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = Storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
None
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
return None
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
def __getstate__(self):
return dict(self)
def __setstate__(self, value):
for (k, v) in value.items():
self[k] = v
| agpl-3.0 | Python |
|
49a4d3d5bfed0bb12a0e4cdee50672b23533c128 | move data to new table | geometalab/G4SE-Compass,geometalab/G4SE-Compass,geometalab/G4SE-Compass,geometalab/G4SE-Compass | compass-api/G4SE/api/migrations/0005_auto_20161010_1253.py | compass-api/G4SE/api/migrations/0005_auto_20161010_1253.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3.dev20161004124613 on 2016-10-10 12:53
from __future__ import unicode_literals
from django.db import migrations
from django.utils import timezone
from api.models import GEO_SERVICE_METADATA_AGREED_FIELDS
def _extract_publication_year(record_kwargs):
if record_kwargs['publication_year'] == 'latest':
record_kwargs['is_latest'] = True
years = [int(year) for year in record_kwargs['publication_lineage'].split(',')]
years.sort()
record_kwargs['publication_year'] = years[-1]
else:
record_kwargs['publication_year'] = int(record_kwargs['publication_year'])
return record_kwargs
def _normalize_kwargs(record_kwargs, record_object):
record_kwargs['title'] = getattr(record_object, 'content')
record_kwargs = _extract_publication_year(record_kwargs)
return record_kwargs
def _extract_kwargs_(record_object, from_import):
record_kwargs = {}
fields = GEO_SERVICE_METADATA_AGREED_FIELDS.copy()
fields.remove('is_latest')
fields.remove('title')
for field_name in fields:
record_kwargs[field_name] = getattr(record_object, field_name)
record_kwargs = _normalize_kwargs(record_kwargs, record_object)
record_kwargs['imported'] = from_import
if 'created' not in record_kwargs:
record_kwargs['created'] = timezone.datetime(year=2016, month=9, day=30)
return record_kwargs
def _create_new_entry(apps, model_kwargs):
GeoServiceMetadata = apps.get_model("api", "GeoServiceMetadata")
model_kwargs['geodata_type'] = model_kwargs['geodata_type'].lower()
data_type = model_kwargs['geodata_type']
if data_type not in ['raster', 'vector']:
model_kwargs['geodata_type'] = 'other'
GeoServiceMetadata.objects.create(**model_kwargs)
def forward(apps, schema_editor):
# forward
HarvestedRecord = apps.get_model("api", "HarvestedRecord")
Record = apps.get_model("api", "Record")
for harvested_record in HarvestedRecord.objects.all():
_create_new_entry(apps, _extract_kwargs_(harvested_record, from_import=True))
for record in Record.objects.all():
_create_new_entry(apps, _extract_kwargs_(record, from_import=False))
def _kwargs_from_geo_service_metadata(geoservice_metadata_instance):
result_kwargs = {}
for field_name in GEO_SERVICE_METADATA_AGREED_FIELDS:
result_kwargs[field_name] = getattr(geoservice_metadata_instance, field_name)
result_kwargs['content'] = result_kwargs.pop('title')
return result_kwargs
def backward(apps, schema_editor):
GeoServiceMetadata = apps.get_model("api", "GeoServiceMetadata")
GeoServiceMetadata.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('api', '0004_geoservicemetadata'),
]
operations = [
migrations.RunPython(forward, backward),
]
| mit | Python |
|
7c8f2464b303b2a40f7434a0c26b7f88c93b6ddf | add migration | qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/accounting/migrations/0036_subscription_skip_invoicing_if_no_feature_charges.py | corehq/apps/accounting/migrations/0036_subscription_skip_invoicing_if_no_feature_charges.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounting', '0035_kill_date_received'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='skip_invoicing_if_no_feature_charges',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
| bsd-3-clause | Python |
|
5e765ecf387d52c22371a69df82beacddcd12e38 | Test COREID is read-only. | futurecore/revelation,futurecore/revelation,futurecore/revelation | revelation/test/test_storage.py | revelation/test/test_storage.py | from revelation.test.machine import StateChecker, new_state
def test_coreid_read_only():
state = new_state(rfCOREID=0x808)
# Change by writing to register.
state.rf[0x65] = 0x100
expected_state = StateChecker(rfCOREID=0x808)
expected_state.check(state)
# Change by writing to memory.
# This _is_ possible, because we need to be able to write the COREID
# location when the state is initially constructed.
state.mem.write(0x808f0704, 12, 0x100)
expected_state = StateChecker(rfCOREID=0x100)
expected_state.check(state)
| bsd-3-clause | Python |
|
6af41b8b1ff4a6eb28167a063668a1f173999e5c | Create cornersMapping.py | ipeluffo/itba-infovis-2015 | cornersMapping.py | cornersMapping.py |
import csv
import requests
import time
import json
username = ""
def requestGeoName(row):
#parts = row.split(',')
lng = row[0]
lat = row[1]
r = requests.get("http://api.geonames.org/findNearestIntersectionOSMJSON?lat="+lat+"&lng="+lng+"&username="+username)
if (r.status_code == 200):
return r.json()
else:
return {"error":r.status_code}
def requestNameWsUsig(row):
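    """Fall back to the Buenos Aires USIG web services: reverse-geocode the point to its nearest corner, then convert the corner back to lon/lat."""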
x = row[0]
y = row[1]
reqReverseGeo = requests.get("http://ws.usig.buenosaires.gob.ar/geocoder/2.2/reversegeocoding?y={0}&x={1}".format(y,x))
resReverseGeo = json.loads(reqReverseGeo.content.replace("(", "").replace(")", ""), encoding="utf-8")
reqConvertirCoord = requests.get("http://ws.usig.buenosaires.gob.ar/rest/convertir_coordenadas?x={0}&y={1}&output=lonlat".format(resReverseGeo["puerta_x"], resReverseGeo["puerta_y"]))
resConvertirCoord = reqConvertirCoord.json()
result = { "intersection" : {
"lng" : resConvertirCoord["resultado"]["x"],
"lat" : resConvertirCoord["resultado"]["y"],
"street1" : resReverseGeo["esquina"],
"street2" : resReverseGeo["esquina"]
}}
return result
with open('mostSearchedPlaces.csv', 'rb') as csvfile:
with open('mostSearchedPlacesWithCorners.csv', 'a') as outputCSV:
csv_writer = csv.writer(outputCSV, delimiter=',')
reader = csv.reader(csvfile, delimiter = ',')
i = 1
for row in reader:
geoNameResult = requestGeoName(row)
# Check if there is no intersection
if (geoNameResult == {}):
geoNameResult = requestNameWsUsig(row)
print(geoNameResult)
if (not geoNameResult.has_key("error")):
row.append(str(geoNameResult["intersection"]["lng"]))
row.append(str(geoNameResult["intersection"]["lat"]))
row.append(geoNameResult["intersection"]["street1"].encode("utf-8"))
row.append(geoNameResult["intersection"]["street2"].encode("utf-8"))
csv_writer.writerow(row)
            print("Element {0} processed".format(i))
i += 1
time.sleep(2)
| apache-2.0 | Python |
|
b2d0eaca41f6c697006eeaef38b72af649415d2b | Create models.py | illagrenan/django-cookiecutter-template,illagrenan/django-cookiecutter-template,illagrenan/django-cookiecutter-template,illagrenan/django-cookiecutter-template | {{cookiecutter.repo_name}}/{{cookiecutter.src_dir}}/{{cookiecutter.main_app}}/models.py | {{cookiecutter.repo_name}}/{{cookiecutter.src_dir}}/{{cookiecutter.main_app}}/models.py | # -*- encoding: utf-8 -*-
# ! python2
| mit | Python |
|
d1b4cbfbc3956fc72bd183dbc219c4e7e8bdfb98 | add reproducer for LWT bug with static-column conditions | scylladb/scylla,scylladb/scylla,scylladb/scylla,scylladb/scylla | test/cql-pytest/test_lwt.py | test/cql-pytest/test_lwt.py | # Copyright 2020-present ScyllaDB
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#############################################################################
# Various tests for Light-Weight Transactions (LWT) support in Scylla.
# Note that we have many more LWT tests in the cql-repl framework:
# ../cql/lwt*_test.cql, ../cql/cassandra_cql_test.cql.
#############################################################################
import re
import pytest
from cassandra.protocol import InvalidRequest
from util import new_test_table, unique_key_int
@pytest.fixture(scope="module")
def table1(cql, test_keyspace):
schema='p int, c int, r int, s int static, PRIMARY KEY(p, c)'
with new_test_table(cql, test_keyspace, schema) as table:
yield table
# An LWT UPDATE whose condition uses non-static columns begins by reading
# the clustering row which must be specified by the WHERE. If there is a
# static column in the partition, it is read as well. The value of the all
# these columns - regular and static - is then passed to the condition.
# As discovered in issue #10081, if the row determined by WHERE does NOT
# exist, Scylla still needs to read the static column, but forgets to do so.
# this test reproduces this issue.
@pytest.mark.xfail(reason="Issue #10081")
def test_lwt_missing_row_with_static(cql, table1):
p = unique_key_int()
# Insert into partition p just the static column - and no clustering rows.
cql.execute(f'INSERT INTO {table1}(p, s) values ({p}, 1)')
# Now, do an update with WHERE p={p} AND c=1. This clustering row does
# *not* exist, so we expect to see r=null - and s=1 from before.
r = list(cql.execute(f'UPDATE {table1} SET s=2,r=1 WHERE p={p} AND c=1 IF s=1 and r=null'))
assert len(r) == 1
assert r[0].applied == True
# At this point we should have one row, for c=1
assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [(p, 1, 2, 1)]
# The fact that to reproduce #10081 above we needed the condition (IF) to
# mention a non-static column as well, suggests that Scylla has a different code
# path for the case that the condition has *only* static columns. In fact,
# in that case, the WHERE doesn't even need to specify the clustering key -
# the partition key should be enough. The following test confirms that this
# is indeed the case.
def test_lwt_static_condition(cql, table1):
p = unique_key_int()
cql.execute(f'INSERT INTO {table1}(p, s) values ({p}, 1)')
# When the condition only mentions static (partition-wide) columns,
# it is allowed not to specify the clustering key in the WHERE:
r = list(cql.execute(f'UPDATE {table1} SET s=2 WHERE p={p} IF s=1'))
assert len(r) == 1
assert r[0].applied == True
assert list(cql.execute(f'SELECT * FROM {table1} WHERE p={p}')) == [(p, None, 2, None)]
# When the condition also mentions a non-static column, WHERE must point
# to a clustering column, i.e., mention the clustering key. If the
# clustering key is missing, we get an InvalidRequest error, where the
# message is slightly different between Scylla and Cassandra ("Missing
# mandatory PRIMARY KEY part c" and "Some clustering keys are missing: c",
# respectively.
with pytest.raises(InvalidRequest, match=re.compile('missing', re.IGNORECASE)):
cql.execute(f'UPDATE {table1} SET s=2 WHERE p={p} IF r=1')
| agpl-3.0 | Python |
|
89c17110f9d17e99ea7686e884cfba91b4762d57 | Add starter code for Lahman db | jldbc/pybaseball | pybaseball/lahman.py | pybaseball/lahman.py | ################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s=requests.get(url,stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
| mit | Python |
|
8eafb1b613363f85c9b105812cd5d0047e5ca6ff | Add warp example script | sergionr2/RacingRobot,sergionr2/RacingRobot,sergionr2/RacingRobot,sergionr2/RacingRobot | image_processing/warp_image.py | image_processing/warp_image.py | import argparse
import cv2
import numpy as np
import matplotlib.pyplot as plt
from constants import MAX_WIDTH, MAX_HEIGHT
# Transform Parameters
y = 90
a = 0.75
delta = (MAX_HEIGHT - y) * a
height, width = 500, 320
# Original and transformed keypoints
pts1 = np.float32(
[[delta, y],
[MAX_WIDTH - delta, y],
[0, MAX_HEIGHT],
[MAX_WIDTH, MAX_HEIGHT]])
pts2 = np.float32(
[[0, 0],
[width, 0],
[0, height],
[width, height]])
# Translation Matrix
tx, ty = 300, 500
T = np.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
new_height, new_width = height + ty, int(width * 1.5) + tx
# calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(pts1, pts2)
def imshow(im, y=None, delta=None, name=""):
plt.figure(name)
# BGR to RGB
plt.imshow(im[:, :, ::-1])
if y is not None:
plt.plot([0, delta], [MAX_HEIGHT, y])
plt.plot([MAX_WIDTH, MAX_WIDTH - delta], [MAX_HEIGHT, y])
plt.plot([delta, MAX_WIDTH - delta], [y, y])
plt.grid(True)
def showTransform(image, y, delta):
im = image.copy()
for (cx, cy) in pts1:
cv2.circle(im, (int(cx), int(cy)), 8, (0, 255, 0), -1)
imshow(im, y, delta, name="transform")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Transform image to have a top down view')
parser.add_argument('-i', '--input_image', help='Input image', type=str, required=True)
args = parser.parse_args()
image = cv2.imread(args.input_image)
assert image is not None, "Could not read image"
    original_image = image.copy()
    warp = cv2.warpPerspective(original_image, np.dot(T, M), (new_width, new_height))
imshow(image, name="original")
showTransform(image, y, delta)
imshow(warp, name="warped")
plt.show()
| mit | Python |
|
77dfcc41b718ed26e9291b9efc47b0589b951fb8 | Create 0001.py | Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python | pylyria/0001/0001.py | pylyria/0001/0001.py | 1
| mit | Python |
|
d412ec65777431cdd696593ddecd0ee37a500b25 | Create 0011.py | Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2 | pylyria/0011/0011.py | pylyria/0011/0011.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
def is_sensitive(word):
sensitive_words = [line.strip() for line in open('sensitive.txt', encoding='utf-8')]
word = word.strip()
if word.lower() in sensitive_words:
return True
else:
return False
if __name__ == "__main__":
while 1:
if is_sensitive(input()):
print('Freedom')
else:
print('Human Rights')
| mit | Python |
|
5052318d2802284a0331fc77fd7d02bdaca39f42 | test if a layer is working fine | tejaskhot/ConvAE-DeSTIN,Tejas-Khot/ConvAE-DeSTIN | scripts/feature_extract_test.py | scripts/feature_extract_test.py | """Feature extraction test"""
import numpy as np;
import sys
import theano;
import theano.tensor as T;
sys.path.append("..")
import scae_destin.datasets as ds;
from scae_destin.convnet import ReLUConvLayer;
from scae_destin.convnet import LCNLayer
n_epochs=1;
batch_size=100;
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("/home/tejas/Desktop/cifar-10-batches-py");
Xtr=np.mean(Xtr, 3);
Xte=np.mean(Xte, 3);
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr));
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte));
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print "[MESSAGE] The data is loaded"
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
images=X.reshape((batch_size, 1, 32, 32))
layer_0=LCNLayer(filter_size=(7,7),
num_filters=50,
num_channels=1,
fm_size=(32,32),
batch_size=batch_size,
border_mode="full");
extract=theano.function(inputs=[idx],
outputs=layer_0.apply(images),
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]});
print extract(1).shape | apache-2.0 | Python |
|
47ebaa10068313c9b8fbbf2e3ffcf06597f88ff6 | add npy2png file converter | ecell/bioimaging | convert_npy2image.py | convert_npy2image.py | import sys
import math
import copy
import pylab
import numpy
from Image import fromarray
from scipy.misc import imread, toimage
cmin = 0
cmax = 2**8 - 1
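# Intensity range for the output images: 8-bit (0..255) when saving PNG frames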
def convert(file_in, file_out, index=None) :
i = 0
max_count = 0
while (True) :
try :
input_image = numpy.load(file_in + '/image_%07d.npy' % (i))
except Exception :
break
output_image = file_out + '/image_%07d.png' % (i)
#output_image = file_out + '/image_%07d.png' % (i/26)
# data for tirfm
#image_array = input_image[256-25:256+25,256-25:256+26,1]
#image_array = input_image[256-76:256+76,256-78:256+78,1]
#image_array = input_image[300-50:300+50,300-50:300+50,1]
#image_array = input_image[512-45:512+45,512-45:512+45,1]
image_array = input_image[:,:,1]
#image_exp += numpy.array(image_array)
amax = numpy.amax(image_array)
amin = numpy.amin(image_array)
if (max_count < amax) :
max_count = amax
#print i/26, amax, amin
print i, amax, amin
# 16-bit data format
#image_array.astype('uint16')
#toimage(image_array, low=cmin, high=cmax, mode='I').save(output_image)
# 8-bit data format (for making movie)
toimage(image_array, cmin=cmin, cmax=cmax).save(output_image)
#i += 26
i += 1
print 'Max count : ', max_count, 'ADC'
if __name__=='__main__':
file_in = '/home/masaki/microscopy/images'
file_out = '/home/masaki/microscopy/images_png'
convert(file_in, file_out)
| bsd-3-clause | Python |
|
211e9e9352234f5638036b5b1ec85f998609d587 | Add a primitive MITM proxy | prophile/libdiana | diana/utils/proxy.py | diana/utils/proxy.py | from diana import packet
import argparse
import asyncio
import sys
import socket
from functools import partial
class Buffer:
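    """Accumulate raw bytes from one peer and decode any complete packets as they arrive."""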
def __init__(self, provenance):
self.buffer = b''
self.provenance = provenance
def eat(self, data):
self.buffer += data
packets, self.buffer = packet.decode(self.buffer, provenance=self.provenance)
return packets
BLOCKSIZE = 1024
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simple Artemis SBS proxy')
parser.add_argument('proxy_port', type=int, help='Server port')
parser.add_argument('address', help='Server address (DNS, IPv4 or IPv6)')
parser.add_argument('port', type=int, nargs='?', default=2010, help='Server port')
args = parser.parse_args()
loop = asyncio.get_event_loop()
@asyncio.coroutine
def transit(reader, writer, provenance, tag):
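        """Relay one direction of the connection: decode packets from reader, log them with the given tag, and re-encode them to writer."""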
buf = Buffer(provenance)
while True:
data = yield from reader.read(BLOCKSIZE)
for pkt in buf.eat(data):
writer.write(packet.encode(pkt, provenance=provenance))
sys.stdout.write('{} {}\n'.format(tag, pkt))
sys.stdout.flush()
@asyncio.coroutine
def handle_p2c(client_reader, client_writer):
server_reader, server_writer = yield from asyncio.open_connection(args.address,
args.port,
loop=loop)
asyncio.async(transit(client_reader, server_writer,
provenance=packet.PacketProvenance.client,
tag='[C>S]'), loop=loop)
asyncio.async(transit(server_reader, client_writer,
provenance=packet.PacketProvenance.server,
tag='[C<S]'), loop=loop)
svr = asyncio.start_server(handle_p2c, '127.0.0.1', args.proxy_port, loop=loop)
server = loop.run_until_complete(svr)
loop.run_forever()
| mit | Python |
|
d890ef34b11200738687ec49a4a005bb9ebe7c2a | make the module executable | ferreum/distanceutils,ferreum/distanceutils | distance/__main__.py | distance/__main__.py | #!/usr/bin/env python
from . import __version__
print(f"distanceutils version {__version__}")
# vim:set sw=4 ts=8 sts=4 et:
| mit | Python |
|
768b61316a10726a3281a514823f280abc142356 | move wild into its own folder | graingert/vcrpy,mgeisler/vcrpy,poussik/vcrpy,agriffis/vcrpy,graingert/vcrpy,gwillem/vcrpy,kevin1024/vcrpy,aclevy/vcrpy,poussik/vcrpy,IvanMalison/vcrpy,kevin1024/vcrpy,bcen/vcrpy,yarikoptic/vcrpy,ByteInternet/vcrpy,ByteInternet/vcrpy | tests/integration/test_wild.py | tests/integration/test_wild.py | import pytest
requests = pytest.importorskip("requests")
import vcr
def test_domain_redirect():
'''Ensure that redirects across domains are considered unique'''
# In this example, seomoz.org redirects to moz.com, and if those
# requests are considered identical, then we'll be stuck in a redirect
# loop.
url = 'http://seomoz.org/'
with vcr.use_cassette('domain_redirect.yaml') as cass:
requests.get(url, headers={'User-Agent': 'vcrpy-test'})
# Ensure that we've now served two responses. One for the original
# redirect, and a second for the actual fetch
assert len(cass) == 2
| mit | Python |
|
c193aebdc76eae285df402463c149bef328c05ef | Add backwards-compatible registration.urls, but have it warn pending deprecation. | dinie/django-registration,FundedByMe/django-registration,dinie/django-registration,FundedByMe/django-registration,Avenza/django-registration | registration/urls.py | registration/urls.py | import warnings
warnings.warn("Using include('registration.urls') is deprecated; use include('registration.backends.default.urls') instead",
PendingDeprecationWarning)
from registration.backends.default.urls import *
| bsd-3-clause | Python |
|
fe88e0d8dc3d513cd11ef9ab4cb3ea332af99202 | Add migration | Ircam-Web/mezzanine-organization,Ircam-Web/mezzanine-organization | organization/network/migrations/0112_auto_20180502_1742.py | organization/network/migrations/0112_auto_20180502_1742.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2018-05-02 15:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organization-network', '0111_auto_20180307_1152'),
]
operations = [
migrations.AddField(
model_name='organization',
name='is_main',
field=models.BooleanField(default=False, verbose_name='is main'),
),
migrations.AddField(
model_name='teamlink',
name='title_en',
field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='title'),
),
migrations.AddField(
model_name='teamlink',
name='title_fr',
field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='title'),
),
]
| agpl-3.0 | Python |
|
b82c7343af06c19e6938bd27359289ab067db1e9 | add expectation core (#4357) | great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_sum_to_be.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_sum_to_be.py | """
This is a template for creating custom ColumnExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_aggregate_expectations
"""
from typing import Dict, Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine
from great_expectations.expectations.expectation import ColumnExpectation
# This class defines the Expectation itself
class ExpectColumnSumToBe(ColumnExpectation):
"""Expect the sum of a column to be exactly a value."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {"a": [1, 2, 3, 4, 5]},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "sum_total": 15},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "sum_total": 14},
"out": {"success": False},
},
],
"test_backends": [
{
"backend": "pandas",
"dialects": None,
},
{
"backend": "sqlalchemy",
"dialects": ["sqlite", "postgresql"],
},
{
"backend": "spark",
"dialects": None,
},
],
}
]
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
metric_dependencies = ("column.sum",)
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = ("sum_total",)
# This dictionary contains default values for any parameters that should have default values.
default_kwarg_values = {}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
return True
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
actual_value = metrics["column.sum"]
predicted_value = self.get_success_kwargs(configuration).get("sum_total")
success = actual_value == predicted_value
return {"success": success, "result": {"observed_value": actual_value}}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"column aggregate expectation",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@joshua-stauffer", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnSumToBe().print_diagnostic_checklist()
| apache-2.0 | Python |
|
10001d5c611e59dd426d829fa7c2242b5f93df0d | add element collection base | lmtierney/watir-snake | watir_snake/element_collection.py | watir_snake/element_collection.py | from importlib import import_module
import watir_snake
class ElementCollection(object):
# TODO: include Enumerable
def __init__(self, query_scope, selector):
self.query_scope = query_scope
self.selector = selector
self.as_list = []
self.elements = []
def __iter__(self):
"""
Yields each element in collection
:rtype: iter
:Example:
divs = browser.divs(class='kls')
for div in divs:
print(div.text)
"""
for e in self.to_list:
yield e
def __len__(self):
"""
Returns the number of elements in the collection
:rtype: int
"""
return len(self.to_list)
def __getitem__(self, idx):
"""
Get the element at the given index
Also note that because of lazy loading, this will return an Element instance even if
the index is out of bounds
:param idx: index of wanted element, 0-indexed
:type idx: int
:return: instance of Element subclass
:rtype: watir_snake.elements.element.Element
"""
        try:
            return self.to_list[idx]
        except IndexError:
            return self._element_class(self.query_scope, dict(index=idx, **self.selector))
@property
def to_list(self):
"""
This collection as a list
        :rtype: list[watir_snake.elements.element.Element]
"""
if not self.as_list:
elements = []
for idx, e in enumerate(self._elements):
element = self._element_class(self.query_scope, dict(index=idx, **self.selector))
if self._element_class == watir_snake.elements.HTMLElement:
elements.append(element.to_subtype())
else:
elements.append(element)
self.as_list = elements
return self.as_list
def __eq__(self, other):
"""
Returns true if two element collections are equal.
:param other: other collection
:rtype: bool
:Example:
browser.select_list(name='new_user_languages').options == \
browser.select_list(id='new_user_languages').options #=> True
            browser.select_list(name='new_user_role').options == \
            browser.select_list(id='new_user_languages').options    #=> False
"""
return self.to_list == other.to_list
eql = __eq__
# private
@property
def _elements(self):
        if isinstance(self.query_scope, watir_snake.elements.IFrame):
self.query_scope.switch_to()
else:
getattr(self.query_scope, 'assert_exists')()
element_validator = self._element_validator_class()
selector_builder = self._selector_builder_class(self.query_scope, self.selector,
self._element_class.attribute_list)
locator = self._locator_class(self.query_scope, self.selector, self._selector_builder,
self._element_validator)
if not self.elements:
self.elements = self.locator.locate_all()
return self.elements
@property
def _locator_class(self):
return self._import_module.Locator
@property
def _element_validator_class(self):
return self._import_module.Validator
@property
def _selector_builder_class(self):
return self._import_module.SelectorBuilder
@property
def _import_module(self):
modules = [watir_snake.locator_namespace.__name__, self._element_class_name.lower()]
try:
return import_module('watir_snake.{}.{}.locator'.format(*modules))
except ImportError:
return import_module('watir_snake.{}.element.locator'.format(*modules[:1]))
@property
def _element_class_name(self):
return self._element_class.__name__
@property
def _element_class(self):
return getattr(watir_snake.elements, self.__class__.__name__.replace('Collection', ''))
| mit | Python |
|
a27c9a8ddf6ab1cd264b02afc95754da6b4bb058 | Add partial indexes | ashleywaite/django-more | django-more/indexes.py | django-more/indexes.py | """ Define custom index types useful for SID and utils """
import hashlib
from django.db.models import Index, Q
from django.db import DEFAULT_DB_ALIAS
__all__ = ['PartialIndex']
class PartialIndex(Index):
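    """Index restricted to rows matching the given Q filters, rendered as a WHERE clause on the CREATE INDEX statement."""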
suffix = "par"
def __init__(self, *args, fields=[], name=None, **kwargs):
self.q_filters = [arg for arg in args if isinstance(arg, Q)]
if kwargs:
self.q_filters.extend([Q(**{kwarg: val}) for kwarg, val in kwargs.items()])
super().__init__(fields, name)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
self.make_qs_compatible()
args += tuple(self.q_filters)
return path, args, kwargs
@staticmethod
def get_where_sql(query):
where, w_params = query.get_compiler(DEFAULT_DB_ALIAS).compile(query.where)
return " WHERE {}".format(where % (*w_params,))
def get_query(self, model):
return model.objects.filter(*self.q_filters).query
def get_sql_create_template_values(self, model, schema_editor, using):
parameters = super().get_sql_create_template_values(model, schema_editor, using=using)
# Create a queryset using the supplied filters to validate and generate WHERE
query = self.get_query(model)
# Access query compiler for WHERE directly
if query.where:
parameters["extra"] = self.get_where_sql(query)
return parameters
def make_qs_compatible(self):
if not hasattr(Q, "deconstruct"):
for q in [qf for qf in self.q_filters if isinstance(qf, Q)]:
q.__class__ = Qcompat
# Almost identical to default implementation but adds WHERE to hashing
def set_name_with_model(self, model):
table_name = model._meta.db_table
column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
column_names_with_order = [
(('-%s' if order else '%s') % column_name)
for column_name, (field_name, order) in zip(column_names, self.fields_orders)
]
hash_data = [table_name] + column_names_with_order + [self.suffix] + [self.get_where_sql(self.get_query(model))]
self.name = '%s_%s_%s' % (
table_name[:11],
column_names[0][:7],
'%s_%s' % (self._hash_generator(*hash_data), self.suffix),
)
assert len(self.name) <= self.max_name_length, (
'Index too long for multiple database support. Is self.suffix '
'longer than 3 characters?'
)
self.check_name()
def __eq__(self, val):
if isinstance(val, PartialIndex):
# Use cheap repr() comparison on deconstruction to check if the same
return repr(self.deconstruct()) == repr(val.deconstruct())
# This feature is not present in Django 1.11 but is required for deconstruction of
# partial indexes. So if not present when needed, the Qs are wrapped in this
class Qcompat(Q):
def __init__(self, *args, **kwargs):
connector = kwargs.pop('_connector', None)
negated = kwargs.pop('_negated', False)
super(Q, self).__init__(children=list(args) + list(kwargs.items()), connector=connector, negated=negated)
def deconstruct(self):
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
args, kwargs = (), {}
if len(self.children) == 1 and not isinstance(self.children[0], Q):
child = self.children[0]
kwargs = {child[0]: child[1]}
else:
args = tuple(self.children)
kwargs = {'_connector': self.connector}
if self.negated:
kwargs['_negated'] = True
return path, args, kwargs
| bsd-3-clause | Python |
|
07fcdfe3da7d5ffda3ff7139b2f8cd0f02a5ad06 | Create xml_to_text_new.py | BSchilperoort/BR-DTS-Processing | xml_conversion/xml_to_text_new.py | xml_conversion/xml_to_text_new.py | ##Imports
import xml.etree.cElementTree as ET
from glob import glob
from time import time
import os
#############################################################################
# NOTE: When importing xml files, make sure the distances do not change #
# between files in the same folder. This will lead to errors #
#############################################################################
##!>Set working directory to correct folder (BR-DTS-Processing)
#working_directory = r'D:\Github\BR-DTS-Processing'
working_directory = r'C:\Users\Bart\Downloads\BR-DTS-Processing-master'
os.chdir(working_directory)
##Write data to txt file
#Get start time
ta = time()
#Get all xml files from the directory
#leave different channels in different directories!
file_names = sorted(glob(r'xml_conversion\\xml_example_2016\*.xml'))
file_amount = len(file_names)
#Initialise variables
timestamp = [None]
pt100 = [None]
data = [None]
#Open output file, write header
data_filename = r'xml_conversion\output\dts_data_V2.txt'
data_file = open(data_filename, 'w')
data_file.write('Ultima data_file. Next row; distances (m).')
#Get distances from xml
tree = ET.ElementTree(file=file_names[0])
root = tree.getroot()
start_index = float(root[0][4].text)
end_index = float(root[0][5].text)
increment = float(root[0][6].text)
start_time = root[0][7].text
end_time = root[0][8].text
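#root[0][15] holds the measurement log; each entry is a comma-separated sample whose fourth field is the temperature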
logdata = [x.text for x in root[0][15]]
data_strings = logdata[2:]
data_length = len(data_strings)
temp = [None]*data_length
for ii in range(0, data_length):
temp[ii] = float(data_strings[ii].split(',')[3])
diff = (end_index - start_index)/(data_length - 1)
distances = [str(diff * x + start_index)[0:9] for x in range(data_length)]
#Write distances to file
data_file.write('\n'+';'.join(distances))
#Write Time & temperature header
data_file.write('\nTime\tTemperature')
#Loop over all files and extract the
for ii in range(0,file_amount):
tree = ET.parse(file_names[ii])
root = tree.getroot()
#test if start or end indexes have changed (untested function):
if not (float(root[0][4].text) == start_index and float(root[0][5].text) == end_index):
raise Exception('Distance of file '+file_names[0]+' does not match starting indexes! \n'
'Check if settings were changed in between files')
#Copy timestamp from DTS to .txt
timestamp = root[0][8].text[:-5]
#Get the data values
logdata = [x.text for x in root[0][15]]
data_strings = logdata[2:]
#get the temperature from the xml
#Define full list first, then add values (for speed)
temperature = [None]*data_length
for ii in range(0, data_length):
temperature[ii] = data_strings[ii].split(',')[3][:-1]
#Append to file
file_line = '\n'+timestamp+'\t'+';'.join(temperature)
data_file.write(file_line)
data_file.close()
#Print elapsed time; for code optimization
print('Elapsed time:',time()-ta)
| mit | Python |
|
48e4b9692b29d3fb9f43f37fef70ccc41f47fc0e | Add tests for the errors utility functions | Yaco-Sistemas/yith-library-server,Yaco-Sistemas/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server | yithlibraryserver/tests/errors.py | yithlibraryserver/tests/errors.py | import unittest
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
from yithlibraryserver.errors import password_not_found, invalid_password_id
class ErrorsTests(unittest.TestCase):
def test_password_not_found(self):
result = password_not_found()
self.assertTrue(isinstance(result, HTTPNotFound))
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.body, '{"message": "Password not found"}')
# try a different message
result = password_not_found('test')
        self.assertEqual(result.body, '{"message": "test"}')
def test_invalid_password_id(self):
result = invalid_password_id()
self.assertTrue(isinstance(result, HTTPBadRequest))
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(result.body, '{"message": "Invalid password id"}')
# try a different message
result = invalid_password_id('test')
        self.assertEqual(result.body, '{"message": "test"}')
| agpl-3.0 | Python |
|
4c225ec7cdafc45840b2459e8804df5818fecd71 | add util module | ecreall/dace | dace/util.py | dace/util.py | from pyramid.threadlocal import get_current_request
from substanced.util import find_objectmap
def get_obj(oid):
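    """Resolve a substanced oid back to its object via the objectmap of the current request's site root."""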
request = get_current_request()
objectmap = find_objectmap(request.root)
obj = objectmap.object_for(oid)
return obj
| agpl-3.0 | Python |
|
ddfc28360941a435ae22705dbc46b44cced588e7 | Add demo file. | ProjetPP/PPP-Spell-Checker,ProjetPP/PPP-Spell-Checker | demo/demo.py | demo/demo.py | #!/usr/bin/env python3
import fileinput
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import ppp_spell_checker
if __name__ == "__main__":
corrector = ppp_spell_checker.StringCorrector('en')
while(True):
print(corrector.correctString(input("")))
| mit | Python |
|
8003f9f643b90cf42bdd8ba0ec8d5dc2f96ba191 | Create list-aws-queue.py | kattymo/GITHUB-Repo-lab11 | list-aws-queue.py | list-aws-queue.py | # This script created a queue
#
# Author - Paul Doyle Nov 2015
#
#
import boto.sqs
import boto.sqs.queue
from boto.sqs.message import Message
from boto.sqs.connection import SQSConnection
from boto.exception import SQSError
import sys
# Get the keys from a specific url and then use them to connect to AWS Service
access_key_id = "AKIAIBKC3KC4HZNSXFIA"
secret_access_key = "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"
# Set up a connection to the AWS service.
conn = boto.sqs.connect_to_region("eu-west-1", aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
# Get a list of the queues that exists and then print the list out
rs = conn.get_all_queues()
for q in rs:
print q.id
| mit | Python |
|
298f297410b9db8b2d211b1d0edddb595f1fa469 | Add timestamp2str() | ronrest/convenience_py,ronrest/convenience_py | datetime/datetime.py | datetime/datetime.py | import datetime
# ==============================================================================
# TIMESTAMP 2 STR
# ==============================================================================
def timestamp2str(t, pattern="%Y-%m-%d %H:%M:%S"):
""" Given a float timestamp it returns the date as a formatted string,
based on the date `pattern` specified """
return datetime.datetime.fromtimestamp(t).strftime(pattern)
| apache-2.0 | Python |
|
0e9e63a48c5f3e02fb49d0068363ac5442b39e37 | Add a body to posts | incuna/django-discussion,lehins/lehins-discussion,lehins/lehins-discussion,incuna/django-discussion,lehins/lehins-discussion | discussion/models.py | discussion/models.py | from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
| from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
| bsd-2-clause | Python |
62beb09ca1ecde8be4945016ae09beaad2dad597 | Create disemvowel_trolls.py | Kunalpod/codewars,Kunalpod/codewars | disemvowel_trolls.py | disemvowel_trolls.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Disemvowel Trolls
#Problem level: 7 kyu
def disemvowel(string):
return ''.join([letter for letter in string if letter.lower() not in ['a', 'e', 'i', 'o', 'u']])
| mit | Python |
|
078bc9ea1375ac8ff7b2bbb92553ae63e5190cd3 | add var.py in package structData to save vars | develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms | trunk/editor/structData/var.py | trunk/editor/structData/var.py | #!/usr/bin/env python
class Var(object):
def __init__(self, name, start_value, set_value=None):
self.name = name
self.start_value = start_value
self.set_value = set_value
| mit | Python |
|
a26f0cc1af189686a24518510095f93b064a36a4 | Add two utility functions for group membership | prophile/django_split | django_split/base.py | django_split/base.py | import six
import datetime
import inflection
from django.contrib.auth.models import User
from .models import ExperimentGroup
from .validation import validate_experiment
EXPERIMENTS = {}
class ExperimentMeta(type):
def __init__(self, name, bases, dict):
super(ExperimentMeta, self).__init__(name, bases, dict)
# Special case: don't do experiment processing on the base class
if (
name == 'Experiment' and
self.__module__ == ExperimentMeta.__module__
):
return
slug = inflection.underscore(name)
if len(slug) > 48:
raise ValueError("Experiment name too long")
if slug in EXPERIMENTS:
raise AssertionError(
"Experiment %s defined multiple times (as %s.%s and %s.%s)" % (
slug,
dict['__module__'],
dict['__qualname__'],
EXPERIMENTS[slug].__module__,
EXPERIMENTS[slug].__qualname__,
),
)
validate_experiment(self)
self.slug = slug
EXPERIMENTS[slug] = self
class Experiment(six.with_metaclass(ExperimentMeta)):
groups = ('control', 'experiment')
control_group = 'control'
superuser_group = None
include_new_users = True
include_old_users = True
metrics = ()
start_date = None
end_date = None
@classmethod
def group(cls, group_name):
# This will raise a ValueError if the group does not exist. Whilst
# group_index is not used if we're before the experiment start date,
# we want to catch errors from using the wrong group name immediately.
        group_index = cls.groups.index(group_name)
# TODO: superuser logic
# Until the start of the experiment, all users are in the control group
        if datetime.date.today() < cls.start_date:
            if group_name == cls.control_group:
return User.objects.all()
else:
return User.objects.none()
return User.objects.filter(id__in=
ExperimentGroup.objects.filter(
                experiment=cls.slug,
group=group_index,
),
)
@classmethod
def in_group(cls, user, group):
return user in cls.group(group)
| import six
import inflection
from .validation import validate_experiment
EXPERIMENTS = {}
class ExperimentMeta(type):
def __init__(self, name, bases, dict):
super(ExperimentMeta, self).__init__(name, bases, dict)
# Special case: don't do experiment processing on the base class
if (
name == 'Experiment' and
self.__module__ == ExperimentMeta.__module__
):
return
slug = inflection.underscore(name)
if len(slug) > 48:
raise ValueError("Experiment name too long")
if slug in EXPERIMENTS:
raise AssertionError(
"Experiment %s defined multiple times (as %s.%s and %s.%s)" % (
slug,
dict['__module__'],
dict['__qualname__'],
EXPERIMENTS[slug].__module__,
EXPERIMENTS[slug].__qualname__,
),
)
validate_experiment(self)
self.slug = slug
EXPERIMENTS[slug] = self
class Experiment(six.with_metaclass(ExperimentMeta)):
groups = ('control', 'experiment')
control_group = 'control'
superuser_group = None
include_new_users = True
include_old_users = True
metrics = ()
start_date = None
end_date = None
| mit | Python |
316d0518f2cf81ce3045335b79bc993020befce1 | create main class `FlaskQuik` for bridging quik and flask | avelino/Flask-Quik | flask_quik.py | flask_quik.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
flask.ext.quik
~~~~~~~~~~~~~~
Extension implementing Quik Templates support in Flask with support for
flask-babel
:copyright: (c) 2012 by Thiago Avelino <[email protected]>
:license: MIT, see LICENSE for more details.
"""
from quik import FileLoader
class FlaskQuik(object):
"""
Main class for bridging quik and flask. We try to stay as close as possible
to how Jinja2 is used in Flask, while at the same time surfacing the useful
stuff from Quik.
"""
def __init__(self, app=None):
self.app = None
if app is not None:
self.init_app(app)
self.app = app
def init_app(self, app):
"""
Initialize a :class:`~flask.Flask` application
for use with this extension. This method is useful for the factory
pattern of extension initialization. Example::
quik = FlaskQuik()
app = Flask(__name__)
quik.init_app(app)
.. note::
This call will fail if you called the :class:`FlaskQuik`
constructor with an ``app`` argument.
"""
if self.app:
raise RuntimeError("Cannot call init_app when app argument was "
"provided to FlaskQuik constructor.")
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['quik'] = self
| mit | Python |
|
4844ac93326186ded80147a3f8e1e1429212428b | add user's launcher | tensorflow/tfx,tensorflow/tfx | tfx/experimental/templates/taxi/stub_component_launcher.py | tfx/experimental/templates/taxi/stub_component_launcher.py | # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stub component launcher for launching stub executors in KFP."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, List, Text, Type

from tfx.experimental.pipeline_testing import base_stub_executor
from tfx.experimental.pipeline_testing import stub_component_launcher
from tfx.experimental.templates.taxi.pipeline import configs
class StubComponentLauncher(stub_component_launcher.StubComponentLauncher):
"""Responsible for launching stub executors in KFP Template.
This stub component launcher cannot be defined in the kubeflow_dag_runner.py
because launcher class is imported by the module path.
"""
def __init__(self, **kwargs):
super(StubComponentLauncher, self).__init__(**kwargs)
# TODO(StubExecutor): GCS directory where KFP outputs are recorded
self.test_data_dir = "gs://{}/testdata".format(configs.GCS_BUCKET_NAME)
# TODO(StubExecutor): customize self.stubbed_component_ids to replace components
# with BaseStubExecutor
self.stubbed_component_ids = ['CsvExampleGen', 'StatisticsGen',
'SchemaGen', 'ExampleValidator',
'Trainer', 'Transform', 'Evaluator', 'Pusher']
# TODO(StubExecutor): (Optional) Use stubbed_component_map to insert custom stub
# executor class as a value and component id as a key.
self.stubbed_component_map = {}
for c_id in self.stubbed_component_ids:
self.stubbed_component_map[c_id] = base_stub_executor.BaseStubExecutor
def get_stub_launcher_class(stub_launcher: Type[StubComponentLauncher],
test_data_dir: Text,
stubbed_component_ids: List[Text],
stubbed_component_map: Dict[Text, Type[base_stub_executor.BaseStubExecutor]]
) -> Type[StubComponentLauncher]:
"""Returns a StubComponentLauncher class.
Returns:
StubComponentLauncher class holding stub executors.
"""
stub_launcher.stubbed_component_map = dict(stubbed_component_map)
for component_id in stubbed_component_ids:
stub_launcher.stubbed_component_map[component_id] = \
base_stub_executor.BaseStubExecutor
stub_launcher.test_data_dir = test_data_dir
return stub_launcher
| apache-2.0 | Python |
|
20d77f66e0287b3aab08b4cf14f23e7e5672aefd | Create database import script for the Picks table (each NFLPool Player's picks for a given season) | prcutler/nflpool,prcutler/nflpool | db_setup/nflpool_picks.py | db_setup/nflpool_picks.py | import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Picks;
CREATE TABLE Picks (
    firstname TEXT NOT NULL,
    lastname TEXT NOT NULL,
    id INTEGER NOT NULL PRIMARY KEY UNIQUE,
    season TEXT NOT NULL UNIQUE,
    email TEXT NOT NULL UNIQUE,
    timestamp TEXT NOT NULL,
    afc_east_first TEXT NOT NULL,
    afc_east_second TEXT NOT NULL,
    afc_east_last TEXT NOT NULL,
    afc_north_first TEXT NOT NULL,
    afc_north_second TEXT NOT NULL,
    afc_north_last TEXT NOT NULL,
    afc_south_first TEXT NOT NULL,
    afc_south_second TEXT NOT NULL,
    afc_south_last TEXT NOT NULL,
    afc_west_first TEXT NOT NULL,
    afc_west_second TEXT NOT NULL,
    afc_west_last TEXT NOT NULL,
    nfc_east_first TEXT NOT NULL,
    nfc_east_second TEXT NOT NULL,
    nfc_east_last TEXT NOT NULL,
    nfc_north_first TEXT NOT NULL,
    nfc_north_second TEXT NOT NULL,
    nfc_north_last TEXT NOT NULL,
    nfc_south_first TEXT NOT NULL,
    nfc_south_second TEXT NOT NULL,
    nfc_south_last TEXT NOT NULL,
    nfc_west_first TEXT NOT NULL,
    nfc_west_second TEXT NOT NULL,
    nfc_west_last TEXT NOT NULL,
    afc_wildcard1 TEXT NOT NULL,
    afc_wildcard2 TEXT NOT NULL,
    nfc_wildcard1 TEXT NOT NULL,
    nfc_wildcard2 TEXT NOT NULL,
    afc_rushing_first TEXT NOT NULL,
    afc_rushing_second TEXT NOT NULL,
    afc_rushing_third TEXT NOT NULL,
    afc_passing_first TEXT NOT NULL,
    afc_passing_second TEXT NOT NULL,
    afc_passing_third TEXT NOT NULL,
    afc_receiving_first TEXT NOT NULL,
    afc_receiving_second TEXT NOT NULL,
    afc_receiving_third TEXT NOT NULL,
    afc_sacks_first TEXT NOT NULL,
    afc_sacks_second TEXT NOT NULL,
    afc_sacks_third TEXT NOT NULL,
    afc_int_first TEXT NOT NULL,
    afc_int_second TEXT NOT NULL,
    afc_int_third TEXT NOT NULL,
    nfc_rushing_first TEXT NOT NULL,
    nfc_rushing_second TEXT NOT NULL,
    nfc_rushing_third TEXT NOT NULL,
    nfc_passing_first TEXT NOT NULL,
    nfc_passing_second TEXT NOT NULL,
    nfc_passing_third TEXT NOT NULL,
    nfc_receiving_first TEXT NOT NULL,
    nfc_receiving_second TEXT NOT NULL,
    nfc_receiving_third TEXT NOT NULL,
    nfc_sacks_first TEXT NOT NULL,
    nfc_sacks_second TEXT NOT NULL,
    nfc_sacks_third TEXT NOT NULL,
    nfc_int_first TEXT NOT NULL,
    nfc_int_second TEXT NOT NULL,
    nfc_int_third TEXT NOT NULL,
    afc_pf TEXT NOT NULL,
    nfc_pf TEXT NOT NULL,
    specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
| mit | Python |
|
ed1cd0f7de1a7bebaaf0f336ba52e04286dd87de | Create my_mapper.py | jnimish77/Cloud-Computing-and-Programming-using-various-tools,jnimish77/Cloud-Computing-and-Programming-using-various-tools,jnimish77/Cloud-Computing-and-Programming-using-various-tools | Hadoop--Project-to-map-new-Your-taxi-data-info/my_mapper.py | Hadoop--Project-to-map-new-Your-taxi-data-info/my_mapper.py | #!/usr/bin/env python
import sys
for line in sys.stdin:
line = line.strip()
    unpacked = line.split(",")
    stadium, capacity, expanded, location, surface, turf, team, opened, weather, roof, elevation = unpacked
#medallion, hack_license, vendor_id, rate_code, store_and_fwd_flag, pickup_datetime, dropoff_datetime, passenger_count, trip_time_in_secs, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude = line.split(",")
results = [turf, "1"]
print("\t".join(results))
| apache-2.0 | Python |
|
8ad4627973db344e228a9170aef030ab58efdeb9 | Add column order and importable objects lists | edofic/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core | src/ggrc/converters/__init__.py | src/ggrc/converters/__init__.py | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
| # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
| apache-2.0 | Python |
53926f18fb4f058bba9dd23fb75721d3dfa1d24b | add hashes directory | TheAlgorithms/Python | hashes/md5.py | hashes/md5.py | import math
def rearrange(bitString32):
if len(bitString32) != 32:
raise ValueError("Need length 32")
newString = ""
for i in [3,2,1,0]:
newString += bitString32[8*i:8*i+8]
return newString
def reformatHex(i):
hexrep = format(i,'08x')
thing = ""
for i in [3,2,1,0]:
thing += hexrep[2*i:2*i+2]
return thing
def pad(bitString):
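    # Append a 1 bit, zero-pad to 448 bits mod 512, then append the original bit length as a 64-bit little-endian value.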
startLength = len(bitString)
bitString += '1'
while len(bitString) % 512 != 448:
bitString += '0'
lastPart = format(startLength,'064b')
bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32])
return bitString
def getBlock(bitString):
currPos = 0
while currPos < len(bitString):
currPart = bitString[currPos:currPos+512]
mySplits = []
for i in range(16):
mySplits.append(int(rearrange(currPart[32*i:32*i+32]),2))
yield mySplits
currPos += 512
def not32(i):
i_str = format(i,'032b')
new_str = ''
for c in i_str:
new_str += '1' if c=='0' else '0'
return int(new_str,2)
def sum32(a,b):
return (a + b) % 2**32
def leftrot32(i,s):
return (i << s) ^ (i >> (32-s))
def md5me(testString):
bs =''
for i in testString:
bs += format(ord(i),'08b')
bs = pad(bs)
tvals = [int(2**32 * abs(math.sin(i+1))) for i in range(64)]
a0 = 0x67452301
b0 = 0xefcdab89
c0 = 0x98badcfe
d0 = 0x10325476
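    # Per-round left-rotation amounts from the MD5 specification (RFC 1321).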
s = [7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, \
5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, \
4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, \
6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21 ]
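    # Process the padded message in 512-bit blocks of sixteen little-endian 32-bit words.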
for m in getBlock(bs):
A = a0
B = b0
C = c0
D = d0
for i in range(64):
if i <= 15:
#f = (B & C) | (not32(B) & D)
f = D ^ (B & (C ^ D))
g = i
elif i<= 31:
#f = (D & B) | (not32(D) & C)
f = C ^ (D & (B ^ C))
g = (5*i+1) % 16
elif i <= 47:
f = B ^ C ^ D
g = (3*i+5) % 16
else:
f = C ^ (B | not32(D))
g = (7*i) % 16
dtemp = D
D = C
C = B
B = sum32(B,leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i]))
A = dtemp
a0 = sum32(a0, A)
b0 = sum32(b0, B)
c0 = sum32(c0, C)
d0 = sum32(d0, D)
digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0)
return digest
def test():
assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
assert md5me("The quick brown fox jumps over the lazy dog") == "9e107d9d372bb6826bd81d3542a419d6"
print "Success."
if __name__ == "__main__":
test()
| mit | Python |
|
8141d6cafb4a1c8986ec7065f27d536d98cc9916 | Add little script calculate sample spectra. | RabadanLab/MITKats,MITK/MITK,MITK/MITK,RabadanLab/MITKats,MITK/MITK,MITK/MITK,fmilano/mitk,fmilano/mitk,fmilano/mitk,iwegner/MITK,RabadanLab/MITKats,iwegner/MITK,RabadanLab/MITKats,iwegner/MITK,RabadanLab/MITKats,MITK/MITK,fmilano/mitk,iwegner/MITK,fmilano/mitk,iwegner/MITK,fmilano/mitk,fmilano/mitk,RabadanLab/MITKats,MITK/MITK,iwegner/MITK | Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py | Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py | '''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # billirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
| bsd-3-clause | Python |
|
ec0cf9c6eb8ecc69482ed08f22a760d73f420619 | Add API tests | PyBossa/pybossa,Scifabric/pybossa,Scifabric/pybossa,PyBossa/pybossa | test/test_api/test_api_project_stats.py | test/test_api/test_api_project_stats.py | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2017 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
import json
from default import with_context
from test_api import TestAPI
from factories import ProjectFactory, TaskFactory, TaskRunFactory
from pybossa.repositories import ProjectStatsRepository
import pybossa.cache.project_stats as stats
class TestProjectStatsAPI(TestAPI):
@with_context
def test_query_projectstats(self):
"""Test API query for project stats endpoint works"""
project_stats = []
projects = ProjectFactory.create_batch(3)
for project in projects:
for task in TaskFactory.create_batch(4, project=project, n_answers=3):
TaskRunFactory.create(task=task)
stats.update_stats(project.id)
ps = stats.get_stats(project.id, full=True)
project_stats.append(ps)
extra_stat_types = ['hours_stats', 'dates_stats', 'users_stats']
# As anon
url = '/api/projectstats'
res = self.app_get_json(url)
data = json.loads(res.data)
assert len(data) == 3, data
# Limits
res = self.app.get(url + "?limit=1")
data = json.loads(res.data)
assert len(data) == 1, data
# Keyset pagination
res = self.app.get(url + '?limit=1&last_id=' + str(projects[1].id))
data = json.loads(res.data)
assert len(data) == 1, len(data)
assert data[0]['id'] == project.id
# Errors
res = self.app.get(url + "?something")
err = json.loads(res.data)
err_msg = "AttributeError exception should be raised"
        assert res.status_code == 415, err_msg
assert err['action'] == 'GET', err_msg
assert err['status'] == 'failed', err_msg
assert err['exception_cls'] == 'AttributeError', err_msg
        # Invalid orderby attribute
url = "/api/projectstats?orderby=wrongattribute"
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "It should be 415."
assert data['status'] == 'failed', data
assert data['status_code'] == 415, data
assert 'has no attribute' in data['exception_msg'], data
# Order by
url = "/api/projectstats?orderby=id"
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "It should get the last item first."
ps_by_id = sorted(project_stats, key=lambda x: x.id, reverse=False)
for i in range(len(project_stats)):
assert ps_by_id[i].id == data[i]['id']
# Desc filter
url = "/api/projectstats?orderby=id&desc=true"
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "It should get the last item first."
ps_by_id = sorted(project_stats, key=lambda x: x.id, reverse=True)
for i in range(len(project_stats)):
assert ps_by_id[i].id == data[i]['id']
# Without full filter
url = "/api/projectstats"
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "It should not return the full stats."
extra = [row['info'].get(_type) for _type in extra_stat_types
for row in data if row['info'].get(_type)]
assert not extra
# With full filter
url = "/api/projectstats?full=1"
res = self.app.get(url)
data = json.loads(res.data)
err_msg = "It should return full stats."
for i, row in enumerate(data):
for _type in extra_stat_types:
assert row['info'][_type] == project_stats[i].info[_type]
| agpl-3.0 | Python |
|
55aae76ae3813045542b8f94736fdfb1e08592f2 | Add chrome driver path. | VinnieJohns/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core | src/lib/environment/__init__.py | src/lib/environment/__init__.py | import os
import logging
from lib import constants, file_ops
yaml = file_ops.load_yaml_contents(constants.path.YAML)
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
VIRTENV_PATH = PROJECT_ROOT_PATH + constants.path.VIRTUALENV_DIR
LOGGING_FORMAT = yaml[constants.yaml.LOGGING][constants.yaml.FORMAT]
CHROME_DRIVER_PATH = PROJECT_ROOT_PATH + constants.path.RESOURCES + constants.path.CHROME_DRIVER
# register loggers
selenium_logger = logging.getLogger(constants.log.Selenium.SELENIUM_REMOTE_CONNECTION)
# Only display possible problems
selenium_logger.setLevel(logging.WARNING)
| import os
import logging
from lib import constants, file_ops
yaml = file_ops.load_yaml_contents(constants.path.YAML)
PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"
VIRTENV_PATH = PROJECT_ROOT_PATH + constants.path.VIRTUALENV_DIR
LOGGING_FORMAT = yaml[constants.yaml.LOGGING][constants.yaml.FORMAT]
# register loggers
selenium_logger = logging.getLogger(constants.log.Selenium.SELENIUM_REMOTE_CONNECTION)
# Only display possible problems
selenium_logger.setLevel(logging.WARNING)
| apache-2.0 | Python |
56422abd9e5dbc1b17b009d84fd5e4b028719b94 | add basic IPC traffic analyzer | amccreight/mochitest-logs | ipc-viewer.py | ipc-viewer.py | #!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This file analyzes the output of running with MOZ_IPC_MESSAGE_LOG=1
import sys
import re
msgPatt = re.compile('^\[time:(\d+)\]\[(\d+)(->|<-)(\d+)\]\[([^\]]+)\] (Sending|Received)((?: reply)?) ([^\(]+)\(\[TODO\]\)$')
#[time:1441041587246153][9641->9647][PPluginScriptableObjectParent] Sending reply Reply_NPN_Evaluate([TODO])
matchCount = 0
notMatchCount = 0
msgCounts = {}
for l in sys.stdin:
mm = msgPatt.match(l)
if not mm:
notMatchCount += 1
continue
timeStamp = mm.group(1)
pid1 = mm.group(2)
arrow = mm.group(3)
pid2 = mm.group(4)
actor = mm.group(5)
sendRecv = mm.group(6)
sendRecvExtra = not not mm.group(7)
msg = mm.group(8)
p = (actor, msg)
msgCounts[p] = msgCounts.setdefault(p, 0) + 1
#print timeStamp, pid1, arrow, pid2, actor, sendRecv, sendRecvExtra, msg
matchCount += 1
# Resort the data a bit.
counts = []
for p, count in msgCounts.iteritems():
counts.append((count, p))
counts.sort()
counts.reverse()
for (count, (actor, msg)) in counts:
print count, actor, msg
| mpl-2.0 | Python |
|
561957a2492714e1b6d76b13daeced66a90aba1d | Create __init__.py | Radiergummi/libconfig | docs/_themes/sphinx_rtd_theme/__init__.py | docs/_themes/sphinx_rtd_theme/__init__.py | """Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
VERSION = (0, 1, 5)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
| mit | Python |
|
3018a418b24da540f259a59a578164388b0c2686 | add examples/call-gtk.py | detrout/telepathy-python,freedesktop-unofficial-mirror/telepathy__telepathy-python,detrout/telepathy-python,epage/telepathy-python,epage/telepathy-python,PabloCastellano/telepathy-python,max-posedon/telepathy-python,max-posedon/telepathy-python,freedesktop-unofficial-mirror/telepathy__telepathy-python,PabloCastellano/telepathy-python | examples/call-gtk.py | examples/call-gtk.py |
import sys
import pygtk
pygtk.require('2.0')
import dbus
import gobject
import gtk
from account import read_account, connect
from call import IncomingCall, OutgoingCall, get_stream_engine
from telepathy.interfaces import CONN_INTERFACE
class CallWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
hbox = gtk.HBox()
hbox.set_border_width(10)
vbox = gtk.VBox()
output_frame = gtk.Frame()
output_frame.set_shadow_type(gtk.SHADOW_IN)
preview_frame = gtk.Frame()
preview_frame.set_shadow_type(gtk.SHADOW_IN)
self.output = gtk.Socket()
self.output.set_size_request(400, 300)
self.preview = gtk.Socket()
self.preview.set_size_request(200, 150)
self.call_button = gtk.Button('Call')
self.call_button.connect('clicked', self._call_button_clicked)
output_frame.add(self.output)
preview_frame.add(self.preview)
vbox.pack_start(preview_frame, False)
vbox.pack_end(self.call_button, False)
hbox.add(output_frame)
hbox.pack_start(vbox, padding=10)
self.add(hbox)
def _call_button_clicked(self, button):
pass
class GtkLoopMixin:
def run(self):
try:
gtk.main()
except KeyboardInterrupt:
print "killed"
self.interrupt()
def quit(self):
gtk.main_quit()
class BaseGtkCall:
def __init__(self):
self.window = CallWindow()
self.window.connect('destroy', gtk.main_quit)
self.window.show_all()
def add_preview_window(self):
se = dbus.Interface(get_stream_engine(),
'org.freedesktop.Telepathy.StreamEngine')
se.AddPreviewWindow(self.window.preview.get_id())
return False
def add_output_window(self):
se = dbus.Interface(get_stream_engine(),
'org.freedesktop.Telepathy.StreamEngine')
chan_path = self.channel._dbus_object._object_path
se.SetOutputWindow(chan_path, 2, self.window.output.get_id())
return False
class GtkOutgoingCall(GtkLoopMixin, BaseGtkCall, OutgoingCall):
def __init__(self, conn, contact):
OutgoingCall.__init__(self, conn, contact)
BaseGtkCall.__init__(self)
def members_changed_cb(self, message, added, removed, local_pending,
remote_pending, actor, reason):
OutgoingCall.members_changed_cb(self, message, added, removed,
local_pending, remote_pending, actor, reason)
if self.handle in added:
gobject.timeout_add(5000, self.add_output_window)
gobject.timeout_add(5000, self.add_preview_window)
class GtkIncomingCall(GtkLoopMixin, BaseGtkCall, IncomingCall):
def __init__(self, conn):
IncomingCall.__init__(self, conn)
BaseGtkCall.__init__(self)
def members_changed_cb(self, message, added, removed, local_pending,
remote_pending, actor, reason):
IncomingCall.members_changed_cb(self, message, added, removed,
local_pending, remote_pending, actor, reason)
if self.conn[CONN_INTERFACE].GetSelfHandle() in added:
gobject.timeout_add(5000, self.add_output_window)
gobject.timeout_add(5000, self.add_preview_window)
if __name__ == '__main__':
assert len(sys.argv) in (2, 3)
account_file = sys.argv[1]
manager, protocol, account = read_account(account_file)
conn = connect(manager, protocol, account)
if len(sys.argv) > 2:
contact = sys.argv[2]
call = GtkOutgoingCall(conn, sys.argv[2])
else:
call = GtkIncomingCall(conn)
print "connecting"
conn[CONN_INTERFACE].Connect()
call.run()
try:
print "disconnecting"
conn[CONN_INTERFACE].Disconnect()
except dbus.DBusException:
pass
| lgpl-2.1 | Python |
|
c979fe37cc5f3dd83933893a1e7774c4aa7d061c | Add test script. | ieeg-portal/ieegpy | examples/get_data.py | examples/get_data.py | '''
Copyright 2019 Trustees of the University of Pennsylvania
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import argparse
import getpass
from ieeg.auth import Session
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', required=True, help='username')
parser.add_argument('-p', '--password',
help='password (will be prompted if missing)')
parser.add_argument('dataset', help='dataset name')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass()
with Session(args.user, args.password) as session:
dataset_name = args.dataset
dataset = session.open_dataset(dataset_name)
raw_data = dataset.get_data(0, 2000, [0, 1])
print(2000, raw_data.shape)
print(raw_data)
raw_data = dataset.get_data(0, 4000, [0, 1])
print(4000, raw_data.shape)
print(raw_data)
raw_data = dataset.get_data(0, 6000, [0, 1])
print(6000, raw_data.shape)
print(raw_data)
session.close_dataset(dataset_name)
if __name__ == "__main__":
main()
| apache-2.0 | Python |
|
11bd97a647507645f90e259dd8000eb6a8001890 | Add index to log_once table, make cleanup run with db cleanup event. refs #1167 | camon/Flexget,JorisDeRieck/Flexget,tarzasai/Flexget,Danfocus/Flexget,antivirtel/Flexget,drwyrm/Flexget,Pretagonist/Flexget,jacobmetrick/Flexget,ZefQ/Flexget,jawilson/Flexget,malkavi/Flexget,ratoaq2/Flexget,Danfocus/Flexget,antivirtel/Flexget,voriux/Flexget,Flexget/Flexget,tobinjt/Flexget,ianstalk/Flexget,tvcsantos/Flexget,gazpachoking/Flexget,v17al/Flexget,v17al/Flexget,Flexget/Flexget,tarzasai/Flexget,xfouloux/Flexget,sean797/Flexget,jawilson/Flexget,malkavi/Flexget,crawln45/Flexget,ibrahimkarahan/Flexget,OmgOhnoes/Flexget,tsnoam/Flexget,lildadou/Flexget,cvium/Flexget,tsnoam/Flexget,tobinjt/Flexget,poulpito/Flexget,ibrahimkarahan/Flexget,tobinjt/Flexget,jacobmetrick/Flexget,ZefQ/Flexget,vfrc2/Flexget,LynxyssCZ/Flexget,asm0dey/Flexget,cvium/Flexget,offbyone/Flexget,poulpito/Flexget,qk4l/Flexget,patsissons/Flexget,asm0dey/Flexget,crawln45/Flexget,qvazzler/Flexget,spencerjanssen/Flexget,patsissons/Flexget,JorisDeRieck/Flexget,X-dark/Flexget,grrr2/Flexget,lildadou/Flexget,oxc/Flexget,dsemi/Flexget,offbyone/Flexget,thalamus/Flexget,offbyone/Flexget,spencerjanssen/Flexget,Pretagonist/Flexget,OmgOhnoes/Flexget,jacobmetrick/Flexget,lildadou/Flexget,thalamus/Flexget,poulpito/Flexget,camon/Flexget,grrr2/Flexget,qvazzler/Flexget,vfrc2/Flexget,ratoaq2/Flexget,v17al/Flexget,spencerjanssen/Flexget,qk4l/Flexget,X-dark/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,sean797/Flexget,asm0dey/Flexget,thalamus/Flexget,ibrahimkarahan/Flexget,LynxyssCZ/Flexget,tobinjt/Flexget,Flexget/Flexget,drwyrm/Flexget,tvcsantos/Flexget,X-dark/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,malkavi/Flexget,ZefQ/Flexget,drwyrm/Flexget,jawilson/Flexget,cvium/Flexget,xfouloux/Flexget,crawln45/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,ianstalk/Flexget,tsnoam/Flexget,dsemi/Flexget,voriux/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,ratoaq2/Flexget,sean797/Flexget,ianstalk/Flexget,malkavi/Flexget,LynxyssCZ/Flexget,gazpachoking/Flexget,crawln45/Flexget,antivirtel/Flexget,dsemi/Flexget,xfouloux/Flexget,grrr2/Flexget,qvazzler/Flexget,qk4l/Flexget,Pretagonist/Flexget,oxc/Flexget,patsissons/Flexget,vfrc2/Flexget,tarzasai/Flexget,oxc/Flexget | flexget/utils/log.py | flexget/utils/log.py | """Logging utilities"""
import logging
import hashlib
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime, Index
from flexget import schema
from flexget.utils.sqlalchemy_utils import table_schema
from flexget.manager import Session
from flexget.event import event
log = logging.getLogger('util.log')
Base = schema.versioned_base('log_once', 0)
@schema.upgrade('log_once')
def upgrade(ver, session):
if ver is None:
log.info('Adding index to md5sum column of log_once table.')
table = table_schema('log_once', session)
Index('log_once_md5sum', table.c.md5sum, unique=True).create()
ver = 0
return ver
class LogMessage(Base):
"""Declarative"""
__tablename__ = 'log_once'
id = Column(Integer, primary_key=True)
md5sum = Column(String, unique=True)
added = Column(DateTime, default=datetime.now())
def __init__(self, md5sum):
self.md5sum = md5sum
def __repr__(self):
return "<LogMessage('%s')>" % (self.md5sum)
@event('manager.db_cleanup')
def purge(session):
"""Purge old messages from database"""
old = datetime.now() - timedelta(days=365)
result = session.query(LogMessage).filter(LogMessage.added < old).delete()
if result:
log.verbose('Purged %s entries from log_once table.' % result)
def log_once(message, logger=logging.getLogger('log_once')):
"""Log message only once using given logger. Returns False if suppressed logging."""
digest = hashlib.md5()
digest.update(message.encode('latin1', 'replace')) # ticket:250
md5sum = digest.hexdigest()
session = Session()
try:
# abort if this has already been logged
if session.query(LogMessage).filter_by(md5sum=md5sum).first():
session.close()
return False
row = LogMessage(md5sum)
session.add(row)
finally:
session.commit()
logger.info(message)
return True
| """Logging utilities"""
import logging
from flexget.manager import Session, Base
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, String, DateTime
log = logging.getLogger('util.log')
class LogMessage(Base):
"""Declarative"""
__tablename__ = 'log_once'
id = Column(Integer, primary_key=True)
md5sum = Column(String)
added = Column(DateTime, default=datetime.now())
def __init__(self, md5sum):
self.md5sum = md5sum
def __repr__(self):
return "<LogMessage('%s')>" % (self.md5sum)
def purge():
"""Purge old messages from database"""
old = datetime.now() - timedelta(days=365)
session = Session()
try:
for message in session.query(LogMessage).filter(LogMessage.added < old):
log.debug('purging: %s' % message)
session.delete(message)
finally:
session.commit()
def log_once(message, logger=logging.getLogger('log_once')):
"""Log message only once using given logger. Returns False if suppressed logging."""
purge()
import hashlib
digest = hashlib.md5()
digest.update(message.encode('latin1', 'replace')) # ticket:250
md5sum = digest.hexdigest()
session = Session()
try:
# abort if this has already been logged
if session.query(LogMessage).filter_by(md5sum=md5sum).first():
session.close()
return False
row = LogMessage(md5sum)
session.add(row)
finally:
session.commit()
logger.info(message)
return True
| mit | Python |
b3977289de72421530614ff4f28cdf7333d743e4 | Add region migration validation | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/logical/validators.py | dbaas/logical/validators.py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from logical.models import Database
from django.core.exceptions import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from system.models import Configuration
def validate_evironment(database_name, environment_name):
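    # Block reuse of a name that already exists in production unless the target environment is a dev one.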
try:
        database = Database.objects.get(name=database_name)
except ObjectDoesNotExist:
pass
else:
dev_envs = Configuration.get_by_name_as_list('dev_envs')
new_db_env_is_not_dev = environment_name not in dev_envs
prod_envs = Configuration.get_by_name_as_list('prod_envs')
db_env_is_prod = database.environment.name in prod_envs
if new_db_env_is_not_dev and db_env_is_prod:
raise ValidationError(
_('%(database_name)s already exists in production!'),
params={'database_name': database_name},
)
| bsd-3-clause | Python |
|
f0e092b060d9afb700f027197fdf44eeb2fdd91b | Create __init__.py | fcopantoja/turningio_challenge | __ini__.py | __ini__.py | mit | Python |
||
660fc806d11c6a8af321bb14caec21ca7cba4141 | add kafka streaming consumer | zhexiao/mnet,zhexiao/mnet,zhexiao/mnet,zhexiao/mnet | deploy/test/kf_consumer1.py | deploy/test/kf_consumer1.py | import json
from kafka import KafkaConsumer
consumer = KafkaConsumer('testres', bootstrap_servers='192.168.33.50:9092')
for msg in consumer:
val = msg.value.decode()
print(msg.key.decode())
print(json.loads(val).get('word'))
print(json.loads(val).get('count'))
print(json.loads(val).get('window'))
print('='*30)
| apache-2.0 | Python |
|
4c96e1eb17a5cbb4c1a33cef5c37aac00b4ec8e0 | Update test_api.py | bartTC/dpaste,bartTC/dpaste,rbarrois/xelpaste,SanketDG/dpaste,SanketDG/dpaste,rbarrois/xelpaste,bartTC/dpaste,SanketDG/dpaste,rbarrois/xelpaste | dpaste/tests/test_api.py | dpaste/tests/test_api.py | # -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test import TestCase
from ..models import Snippet
from ..forms import EXPIRE_DEFAULT
from ..highlight import LEXER_DEFAULT
class SnippetAPITestCase(TestCase):
def setUp(self):
self.api_url = reverse('dpaste_api_create_snippet')
self.client = Client()
def test_empty(self):
"""
The browser sent a content field but with no data.
"""
data = {}
# No data
response = self.client.post(self.api_url, {})
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
# No content
data['content'] = ''
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
# Just some spaces
data['content'] = ' '
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
# Linebreaks or tabs only are not valid either
data['content'] = '\n\t '
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
def test_valid(self):
"""
A valid snippet, contains Unicode, tabs, spaces, linebreaks etc.
"""
data = {'content': u"Hello Wörld.\n\tGood Bye"}
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Snippet.objects.count(), 1)
# The response is a URL with quotes
self.assertTrue(response.content.startswith('"'))
self.assertTrue(response.content.endswith('"'))
# The URL returned is the absolute url to the snippet.
# If we call that url our snippet should be in the page content.
snippet_url = response.content[1:-1]
response = self.client.get(snippet_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, data['content'])
| # -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test import TestCase
from ..models import Snippet
from ..forms import EXPIRE_DEFAULT
from ..highlight import LEXER_DEFAULT
class SnippetAPITestCase(TestCase):
def setUp(self):
self.api_url = reverse('dpaste_api_create_snippet')
self.client = Client()
def test_empty(self):
"""
The browser sent a content field but with no data.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ALL tests fail due to a Piston bug:
https://bitbucket.org/jespern/django-piston/issue/221/attributeerror-httpresponseservererror
"""
data = {}
# No data
response = self.client.post(self.api_url, {})
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
# No content
data['content'] = ''
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
# Just some spaces
data['content'] = ' '
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
# Linebreaks or tabs only are not valid either
data['content'] = '\n\t '
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 400)
self.assertEqual(Snippet.objects.count(), 0)
def test_valid(self):
"""
A valid snippet, contains Unicode, tabs, spaces, linebreaks etc.
"""
data = {'content': u"Hello Wörld.\n\tGood Bye"}
response = self.client.post(self.api_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Snippet.objects.count(), 1)
# The response is a URL with quotes
self.assertTrue(response.content.startswith('"'))
self.assertTrue(response.content.endswith('"'))
# The URL returned is the absolute url to the snippet.
# If we call that url our snippet should be in the page content.
snippet_url = response.content[1:-1]
response = self.client.get(snippet_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, data['content'])
| mit | Python |
49d28814c498d1698c61b8eeae3c3e3e019a09c3 | add recipe3 scrap | zifeo/Food-habits,zifeo/Food-habits,zifeo/Food-habits,zifeo/Food-habits | scrap/recipe3.py | scrap/recipe3.py | import scrapy
class Recipe3Spider(scrapy.Spider):
name = "recipe3"
download_delay = 0.5
start_urls = [
"http://www.cuisineaz.com/recettes/recherche_v2.aspx?recherche={}".format(r)
for r in [
'bases',
'aperitifs',
'entrees',
'plats',
'desserts',
'accompagnements',
'recettes-pas-cheres',
'viandes',
'poissons',
'legumes',
'fruits',
'fromages',
'repas',
'cher',
'farine',
'sucre',
'facile',
]
]
def parse(self, response):
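        # Queue the next results page first, then every recipe detail page found on this one.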
url = response.css('.pagination-next a::attr(href)').extract_first()
if url:
page = response.urljoin(url.strip())
yield scrapy.Request(page, callback=self.parse)
recipes = response.css('#titleRecette a::attr(href)').extract()
for recipe in recipes:
page = response.urljoin(recipe.strip())
yield scrapy.Request(page, callback=self.parse_recipe)
return
def parse_recipe(self, response):
yield {
'uri': response.url,
'recipe': response.css('.recipe_main h1::text').extract_first(),
'breadcrumb': [],
'quantity': response.css('#ctl00_ContentPlaceHolder_LblRecetteNombre::text').extract_first(),
'content': response.css('.recipe_ingredients ul').extract_first()
}
| apache-2.0 | Python |
|
f486343277a94e511ea1e152ca6b69f12fd657a0 | Create droidgpspush.py | bhaumikbhatt/PiDroidGPSTracker,bhaumikbhatt/PiDroidGPSTracker | droidgpspush.py | droidgpspush.py | import androidhelper
import socket
import time
droid = androidhelper.Android()
port=12345
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("10.201.19.201",port)) #connecting to pi as client
droid.makeToast("Starting location fetch") #notify me
while True:
location = droid.getLastKnownLocation().result
location = location.get('network', location.get('gps')) #fetch location
data = str(location)
print(data) #logging
s.send(data) #send to server
time.sleep(5) #wait for 5 seconds
| unlicense | Python |
|
fc0d54ff6d6b6ca91727c7aa0832f6c6dfc64967 | Add a prototype WinNT updater | grawity/rwho,grawity/rwho,grawity/rwho,grawity/rwho,grawity/rwho | rwho-update-winnt.py | rwho-update-winnt.py | #!/usr/bin/python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Half of this hasn't been implemented yet.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import ctypes as c
import socket as so
import win32api as api
#import win32con as con
import win32ts as ts
import win32security as sec
import json
from urllib import urlencode
import urllib2
from time import sleep
class WTS_INFO_CLASS():
InitialProgram = 0
ApplicationName = 1
WorkingDirectory = 2
OEMId = 3
SessionId = 4
UserName = 5
WinStationName = 6
DomainName = 7
ConnectState = 8
ClientBuildNumber = 9
ClientName = 10
ClientDirectory = 11
ClientProductId = 12
ClientHardwareId = 13
ClientAddress = 14
ClientDisplay = 15
ClientProtocolType = 16
IdleTime = 17
LogonTime = 18
IncomingBytes = 19
OutgoingBytes = 20
IncomingFrames = 21
OutgoingFrames = 22
ClientInfo = 23
SessionInfo = 24
SessionInfoEx = 25
ConfigInfo = 26
ValidationInfo = 27
SessionAddressV4 = 28
IsRemoteSession = 29
def _wtsapi_WTSQuerySessionInformation(hServer, sessionID, infoClass):
ppBuffer = c.c_int32()
pBytesReturned = c.c_int32()
if c.windll.wtsapi32.WTSQuerySessionInformationW(
c.c_int32(hServer), c.c_int32(sessionID), c.c_int32(infoClass),
c.byref(ppBuffer), c.byref(pBytesReturned)):
return (ppBuffer, pBytesReturned)
SERVER_URL = "http://equal.cluenet.org/~grawity/rwho/server.php"
def get_sessions():
protocols = {
ts.WTS_PROTOCOL_TYPE_CONSOLE: "console",
ts.WTS_PROTOCOL_TYPE_ICA: "citrix",
ts.WTS_PROTOCOL_TYPE_RDP: "rdp",
}
hServer = ts.WTS_CURRENT_SERVER_HANDLE
#hServer = ts.WTSOpenServer("digit.cluenet.org")
curSessId = ts.WTSGetActiveConsoleSessionId()
for sess in ts.WTSEnumerateSessions(hServer):
utent = {}
id = sess["SessionId"]
for key, const in {
"User": ts.WTSUserName,
"Address": ts.WTSClientAddress,
"Client": ts.WTSClientName,
"Protocol": ts.WTSClientProtocolType,
#"XClient": 23, #ts.WTSClientInfo,
#"XSession": 24, #ts.WTSSessionInfo,
}.items():
sess[key] = ts.WTSQuerySessionInformation(hServer, id, const)
if not sess["User"]:
# skip non-login sessions
continue
if sess["State"] != 0:
continue
userSid, userDomain, acctType = sec.LookupAccountName(None, sess["User"])
userSidAuths = [userSid.GetSubAuthority(i) for i in range(userSid.GetSubAuthorityCount())]
utent["user"] = sess["User"]
utent["uid"] = userSidAuths[-1]
utent["host"] = ""
utent["line"] = "%s/%s" % (sess["WinStationName"].lower(), id)
utent["time"] = 0
#utent["proto"] = protocols.get(sess["Protocol"], "unknown")
print "="*79
for k, v in sess.items():
print "%-10s: %s" % (k, repr(v))
print
for k, v in utent.items():
print "%-10s: %s" % (k, repr(v))
yield utent
def upload(utmp):
data = {
"host": so.gethostname().lower(),
"fqdn": so.getfqdn().lower(),
"action": "put",
"utmp": json.dumps(utmp),
}
resp = urllib2.urlopen(SERVER_URL, urlencode(data))
print resp.read()
utmp = list(get_sessions())
upload(utmp)
| mit | Python |
|
6d25c1958a84eb1a6004ebadec6769511974cca4 | add basic rsa by request | team41434142/cctf-16,team41434142/cctf-16,team41434142/cctf-16 | basic-rsa/rsa.py | basic-rsa/rsa.py | def main():
e = int('3', 16)
n = int('64ac4671cb4401e906cd273a2ecbc679f55b879f0ecb25eefcb377ac724ee3b1', 16)
d = int('431d844bdcd801460488c4d17487d9a5ccc95698301d6ab2e218e4b575d52ea3', 16)
c = int('599f55a1b0520a19233c169b8c339f10695f9e61c92bd8fd3c17c8bba0d5677e', 16)
m = pow(c, d, n)
    print(hex(m))
if __name__ == '__main__':
    main()
| agpl-3.0 | Python |
|
6be3e0c5264ca2750a77ac1dbd4175502e51fd3c | Add argparse tests for ceph-deploy admin | Vicente-Cheng/ceph-deploy,ceph/ceph-deploy,codenrhoden/ceph-deploy,shenhequnying/ceph-deploy,branto1/ceph-deploy,ghxandsky/ceph-deploy,imzhulei/ceph-deploy,isyippee/ceph-deploy,trhoden/ceph-deploy,zhouyuan/ceph-deploy,Vicente-Cheng/ceph-deploy,zhouyuan/ceph-deploy,ceph/ceph-deploy,osynge/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,isyippee/ceph-deploy,branto1/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,osynge/ceph-deploy,imzhulei/ceph-deploy,SUSE/ceph-deploy,shenhequnying/ceph-deploy,trhoden/ceph-deploy,ghxandsky/ceph-deploy,codenrhoden/ceph-deploy,SUSE/ceph-deploy | ceph_deploy/tests/parser/test_admin.py | ceph_deploy/tests/parser/test_admin.py | import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
def setup(self):
self.parser = get_parser()
def test_admin_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy admin' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_admin_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_admin_one_host(self):
args = self.parser.parse_args('admin host1'.split())
assert args.client == ['host1']
def test_admin_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['admin'] + hostnames)
assert args.client == hostnames
| mit | Python |
|
2ee5f1e3563e5a7104515adf74e41a8781fbcd9e | Create exercise5.py | pwittchen/learn-python-the-hard-way,pwittchen/learn-python-the-hard-way,pwittchen/learn-python-the-hard-way | exercise5.py | exercise5.py | # -- coding: utf-8 --
my_name = 'Zed A. Shaw'
my_age = 35 # not a lie
my_height = 74 # inches
my_weight = 180 # lbs
my_eyes = 'Blue'
my_teeth = 'White'
my_hair = 'Brown'
print "Let's talk about %s." % my_name
print "He's %d inches tall." % my_height
print "He's %d pounds heavy." % my_weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (my_eyes, my_hair)
print "His teeth are usually %s depending on the coffee." % my_teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." % (
my_age, my_height, my_weight, my_age + my_height + my_weight)
| mit | Python |
|
2523d34d4f3e26a408c7ec0e43708efea77f03a9 | Add to support the chinese library | Kuniz/alfnaversearch,Kuniz/alfnaversearch | workflow/cndic_naver_search.py | workflow/cndic_naver_search.py | # Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://ac.cndic.naver.com/ac2'
params = dict(q=word,
_callback='',
q_enc='utf-8',
st=11,
r_lt='00',
t_koreng=1,
r_format='json',
r_enc='utf-8',
r_unicode=0,
r_escape=1)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Cndic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
| mit | Python |
|
ac0e7cb6ff2885457ccbe9f7311489edf7c9406b | create train object utils | hycis/Mozi | mozi/utils/train_object_utils.py | mozi/utils/train_object_utils.py | from __future__ import absolute_import
from __future__ import print_function
import matplotlib
# matplotlib.use('Agg')
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
from theano.compile.ops import as_op
from mozi.utils.progbar import Progbar
import tarfile, inspect, os
from six.moves.urllib.request import urlretrieve
floatX = theano.config.floatX
def split_list(tuple_list):
"""
DESCRIPTION:
split a list of tuples into two lists whereby one list contains the first elements
of the tuples and the other list contains the second elements.
PARAM:
tuple_list: a list of tuples, example tuple_list = [('a', 1), ('b', 2)]
RETURN:
two lists, example from above tuple_list will be split into ['a', 'b'] and [1, 2]
"""
ls_A = []
ls_B = []
for tuple in tuple_list:
ls_A.append(tuple[0])
ls_B.append(tuple[1])
return ls_A, ls_B
def generate_shared_list(ls):
"""
DESCRIPTION:
generate a list of shared variables that matched the length of ls
PARAM:
ls: the list used for generating the shared variables
RETURN:
a list of shared variables initialized to 0 of len(ls)
"""
rlist = []
for i in xrange(len(ls)):
rlist.append(theano.shared(np.array(0., dtype=theano.config.floatX)))
return rlist
def merge_lists(ls_A, ls_B):
"""
DESCRIPTION:
merge two lists of equal length into into a list of tuples
PARAM:
ls_A: first list
ls_B: second list
RETURN:
a list of tuples
"""
assert len(ls_A) == len(ls_B), 'two lists of different length'
rlist = []
for a, b in zip(ls_A, ls_B):
rlist.append((a,b))
return rlist
def get_shared_values(shared_ls):
"""
DESCRIPTION:
get a list of values from a list of shared variables
PARAM:
shared_ls: list of shared variables
RETURN:
numpy array of the list of values
"""
val_ls = []
for var in shared_ls:
val_ls.append(var.get_value())
return np.asarray(val_ls, dtype=theano.config.floatX)
def is_shared_var(var):
return var.__class__.__name__ == 'TensorSharedVariable' or \
var.__class__.__name__ == 'CudaNdarraySharedVariable'
def merge_var(*vars):
def absortvar(v):
rvar = []
if isinstance(v, (list, tuple)):
rvar += v
else:
rvar.append(v)
return rvar
rvars = []
for var in vars:
rvars += absortvar(var)
return rvars
| mit | Python |
|
1768a69163c50e5e964eaf110323e590f13b4ff0 | add 0000 file | Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python | Drake-Z/0000/0000.py | Drake-Z/0000/0000.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'第 0000 题:将你的 QQ 头像(或者微博头像)右上角加上红色的数字,类似于微信未读信息数量那种提示效果。 类似于图中效果'
__author__ = 'Drake-Z'
from PIL import Image, ImageDraw, ImageFont
def add_num(filname, text = '4', fillcolor = (255, 0, 0)):
img = Image.open(filname)
width, height = img.size
myfont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', size=width//8)
fillcolor = (255, 0, 0)
draw = ImageDraw.Draw(img)
draw.text((width-width//8, 0), text, font=myfont, fill=fillcolor)
img.save('1.jpg','jpeg')
return 0
if __name__ == '__main__':
filname = '0.jpg'
text = '4'
fillcolor = (255, 0, 0)
add_num(filname, text, fillcolor) | mit | Python |
|
ebc2b419a3cc7cace9c79d1c5032a2ae33b8bff1 | Remove unused imports | dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq | custom/up_nrhm/reports/asha_reports.py | custom/up_nrhm/reports/asha_reports.py | from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.up_nrhm.filters import DrillDownOptionFilter, SampleFormatFilter
from custom.up_nrhm.reports.asha_facilitators_report import ASHAFacilitatorsReport
from custom.up_nrhm.reports.block_level_month_report import BlockLevelMonthReport
def total_rows(report):
if not report.report_config.get('sf'):
return {
"total_under_facilitator": getattr(report, 'total_under_facilitator', 0),
"total_with_checklist": getattr(report, 'total_with_checklist', 0)
}
return {}
class ASHAReports(GenericTabularReport, DatespanMixin, CustomProjectReport):
fields = [SampleFormatFilter, DatespanFilter, DrillDownOptionFilter, MonthFilter, YearFilter]
name = "ASHA Reports"
slug = "asha_reports"
show_all_rows = True
default_rows = 20
printable = True
report_template_path = "up_nrhm/asha_report.html"
extra_context_providers = [total_rows]
no_value = '--'
@property
def report_config(self):
config = {
'sf': self.request.GET.get('sf'),
}
return config
@property
def report_context(self):
context = super(ASHAReports, self).report_context
context['sf'] = self.request.GET.get('sf')
return context
@property
def model(self):
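        # Pick the drill-down report implementation based on the selected sample format (sf).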
config = self.report_config
if config.get('sf') == 'sf5':
return []
elif config.get('sf') == 'sf4':
return []
elif config.get('sf') == 'sf3':
return BlockLevelMonthReport(self.request, domain=self.domain)
else:
return ASHAFacilitatorsReport(self.request, domain=self.domain)
@property
def headers(self):
return self.model.headers
@property
def rows(self):
config = self.report_config
if not config.get('sf'):
rows, self.total_under_facilitator, total_with_checklist = self.model.rows
else:
rows = self.model.rows
return rows
| import datetime
from dateutil.relativedelta import relativedelta
from corehq.apps.reports.filters.select import MonthFilter, YearFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.filters.dates import DatespanFilter
from custom.up_nrhm.filters import DrillDownOptionFilter, SampleFormatFilter
from custom.up_nrhm.reports.asha_facilitators_report import ASHAFacilitatorsReport
from custom.up_nrhm.reports.block_level_af import BlockLevelAFReport
from custom.up_nrhm.reports.block_level_month_report import BlockLevelMonthReport
def total_rows(report):
if not report.report_config.get('sf'):
return {
"total_under_facilitator": getattr(report, 'total_under_facilitator', 0),
"total_with_checklist": getattr(report, 'total_with_checklist', 0)
}
return {}
class ASHAReports(GenericTabularReport, DatespanMixin, CustomProjectReport):
fields = [SampleFormatFilter, DatespanFilter, DrillDownOptionFilter, MonthFilter, YearFilter]
name = "ASHA Reports"
slug = "asha_reports"
show_all_rows = True
default_rows = 20
printable = True
report_template_path = "up_nrhm/asha_report.html"
extra_context_providers = [total_rows]
no_value = '--'
@property
def report_config(self):
config = {
'sf': self.request.GET.get('sf'),
}
return config
@property
def report_context(self):
context = super(ASHAReports, self).report_context
context['sf'] = self.request.GET.get('sf')
return context
@property
def model(self):
config = self.report_config
if config.get('sf') == 'sf5':
return []
elif config.get('sf') == 'sf4':
return []
elif config.get('sf') == 'sf3':
return BlockLevelMonthReport(self.request, domain=self.domain)
else:
return ASHAFacilitatorsReport(self.request, domain=self.domain)
@property
def headers(self):
return self.model.headers
@property
def rows(self):
config = self.report_config
if not config.get('sf'):
rows, self.total_under_facilitator, total_with_checklist = self.model.rows
else:
rows = self.model.rows
return rows
| bsd-3-clause | Python |
3d8f02eb7c1b9b363143f25af9eadeb94c43b4ae | increase uwnetid maxlength | uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw,uw-it-aca/myuw | myuw/migrations/0017_netidlen.py | myuw/migrations/0017_netidlen.py | # Generated by Django 2.0.13 on 2020-03-12 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myuw', '0016_myuw_notice_group'),
]
operations = [
migrations.AlterField(
model_name='user',
name='uwnetid',
field=models.SlugField(max_length=32, unique=True),
),
]
| apache-2.0 | Python |
|
1e7b84155623691fb9fc1cec4efa6386938f3e72 | Add missing migration (updating validators=) | CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend | core/migrations/0055_update_username_validators.py | core/migrations/0055_update_username_validators.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| apache-2.0 | Python |
|
48217e5317412a9b5fb8181b6915963783efeaf2 | Add test for kline result of exact amount | sammchardy/python-binance | tests/test_historical_klines.py | tests/test_historical_klines.py | #!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
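    # Mock a full first page of 500 klines followed by an empty second page.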
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
| mit | Python |
|
1f3a15b8ae6ffcb96faaf0acab940d9590fe6cb1 | Add migration | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | fat/migrations/0064_auto_20160809_1559.py | fat/migrations/0064_auto_20160809_1559.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-09 15:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fat', '0063_auto_20160809_1545'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='status',
field=models.CharField(choices=[('W', 'Not submitted yet'), ('S', 'Submitted (awaiting processing)'), ('C', 'Administrator checking'), ('P', 'Authoriser checking'), ('A', 'Approved (submitted to finance)'), ('F', 'Finished')], default='P', max_length=1),
),
]
| bsd-3-clause | Python |
|
5ec3f8dbe9f044d08a80563c05b648590fabdda7 | add fibonnaci example | llllllllll/toolz,JNRowe/toolz,berrytj/toolz,simudream/toolz,Julian-O/toolz,karansag/toolz,cpcloud/toolz,obmarg/toolz,machinelearningdeveloper/toolz,jdmcbr/toolz,quantopian/toolz,simudream/toolz,pombredanne/toolz,pombredanne/toolz,karansag/toolz,machinelearningdeveloper/toolz,llllllllll/toolz,bartvm/toolz,Julian-O/toolz,JNRowe/toolz,berrytj/toolz,whilo/toolz,whilo/toolz,cpcloud/toolz,jcrist/toolz,obmarg/toolz,quantopian/toolz,jcrist/toolz,bartvm/toolz,jdmcbr/toolz | examples/fib.py | examples/fib.py | # / 0 if i is 0
# fib(i) = | 1 if i is 1
# \ fib(i - 1) + fib(i - 2) otherwise
def fib(n):
""" Imperative definition of Fibonacci numbers """
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return b
# This is intuitive but VERY slow
def fib(n):
""" Functional definition of Fibonacci numbers """
if n == 0 or n == 1:
return n
else:
return fib(n - 1) + fib(n - 2)
from toolz import memoize
# Oh wait, it's fast again
fib = memoize(fib)
| bsd-3-clause | Python |
|
c663f6b6e31832fae682c2c527955b13682b701e | Remove learner_testimonials column from course_metadata course run table | edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery | course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py | course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
| agpl-3.0 | Python |
|
8b1bd5995ff4c95335e25e19962724e6d8c399d7 | Create 0003_auto_20150930_1132.py | illing2005/django-cities,illing2005/django-cities | cities/migrations/0003_auto_20150930_1132.py | cities/migrations/0003_auto_20150930_1132.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cities', '0002_auto_20150811_1912'),
]
operations = [
migrations.AddField(
model_name='city',
name='name_de',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
migrations.AddField(
model_name='city',
name='name_en',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
migrations.AddField(
model_name='country',
name='name_de',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
migrations.AddField(
model_name='country',
name='name_en',
field=models.CharField(db_index=True, verbose_name='ascii name', null=True, max_length=200),
),
]
| mit | Python |
|
b75e10f3235e9215458071279b67910627a95180 | Add celery based job runner | ihmeuw/vivarium | ceam/framework/celery_tasks.py | ceam/framework/celery_tasks.py | import os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
| bsd-3-clause | Python |
|
164f43f902b89b84b4f0d474f4d3e0a18924110d | Add test of randomized select algorithm | timpel/stanford-algs,timpel/stanford-algs | selection_test.py | selection_test.py | import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
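    # Illustrative usage (added; not part of the original commit):
    #   python selection_test.py 12
    # checks the randomized-select median against a full quicksort on arrays of
    # length 2, 4, 8, ..., 2**12.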
arr_len = int(sys.argv[1])
main(arr_len) | mit | Python |
|
80651fc7dba6a390091dc0f0908ec165cf33c0bb | make diagnostic plots for a star | adrn/TwoFace,adrn/TwoFace | scripts/plot_star.py | scripts/plot_star.py | """ Make diagnostic plots for a specified APOGEE ID """
# Standard library
from os import path
# Third-party
import h5py
import matplotlib.pyplot as plt
from sqlalchemy.orm.exc import NoResultFound
# Project
from twoface.log import log as logger
from twoface.db import db_connect
from twoface.db import (JokerRun, AllStar, AllVisit, StarResult, Status,
AllVisitToAllStar, RedClump, CaoVelocity)
from twoface.config import TWOFACE_CACHE_PATH
from twoface.io import load_samples
from twoface.plot import plot_data_orbits
def main(database_file, apogee_id, joker_run, cao):
db_path = path.join(TWOFACE_CACHE_PATH, database_file)
if not path.exists(db_path):
raise IOError("sqlite database not found at '{0}'\n Did you run "
"scripts/initdb.py yet for that database?"
.format(db_path))
logger.debug("Connecting to sqlite database at '{0}'".format(db_path))
Session, engine = db_connect(database_path=db_path,
ensure_db_exists=False)
session = Session()
# Get The Joker run information
run = session.query(JokerRun).filter(JokerRun.name == joker_run).one()
try:
star = session.query(AllStar).join(StarResult, JokerRun)\
.filter(AllStar.apogee_id == apogee_id)\
.filter(JokerRun.name == joker_run)\
.one()
except NoResultFound:
raise NoResultFound("Star {0} has no results in Joker run {1}."
.format(apogee_id, joker_run))
# get the RV data for this star
data = star.apogeervdata(cao=cao)
# load posterior samples from The Joker
samples_dict = load_samples(path.join(TWOFACE_CACHE_PATH,
'{0}.hdf5'.format(run.name)),
apogee_id)
# Plot the data with orbits on top
fig = plot_data_orbits(data, samples_dict, jitter=run.jitter,
xlim_choice='wide', title=star.apogee_id)
fig.set_tight_layout(True)
fig = plot_data_orbits(data, samples_dict, jitter=run.jitter,
xlim_choice='tight', title=star.apogee_id)
fig.set_tight_layout(True)
# TODO:
session.close()
plt.show()
if __name__ == "__main__":
from argparse import ArgumentParser
import logging
# Define parser object
parser = ArgumentParser(description="")
vq_group = parser.add_mutually_exclusive_group()
vq_group.add_argument('-v', '--verbose', action='count', default=0,
dest='verbosity')
vq_group.add_argument('-q', '--quiet', action='count', default=0,
dest='quietness')
# Required:
parser.add_argument("-a", "--apogeeid", dest="apogee_id",
required=True, type=str,
help="The APOGEE ID to visualize.")
parser.add_argument("-j", "--jokerrun", dest="joker_run",
required=True, type=str,
help="The Joker run name to load results from.")
# Optional:
parser.add_argument("-d", "--dbfile", dest="database_file",
default="apogee.sqlite", type=str,
help="Path to the database file.")
parser.add_argument("--cao", dest="cao_velocities", default=False,
action="store_true",
help="Plot the Cao velocities instead of APOGEE "
"radial velocities.")
args = parser.parse_args()
# Set logger level based on verbose flags
if args.verbosity != 0:
if args.verbosity == 1:
logger.setLevel(logging.DEBUG)
else: # anything >= 2
logger.setLevel(1)
elif args.quietness != 0:
if args.quietness == 1:
logger.setLevel(logging.WARNING)
else: # anything >= 2
logger.setLevel(logging.ERROR)
else: # default
logger.setLevel(logging.INFO)
main(apogee_id=args.apogee_id, database_file=args.database_file,
joker_run=args.joker_run, cao=args.cao_velocities)
| mit | Python |
|
b528956e9394dc56951c2fb0894fefd7ee6872ff | Create cnn_evaluation.py | jdvala/ProjectTF | Convolutional_Neural_Network/cnn_evaluation.py | Convolutional_Neural_Network/cnn_evaluation.py | """ Using a Convolutional Neural Network on MNIST handwritten digits, and evaluating its performance with different scores
References:
Tflearn.org/examples
Tensorflow.org
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
Method and Examples Used:
    [1] A simple example from Tflearn, which is a higher-level API for TensorFlow, provided an autoencoder example which reconstructed the
    images, but the motive here was to evaluate the model with different scores so it could be fine-tuned in future for various specific tasks.
    Also, for reconstructing the images that program used a decoder, which we don't need for our evaluation.
    [2] Secondly, the last layer for classification should be a softmax layer, and I changed it here accordingly.
    [3] I am not using the confusion matrix from tensorflow; rather, I used the sklearn library for that purpose.
    [4] All the steps involved in this program are commented for better understanding.
"""
from __future__ import division, print_function, absolute_import
import numpy
import tflearn
import tensorflow as tf
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from random import randint
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
Images, Lables, testImages, testLables = mnist.load_data(one_hot=True)
Images = Images.reshape([-1, 28, 28, 1])
testImages = testImages.reshape([-1, 28, 28, 1])
f = randint(0,20)
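# f is a random test-set index, used further below to print the predicted
# probabilities for one individual test image.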
# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
loss='categorical_crossentropy', name='target')
# Training
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': Images}, {'target':Lables}, n_epoch=1,
validation_set=({'input': testImages}, {'target': testLables}),
snapshot_step=100, show_metric=True, run_id='Convolution_Neural_Network')
# Here I evaluate the model with the test images and test labels, calculating the mean accuracy of the model.
evaluation= model.evaluate(testImages,testLables)
print("\n")
print("\t"+"Mean accuracy of the model is :", evaluation)
# Predict the labels of the images that we give to the model, just to get a clearer picture of the neural network
lables = model.predict_label(testImages)
print("\n")
print("\t"+"The predicted labels are :",lables)
# Predicted probabilities
y = model.predict(testImages)
print("\n")
print("\t"+"\t"+"\t"+"The predicted probabilities are :" )
print("\n")
print (y[f])
# Running a session to calculate the confusion matrix
sess = tf.Session()
# taking the argmax of the predicted probabilities (i.e. the most likely class) for generating the confusion matrix
prediction = tf.argmax(y,1)
# displaying length of predictions and evaluating them in a session
with sess.as_default():
print (len(prediction.eval()))
predicted_labels = prediction.eval()
# Again importing the mnist data with one_hot=False because we need the integer class labels for the confusion matrix and classification report
Images, Lables, testImages, targetLables = mnist.load_data(one_hot=False)
# Used the sklearn library for evaluation, as tensorflow's equivalent was not documented properly
# Generated the Confusion Matrix
confusionMatrix = confusion_matrix(targetLables, predicted_labels)
print("\n"+"\t"+"The confusion Matrix is ")
print ("\n",confusionMatrix)
# classification_report in sklearn provides all the necessary scores needed to successfully evaluate the model.
classification = classification_report(targetLables,predicted_labels, digits=4,
target_names =['class 0','class 1','class 2','class 3','class 4','class 5','class 6','class 7','class 8','class 9'])
print("\n"+"\t"+"The classification report is ")
print ("\n",classification)
| mit | Python |
|
9168807db69372ffb93430991fc4e666fa53a8f5 | Add missing example file | sklam/numba,stonebig/numba,pitrou/numba,sklam/numba,cpcloud/numba,IntelLabs/numba,pitrou/numba,pombredanne/numba,pitrou/numba,numba/numba,gmarkall/numba,pombredanne/numba,stonebig/numba,numba/numba,ssarangi/numba,jriehl/numba,jriehl/numba,GaZ3ll3/numba,gdementen/numba,sklam/numba,stefanseefeld/numba,gdementen/numba,numba/numba,ssarangi/numba,GaZ3ll3/numba,gdementen/numba,ssarangi/numba,IntelLabs/numba,cpcloud/numba,jriehl/numba,gmarkall/numba,seibert/numba,cpcloud/numba,cpcloud/numba,gmarkall/numba,stonebig/numba,GaZ3ll3/numba,seibert/numba,stuartarchibald/numba,seibert/numba,stefanseefeld/numba,seibert/numba,pitrou/numba,ssarangi/numba,numba/numba,stefanseefeld/numba,sklam/numba,ssarangi/numba,IntelLabs/numba,GaZ3ll3/numba,stuartarchibald/numba,numba/numba,stefanseefeld/numba,pombredanne/numba,IntelLabs/numba,gmarkall/numba,stonebig/numba,pombredanne/numba,gdementen/numba,gdementen/numba,jriehl/numba,stuartarchibald/numba,pitrou/numba,stefanseefeld/numba,pombredanne/numba,cpcloud/numba,stuartarchibald/numba,GaZ3ll3/numba,IntelLabs/numba,stonebig/numba,stuartarchibald/numba,sklam/numba,seibert/numba,gmarkall/numba,jriehl/numba | examples/movemean.py | examples/movemean.py | """
A moving average function using @guvectorize.
"""
import numpy as np
from numba import guvectorize
@guvectorize(['void(float64[:], intp[:], float64[:])'], '(n),()->(n)')
def move_mean(a, window_arr, out):
window_width = window_arr[0]
asum = 0.0
count = 0
for i in range(window_width):
asum += a[i]
count += 1
out[i] = asum / count
for i in range(window_width, len(a)):
asum += a[i] - a[i - window_width]
out[i] = asum / count
arr = np.arange(20, dtype=np.float64).reshape(2, 10)
print(arr)
print(move_mean(arr, 3))
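# Note (added for clarity): the '(n),()->(n)' layout applies the kernel to each row
# of the (2, 10) input independently, so the first row's 3-point moving mean starts
# 0.0, 0.5, 1.0, 2.0, 3.0, ... while the second row starts 10.0, 10.5, 11.0, 12.0, ...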
| bsd-2-clause | Python |
|
82d34111295fdfa35d0e9815053498e935d415af | Add example script to store & read datetime | h5py/h5py,h5py/h5py,h5py/h5py | examples/store_datetimes.py | examples/store_datetimes.py | import h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
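    # Note (added for clarity): h5py writes these values as an HDF5 OPAQUE type, so
    # they round-trip through h5py/NumPy as datetime64 but appear as raw bytes to
    # other HDF5 tools that do not know this convention.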
| bsd-3-clause | Python |