commit: stringlengths (40 – 40)
subject: stringlengths (4 – 1.73k)
repos: stringlengths (5 – 127k)
old_file: stringlengths (2 – 751)
new_file: stringlengths (2 – 751)
new_contents: stringlengths (1 – 8.98k)
old_contents: stringlengths (0 – 6.59k)
license: stringclasses (13 values)
lang: stringclasses (23 values)
ba50883881d3e652c1175489e16c3c5839807feb
add new: GWinstek GDS-840S, RS-232 connection.
imrehg/labhardware,imrehg/labhardware
serial/serialGDS840S.py
serial/serialGDS840S.py
#!/usr/bin/env python
# RS-232 serial support for GW Instek GDS-840S Digital Storage Oscilloscope
# http://www.gwinstek.com/html/en/DownloadFile.asp?sn=255&uid=&lv=
# Filename: 82DS-82000IA.pdf
import serial

# Values set on unit manually (but these are standard settings)
ser = serial.Serial('/dev/ttyUSB0', baudrate=38400, bytesize=8, stopbits=1, \
                    parity=serial.PARITY_NONE, timeout=3)
ser.open()

def sendCmd(handler, command):
    handler.write("%s\n" % (command))

def recvCmd(handler):
    return handler.readline().strip()

sendCmd(ser, "*IDN?")
id = ser.readline()
print id

#~ sendCmd(ser, ":AUToset")

sendCmd(ser, ":MEASure:FREQuency?")
freq = recvCmd(ser)
print freq

ser.close()
mit
Python
096087b4fddf9bc2644bcbb71834fcfc5985558c
add flash order
joequant/bitcoin-price-api
scripts/flash-order.py
scripts/flash-order.py
#!/usr/bin/python3

from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub

pnconfig = PNConfiguration()
pnconfig.subscribe_key = "my_subkey"
pnconfig.publish_key = "my_pubkey"
pnconfig.ssl = False
pubnub = PubNub(pnconfig)

from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNOperationType, PNStatusCategory

class MySubscribeCallback(SubscribeCallback):
    def status(self, pubnub, status):
        pass
        # The status object returned is always related to subscribe but could contain
        # information about subscribe, heartbeat, or errors
        # use the operationType to switch on different options
        if status.operation == PNOperationType.PNSubscribeOperation \
                or status.operation == PNOperationType.PNUnsubscribeOperation:
            if status.category == PNStatusCategory.PNConnectedCategory:
                pass
                # This is expected for a subscribe, this means there is no error or issue whatsoever
            elif status.category == PNStatusCategory.PNReconnectedCategory:
                pass
                # This usually occurs if subscribe temporarily fails but reconnects. This means
                # there was an error but there is no longer any issue
            elif status.category == PNStatusCategory.PNDisconnectedCategory:
                pass
                # This is the expected category for an unsubscribe. This means there
                # was no error in unsubscribing from everything
            elif status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:
                pass
                # This is usually an issue with the internet connection, this is an error, handle
                # appropriately retry will be called automatically
            elif status.category == PNStatusCategory.PNAccessDeniedCategory:
                pass
                # This means that PAM does allow this client to subscribe to this
                # channel and channel group configuration. This is another explicit error
            else:
                pass
                # This is usually an issue with the internet connection, this is an error, handle appropriately
                # retry will be called automatically
        elif status.operation == PNOperationType.PNSubscribeOperation:
            # Heartbeat operations can in fact have errors, so it is important to check first for an error.
            # For more information on how to configure heartbeat notifications through the status
            # PNObjectEventListener callback, consult <link to the PNCONFIGURATION heartbeart config>
            if status.is_error():
                pass
                # There was an error with the heartbeat operation, handle here
            else:
                pass
                # Heartbeat operation was successful
        else:
            pass
            # Encountered unknown status type

    def presence(self, pubnub, presence):
        pass  # handle incoming presence data

    def message(self, pubnub, message):
        pass  # handle incoming messages

pubnub.add_listener(MySubscribeCallback())
mit
Python
7dee9be2022bdf481bc5bc6766684058fd9d44e5
add script for generating the manifest for a given package
sassoftware/mirrorball,sassoftware/mirrorball
scripts/genmanifest.py
scripts/genmanifest.py
#!/usr/bin/python
#
# Copyright (c) 2008 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#

import os
import sys

sys.path.insert(0, os.environ['HOME'] + '/hg/rpath-xmllib')
sys.path.insert(0, os.environ['HOME'] + '/hg/conary')
sys.path.insert(0, os.environ['HOME'] + '/hg/mirrorball')

from conary.lib import util
sys.excepthook = util.genExcepthook()

from updatebot import bot, config, log

log.addRootLogger()
cfg = config.UpdateBotConfig()
cfg.read(os.environ['HOME'] + '/hg/mirrorball/config/centos/updatebotrc')
obj = bot.Bot(cfg)
obj._populatePkgSource()

pkgName = sys.argv[1]

srcPkg = obj._updater._getPackagesToImport(pkgName)
manifest = obj._updater._getManifestFromPkgSource(srcPkg)

print '\n'.join(manifest)
apache-2.0
Python
0ca7d4a20c8a65e45ddb7c61ca72c0e6c464a80e
Create template_redacted entry for templates created by migration
alphagov/notifications-api,alphagov/notifications-api
migrations/versions/0296_template_redacted_fix.py
migrations/versions/0296_template_redacted_fix.py
""" Revision ID: 0296_template_redacted_fix Revises: 0295_api_key_constraint Create Date: 2019-06-07 17:02:14.350064 """ from alembic import op revision = '0296_template_redacted_fix' down_revision = '0295_api_key_constraint' def upgrade(): op.execute(""" INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id) SELECT templates.id, FALSE, now(), templates.created_by_id FROM templates WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id) ; """) def downgrade(): pass
mit
Python
a38f18b8c51ad83b5c4b92853fa5640137131ad9
prints a sequence of GTINs, computing the check digit
anselmobd/fo2,anselmobd/fo2,anselmobd/fo2,anselmobd/fo2
script/gera_gtin.py
script/gera_gtin.py
from gtin import GTIN

country = 789
company = 96188
product = 7251
quant = 127

for incr in range(quant):
    numero_gtin = '{}{}{}'.format(country, company, product+incr)
    print(str(GTIN(raw=numero_gtin)))
mit
Python
2611476df6f362cd59e4aad38a243fc8f6cbf8a8
Purge the talk page when a speaker is saved
devincachu/devincachu-2013,devincachu/devincachu-2013,devincachu/devincachu-2014,devincachu/devincachu-2014,devincachu/devincachu-2014,devincachu/devincachu-2013,devincachu/devincachu-2013
devincachu/purger.py
devincachu/purger.py
# -*- coding: utf-8 -*-
import roan

from django.contrib.flatpages import models
from palestras import models as pmodels


def connect():
    flatpages = models.FlatPage.objects.all()
    for f in flatpages:
        roan.purge(f.url).on_save(models.FlatPage)

    palestras = pmodels.Palestra.objects.all()
    for p in palestras:
        roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
        roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
        roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestrante)
        roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestrante)
# -*- coding: utf-8 -*-
import roan

from django.contrib.flatpages import models
from palestras import models as pmodels


def connect():
    flatpages = models.FlatPage.objects.all()
    for f in flatpages:
        roan.purge(f.url).on_save(models.FlatPage)

    palestras = pmodels.Palestra.objects.all()
    for p in palestras:
        roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
        roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
bsd-2-clause
Python
b8cd3912bbe67381829f70ec0f1d94e590632387
Create BLASTvouchers.py
DidemnumVex/IonTorrentVoucher
BLASTvouchers.py
BLASTvouchers.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 9 23:13:07 2017

BLASTvouchers.py
Will BLAST contigs made by MIRA against most recent Invertebrate db.

Modification: 11Apr17, now requires csv file list of fasta files to BLAST
(because BLAST mod requires fasta input)

@author: tcampbell
"""

import os
import signal
import pandas as pd
import re
import mysql.connector
import sqlalchemy
import numpy as np
from MIRA_templateFilewriter import mysqlconnect
from IonZipSplitv3 import chunkdf  # need to make sure this gets updated with new versions.
import mysql
import multiprocessing
from Bio.Blast import NCBIXML
import argparse


def BLASTvouchers(df, directory, assemdir, outdir):  # this doesn't seem to be working with the Bio version of BLAST. should try with os.system BLAST cmd.
    x = 0
    x += 1
    outfile = re.search(r'.*\/([A-Za-z0-9]*\_[0-9]{2}[A-H])_d_results/.*', str(df['FileName']))
    if os.path.exists(directory + outdir) == False:
        os.system('mkdir ' + directory + outdir)
    outf = directory + outdir + str(outfile.group(1)) + '.xml'
    print outf
    os.system('/home/tcampbell/BLASTdb/ncbi-blast-2.6.0+/bin/blastn -query ' + str(df['FileName']) + ' -db /home/tcampbell/BLASTdb/MLML_Coarb_nosemi_10Apr17.fa -evalue 0.01 -outfmt 5 -out ' + outf)
    return None


def applyblast(df):
    processid = os.getpid()
    print processid
    df[0].apply(BLASTvouchers, axis=1, args=(df[1], df[2], df[3]))
    os.kill(processid, signal.SIGTERM)
    return None


def parsxml(filein):
    result_handle = open(filein)
    blast_records = NCBIXML.parse(result_handle)
    assembly = 0
    for blast_record in blast_records:
        count = 0
        assembly += 1
        print "Assembly Sequence:", assembly
        for alignment in blast_record.alignments[:3]:
            for hsp in alignment.hsps:
                print('****Alignment****')
                print('sequence:', alignment.title)
                print('e value:', hsp.expect)
                print('length:', alignment.length)
                print('score:', hsp.score)
                print('alignment length:', hsp.align_length)
                print('identity:', hsp.identities)
                pctID = float(hsp.identities) / float(hsp.align_length) * 100
                print 'Percent ID:', pctID
                print('Query Start:', hsp.query_start)
                count += 1
                print "BLAST alignment Num", count
    return blast_records


def main():
    #===============PARSE ARGS===========================
    parser = argparse.ArgumentParser(prog='IonZipSplitv3',
                                     usage='Insert Assembled Contigs into MySQL database',
                                     description='Insert Assembled Sequences into DB',
                                     conflict_handler='error', add_help=True)
    parser.add_argument('--SequenceFiles', type=str, nargs='+', help='File names of Sequencing Runs to assemble')
    parser.add_argument('--Out', type=str, help='Output Directory, e.g. if /home/user/Documents/out/ then out/')
    parser.add_argument('--DataDir', type=str, help='The directory where your data are found. e.g. /home/user/Documents/')
    parser.add_argument('--AssemblyDir', type=str, help='Name of the subdirectory with assembly folders')
    parser.add_argument('--Cores', type=int, default=56, help='Number of processing cores to use. Default is 56.')
    args = parser.parse_args()

    #===============SQL Connect====================================================
    connect = mysqlconnect('/home/tcampbell/scripts/Testdict.txt')  # gets the connection info from config file
    exec(connect)  # executes declaration of variable using string from previous step
    cnx = mysql.connector.connect(user=connect['User'], password=connect['Pass'], host=connect['IP'], database=connect['DB'])
    engine = sqlalchemy.create_engine('mysql://' + connect['User'] + ':' + connect['Pass'] + '@' + connect['IP'] + '/' + connect['DB'])  # is used?
    cursor = cnx.cursor()

    #======================Begin Main=====================================
    fastas = pd.read_csv(args.DataDir + args.AssemblyDir + 'fastalist.csv', header=None)  # Fastalist is made in AssembledSequenceProcess.py
    fastas.columns = ['FileName']
    chuckfas, Cores = chunkdf(fastas, args.Cores)
    d = [args.DataDir, args.AssemblyDir, args.Out]  # comment for debug 5/4/17
    chuckfas = [i + d for i in chuckfas]
    pool = multiprocessing.Pool(processes=Cores)
    re = pool.map(applyblast, chuckfas)
    # re = pool.apply(assemble, args=(chunkeddf), axis=1)
    pool.close()
    ####END Main Guts######
    cursor.close()
    cnx.close()
    return None


if __name__ == '__main__':
    main()
mit
Python
4fe11f89c008909dd21451ac0e23dce86de7c849
Add profiling dev script.
tallakahath/pymatgen,mbkumar/pymatgen,Bismarrck/pymatgen,xhqu1981/pymatgen,czhengsci/pymatgen,nisse3000/pymatgen,aykol/pymatgen,aykol/pymatgen,gmatteo/pymatgen,blondegeek/pymatgen,gpetretto/pymatgen,dongsenfo/pymatgen,tschaume/pymatgen,blondegeek/pymatgen,Bismarrck/pymatgen,montoyjh/pymatgen,matk86/pymatgen,tschaume/pymatgen,tallakahath/pymatgen,tschaume/pymatgen,czhengsci/pymatgen,gmatteo/pymatgen,vorwerkc/pymatgen,czhengsci/pymatgen,fraricci/pymatgen,vorwerkc/pymatgen,richardtran415/pymatgen,setten/pymatgen,nisse3000/pymatgen,mbkumar/pymatgen,gpetretto/pymatgen,tschaume/pymatgen,johnson1228/pymatgen,gpetretto/pymatgen,ndardenne/pymatgen,blondegeek/pymatgen,xhqu1981/pymatgen,richardtran415/pymatgen,mbkumar/pymatgen,richardtran415/pymatgen,tallakahath/pymatgen,gVallverdu/pymatgen,nisse3000/pymatgen,davidwaroquiers/pymatgen,dongsenfo/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,vorwerkc/pymatgen,mbkumar/pymatgen,montoyjh/pymatgen,nisse3000/pymatgen,setten/pymatgen,dongsenfo/pymatgen,gVallverdu/pymatgen,johnson1228/pymatgen,setten/pymatgen,matk86/pymatgen,dongsenfo/pymatgen,aykol/pymatgen,gVallverdu/pymatgen,richardtran415/pymatgen,matk86/pymatgen,ndardenne/pymatgen,czhengsci/pymatgen,davidwaroquiers/pymatgen,setten/pymatgen,johnson1228/pymatgen,montoyjh/pymatgen,davidwaroquiers/pymatgen,Bismarrck/pymatgen,montoyjh/pymatgen,johnson1228/pymatgen,gpetretto/pymatgen,blondegeek/pymatgen,fraricci/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,fraricci/pymatgen,matk86/pymatgen,xhqu1981/pymatgen,Bismarrck/pymatgen,tschaume/pymatgen,Bismarrck/pymatgen,ndardenne/pymatgen
dev_scripts/profile_structure.py
dev_scripts/profile_structure.py
#!/usr/bin/env python

from pymatgen.io.vaspio import Poscar
import cProfile
import pstats
import os

p = Poscar.from_file("../test_files/POSCAR.LiFePO4", check_for_POTCAR=False)
s = p.structure

def test():
    nn = s.get_sites_in_sphere([0, 0, 0], 20)
    print len(nn)

cProfile.run('test()', 'testprof')
p = pstats.Stats('testprof')
p.sort_stats('cumulative').print_stats(20)
os.remove("testprof")
mit
Python
aa8a87eabd97406c91b0474dc6018b618101b503
add the code
kbatten/python-socks5-socket
monkey_sockets_socks5.py
monkey_sockets_socks5.py
#!/usr/bin/env python
# monkey patch socks5 support into sockets

import os
import socket
import struct


def _split_proxy(uri, port):
    split_auth = uri.split("@")
    if uri == "":
        split_uri = []
    elif len(split_auth) == 2:
        split_first = split_auth[0].split(":")
        split_second = split_auth[1].split(":")
        if len(split_first) == 3:
            split_uri = [int(split_first[0])] + split_first[1:] + [split_second[0], int(split_second[1])]
        else:
            split_uri = [int(split_first[0])] + split_first[1:] + [""] + [split_second[0], int(split_second[1])]
    else:
        split_small = split_auth[0].split(":")
        split_uri = [int(split_small[0])] + [""] + [""] + [split_small[1]] + [int(split_small[2])]
    if len(split_uri) != 5:
        split_uri = None
    elif split_uri[0] != port:
        split_uri = None
    return split_uri


# CAVEATS:
# only supports ipv4
# only supports socks5
# user/pass auth has not been tested
# if socks_proxy env variable is set, all socket connections on that port will use it
class Socks5Socket(socket.socket):
    def connect(self, address):
        # socks_proxy=<DESTPORT:>[username[:password]@]<PROXYHOST:><PROXYPORT>
        socks_proxy = _split_proxy(os.getenv("socks_proxy", ""), address[1])

        if not socks_proxy:
            true_socket.connect(self, address)
        else:
            # print "{socks_host}:{socks_port} -> {remote_host}:{remote_port}".format(socks_host=socks_proxy[3], socks_port=socks_proxy[4], remote_host=address[0], remote_port=address[1])
            true_socket.connect(self, (socks_proxy[3], socks_proxy[4]))

            auth_methods_available = 1
            auth_methods = [0x00]
            if socks_proxy[1]:
                auth_methods_available += 1
                auth_methods.append(0x02)

            # greet the socks server
            msg = struct.pack("!BB", 0x05, auth_methods_available)
            for auth_method in auth_methods:
                msg += struct.pack("!B", auth_method)
            # print msg.encode("hex")
            self.send(msg)
            resp = self.recv(2)
            # print resp.encode("hex")
            (version, auth_method) = struct.unpack("!BB", resp)

            # authorize to the socks server
            if auth_method == 0x00:
                pass
            elif auth_method == 0x02:
                # TODO: test this :/
                msg = struct.pack("!BBsBs", 0x01, len(socks_proxy[1]), socks_proxy[1], len(socks_proxy[2]), socks_proxy[2])
                # print msg.encode("hex")
                self.send(msg)
                resp = self.recv(2)
                # print resp.encode("hex")
                (version, status) = struct.unpack("!BB", resp)
                if status != 0:
                    self.close()
                    raise Exception("socks authorization failed")
            else:
                raise Exception("no acceptable socks authorization available")

            # set connection to tcp/ip stream, ipv4
            ipb = [int(b) for b in address[0].split(".")]
            msg = struct.pack("!B B B B BBBB H", 0x05, 0x01, 0x00, 0x01, ipb[0], ipb[1], ipb[2], ipb[3], address[1])
            # print msg.encode("hex")
            self.send(msg)
            resp = self.recv(10)
            # print resp.encode("hex")
            (version, status) = struct.unpack("!B B 8x", resp)
            if status != 0:
                self.close()
                raise Exception("socks connection failed, error: " + status)


true_socket = socket.socket
socket.socket = Socks5Socket
mit
Python
ea30b49012af2003049f4b1b7deeecb1232c7513
Create permutations.py
Kunalpod/codewars,Kunalpod/codewars
permutations.py
permutations.py
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Permutations
#Problem level: 4 kyu

from itertools import groupby, permutations as perm

def permutations(string):
    return [k for k,_ in groupby(sorted([''.join(comb) for comb in perm(string)]))]
mit
Python
eb943bb45695472483352978060a94e0d48b5e4a
Add scatterplot
ronrest/convenience_py,ronrest/convenience_py
plot/scatter.py
plot/scatter.py
import matplotlib.pyplot as plt


def plot_scatter(x, y, ax=None, color=None, alpha=None, size=None,
                 labels=None, title="Scatterplot", figsize=(10,6)):
    # TODO: Add x, and y labels
    # TODO: grid
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
        fig.suptitle(title, fontsize=15)
    else:
        fig = ax.get_figure()

    ax.scatter(x=x, y=y, c=color, alpha=alpha, s=size)

    # LABEL - each of the points
    if labels is not None:
        for xx, yy, label in zip(x, y, labels):
            plt.annotate(label, xy=(xx, yy), xytext=(7, 0),
                         textcoords='offset points',
                         ha='left', va='center')
    return fig, ax
apache-2.0
Python
02f207269f7d2773919e520e04ab8f9261357d4b
Add isup plugin
tomleese/smartbot,Cyanogenoid/smartbot,Muzer/smartbot,thomasleese/smartbot-old
plugins/isup.py
plugins/isup.py
import requests
import urllib.parse

class Plugin:
    def __call__(self, bot):
        bot.on_respond(r"is (.*) (up|down)(\?)?", self.on_respond)
        bot.on_respond(r"isup (.*)$", self.on_respond)
        bot.on_help("isup", self.on_help)

    def on_respond(self, bot, msg, reply):
        url = "http://isitup.org/" + urllib.parse.quote(msg["match"].group(1)) + ".json"
        headers = { "User-Agent": "SmartBot" }
        res = requests.get(url, headers=headers).json()
        if res["status_code"] == 1:
            reply("{0} looks up for me.".format(res["domain"]))
        else:
            reply("{0} looks down for me.".format(res["domain"]))

    def on_help(self, bot, msg, reply):
        reply("Syntax: is <domain> up|down")
mit
Python
8c6b412e01e81a7c062ba8234ebafc6fca61651c
Add shovel test.quick for sanity check before pushing
python-astrodynamics/astrodynamics,python-astrodynamics/astrodynamics
shovel/test.py
shovel/test.py
# coding: utf-8
from __future__ import absolute_import, division, print_function

import subprocess
from collections import OrderedDict

from shovel import task


@task
def quick():
    failed = OrderedDict.fromkeys(
        ['test', 'docs', 'spelling', 'doc8', 'flake8'], False)

    failed['tests'] = bool(subprocess.call(['py.test', 'astrodynamics/']))
    failed['docs'] = bool(subprocess.call(
        ['sphinx-build', '-W', '-b', 'html', 'docs', 'docs/_build/html']))
    failed['spelling'] = bool(subprocess.call([
        'sphinx-build', '-W', '-b', 'spelling', 'docs', 'docs/_build/html']))
    failed['doc8'] = bool(subprocess.call(['doc8', 'docs']))
    failed['flake8'] = bool(subprocess.call(['flake8']))

    print('\nSummary:')
    for k, v in failed.items():
        print('{:8s}: {}'.format(k, 'Fail' if v else 'Pass'))
mit
Python
cdcc45eb6982e68415632a8bcfbc5e7596e0a1cf
add resize_logos.py
EuroPython/ep-tools,PythonSanSebastian/ep-tools,EuroPython/ep-tools,EuroPython/ep-tools,PythonSanSebastian/ep-tools,PythonSanSebastian/ep-tools,EuroPython/ep-tools,PythonSanSebastian/ep-tools
scripts/resize_logos.py
scripts/resize_logos.py
#!/usr/bin/env python
import os
import os.path as op

from PIL import Image

dirPath = op.abspath('./logos')
out_dir = op.join(dirPath, 'resize')
if not op.exists(out_dir):
    os.mkdir(out_dir)

supported_formats = ['png', 'gif', 'jpg']

for img_file in os.listdir(dirPath):
    if img_file[-3:] not in supported_formats:
        print('Extension for file {} not supported, skipped.'.format(img_file))
        continue

    print(img_file)
    img_name = img_file[:-4]
    print(img_name)

    fpath = os.path.join(dirPath, img_file)
    outPath = os.path.join(out_dir, img_name)

    img = Image.open(fpath)
    if img.mode == "CMYK":
        img = img.convert("RGB")

    img.thumbnail((190, 90), Image.ANTIALIAS)
    img_w, img_h = img.size

    background = Image.new('RGBA', (190, 90), (255, 255, 255, 255))
    bg_w, bg_h = background.size
    offset = int((bg_w - img_w) / 2), int((bg_h - img_h) / 2)
    background.paste(img, offset)
    background.save(outPath+"_thumb.png")
mit
Python
ff994f8bfd7642fc95694d511a1cec81d0ba8f4d
fix bugs
cboling/xos,jermowery/xos,wathsalav/xos,cboling/xos,zdw/xos,xmaruto/mcord,jermowery/xos,open-cloud/xos,xmaruto/mcord,opencord/xos,xmaruto/mcord,open-cloud/xos,wathsalav/xos,opencord/xos,cboling/xos,jermowery/xos,zdw/xos,jermowery/xos,wathsalav/xos,zdw/xos,zdw/xos,xmaruto/mcord,open-cloud/xos,wathsalav/xos,cboling/xos,opencord/xos,cboling/xos
plstackapi/planetstack/api/sites.py
plstackapi/planetstack/api/sites.py
from plstackapi.openstack.client import OpenStackClient
from plstackapi.openstack.driver import OpenStackDriver
from plstackapi.planetstack.api.auth import auth_check
from plstackapi.planetstack.models import Site


def add_site(auth, **fields):
    driver = OpenStackDriver(client = auth_check(auth))
    site = Site(**fields)
    nova_fields = {'tenant_name': fields['login_base'],
                   'description': fields['name'],
                   'enabled': fields['enabled']}
    tenant = driver.create_tenant(**nova_fields)
    site.tenant_id=tenant.id
    site.save()
    return role

def update_site(auth, tenant_id, **fields):
    driver = OpenStackDriver(client = auth_check(auth))
    sites = Site.objects.filter(tenant_id=tenant_id)
    if not sites:
        return
    site = Site[0]
    nova_fields = {}
    if 'description' in fields:
        nova_fields['description'] = fields['name']
    if 'enabled' in fields:
        nova_fields['enabled'] = fields['enabled']
    site.updtae(**fields)
    return site

def delete_site(auth, filter={}):
    driver = OpenStackDriver(client = auth_check(auth))
    sites = Site.objects.filter(**filter)
    for site in sites:
        driver.delete_tenant({'id': site.tenant_id})
        site.delete()
    return 1

def get_sites(auth, filter={}):
    client = auth_check(auth)
    sites = Site.objects.filter(**filter)
    return sites
from plstackapi.openstack.client import OpenStackClient
from plstackapi.openstack.driver import OpenStackDriver
from plstackapi.planetstack.api.auth import auth_check
from plstackapi.planetstack.models import Site


def add_site(auth, **fields):
    driver = OpenStackDriver(client = auth_check(auth))
    site = Site(**fields)
    nova_fields = {'tenant_name': fields['login_base'],
                   'description': fields['name',
                   'enabled': fields['enabled']}
    tenant = driver.create_tenant(**nova_fields)
    site.tenant_id=tenant.id
    site.save()
    return role

def update_site(auth, tenant_id, **fields):
    driver = OpenStackDriver(client = auth_check(auth))
    sites = Site.objects.filter(tenant_id=tenant_id)
    if not sites:
        return
    site = Site[0]
    nova_fields = {}
    if 'description' in fields:
        nova_fields['description'] = fields['name']
    if 'enabled' in fields:
        nova_fields['enabled'] = fields['enabled']
    site.updtae(**fields)
    return site

def delete_site(auth, filter={}):
    driver = OpenStackDriver(client = auth_check(auth))
    sites = Site.objects.filter(**filter)
    for site in sites:
        driver.delete_tenant({'id': site.tenant_id})
        site.delete()
    return 1

def get_sites(auth, filter={}):
    client = auth_check(auth)
    sites = Site.objects.filter(**filter)
    return sites
apache-2.0
Python
1be4972ca39408b8d4770b5722642996908c9a70
add 5-for.py
weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016,weizhenwei/tech-docs-2016
python/5-for.py
python/5-for.py
#!/usr/bin/env python

for letter in 'python':
    print "Current character is ", letter

fruits = ['banana', 'apple', 'mango']
for fruit in fruits:
    print "Current fruit is ", fruit

for index in range(len(fruits)):
    print "Current fruit is ", fruits[index]

print "Good bye!"
bsd-2-clause
Python
4cef0dc3af25ec4c781ed04b28d425374f793702
add socket comm class
codingfoo/overo_python_examples,codingfoo/overo_python_examples
socket_communication.py
socket_communication.py
#!/usr/bin/env python

import socket

class SocketCommunication:
    def __init__(self):
        self.RECEIVER_HOST = '192.168.1.4'  # The remote host
        self.PORT = 3000  # The same port as used by the server

    def open(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.connect((self.RECEIVER_HOST, self.PORT))

    def close(self):
        self.sock.close()

    def communicate(self, data):
        self.sock.send(data)

def main():
    pass  # TODO: add example

if __name__ == "__main__":
    main()
mit
Python
50a7b9bc262c98f4e387746f67a638f50f94ba38
Add migration merge
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
portal/migrations/versions/e396fb1974ef_.py
portal/migrations/versions/e396fb1974ef_.py
from alembic import op
import sqlalchemy as sa

"""empty message

Revision ID: e396fb1974ef
Revises: ('773b1de060dd', '3271a78bbc8b')
Create Date: 2018-04-24 12:19:56.689921

"""

# revision identifiers, used by Alembic.
revision = 'e396fb1974ef'
down_revision = ('773b1de060dd', '3271a78bbc8b')


def upgrade():
    pass


def downgrade():
    pass
bsd-3-clause
Python
85c67110db1fbb5a25faef36bdfe282952f5a034
Create __init__.py
shahryarabaki/ICE
src/__init__.py
src/__init__.py
apache-2.0
Python
d686f54aff87d4dea1266ee1fec8c1c320dee5b9
add tests for fizz_buzz.
bm5w/codeeval
test_fizz_buzz.py
test_fizz_buzz.py
from cStringIO import StringIO
import sys
import fizz_buzz


class Capture(list):
    """Context manager for capturing stdout."""
    def __enter__(self):
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self

    def __exit__(self, *args):
        self.extend(self._stringio.getvalue().splitlines())
        sys.stdout = self._stdout


def test_fizz_buzz():
    expected = ['1 2 F 4 B F 7 8 F B', '1 F 3 F 5 F B F 9 F 11 F 13 FB 15']
    with Capture() as output:
        fizz_buzz.main("input_test.txt")
    assert len(expected) == len(output)
    for ex, act in zip(expected, output):
        assert ex == act
mit
Python
cab46eb2323a062bff8bedbf1cc8dd036cd52044
Solve Code Fights frequency analysis problem
HKuz/Test_Code
CodeFights/frequencyAnalysis.py
CodeFights/frequencyAnalysis.py
#!/usr/local/bin/python
# Code Fights Frequency Analysis Problem

from collections import Counter


def frequencyAnalysis(encryptedText):
    return Counter(encryptedText).most_common(1)[0][0]


def main():
    tests = [
        ["$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", "C"],
        ["Agoodglassinthebishop'shostelinthedevil'sseattwenty-onedegreesandthirteenminutesnortheastandbynorthmainbranchseventhlimbeastsideshootfromthelefteyeofthedeath's-headabeelinefromthetreethroughtheshotfiftyfeetout.", "e"],
        ["Q", "Q"],
        ["):<<}:BnUUKc=>~LKU><,;U><U=~BKc=>~}~jKB;UU~n== ~c=fS<c~}~:w~~Unc}=>Kw=~~ceKc*=~Uc<w=>~nU=nc}Lfc<w=>enKcLwncY>U~j~c=>BKeL~nU=UK}~U><<=mw<e=>~B~m=~f~<m=>~}~n=>;US>~n}nL~~BKc~mw<e=>~=w~~=>w<*:>=>~U><=mKm=fm~~=<*=k", "~"],
        ["(:c:@%aF;:NBo@o:'X:%CFCBoFB@X@iFCTPc@iFi::@o%;@a!PXCF:iTcCNbCFPoFCc;:YCo%a@a}Pcco@Cc:%@FF;::o%BYBo:bi@oT;=nFv:|i@o%`a%Ci:TFCBo<!PXCF:i%iBXaF;:bP|F;iBP|;Bo:: :aBT}:F@o%v:|i@o%X@T:aBPFFB@aXBF*;:i:F;:|iBPXb:|CoaFB%C|=$Co%Co|oBF;Co|F;:i:<v:|i@o%;@a!PXCF:iTcCNbF;:Fi::@|@Co@o%%iBXF;:bP|F;iBP|;F;:a}Pcc`aBF;:i: :dF;: T;BBa:@%CYY:i:oFaXBFFB%C|<F;CaFCN:YCo%Co|F*Ba}:c:FBoa@o%@T;:aFYCcc:%*CF;|Bc%TBCoa@o%D:*:ci =V;: :aFCN@F:F;:FBF@cE@cP:@Fj1=5NCccCBo<bPF:E:oF;@FYC|Pi:XiBE:aFBb:b:cB*F;:@TFP@c*BiF;*;:oF;: :E:oFP@cc a:ccF;:CF:Na=", ":"]
    ]

    for t in tests:
        res = frequencyAnalysis(t[0])
        if t[1] == res:
            print("PASSED: frequencyAnalysis({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: frequencyAnalysis({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
mit
Python
4fa2ca578b7015bee68f9f2f7bc26df2f7ab01b4
add test_cli.py module and initial test
akittas/geocoder,DenisCarriere/geocoder
tests/test_cli.py
tests/test_cli.py
#!/usr/bin/env python
# coding: utf8
"""
Unit tests for cli functionality
"""

# --- Imports
import subprocess
import geocoder

# --- Constants
_CLI_EX = './geocoder/cli.py'  # CLI executable path

us_address = '595 Market St'
us_city = 'San Francisco'
us_state = 'CA'
us_zipcode = '94105'
location = ' '.join([us_address, us_city, us_state, us_zipcode])


# --- CLI tests. Each shell call should have return code 0 if successfull.
def test_cli_default():
    # default provider cli test
    assert not subprocess.call(['python', _CLI_EX, location])
mit
Python
8f4ac0b12c0f83ff892e16e312cc5edbfb089850
add tests for no config startup
20c/vaping,20c/vaping
tests/test_cli.py
tests/test_cli.py
from click.testing import CliRunner
from vaping import cli
import pytest


def test_start_no_home():
    runner = CliRunner()
    with pytest.raises(ValueError) as excinfo:
        runner.invoke(cli.cli, ['start'], catch_exceptions=False)
    assert str(excinfo.value).startswith('no config specified')
apache-2.0
Python
1f2f4837e823ff6cc5c9cb961f4852753926f0d7
Create __init__.py
petermchale/tumor
tumor/__init__.py
tumor/__init__.py
mit
Python
f146583961733feb90567fdf03a6a5ee122c550f
Create r34.py
Loreleix64/aradiabot
r34.py
r34.py
# Aradiabot function for searching rule34.xxx
# As they don't have an API, this was easier to put in it's own file so I could organize everything.

import requests
from html.parser import HTMLParser
import random
import sys

counter = [10,9,8,7,6,5,4,3,2,1]
images = []

class booruparser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            if any('id' in pairs for pairs in attrs):
                try:
                    images.append(str(attrs[1][1]))
                except:
                    pass

class imageparser(HTMLParser):
    def handle_starttag(self, tag, attrs):
        if ('id', 'image') in attrs:
            print("http:" + attrs[2][1])

parser = booruparser()
imgparser = imageparser()

tags = ""
for arg in sys.argv:
    if arg == sys.argv[0]:
        pass
    else:
        tags = tags + arg + "+"

count = 0
while len(images) < 1:
    if count < 10:
        parser.feed(requests.get('http://rule34.xxx/index.php?page=post&s=list&tags=' + tags + '&pid=' + str(counter[count])).text)
        count = count + 1
    else:
        break

if count != 10:
    image = requests.get('http://rule34.xxx/' + random.choice(images)).text
    imgparser.feed(image)
else:
    print("0")
mit
Python
d0287d9deaa3eb03076cdd199414b772a291e2c5
Add command for moving zips
california-civic-data-coalition/django-calaccess-downloads-website,california-civic-data-coalition/django-calaccess-downloads-website,california-civic-data-coalition/django-calaccess-downloads-website
calaccess_website/management/commands/mvzips.py
calaccess_website/management/commands/mvzips.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Move downloaded and cleaned zips to their proper place in the raw data archived dir.
"""
import boto3
from django.conf import settings
from calaccess_raw.management.commands import CalAccessCommand
from calaccess_raw.models.tracking import RawDataVersion
import logging
logger = logging.getLogger(__name__)


class Command(CalAccessCommand):
    """
    Move downloaded and cleaned zips to their proper place in the raw data archived dir.
    """
    help = 'Move downloaded and cleaned zips to their proper place in the raw data archived dir'

    def handle(self, *args, **options):
        """
        Make it happen.
        """
        super(Command, self).handle(*args, **options)

        # set up boto session
        self.session = boto3.Session(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            region_name=settings.AWS_S3_REGION_NAME
        )
        # and client
        self.client = self.session.client('s3')

        # loop over all the versions
        for v in RawDataVersion.objects.exclude(id=34).exclude(id=33):
            # if there's a download zip
            if v.download_zip_archive:
                # set the initial path
                initial_download_path = v.download_zip_archive.name
                # split datetime from file name and ext
                download_datetime, download_fullname = initial_download_path.split('/')
                # split file name and ext
                download_filename, download_ext = download_fullname.split('.')
                # set new path
                new_download_path = '{fn}_{dt}.{fx}'.format(
                    fn=download_filename,
                    dt=download_datetime,
                    fx=download_ext
                )
                # move
                logger.debug('Move {0} to {1}'.format(
                    initial_download_path,
                    new_download_path
                ))
                self.client.copy_object(
                    Bucket=settings.AWS_STORAGE_BUCKET_NAME,
                    Key=new_download_path,
                    CopySource={
                        'Bucket': settings.AWS_STORAGE_BUCKET_NAME,
                        'Key': initial_download_path,
                    },
                )
                # reset file name
                v.download_zip_archive.name = new_download_path

            # repeat for clean zips
            if v.clean_zip_archive:
                # set the initial path
                initial_clean_path = v.clean_zip_archive.name
                # split datetime from file name and ext
                clean_datetime, clean_fullname = initial_clean_path.split('/')
                # split file name and ext
                clean_filename, clean_ext = clean_fullname.split('.')
                # set new path
                new_clean_path = 'clean_{dt}.{fx}'.format(
                    dt=clean_datetime,
                    fx=clean_ext
                )
                # move
                logger.debug('Move {0} to {1}'.format(
                    initial_clean_path,
                    new_clean_path
                ))
                self.client.copy_object(
                    Bucket=settings.AWS_STORAGE_BUCKET_NAME,
                    Key=new_clean_path,
                    CopySource={
                        'Bucket': settings.AWS_STORAGE_BUCKET_NAME,
                        'Key': initial_clean_path,
                    },
                )
                # reset file name
                v.clean_zip_archive.name = new_clean_path

            # save the version
            v.save()
mit
Python
cdd1f3410b8ae304485f7992ac6048e1277cffe1
Add local locale from file
phoebebright/parsedatetime,bear/parsedatetime,idpaterson/parsedatetime
parsedatetime/pdt_locales/__init__.py
parsedatetime/pdt_locales/__init__.py
# -*- encoding: utf-8 -*- """ pdt_locales All of the included locale classes shipped with pdt. """ import os try: import PyICU as pyicu except: pyicu = None import yaml def lcase(x): return x.lower() from .base import pdtLocale_base, pdtLocale_icu from .de_DE import * from .en_AU import * from .en_US import * from .es import * from .nl_NL import * from .pt_BR import * from .ru_RU import * pdtLocales = [ 'icu', 'en_US', 'en_AU', 'es_ES', 'de_DE', 'nl_NL', 'ru_RU', ] def load_yaml(path): """ Read yaml data from filepath :param path: :return: """ with open(path, 'r') as fio: return yaml.load(fio.read()) def _get_yaml_path(locale): """ Return filepath of locale file :param locale: :return: """ return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale) def load_locale(locale): """ Return data of locale :param locale: :return: """ assert locale in pdtLocales, "The locale '%s' is not supported" % locale _data_base = load_yaml(_get_yaml_path('base')) return _data_base.update(**load_yaml(_get_yaml_path(locale))) load_locale('ru_RU')
# -*- encoding: utf-8 -*- """ pdt_locales All of the included locale classes shipped with pdt. """ try: import PyICU as pyicu except: pyicu = None def lcase(x): return x.lower() from .base import pdtLocale_base, pdtLocale_icu from .de_DE import * from .en_AU import * from .en_US import * from .es import * from .nl_NL import * from .pt_BR import * from .ru_RU import *
apache-2.0
Python
c98039a25638db0c124efeaa394f89f2a84a2ede
Create aekjdfh.py
sajjadelastica/3G45,sajjadelastica/3G45,sajjadelastica/3G45,sajjadelastica/3G45
aekjdfh.py
aekjdfh.py
sdl;jfhlkjsdhfhdf
apache-2.0
Python
c51651dba8ccd14be9e6fb9ee028d1d2940b3202
Add parity test for simple RNN (#1351)
williamFalcon/pytorch-lightning,williamFalcon/pytorch-lightning
benchmarks/test_rnn_parity.py
benchmarks/test_rnn_parity.py
import time

import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

from pytorch_lightning import Trainer, LightningModule


class AverageDataset(Dataset):
    def __init__(self, dataset_len=300, sequence_len=100):
        self.dataset_len = dataset_len
        self.sequence_len = sequence_len
        self.input_seq = torch.randn(dataset_len, sequence_len, 10)
        top, bottom = self.input_seq.chunk(2, -1)
        self.output_seq = top + bottom.roll(shifts=1, dims=-1)

    def __len__(self):
        return self.dataset_len

    def __getitem__(self, item):
        return self.input_seq[item], self.output_seq[item]


class ParityRNN(LightningModule):
    def __init__(self):
        super(ParityRNN, self).__init__()
        self.rnn = nn.LSTM(10, 20, batch_first=True)
        self.linear_out = nn.Linear(in_features=20, out_features=5)

    def forward(self, x):
        seq, last = self.rnn(x)
        return self.linear_out(seq)

    def training_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self(x)
        loss = F.mse_loss(y_hat, y)
        return {'loss': loss}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)

    def train_dataloader(self):
        return DataLoader(AverageDataset(), batch_size=30)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_pytorch_parity(tmpdir):
    """
    Verify that the same pytorch and lightning models achieve the same results
    :param tmpdir:
    :return:
    """
    num_epochs = 2
    num_rums = 3
    lightning_outs, pl_times = lightning_loop(ParityRNN, num_rums, num_epochs)
    manual_outs, pt_times = vanilla_loop(ParityRNN, num_rums, num_epochs)

    # make sure the losses match exactly to 5 decimal places
    for pl_out, pt_out in zip(lightning_outs, manual_outs):
        np.testing.assert_almost_equal(pl_out, pt_out, 8)


def set_seed(seed):
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)


def vanilla_loop(MODEL, num_runs=10, num_epochs=10):
    """
    Returns an array with the last loss from each epoch for each run
    """
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
    errors = []
    times = []

    for i in range(num_runs):
        time_start = time.perf_counter()

        # set seed
        seed = i
        set_seed(seed)

        # init model parts
        model = MODEL()
        dl = model.train_dataloader()
        optimizer = model.configure_optimizers()

        # model to GPU
        model = model.to(device)

        epoch_losses = []
        for epoch in range(num_epochs):

            # run through full training set
            for j, batch in enumerate(dl):
                x, y = batch
                x = x.cuda(0)
                y = y.cuda(0)
                batch = (x, y)

                loss_dict = model.training_step(batch, j)
                loss = loss_dict['loss']
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

            # track last epoch loss
            epoch_losses.append(loss.item())

        time_end = time.perf_counter()
        times.append(time_end - time_start)

        errors.append(epoch_losses[-1])

    return errors, times


def lightning_loop(MODEL, num_runs=10, num_epochs=10):
    errors = []
    times = []

    for i in range(num_runs):
        time_start = time.perf_counter()

        # set seed
        seed = i
        set_seed(seed)

        # init model parts
        model = MODEL()
        trainer = Trainer(
            max_epochs=num_epochs,
            show_progress_bar=False,
            weights_summary=None,
            gpus=1,
            early_stop_callback=False,
            checkpoint_callback=False,
            distributed_backend='dp',
        )
        trainer.fit(model)

        final_loss = trainer.running_loss.last().item()
        errors.append(final_loss)

        time_end = time.perf_counter()
        times.append(time_end - time_start)

    return errors, times
apache-2.0
Python
8a1dff9437a4f013a96369a1fe174c505e8636cb
Add missing migration (fix #130)
csalom/puput,APSL/puput,APSL/puput,APSL/puput,csalom/puput,csalom/puput
puput/migrations/0004_auto_20170912_0928.py
puput/migrations/0004_auto_20170912_0928.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-12 09:28
from __future__ import unicode_literals

from django.db import migrations
import django.db.models.manager


class Migration(migrations.Migration):

    dependencies = [
        ('puput', '0003_add_short_feed_description_to_blog_page'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='blogpage',
            managers=[
                ('extra', django.db.models.manager.Manager()),
            ],
        ),
    ]
mit
Python
8a3d757be17d395ba14ae7458036a78d10e3e212
Test to find out how to remove a file a git repo.
alekscl/HelloWorld-Python
holamundo.py
holamundo.py
#!/usr/bin/env python

def main():
    print("Hola mundo!!")

if __name__ == "__main__":
    main()
unlicense
Python
17aefac614890b1fe7079a76b803707ae3fbb832
Add perihelion
Caronthir/FYS3150,Caronthir/FYS3150,Caronthir/FYS3150
Project3/analysis/perihelion.py
Project3/analysis/perihelion.py
#!/bin/python3
# -*- coding: utf-8 -*-
from runner import Runner
import numpy as np
import matplotlib.pyplot as plt


class Perihelion(Runner):
    def setup(self):
        self['number of years'] = 100
        self['do save results'] = True
        self['do save any results'] = False
        self['use all planets'] = False
        self['save period'] = 1000
        self['use planets'] = ['Sun', 'Mercury']
        self['steps per year'] = 1e7
        self['freeze sun'] = True
        self['use two body approximation'] = False
        self['use relativistic correction'] = True
        self['method'] = 'verlet'
        self.get_planet('Sun')['position'] = [0.0, 0.0, 0.0]
        self.get_planet('Sun')['Velocity'] = [0.0, 0.0, 0.0]
        self.get_planet('Mercury')['velocity'] = [12.44, 0.0, 0.0]
        self.get_planet('Mercury')['position'] = [0.0, 0.3075, 0.0]

    def run(self):
        out, _ = self.run_simulation()
        print(out)
        # self.run_analysis()
        self.getPerihelion()

    def getPerihelion(self):
        timenangle = np.loadtxt("../data/precession.txt");
        print(timenangle.shape)
        print(timenangle[:, 1] - timenangle[0, 1])
        plt.plot(timenangle[:, 0], timenangle[:, 1], 'o')
        plt.xlabel("Time [yr]")
        plt.ylabel("Precession angle")
        plt.show()

    def findPerihelion(self):
        position = self.get_position()
        r = np.linalg.norm(position[:, :, 1] - position[:, :, 0], axis=1)
        # plt.plot(r)
        # plt.show()
        # argrelextrema()
        rfunc = interp1d(np.linspace(0, len(r)-1, len(r)), r, kind='slinear')
        r = rfunc(np.linspace(0, len(r)-1, len(r)))
        larger = np.roll(r, 1) - r > 1e-3
        smaller = np.roll(r, -1) - r > 1e-3
        minima = np.logical_and(larger, smaller)
        above_mean = r < r.mean()
        minima = np.logical_and(minima, above_mean)
        plt.plot(r)
        plt.plot(r*minima, 'o')
        plt.show()
        print(minima.sum())
        x, y = position[minima, 0:2, 1].T
        print(x.shape, y.shape)
        theta = np.arctan2(y, x)
        plt.plot(theta*180/np.pi)
        plt.show()


if __name__ == '__main__':
    with Perihelion() as mercury:
        mercury.run()
mit
Python
41f68e14fe890cac3de391f7bc4cdd5c2e5b9d75
test B07
gisce/primestg
spec/Order_B07_spec.py
spec/Order_B07_spec.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from primestg.order.orders import Order
from expects import expect, equal

with description('Order B07 Generation'):
    with it('generates expected B07 xml'):
        expected_result = '<Order IdPet="1234" IdReq="B07" Version="3.1.c">\n ' \
                          '<Cnc Id="CIR000000000">\n ' \
                          '<B07 IPftp="10.1.5.206"/>\n ' \
                          '</Cnc>\n</Order>\n'

        generic_values = {
            'id_pet': '1234',
            'id_req': 'B07',
            'cnc': 'CIR000000000',
        }
        payload = {
            'IPftp': '10.1.5.206',
        }
        order = Order('B07')
        order = order.create(generic_values, payload)
        expect(order).to(equal(expected_result))
agpl-3.0
Python
e6f9f12d1c9faae8f718306bcd2862278a083351
Test gnssUblox message processing (#24404)
commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot
selfdrive/locationd/test/test_ublox_processing.py
selfdrive/locationd/test/test_ublox_processing.py
import unittest

import numpy as np
from laika import AstroDog
from laika.helpers import UbloxGnssId
from laika.raw_gnss import calc_pos_fix, correct_measurements, process_measurements, read_raw_ublox

from selfdrive.test.openpilotci import get_url
from tools.lib.logreader import LogReader


def get_gnss_measurements(log_reader):
  gnss_measurements = []
  for msg in log_reader:
    if msg.which() == "ubloxGnss":
      ublox_msg = msg.ubloxGnss
      if ublox_msg.which == 'measurementReport':
        report = ublox_msg.measurementReport
        if len(report.measurements) > 0:
          gnss_measurements.append(read_raw_ublox(report))
  return gnss_measurements


class TestUbloxProcessing(unittest.TestCase):
  NUM_TEST_PROCESS_MEAS = 10

  @classmethod
  def setUpClass(cls):
    lr = LogReader(get_url("4cf7a6ad03080c90|2021-09-29--13-46-36", 0))
    cls.gnss_measurements = get_gnss_measurements(lr)

  def test_read_ublox_raw(self):
    count_gps = 0
    count_glonass = 0
    for measurements in self.gnss_measurements:
      for m in measurements:
        if m.ublox_gnss_id == UbloxGnssId.GPS:
          count_gps += 1
        elif m.ublox_gnss_id == UbloxGnssId.GLONASS:
          count_glonass += 1

    self.assertEqual(count_gps, 5036)
    self.assertEqual(count_glonass, 3651)

  def test_get_fix(self):
    dog = AstroDog()
    position_fix_found = 0
    count_processed_measurements = 0
    count_corrected_measurements = 0
    position_fix_found_after_correcting = 0

    pos_ests = []
    for measurements in self.gnss_measurements[:self.NUM_TEST_PROCESS_MEAS]:
      processed_meas = process_measurements(measurements, dog)
      count_processed_measurements += len(processed_meas)
      pos_fix = calc_pos_fix(processed_meas)
      if len(pos_fix) > 0 and all(pos_fix[0] != 0):
        position_fix_found += 1

        corrected_meas = correct_measurements(processed_meas, pos_fix[0][:3], dog)
        count_corrected_measurements += len(corrected_meas)

        pos_fix = calc_pos_fix(corrected_meas)
        if len(pos_fix) > 0 and all(pos_fix[0] != 0):
          pos_ests.append(pos_fix[0])
          position_fix_found_after_correcting += 1

    mean_fix = np.mean(np.array(pos_ests)[:, :3], axis=0)
    np.testing.assert_allclose(mean_fix, [-2452306.662377, -4778343.136806, 3428550.090557], rtol=0, atol=1)

    # Note that can happen that there are less corrected measurements compared to processed when they are invalid.
    # However, not for the current segment
    self.assertEqual(position_fix_found, self.NUM_TEST_PROCESS_MEAS)
    self.assertEqual(position_fix_found_after_correcting, self.NUM_TEST_PROCESS_MEAS)
    self.assertEqual(count_processed_measurements, 69)
    self.assertEqual(count_corrected_measurements, 69)


if __name__ == "__main__":
  unittest.main()
mit
Python
45a91a5c32227aabf17b52960d98851cd7608dd1
add qha plot tool (function version)
abelcarreras/aiida_extensions,abelcarreras/aiida_extensions
workflows/tools/plot_quasiparticle_scan.py
workflows/tools/plot_quasiparticle_scan.py
from aiida import load_dbenv
load_dbenv()

from aiida.orm import load_node, load_workflow
from aiida.orm import Code, DataFactory

import matplotlib.pyplot as plt

StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
KpointsData = DataFactory('array.kpoints')

import numpy as np

#######################
wf = load_workflow(1086)
#######################

thermal_properties = wf.get_result('thermal_properties')

energy = thermal_properties.get_array('electronic_energies')
volumes = thermal_properties.get_array('volumes')
entropy = thermal_properties.get_array('entropy')
cv = thermal_properties.get_array('cv')
temperature = thermal_properties.get_array('temperature')

plt.figure(1)
plt.plot(volumes, energy)

plt.figure(2)
for i, w in enumerate(wf.get_steps()[1].get_sub_workflows()):
    frequencies = [w.get_result('quasiparticle_data').get_dict()['{}'.format(k)]['q_point_0']['4']['frequency'] for k in range(100, 800, 100)]
    plt.plot(volumes, frequencies, label='{}'.format(temperature[i]))

plt.show()
mit
Python
082e7d63192c2e7eaa4210e0c559b145313ecc3a
Add files via upload
SpencerNorris/SuperPACs,SpencerNorris/SuperPACs,SpencerNorris/SuperPACs,SpencerNorris/SuperPACs
server/src/datasource/parse_indepexpends.py
server/src/datasource/parse_indepexpends.py
from datasource import fec
from datasource import propublica
import os

FEC_APIKEY = os.getenv('FEC_API_KEY', '')
ProPublica_APIKEY = os.getenv('PP_API_KEY', '')

FecApiObj = fec.FECAPI(FEC_APIKEY)
committees = FecApiObj.get_committees()

PPCampFinObj = propublica.CampaignFinanceAPI(ProPublica_APIKEY)

datafile = open("IndepExpends.json", 'w')
for committee in committees:
    if(2016 in committee['cycles']):
        print(committee['committee_id'])
        indepExpend = PPCampFinObj.get_indep_expends(str(committee['committee_id']))
        datafile.write(str(indepExpend))
datafile.close()
apache-2.0
Python
d8ba95ddb1e469600c735316a1aeafa115399b3c
Add an execution module called key to return minion public key finger
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
salt/modules/key.py
salt/modules/key.py
'''
Functions to view the minion's public key information
'''

# Import python libs
import os

# Import Salt libs
import salt.utils


def finger():
    '''
    Return the minion's public key fingerprint

    CLI Example::

        salt '*' key.finger
    '''
    return salt.utils.pem_finger(
        os.path.join(__opts__['pki_dir'], 'minion.pub')
    )
apache-2.0
Python
ce8465e5f0f085bedcd1a84220316c8eab29a493
Add Tensor Flow
kakaba2009/MachineLearning,kakaba2009/MachineLearning,kakaba2009/MachineLearning,kakaba2009/MachineLearning
python/src/algorithm/coding/setupdate.py
python/src/algorithm/coding/setupdate.py
n = int(input())
s = set(map(int, input().split()))
N = int(input())

for i in range(N):
    cmd = input()
    B = set(map(int, input().split()))
    if "symmetric_difference_update" in cmd:
        s.symmetric_difference_update(B)
    elif "intersection_update" in cmd:
        s.intersection_update(B)
    elif "difference_update" in cmd:
        s.difference_update(B)
    elif "update" in cmd:
        s.update(B)

print(sum(s))
apache-2.0
Python
b3889f8ff6d66963d4253d6796c3bb20dc9adbb7
Add external driver and parameter file
eguil/ENSO_metrics,eguil/ENSO_metrics
scripts/my_Param.py
scripts/my_Param.py
#=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'

sstNameObs = 'sst'
tauxNameObs = 'tauu'

#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'

modnames = ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
            'CSIRO-Mk3-6-0', 'CanCM4', 'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC',
            'GISS-E2-R', 'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
            'IPSL-CM5A-LR', 'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
            'MPI-ESM-LR', 'MPI-ESM-MR', 'inmcm4']

modnames = ['IPSL-CM5A-LR']

# Variables
sstName = 'ts'
tauxName = 'tauu'

#=================================================
# Output
#-------------------------------------------------
outpathdata = '.'    # e.g. '/user/directory/output/nc'
outpathjsons = '.'   # e.g. '/user/directory/output/json'
outnamejson = 'test.json'

#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']

# Variable name and nino box
ninoBox = 'nino3'
bsd-3-clause
Python
771fc766446e1610a0599102720dc7e0f358e0e6
Add wsgi file
dlapiduz/govcode.org,dlapiduz/govcode.org,dlapiduz/govcode.org,HackAgenda/govcode,HackAgenda/govcode
app.wsgi
app.wsgi
from app import app as application
mit
Python
fc636dbaacb5d2d1ebba1ba7f577ee4ec4deb958
Add synthtool scripts (#3765)
googleapis/java-containeranalysis,googleapis/java-containeranalysis,googleapis/java-containeranalysis
google-cloud-containeranalysis/synth.py
google-cloud-containeranalysis/synth.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This script is used to synthesize generated parts of this library."""

import synthtool as s
import synthtool.gcp as gcp

gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()

library = gapic.java_library(
    service='container',
    version='v1beta1',
    config_path='/google/devtools/containeranalysis/artman_containeranalysis_v1beta1.yaml',
    artman_output_name='')

s.copy(library / 'gapic-google-cloud-containeranalysis-v1beta1/src', 'src')
s.copy(library / 'grpc-google-cloud-containeranalysis-v1beta1/src', '../../google-api-grpc/grpc-google-cloud-containeranalysis-v1beta1/src')
s.copy(library / 'proto-google-cloud-containeranalysis-v1beta1/src', '../../google-api-grpc/proto-google-cloud-containeranalysis-v1beta1/src')
apache-2.0
Python
faf9e73f44cfee0c637c006ad6c37e8dcef0d1df
Create arcEC.py
MartinHvidberg/Esri_stuff
arcEC.py
arcEC.py
import arcpy
import sys

## Version 1.8 (8 functions) '130213/MaHvi

def SetMsg(msg, severity=0):  # 0:Message, 1:Warning, 2:Error
    #print msg
    try:
        for string in msg.split('\n'):
            string = ":) "+string
            if severity == 0:
                arcpy.AddMessage(string)
            elif severity == 1:
                arcpy.AddWarning(string)
            elif severity == 2:
                arcpy.AddError(string)
    except:
        pass

def ecMessage(strI, numI=0, severity=0):
    """ Neither message number nor severity is mandatory """
    if numI == 0:
        SetMsg(" Message: "+strI, 0)
    else:
        SetMsg(" Message: "+str(numI)+" : "+strI, 0)

def ecWarning(strI, numI, severity=0):
    """ Severity is not mandatory """
    SetMsg(" ! Warning: "+str(numI)+" : "+strI, 1)

def ecError(strI, numI, severity):
    """ Severity > 0 causes program termination """
    SetMsg("!!!Error: "+str(numI)+" : "+strI, 2)
    if severity > 0:
        sys.exit(numI)

def Describe2String(desIn):
    strReport = ""
    if hasattr(desIn, "Name"):
        strReport += "\n Name: "+desIn.Name
    if hasattr(desIn, "baseName"):
        strReport += "\n baseName: "+desIn.baseName
    if hasattr(desIn, "dataType"):
        strReport += "\n dataType: "+desIn.dataType
    #if hasattr(desIn, "dataElementType"):
    #    strReport += "\n dataElementType: "+desIn.dataElementType
    if hasattr(desIn, "catalogPath"):
        strReport += "\n catalogPath: "+desIn.catalogPath
    if hasattr(desIn, "children"):
        strReport += "\n children: "+str(len(desIn.children))
    if hasattr(desIn, "fields"):
        strReport += "\n fields: "+str(len(desIn.fields))
        if len(desIn.fields) > 0:
            for fldX in desIn.fields:
                strReport += "\n field: "+fldX.name
    if hasattr(desIn, "pludder"):
        strReport += "\n pludder: "+desIn.pludder
    return strReport

def Table2Ascii(tblIn):
    strReport = ""
    desIn = arcpy.Describe(tblIn)
    if hasattr(desIn, "dataType"):
        if desIn.dataType == "Table":
            strReport += "\n Table2Ascii ::"
            if hasattr(desIn, "fields"):
                strReport += "\n fields: "+str(len(desIn.fields))+"\n"
                if len(desIn.fields) > 0:
                    for fldX in desIn.fields:
                        strReport += "|"+fldX.name+" <"+fldX.type+">"
                    rows = arcpy.SearchCursor(tblIn)
                    numRows = 0
                    for rowX in rows:
                        strReport += "\n "
                        for fldX in desIn.fields:
                            strReport += "|"+str(rowX.getValue(fldX.name))
                        numRows += 1
                    strReport += "\n Row count: "+str(numRows)
                else:
                    strReport += "No Fields in tabel ..."
    return strReport

def Table2Ascii_byFields(tblIn):
    strReport = ""
    desIn = arcpy.Describe(tblIn)
    if hasattr(desIn, "dataType"):
        if desIn.dataType == "Table":
            strReport += "Table2Ascii_ByFields"
            if hasattr(desIn, "fields"):
                strReport += "\n fields: "+str(len(desIn.fields))
                if len(desIn.fields) > 0:
                    for fldX in desIn.fields:
                        rows = arcpy.SearchCursor(tblIn)
                        strReport += "\n field: "+fldX.name+" <"+fldX.type+">"
                        strReport += "\n "
                        for rowX in rows:
                            strReport += "|"+str(rowX.getValue(fldX.name))
                        rows.reset()
    return strReport

def Dict2String(dicIn):
    strReport = ""
    lstK = dicIn.keys()
    lstK.sort()
    for K in lstK:
        strReport += str(K)+" : "+str(dicIn[K])+"\n"
    return strReport

# Music that accompanied the coding of this script:
# Deep Forest - Savana Dance
apache-2.0
Python
3aed2efd64d38a78682d7ae4c55400763af22c63
add avoid.py
ukscone/unicornhat,ukscone/unicornhat
avoid.py
avoid.py
#!/usr/bin/env python

import unicornhat as unicorn
import getch, random, time, colorsys
import numpy as np

unicorn.rotation(90)
unicorn.brightness(0.4)

screen = [[0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0],
          [0,0,0,0,0,0,0,0]]

score = 0
carX = 3
carY = 6

def drawObstacles():
    for y in range(8):
        for x in range(8):
            unicorn.set_pixel(x,y,0,screen[y][x],0)
    unicorn.show()

def addObstacle():
    r = random.randrange(0,2)
    if r == 1:
        screen[0][random.randrange(0,7)] = 64

def moveObstacles():
    for y in range(7,-1,-1):
        for x in range(7):
            screen[y][x] = screen[y-1][x]

def drawCar(y, x):
    unicorn.set_pixel((x),(y),0,0,64)
    unicorn.set_pixel((x)+1,(y),0,0,64)
    unicorn.set_pixel((x),(y)+1,0,0,64)
    unicorn.set_pixel((x)+1,(y)+1,0,0,64)
    unicorn.show()

def undrawCar(y, x):
    unicorn.set_pixel((x),(y),0,0,0)
    unicorn.set_pixel((x)+1,(y),0,0,0)
    unicorn.set_pixel((x),(y)+1,0,0,0)
    unicorn.set_pixel((x)+1,(y)+1,0,0,0)
    unicorn.show()

def checkHit():
    if (screen[carY][carX]==64) or (screen[carY+1][carX]==64) or (screen[carY][carX+1]==64) or (screen[carY+1][carX+1]==64):
        return True
    else:
        return False

def crashed():
    for z in range(10):
        rand_mat = np.random.rand(8,8)
        for y in range(8):
            for x in range(8):
                h = 0.1 * rand_mat[x, y]
                s = 0.8
                v = rand_mat[x, y]
                rgb = colorsys.hsv_to_rgb(h, s, v)
                r = int(rgb[0]*255.0)
                g = int(rgb[1]*255.0)
                b = int(rgb[2]*255.0)
                unicorn.set_pixel(x, y, r, g, b)
        unicorn.show()
        time.sleep(0.01)

while True:
    moveObstacles()
    addObstacle()
    drawObstacles()
    drawCar(carY,carX)
    if (checkHit()==True):
        crashed()
        print "Crashed\nGame Over\nScore: ",score
        break
    else:
        score = score+1
    user_input = ""
    while user_input == "":
        user_input = getch.getch().lower()
        if (user_input!="q") and (user_input!="w") and (user_input!=" ") and (user_input!="x"):
            user_input = ""
    if user_input != "x":
        undrawCar(carY,carX)
    if user_input == "q":
        carX = carX-1
        if carX < 0:
            carX = 0
    elif user_input == "w":
        carX = carX+1
        if carX > 6:
            carX = 6
    elif user_input == " ":
        pass
    else:
        print "Game Over\nScore: ", score
        break
unlicense
Python
7f7effb6ec47c3714df5d6d9dbb403c6fda9cd89
Add test to create repository on GitHub
LouisBarranqueiro/selenium-docker-aws-deployer
selenium_github.py
selenium_github.py
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re


class SeleniumGithub(unittest.TestCase):

    def setUp(self):
        """ Setup """
        self.GITHUB_URL = "https://github.com/"
        # GitHub credentials
        self.GITHUB_LOGIN = "developergithubnoreply"
        self.GITHUB_PASSWORD = "eRm-dpW-qkd-34f-!"
        # GitHub repository information
        self.GITHUB_REPO_NAME = "selenium5"
        self.GITHUB_REPO_DESC = "Automated web test with selenium"
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.BASE_URL = self.GITHUB_URL
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_selenium_github(self):
        """ Login into GitHub account, create a new public repository and test that the repository was created """
        driver = self.driver
        driver.get(self.BASE_URL)
        driver.find_element_by_link_text("Sign in").click()
        # Login
        driver.find_element_by_id("login_field").clear()
        driver.find_element_by_id("login_field").send_keys(self.GITHUB_LOGIN)
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(self.GITHUB_PASSWORD)
        driver.find_element_by_name("commit").click()
        # Create new repository
        driver.find_element_by_xpath("//ul[@id='user-links']/li[2]/a/span").click()
        driver.find_element_by_link_text("New repository").click()
        driver.find_element_by_id("repository_name").clear()
        driver.find_element_by_id("repository_name").send_keys(self.GITHUB_REPO_NAME)
        driver.find_element_by_id("repository_public_true").click()
        driver.find_element_by_id("repository_description").clear()
        driver.find_element_by_id("repository_description").send_keys(self.GITHUB_REPO_DESC)
        driver.find_element_by_xpath("//button[@type='submit']").click()
        # Check existence of the repository previously created
        self.assertEqual(requests.head(self.BASE_URL + "/" + self.GITHUB_LOGIN + "/" + self.GITHUB_REPO_NAME).status_code, 200)

    def is_element_present(self, how, what):
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException, e:
            return False
        return True

    def is_alert_present(self):
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException, e:
            return False
        return True

    def close_alert_and_get_its_text(self):
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    unittest.main()
mit
Python
1fe3fd59e4000216c4d6694690dc0ba866a66ecb
add bloom_count_intersection.py
kdmurray91/khmer,souravsingh/khmer,souravsingh/khmer,souravsingh/khmer,kdmurray91/khmer,F1000Research/khmer,ged-lab/khmer,jas14/khmer,kdmurray91/khmer,Winterflower/khmer,ged-lab/khmer,ged-lab/khmer,F1000Research/khmer,jas14/khmer,F1000Research/khmer,Winterflower/khmer,Winterflower/khmer,jas14/khmer
scripts/bloom_count_intersection.py
scripts/bloom_count_intersection.py
## using bloom filter to count intersection

import khmer
import sys
import screed
from screed.fasta import fasta_iter

filename = sys.argv[1]
K = int(sys.argv[2])     # size of kmer
HT_SIZE = int(sys.argv[3])  # size of hashtable
N_HT = int(sys.argv[4])  # number of hashtables

ht = khmer.new_hashbits(K, HT_SIZE, N_HT)

n_unique = 0
for n, record in enumerate(fasta_iter(open(filename))):
    sequence = record['sequence']
    seq_len = len(sequence)
    for n in range(0,seq_len+1-K):
        kmer = sequence[n:n+K]
        if (not ht.get(kmer)):
            n_unique+=1
        ht.count(kmer)
print filename,'has been consumed.'
print '# of unique kmers:',n_unique
print '# of occupied bin:',ht.n_occupied()

filename2 = sys.argv[5]
ht2 = khmer.new_hashbits(K, HT_SIZE, N_HT)
n_unique = 0
n_overlap = 0
for n, record in enumerate(fasta_iter(open(filename2))):
    sequence = record['sequence']
    seq_len = len(sequence)
    for n in range(0,seq_len+1-K):
        kmer = sequence[n:n+K]
        if (not ht2.get(kmer)):
            n_unique+=1
            if (ht.get(kmer)):
                n_overlap+=1
        ht2.count(kmer)
print filename2,'has been consumed.'
print '# of unique kmers:',n_unique
print '# of occupied bin:',ht2.n_occupied()

# n_overlap counts the kmers of the second file that were already seen in the first
print n_overlap,'unique kmers also appear in',filename
bsd-3-clause
Python
4722c73643cbf9cbd63f05736a8469afc4c03443
test project: convert IPAddressField fields to GenericIPAddressField
IMAmuseum/django-admin-bootstrapped,pombredanne/django-admin-bootstrapped,sn0wolf/django-admin-bootstrapped,pombredanne/django-admin-bootstrapped,mynksngh/django-admin-bootstrapped,xrmx/django-admin-bootstrapped,kevingu1003/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin-new,bformet/django-admin-bootstrapped,squallcs12/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin,IMAmuseum/django-admin-bootstrapped,andrewyager/django-admin-bootstrapped,django-admin-bootstrapped/django-admin-bootstrapped,andrewyager/django-admin-bootstrapped,avara1986/django-admin-bootstrapped,merlian/django-admin-bootstrapped,bformet/django-admin-bootstrapped,bformet/django-admin-bootstrapped,benthomasson/django-admin-bootstrapped,benthomasson/django-admin-bootstrapped,mynksngh/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin,Corner1024/django-admin-bootstrapped,benthomasson/django-admin-bootstrapped,avara1986/django-admin-bootstrapped,jmagnusson/django-admin-bootstrapped,sn0wolf/django-admin-bootstrapped,kevingu1003/django-admin-bootstrapped,merlian/django-admin-bootstrapped,squallcs12/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin-new,askinteractive/mezzanine-advanced-admin,askinteractive/mezzanine-advanced-admin-new,django-admin-bootstrapped/django-admin-bootstrapped,IMAmuseum/django-admin-bootstrapped,xrmx/django-admin-bootstrapped,kevingu1003/django-admin-bootstrapped,mynksngh/django-admin-bootstrapped,Corner1024/django-admin-bootstrapped,avara1986/django-admin-bootstrapped,Corner1024/django-admin-bootstrapped,pombredanne/django-admin-bootstrapped,jmagnusson/django-admin-bootstrapped,django-admin-bootstrapped/django-admin-bootstrapped,sn0wolf/django-admin-bootstrapped,jmagnusson/django-admin-bootstrapped,squallcs12/django-admin-bootstrapped,xrmx/django-admin-bootstrapped,andrewyager/django-admin-bootstrapped,merlian/django-admin-bootstrapped
test_django_admin_bootstrapped/test_django_admin_bootstrapped/models.py
test_django_admin_bootstrapped/test_django_admin_bootstrapped/models.py
from django.db import models


class TestMe(models.Model):
    test_m2m = models.ManyToManyField('self', blank=True, help_text="Lorem dolor")
    test_ip = models.GenericIPAddressField(help_text="Lorem dolor")
    test_url = models.URLField(help_text="Lorem dolor")
    test_int = models.IntegerField(help_text="Lorem dolor")
    test_img = models.ImageField(upload_to='dummy', blank=True)
    test_file = models.FileField(upload_to='dummy', blank=True)
    test_date = models.DateField(help_text="Lorem dolor")
    test_char = models.CharField(max_length=50, help_text="Lorem dolor")
    test_bool = models.BooleanField(help_text="Lorem dolor", default=False)
    test_time = models.TimeField(help_text="Lorem dolor")
    test_slug = models.SlugField(help_text="Lorem dolor")
    test_text = models.TextField(help_text="Lorem dolor")
    test_email = models.EmailField(help_text="Lorem dolor")
    test_float = models.FloatField(help_text="Lorem dolor")
    test_bigint = models.BigIntegerField(help_text="Lorem dolor")
    test_positive_integer = models.PositiveIntegerField(help_text="Lorem dolor")
    test_decimal = models.DecimalField(max_digits=5, decimal_places=2, help_text="Lorem dolor")
    test_comma_separated_int = models.CommaSeparatedIntegerField(max_length=100, help_text="Lorem dolor")
    test_small_int = models.SmallIntegerField(help_text="Lorem dolor")
    test_nullbool = models.NullBooleanField(help_text="Lorem dolor")
    test_filepath = models.FilePathField(blank=True, help_text="Lorem dolor")
    test_positive_small_int = models.PositiveSmallIntegerField(help_text="Lorem dolor")

    def get_absolute_url(self):
        return ''

    class Meta:
        verbose_name = u'Test me'
        verbose_name_plural = u'Lot of Test me'


class TestMeProxyForFieldsets(TestMe):
    class Meta:
        proxy = True
        verbose_name = u'Test me fieldsets'
        verbose_name_plural = u'Lot of Test me fieldsets'


class TestThat(models.Model):
    that = models.ForeignKey(TestMe, help_text="Lorem dolor")
    test_ip = models.GenericIPAddressField(help_text="Lorem dolor")
    test_url = models.URLField(help_text="Lorem dolor")
    test_int = models.IntegerField(help_text="Lorem dolor")
    test_date = models.DateField(help_text="Lorem dolor")
    test_bool = models.BooleanField(help_text="Lorem dolor", default=True)

    class Meta:
        verbose_name = u'Test that'
        verbose_name_plural = u'Lot of Test that'


class TestSortable(models.Model):
    that = models.ForeignKey(TestMe)
    position = models.PositiveSmallIntegerField("Position")
    test_char = models.CharField(max_length=5)

    class Meta:
        ordering = ('position', )
from django.db import models


class TestMe(models.Model):
    test_m2m = models.ManyToManyField('self', blank=True, help_text="Lorem dolor")
    test_ip = models.IPAddressField(help_text="Lorem dolor")
    test_url = models.URLField(help_text="Lorem dolor")
    test_int = models.IntegerField(help_text="Lorem dolor")
    test_img = models.ImageField(upload_to='dummy', blank=True)
    test_file = models.FileField(upload_to='dummy', blank=True)
    test_date = models.DateField(help_text="Lorem dolor")
    test_char = models.CharField(max_length=50, help_text="Lorem dolor")
    test_bool = models.BooleanField(help_text="Lorem dolor", default=False)
    test_time = models.TimeField(help_text="Lorem dolor")
    test_slug = models.SlugField(help_text="Lorem dolor")
    test_text = models.TextField(help_text="Lorem dolor")
    test_email = models.EmailField(help_text="Lorem dolor")
    test_float = models.FloatField(help_text="Lorem dolor")
    test_bigint = models.BigIntegerField(help_text="Lorem dolor")
    test_positive_integer = models.PositiveIntegerField(help_text="Lorem dolor")
    test_decimal = models.DecimalField(max_digits=5, decimal_places=2, help_text="Lorem dolor")
    test_comma_separated_int = models.CommaSeparatedIntegerField(max_length=100, help_text="Lorem dolor")
    test_small_int = models.SmallIntegerField(help_text="Lorem dolor")
    test_nullbool = models.NullBooleanField(help_text="Lorem dolor")
    test_filepath = models.FilePathField(blank=True, help_text="Lorem dolor")
    test_positive_small_int = models.PositiveSmallIntegerField(help_text="Lorem dolor")

    def get_absolute_url(self):
        return ''

    class Meta:
        verbose_name = u'Test me'
        verbose_name_plural = u'Lot of Test me'


class TestMeProxyForFieldsets(TestMe):
    class Meta:
        proxy = True
        verbose_name = u'Test me fieldsets'
        verbose_name_plural = u'Lot of Test me fieldsets'


class TestThat(models.Model):
    that = models.ForeignKey(TestMe, help_text="Lorem dolor")
    test_ip = models.IPAddressField(help_text="Lorem dolor")
    test_url = models.URLField(help_text="Lorem dolor")
    test_int = models.IntegerField(help_text="Lorem dolor")
    test_date = models.DateField(help_text="Lorem dolor")
    test_bool = models.BooleanField(help_text="Lorem dolor", default=True)

    class Meta:
        verbose_name = u'Test that'
        verbose_name_plural = u'Lot of Test that'


class TestSortable(models.Model):
    that = models.ForeignKey(TestMe)
    position = models.PositiveSmallIntegerField("Position")
    test_char = models.CharField(max_length=5)

    class Meta:
        ordering = ('position', )
apache-2.0
Python
63f91c2459cb98cf0cfb1e60d298944212d9d639
add missing file in symm
sunqm/pyscf,gkc1000/pyscf,sunqm/pyscf,sunqm/pyscf,gkc1000/pyscf,sunqm/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf
symm/addons.py
symm/addons.py
#
# Author: Qiming Sun <[email protected]>
#

import numpy
import pyscf.lib.logger


def label_orb_symm(mol, irrep_name, symm_orb, mo):
    nmo = mo.shape[1]
    s = mol.intor_symmetric('cint1e_ovlp_sph')
    mo_s = numpy.dot(mo.T, s)
    orbsym = [None] * nmo
    for i,ir in enumerate(irrep_name):
        moso = numpy.dot(mo_s, symm_orb[i])
        for j in range(nmo):
            if not numpy.allclose(moso[j], 0, atol=1e-6):
                if orbsym[j] is None:
                    orbsym[j] = ir
                else:
                    raise ValueError('orbital %d not symmetrized' % j)
    pyscf.lib.logger.debug(mol, 'irreps of each MO %s', str(orbsym))
    return orbsym


def symmetrize_orb(mol, irrep_name, symm_orb, mo):
    s = mol.intor_symmetric('cint1e_ovlp_sph')
    mo_s = numpy.dot(mo.T, s)
    mo1 = 0
    for csym in symm_orb:
        ovlpso = reduce(numpy.dot, (csym.T, s, csym))
        sc = numpy.linalg.solve(ovlpso, numpy.dot(mo_s, csym).T)
        mo1 = mo1 + numpy.dot(csym, sc)
    return mo1


if __name__ == "__main__":
    from pyscf import gto
    from pyscf import scf
    mol = gto.Mole()
    mol.build(
        atom = [['H', (0,0,0)], ['H', (0,0,1)]],
        basis = {'H': 'cc-pvdz'},
        symmetry = 1
    )
    mf = scf.RHF(mol)
    mf.scf()
    print label_orb_symm(mol, mol.irrep_name, mol.symm_orb, mf.mo_coeff)
apache-2.0
Python
005872ea37dfdd4b8ab8b16e3c5b0083fb86cdb9
Add settings file
ninjawil/garden-web-server,ninjawil/garden-web-server,ninjawil/garden-web-server,ninjawil/garden-web-server
scripts/settings.py
scripts/settings.py
#!/usr/bin/env python

#===============================================================================
# GLOBAL CONSTANTS
#===============================================================================

# --- Set up GPIO referencing ---
broadcom_ref = True

if broadcom_ref:
    PIN_11 = 17
    PIN_12 = 18
    PIN_13 = 27
    PIN_15 = 22
    PIN_37 = 26
    PIN_38 = 20
    PIN_40 = 21
else:
    PIN_11 = 11
    PIN_12 = 12
    PIN_13 = 13
    PIN_15 = 15
    PIN_37 = 37
    PIN_38 = 38
    PIN_40 = 40

# --- System set up ---
UPDATE_RATE = 300  # seconds
W1_DEVICE_PATH = '/sys/bus/w1/devices/'
DEBOUNCE_MICROS = 0.250  # seconds
SYS_FOLDER = '/home/pi/weather'
DATA_FOLDER = '/data/'
TICK_DATA = 'tick_count'

# --- RRDTool set up ---
RRDTOOL_RRD_FILE = 'weather_data.rrd'
RRDTOOL_HEARTBEAT = 2  # multiplier

# XML filename: Consolidation type, Resolution (minutes), Recording Period (days)
RRDTOOL_RRA = {'wd_last_1d.xml': ('LAST', 5, 1.17),
               'wd_avg_2d.xml':  ('AVERAGE', 30, 2),
               'wd_avg_1w.xml':  ('AVERAGE', 120, 7),
               'wd_avg_1m.xml':  ('AVERAGE', 240, 31),
               'wd_avg_3m.xml':  ('AVERAGE', 720, 93),
               'wd_avg_1y.xml':  ('AVERAGE', 1440, 365),
               'wd_min_1y.xml':  ('MIN', 1440, 365),
               'wd_max_1y.xml':  ('MAX', 1440, 365)}

SENSOR_SET= {
    'inside_temp':  (True, PIN_37, '*C', -50, 100, 'GAUGE'),
    'inside_hum':   (True, PIN_37, '%', -1, 101, 'GAUGE'),
    'door_open':    (True, PIN_40, '', -1, 2, 'GAUGE'),
    'precip_rate':  (True, PIN_38, 'mm', -5, 50, 'GAUGE'),
    'precip_acc':   (True, PIN_38, 'mm', -5, 500, 'GAUGE'),
    'outside_temp': (True, '28-0414705bceff', '*C', -50, 50, 'GAUGE'),
    'sw_status':    (True, '', '', -1, 2, 'GAUGE'),
    'sw_power':     (True, '', 'W', -9999, 9999, 'GAUGE')}
mit
Python
746dd90a17d756f5601ddcbbd6c2de6fed9c75d5
add splitter script
nogizhopaboroda/musfinder,nogizhopaboroda/musfinder,nogizhopaboroda/musfinder
scripts/splitter.py
scripts/splitter.py
import sys
import os
import json
import errno

content = ""
for line in sys.stdin:
    content += line

data = json.loads(content)
print('ok')

for item in data:
    filename = "items_data/{0}.json".format(item['_key'])
    print("creating {0}".format(filename))
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    with open(filename, 'w') as file_:
        file_.write(json.dumps(item, indent=4))

print(len(data))
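A minimal usage sketch (an editorial note, not part of the commit); the only input shape the script accepts is inferred from its use of item['_key']:

# feed a JSON array of objects via stdin, e.g.:
#   cat items.json | python scripts/splitter.py
# with items.json shaped like:
#   [{"_key": "abc123", "title": "..."}, {"_key": "def456", "title": "..."}]
# each item is written, pretty-printed, to items_data/<_key>.json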
mit
Python
7a49dfb41888b6afed4ff3dca3987f641e497056
Add PageHandler
kkstu/Torweb,kkstu/Torweb
handler/page.py
handler/page.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio

from BaseHandler import BaseHandler


# 404 Page
class Page404Handler(BaseHandler):
    def get(self):
        self.render('page/404.html', title="404")


# 500 Page
class Page500Handler(BaseHandler):
    def get(self):
        self.render('page/500.html', title="500")


# Blank Page
class BlankHandler(BaseHandler):
    def get(self):
        self.render('page/blank.html', title="Blank")
mit
Python
d3248cebcb1ef161dfc706d99b4d361205fc9fbe
Add wsgi file
jpgneves/t-10_server,jpgneves/t-10_server
t10server.wsgi
t10server.wsgi
from teeminus10_api import app as application
mit
Python
be249ac1ce39977e4448e320f80d3b642f2c9193
Add original code
nickfrostatx/gitcontrib
gitcontrib.py
gitcontrib.py
#!/usr/bin/env python3

# TODO
# Multiple source extensions
# Handle different name same email as one contributor
# Handle different email same name as one contributor
# Handle github-side edits being double-counted

import subprocess as sp
from sys import argv, exit
from os import chdir, devnull


def usage():
    print("Usage:\ngitcontrib <Path> <File Extension>")


def pretty_output(loc, auth_loc, expected_contrib):
    print("\033[37;1mPROJECT CONTRIBUTIONS:\033[0m")
    print("\033[37mThe project has \033[34;1m%d\033[0;37m lines of code.\033[0m" % loc)
    print()
    print("\033[37mContributors (%d):\033[0m" % len(auth_loc.keys()))
    print('', end=' ')
    print('\n '.join(auth_loc.keys()))
    print()
    print("\033[37mContribution breakdown:\033[0m")
    outs = []
    for a in auth_loc:
        outs.append((a, auth_loc[a]))
    outs.sort(key = lambda u: u[1])
    outs.reverse()
    for a in outs:
        if a[1] >= expected_contrib*loc:
            print(' ', a[0], ' has contributed ', '\033[32;1m', a[1], '\033[0m', ' lines of code ', '(\033[032;1m%.2f%%\033[0m) ' % (a[1]*100/loc), sep="")
        else:
            print(' ', a[0], ' has contributed ', '\033[31;1m', a[1], '\033[0m', ' lines of code ', '(\033[031;1m%.2f%%\033[0m) ' % (a[1]*100/loc), sep="")


def git_contrib(location, ext):
    try:
        chdir(location)
    except:
        print("Error accessing %s (check file permissions?)" % location)
        return 1
    try:
        sp.check_call(['ls', '.git'], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
    except:
        print("%s is not a git repository" % location)
        return 1

    (s, author_out) = sp.getstatusoutput("git log | grep Author | sort -u")
    if s != 0:
        print(author_out)
        return 0
    authors = author_out.split('\n')
    authors = [a.replace("Author: ", "") for a in authors]
    try:
        assert len(authors) > 0
    except AssertionError:
        print("No git-commit authors found")
        return 1

    files = sp.getoutput("find . -iname \*.%s" % ext).replace('\n', ' ')
    if len(files):
        try:
            loc = int(sp.getoutput("wc -l %s" % files).split("\n")[-1].split()[0])
            assert loc >= 0
        except:
            print("Error in parsing files (check file permissions?)")
            return 1
    else:
        print("No files with extension '%s' in %s" % (ext, location))
        return 1

    auth_loc = {}
    for a in authors:
        aloc = 0
        try:
            name = a[0:a.index("<") - 1]
        except:
            name = a
        for f in files.split():
            aloc += sum([int(x) for x in sp.getoutput("git blame %s | grep \"%s\" | wc -l" % (f, name)).split('\n')])
        auth_loc[a] = aloc

    pretty_output(loc, auth_loc, 1 / len(authors))
    return 0


def main():
    if (len(argv) != 3):
        usage()
        return 1
    return git_contrib(argv[1], argv[2])


if __name__ == '__main__':
    exit(main())
mit
Python
2866c8fbb3549ffd2405c5b13338a3fdf87a6c5d
add checks
sliceofcode/dogbot,slice/dogbot,slice/dogbot,slice/dogbot,sliceofcode/dogbot
dog/checks.py
dog/checks.py
from discord.ext import commands

owner_id = '97104885337575424'


def is_owner():
    return commands.check(lambda ctx: ctx.message.author.id == owner_id)
mit
Python
482859488865fe9b1e05a923e7aafeb7e090f049
Create volumeBars.py
DarkAce65/rpi-led-matrix,DarkAce65/rpi-led-matrix
python/volumeBars.py
python/volumeBars.py
#!/usr/bin/env python

from rgbmatrix import RGBMatrix
from random import randint
import time

rows = 16
chains = 1
parallel = 1
ledMatrix = RGBMatrix(rows, chains, parallel)
height = ledMatrix.height
width = ledMatrix.width
nextFrame = ledMatrix.CreateFrameCanvas()

while True:
    # randint is inclusive on both ends, so cap at width-1/height-1
    # to stay inside the canvas
    nextFrame.SetPixel(randint(0, width - 1), randint(0, height - 1), randint(0, 255), randint(0, 255), randint(0, 255))
    nextFrame = ledMatrix.SwapOnVSync(nextFrame)
mit
Python
15839dd4b37761e49599f6b278f6bd6e6d18b1e5
Add initial rpc implementation example
victorpoluceno/xwing
examples/mailbox/rpc.py
examples/mailbox/rpc.py
import sys
sys.path.append('.')  # NOQA

from xwing.mailbox import spawn, run, stop


class Server(object):

    def hello_world(self):
        return 'Hello World!'

    def run(self):
        async def rpc_server(mailbox, server):
            while True:
                function, pid = await mailbox.recv()
                print('Got call from: ', pid)
                result = getattr(server, function)()
                await mailbox.send(pid, result)

        spawn(rpc_server, self, name='rpc_server')


class Client(object):

    def __init__(self, server_pid):
        self.server_pid = server_pid

    def call(self, function):
        async def dispatch(mailbox, function):
            await mailbox.send(self.server_pid, function, mailbox.pid)
            result = await mailbox.recv()
            print(result)

        spawn(dispatch, function)


if __name__ == '__main__':
    # python examples/mailbox/rpc.py
    server = Server()
    server.run()

    client = Client('[email protected]')
    client.call('hello_world')

    try:
        run()
    except KeyboardInterrupt:
        print('Stopping...')
        stop()
isc
Python
f09bddb89681fdd03ac190a1caa4847b3da7a61f
add script for reinserting unparsed sentences into the parser output
rsling/cow,rsling/cow,rsling/cow,rsling/cow
src/corex/save_unparsables.py
src/corex/save_unparsables.py
#!/usr/bin/python

# This script takes the input file (one-sentence-per-line) for the
# Berkeley parser (topological fields model) and compares it to the
# parser's output file. Sentences missing from the parser's output
# (unparsables) are re-inserted from the parser's input file,
# one sentence per line.

import sys
import codecs
import re

original = codecs.open(sys.argv[1], 'r', 'utf-8')
parsed = codecs.open(sys.argv[2], 'r', 'utf-8')

pos_and_token = re.compile('\(([^ ]+ (?:[^ )]+|\)))\)')


# This takes a line of the Berkeley topological parser's
# output, returns a string of tokens separated by whitespace
def get_tokens(line):
    pt = pos_and_token.findall(line)
    if len(pt) > 0:
        pt = [i.split(" ") for i in pt]
        t = [i[1] for i in pt]
        s = " ".join(t)
    else:
        s = ''
    return(s)


for oline in original:
    oline = oline.strip()
    pline = parsed.readline().strip()
    pline_tokens = get_tokens(pline)
    if oline == pline_tokens:
        print(pline.encode('utf-8'))
    else:
        print(oline.encode('utf-8'))
        if not pline_tokens == "":
            for ooline in original:
                ooline = ooline.strip()
                if not ooline == pline_tokens:
                    print(ooline.encode('utf-8'))
                else:
                    print(pline.encode('utf-8'))
                    break
bsd-2-clause
Python
7d52d1efaf5bb07bfbb66e78f7c51e92b6c531dd
Use BytesIO. Closes #17
bradleyg/django-ajaximage,bradleyg/django-ajaximage,subhaoi/kioskuser,bradleyg/django-ajaximage,subhaoi/kioskuser,subhaoi/kioskuser
ajaximage/image.py
ajaximage/image.py
import os
from PIL import Image, ImageOps
try:
    from StringIO import StringIO as IO
except ImportError:
    from io import BytesIO as IO
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile


def resize(file_, max_width=0, max_height=0, crop=0):
    max_width = int(max_width)
    max_height = int(max_height)
    crop = int(crop)

    if(max_width is 0 and max_height is 0):
        return file_

    max_width = 9999 if max_width is 0 else max_width
    max_height = 9999 if max_height is 0 else max_height
    size = (max_width, max_height)

    image = Image.open(file_)

    if(image.mode == 'RGBA'):
        image.load()
        background = Image.new('RGB', image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[3])
        image = background

    temp = IO()

    if(crop is 1):
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
    else:
        image.thumbnail(size, Image.ANTIALIAS)

    image.save(temp, 'jpeg')
    temp.seek(0)

    return SimpleUploadedFile(file_.name, temp.read(), content_type='image/jpeg')
import os
from PIL import Image, ImageOps
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile


def resize(file_, max_width=0, max_height=0, crop=0):
    max_width = int(max_width)
    max_height = int(max_height)
    crop = int(crop)

    if(max_width is 0 and max_height is 0):
        return file_

    max_width = 9999 if max_width is 0 else max_width
    max_height = 9999 if max_height is 0 else max_height
    size = (max_width, max_height)

    image = Image.open(file_)

    if(image.mode == 'RGBA'):
        image.load()
        background = Image.new('RGB', image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[3])
        image = background

    temp = StringIO()

    if(crop is 1):
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
    else:
        image.thumbnail(size, Image.ANTIALIAS)

    image.save(temp, 'jpeg')
    temp.seek(0)

    return SimpleUploadedFile(file_.name, temp.read(), content_type='image/jpeg')
mit
Python
5f9c6e49597abe07a74cd2e7370216bd0fc57cd4
add topology
YYMo/sdn_project,YYMo/sdn_project,YYMo/sdn_project,YYMo/sdn_project
scripts/topology.py
scripts/topology.py
#!/usr/bin/python

from mininet.net import Mininet
from mininet.node import Controller, OVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
import sys


def multiControllerNet( number ):
    "Create a network from semi-scratch with multiple controllers."
    net = Mininet( controller=Controller, switch=OVSSwitch, build=False )

    print "*** Creating (reference) controllers"
    c0 = net.addController( 'c0' , port=(7700))

    s_count = int(number)
    h_count = s_count * 2
    # sys.exit("END");

    hosts = [0] * h_count
    switches = [0] * s_count

    for i in range(h_count):
        hosts[i] = net.addHost('h' + str(i))

    for i in range(s_count):
        switches[i] = net.addSwitch('s' + str(i))

    print "*** Creating links between hosts and #switch"
    for i in range(s_count):
        net.addLink( switches[i],hosts[i * 2] )
        net.addLink( switches[i],hosts[i * 2 + 1] )

    print "*** Creating links between switches"
    for i in range(s_count-1):
        net.addLink( switches[i],switches[i+1] )

    print "*** Starting network"
    net.build()
    c0.start()
    for i in range(s_count):
        switches[i].start( [c0] )

    print "*** Testing network"
    # net.pingAll()

    print "*** Running CLI"
    CLI( net )

    print "*** Stopping network"
    net.stop()


if __name__ == '__main__':
    setLogLevel( 'info' )  # for CLI output
    if len(sys.argv) < 3:
        print "Usage: sudo ./topo1.py -s [switch number]\n"
        sys.exit(1)
    elif sys.argv[1] == "-s":
        multiControllerNet(sys.argv[2])
apache-2.0
Python
4ff6b846311a0f7bd6cfcf2e661a7c53061406fe
Add command to print vault info
carsonmcdonald/glacier-cmd
glaciercmd/command_vault_info.py
glaciercmd/command_vault_info.py
import boto


class CommandVaultInfo(object):

    def execute(self, args, config):
        glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))

        try:
            vault = glacier_connection.get_vault(args[2])
            print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
        except:
            print "Vault named '{}' does not exist.".format(args[2])

    def accept(self, args):
        return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'


def command_init():
    return CommandVaultInfo()
mit
Python
adcbdc06f0c476bc4c24e8c69d06cffbb6726a9f
Add migration
OpenVolunteeringPlatform/django-ovp-organizations,OpenVolunteeringPlatform/django-ovp-organizations
ovp_organizations/migrations/0023_auto_20170712_1704.py
ovp_organizations/migrations/0023_auto_20170712_1704.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-12 17:04
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('ovp_organizations', '0022_auto_20170613_1424'),
    ]

    operations = [
        migrations.AlterField(
            model_name='organization',
            name='address',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='ovp_core.SimpleAddress', verbose_name='address'),
        ),
    ]
agpl-3.0
Python
582ebd448508625ed2c9f362aaafc3fc46e60df0
Add unit tests for security_scan
opnfv/functest,opnfv/functest,mywulin/functest,mywulin/functest
functest/tests/unit/features/test_security_scan.py
functest/tests/unit/features/test_security_scan.py
#!/usr/bin/env python

# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

# pylint: disable=missing-docstring

import logging
import unittest

from functest.opnfv_tests.features import security_scan
from functest.utils import constants


class SecurityScanTesting(unittest.TestCase):

    logging.disable(logging.CRITICAL)

    def setUp(self):
        self.sscan = security_scan.SecurityScan()

    def test_init(self):
        self.assertEqual(self.sscan.project_name, "securityscanning")
        self.assertEqual(self.sscan.case_name, "security_scan")
        self.assertEqual(
            self.sscan.repo,
            constants.CONST.__getattribute__("dir_repo_securityscan"))
        self.assertEqual(
            self.sscan.cmd, (
                '. {0}/stackrc && cd {1} && '
                'python security_scan.py --config config.ini && '
                'cd -'.format(
                    constants.CONST.__getattribute__("dir_functest_conf"),
                    self.sscan.repo)))


if __name__ == "__main__":
    unittest.main(verbosity=2)
apache-2.0
Python
25f5ff62e1652e3293d12e3e73e44e7d7c21463c
upgrade incs
rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs
bin/upgrade_fortran_inc.py
bin/upgrade_fortran_inc.py
#!/usr/bin/env python3
# -*- coding: utf8 -*-

# py f:\dev\progs\bin\clean_fortran.py ricks.f90
# f:\f90ppr\moware\f90ppr.exe < tmp.f90 > out.txt

import sys, os, subprocess, shutil
sys.path.append(r'C:\msys64\mingw64\bin')

f90ppr_exe = r"F:\f90ppr\moware\f90ppr"


def main(fname):
    # tmpname = 'tmp.f90'
    if not os.path.isfile(fname):
        raise Exception(f'{fname} not found!')

    base, ext = os.path.splitext(fname)
    outname = base+'.ppr'+ext
    outfile = open(outname,'wb')

    cmd = [ f90ppr_exe ]
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=outfile)
    # maximum line length (2-132)
    p.stdin.write(b'$define FPPR_MAX_LINE 132\n')
    # keywords case: FPPR_LEAVE, FPPR_UPPER, FPPR_LOWER
    p.stdin.write(b'$define FPPR_KWD_CASE FPPR_LOWER\n')
    # variables case: FPPR_LEAVE, FPPR_UPPER, FPPR_LOWER
    p.stdin.write(b'$define FPPR_USR_CASE FPPR_LEAVE\n')
    # indentation (0-60)
    p.stdin.write(b'$define FPPR_STP_INDENT 4\n')
    # input format: 0=free format
    p.stdin.write(b'$define FPPR_FXD_IN 1\n')
    # output format: 0=free format
    p.stdin.write(b'$define FPPR_FXD_OUT 0\n')

    with open(fname,'rb') as infile:
        for l in infile.readlines():
            p.stdin.write(l)
    p.stdin.close()
    retcode = p.wait()
    print(f'retcode={retcode}')
    outfile.close()

    # overwrite file
    shutil.copy(outname, fname)

    # remove temporary
    if os.path.isfile(outname):
        os.remove(outname)


if __name__=="__main__":
    f = sys.argv[1]
    main(f)
apache-2.0
Python
0fe4a3c3a1d31230c9b5c931ff1e33584f1ccd4e
Create maximum-length-of-pair-chain.py
kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,yiwen-luo/LeetCode
Python/maximum-length-of-pair-chain.py
Python/maximum-length-of-pair-chain.py
# Time:  O(nlogn)
# Space: O(1)

# You are given n pairs of numbers.
# In every pair, the first number is always smaller than the second number.
#
# Now, we define a pair (c, d) can follow another pair (a, b)
# if and only if b < c. Chain of pairs can be formed in this fashion.
#
# Given a set of pairs, find the length of the longest chain which can be formed.
# You needn't use up all the given pairs. You can select pairs in any order.
#
# Example 1:
# Input: [[1,2], [2,3], [3,4]]
# Output: 2
# Explanation: The longest chain is [1,2] -> [3,4]
# Note:
# The number of given pairs will be in the range [1, 1000].

class Solution(object):
    def findLongestChain(self, pairs):
        """
        :type pairs: List[List[int]]
        :rtype: int
        """
        pairs.sort(key=lambda x: x[1])
        cnt, i = 0, 0
        for j in xrange(len(pairs)):
            if j == 0 or pairs[i][1] < pairs[j][0]:
                cnt += 1
                i = j
        return cnt
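A quick editorial sanity check (not part of the commit): sorting by pair end and greedily taking any pair whose start passes the last chosen end is the standard interval-scheduling argument, and the expected outputs below follow from the problem statement above.

s = Solution()
print(s.findLongestChain([[1, 2], [2, 3], [3, 4]]))  # 2: [1,2] -> [3,4]
print(s.findLongestChain([[1, 2], [7, 8], [4, 5]]))  # 3: all three pairs chain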
mit
Python
0a0f17044b90b6897cd931a2e9d002b764b27b00
Add pytorch-lightning bf16 training example (#5526)
intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL,intel-analytics/BigDL,yangw1234/BigDL,intel-analytics/BigDL
python/nano/tutorial/training/pytorch-lightning/lightning_train_bf16.py
python/nano/tutorial/training/pytorch-lightning/lightning_train_bf16.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
from torchvision import transforms
from torchvision.datasets import OxfordIIITPet
from torch.utils.data.dataloader import DataLoader

from torchvision.models import resnet18
from bigdl.nano.pytorch import Trainer
import pytorch_lightning as pl


class MyLightningModule(pl.LightningModule):

    def __init__(self):
        super().__init__()
        self.model = resnet18(pretrained=True)
        num_ftrs = self.model.fc.in_features
        # Here the size of each output sample is set to 37.
        self.model.fc = torch.nn.Linear(num_ftrs, 37)
        self.criterion = torch.nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        output = self.model(x)
        loss = self.criterion(output, y)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        output = self.forward(x)
        loss = self.criterion(output, y)
        pred = torch.argmax(output, dim=1)
        acc = torch.sum(y == pred).item() / (len(y) * 1.0)
        metrics = {'test_acc': acc, 'test_loss': loss}
        self.log_dict(metrics)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)


def create_dataloaders():
    train_transform = transforms.Compose([transforms.Resize(256),
                                          transforms.RandomCrop(224),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ColorJitter(brightness=.5, hue=.3),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    val_transform = transforms.Compose([transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

    # Apply data augmentation to the train_dataset
    train_dataset = OxfordIIITPet(root="/tmp/data", transform=train_transform, download=True)
    val_dataset = OxfordIIITPet(root="/tmp/data", transform=val_transform)

    # obtain training indices that will be used for validation
    indices = torch.randperm(len(train_dataset))
    val_size = len(train_dataset) // 4
    train_dataset = torch.utils.data.Subset(train_dataset, indices[:-val_size])
    val_dataset = torch.utils.data.Subset(val_dataset, indices[-val_size:])

    # prepare data loaders
    train_dataloader = DataLoader(train_dataset, batch_size=32)
    val_dataloader = DataLoader(val_dataset, batch_size=32)

    return train_dataloader, val_dataloader


if __name__ == "__main__":
    model = MyLightningModule()
    train_loader, val_loader = create_dataloaders()

    # Bfloat16 Training
    #
    # BFloat16 is a custom 16-bit floating point format for machine learning
    # that's comprised of one sign bit, eight exponent bits, and seven mantissa bits.
    # BFloat16 has a greater "dynamic range" than FP16, which means it offers
    # better numerical stability than FP16 while delivering increased performance
    # and reducing memory usage.
    #
    # In BigDL-Nano, you can easily enable BFloat16 mixed precision by setting precision='bf16'
    #
    # Note: Using BFloat16 precision with torch < 1.12 may result in extremely slow training.
    trainer = Trainer(max_epochs=5, precision='bf16')
    trainer.fit(model, train_dataloaders=train_loader)
    trainer.validate(model, dataloaders=val_loader)

    # You can also set use_ipex=True and precision='bf16' to enable ipex optimizer fusion
    # for bf16 to gain more acceleration from the BFloat16 data type.
    trainer = Trainer(max_epochs=5, use_ipex=True, precision='bf16')
    trainer.fit(model, train_dataloaders=train_loader)
    trainer.validate(model, dataloaders=val_loader)
apache-2.0
Python
4f586f16eaf3e06d347bf9976a02005c70cd7e13
Create installTests.py
jmategk0/UbuntuSetup,jmategk0/conductor
installTests.py
installTests.py
import unittest

import install


class TestOperationWrapperMethods(unittest.TestCase):

    def setUp(self):
        # TODO: Write Tests
        self.test_dataset = ""
mit
Python
95182581beebbd181b20b23ee02657cb18347dd6
update spec: update read_spectrum.py: add read_spectrum for elodie
hypergravity/bopy,hypergravity/bopy
bopy/spec/read_spectrum.py
bopy/spec/read_spectrum.py
# -*- coding: utf-8 -*-
"""

Author
------
Bo Zhang

Email
-----
[email protected]

Created on
----------
- Tue Mar  8 15:26:00 2016    read_spectrum

Modifications
-------------
-

Aims
----
- read various kinds of spectra

"""

import os
import numpy as np
from astropy.io import fits
from .spec import Spec


def reconstruct_wcs_coord_from_fits_header(hdr, dim=1):
    """ reconstruct wcs coordinates (e.g., wavelength array) """
    # assert dim is not larger than limit
    assert dim <= hdr['NAXIS']

    # get keywords
    crval = hdr['CRVAL%d' % dim]
    cdelt = hdr['CDELT%d' % dim]
    crpix = hdr['CRPIX%d' % dim]
    naxis = hdr['NAXIS%d' % dim]

    # reconstruct wcs coordinates
    coord = np.arange(1 - crpix, naxis + 1 - crpix) * cdelt + crval
    return coord


def read_spectrum_elodie_r42000(fp):
    """ read spectrum from ELODIE library (R42000) """
    # assert the file exists
    assert os.path.exists(fp)

    # read fits
    hl = fits.open(fp)

    # reconstruct wave array
    wave = reconstruct_wcs_coord_from_fits_header(hl[0].header, dim=1)
    # flux
    flux = hl[0].data
    # flux err
    flux_err = hl[2].data
    # flux ivar
    flux_ivar = 1 / flux_err ** 2.

    # reconstruct spec
    sp = Spec(data=[wave, flux, flux_ivar, flux_err],
              names=['wave', 'flux', 'flux_ivar', 'flux_err'])
    return sp
bsd-3-clause
Python
39bb6cd51ce5351bfd93adac7b083a52b25590f8
Create 6kyu_vending_machine.py
Orange9000/Codewars,Orange9000/Codewars
Solutions/6kyu/6kyu_vending_machine.py
Solutions/6kyu/6kyu_vending_machine.py
class VendingMachine():

    def __init__(self, items, money):
        self.items = dict(enumerate(items))
        self.money = money

    def vend(self, selection, item_money):
        try:
            n,v = [(n,self.items[n]) for n in self.items if (self.items[n]['code']).lower() == selection.lower()][0]
        except:
            return "Invalid selection! : Money in vending machine = {:.2f}".format(self.money)

        if item_money < v['price']:
            return "Not enough money!"

        if v['quantity'] <= 0:
            return "{}: Out of stock!".format(v['name'])

        if item_money > v['price']:
            change = item_money - v['price']
            v['quantity'] = v['quantity'] - 1
            self.money += (-change + item_money)
            self.items[n] = v
            return "Vending {} with {:.2f} change.".format(v['name'], change)
        else:
            v['quantity'] = v['quantity'] - 1
            self.money += item_money
            self.items[n] = v
            return "Vending {}".format(v['name'])
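A minimal usage sketch (an editorial addition, not part of the commit), assuming items are dicts with the 'code', 'name', 'price' and 'quantity' keys that vend() reads:

items = [{'code': 'A1', 'name': 'Cola', 'price': 1.50, 'quantity': 2}]
machine = VendingMachine(items, money=10.0)
print(machine.vend('a1', 2.00))  # Vending Cola with 0.50 change.
print(machine.vend('B9', 1.00))  # Invalid selection! : Money in vending machine = 11.50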
mit
Python
5e96dd2846660f14e1d7b691ba928da63b699f19
Add support for Spotify
foauth/foauth.org,foauth/foauth.org,foauth/foauth.org
services/spotify.py
services/spotify.py
from oauthlib.common import add_params_to_uri

import foauth.providers


class Spotify(foauth.providers.OAuth2):
    # General info about the provider
    provider_url = 'https://spotify.com/'
    docs_url = 'https://developer.spotify.com/web-api/endpoint-reference/'
    category = 'Music'

    # URLs to interact with the API
    authorize_url = 'https://accounts.spotify.com/authorize'
    access_token_url = 'https://accounts.spotify.com/api/token'
    api_domain = 'api.spotify.com'

    available_permissions = [
        (None, 'Read your publicly available information'),
        ('playlist-modify', 'Manage your public playlists'),
        ('playlist-modify-private', 'Manage all your playlists (even private)'),
        ('playlist-read-private', 'Access your private playlists'),
        ('user-read-private', 'Access your name, image and subscription details'),
        ('user-read-email', 'Get your real email address'),
    ]

    def get_user_id(self, key):
        r = self.api(key, self.api_domain, u'/v1/me')
        return r.json()[u'id']
bsd-3-clause
Python
062c4bc134f77f9279d18774b954a06566f99c5a
Add logger
cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata,cmu-delphi/delphi-epidata
src/acquisition/covidcast/logger.py
src/acquisition/covidcast/logger.py
"""Structured logger utility for creating JSON logs in Delphi pipelines.""" import logging import sys import threading import structlog def handle_exceptions(logger): """Handle exceptions using the provided logger.""" def exception_handler(etype, value, traceback): logger.exception("Top-level exception occurred", exc_info=(etype, value, traceback)) def multithread_exception_handler(args): exception_handler(args.exc_type, args.exc_value, args.exc_traceback) sys.excepthook = exception_handler threading.excepthook = multithread_exception_handler def get_structured_logger(name=__name__, filename=None, log_exceptions=True): """Create a new structlog logger. Use the logger returned from this in indicator code using the standard wrapper calls, e.g.: logger = get_structured_logger(__name__) logger.warning("Error", type="Signal too low"). The output will be rendered as JSON which can easily be consumed by logs processors. See the structlog documentation for details. Parameters --------- name: Name to use for logger (included in log lines), __name__ from caller is a good choice. filename: An (optional) file to write log output. """ # Configure the underlying logging configuration handlers = [logging.StreamHandler()] if filename: handlers.append(logging.FileHandler(filename)) logging.basicConfig( format="%(message)s", level=logging.INFO, handlers=handlers ) # Configure structlog. This uses many of the standard suggestions from # the structlog documentation. structlog.configure( processors=[ # Filter out log levels we are not tracking. structlog.stdlib.filter_by_level, # Include logger name in output. structlog.stdlib.add_logger_name, # Include log level in output. structlog.stdlib.add_log_level, # Allow formatting into arguments e.g., logger.info("Hello, %s", # name) structlog.stdlib.PositionalArgumentsFormatter(), # Add timestamps. structlog.processors.TimeStamper(fmt="iso"), # Match support for exception logging in the standard logger. structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, # Decode unicode characters structlog.processors.UnicodeDecoder(), # Render as JSON structlog.processors.JSONRenderer() ], # Use a dict class for keeping track of data. context_class=dict, # Use a standard logger for the actual log call. logger_factory=structlog.stdlib.LoggerFactory(), # Use a standard wrapper class for utilities like log.warning() wrapper_class=structlog.stdlib.BoundLogger, # Cache the logger cache_logger_on_first_use=True, ) logger = structlog.get_logger(name) if log_exceptions: handle_exceptions(logger) return logger
mit
Python
934f4ccfc4e34c5486c3d5a57b429742eb9b5915
add algorithms.ml to make format for machine learning
tanghaibao/jcvi,sgordon007/jcvi_062915
algorithms/ml.py
algorithms/ml.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

"""
Machine learning algorithms.
"""

import sys
from optparse import OptionParser
from jcvi.apps.base import ActionDispatcher, debug
debug()


def main():

    actions = (
        ('libsvm', 'convert csv file to LIBSVM format'),
        )
    p = ActionDispatcher(actions)
    p.dispatch(globals())


def libsvm(args):
    """
    %prog libsvm csvfile prefix.ids

    Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping.

    Ga -1
    Gr 1

    So the feature in the first column of csvfile gets scanned with the prefix
    and mapped to different classes. Formatting spec:

    http://svmlight.joachims.org/
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(libsvm.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, prefixids = args
    d = DictFile(prefixids)
    fp = open(csvfile)
    fp.next()
    for row in fp:
        atoms = row.split()
        klass = atoms[0]
        kp = klass.split("_")[0]
        klass = d.get(kp, "0")
        feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])]
        print " ".join([klass] + feats)


if __name__ == '__main__':
    main()
bsd-2-clause
Python
2ca07d4a8893196bbf304bcdac16688505e6123a
Add a management command to register webhooks
CorbanU/corban-shopify,CorbanU/corban-shopify
shopify/webhooks/management/commands/webhookregister.py
shopify/webhooks/management/commands/webhookregister.py
from django.core.management.base import NoArgsCommand

from webhooks.models import Webhook


class Command(NoArgsCommand):
    help = 'Register all created Shopify webhooks'

    def handle_noargs(self, **options):
        Webhook.objects.register()
bsd-3-clause
Python
4aafeac9c238ffb8dc448c87f18abfd7f1f0c9d7
store data dir info
bw2/gemini,heuermh/gemini,bgruening/gemini,heuermh/gemini,arq5x/gemini,bpow/gemini,brentp/gemini,xuzetan/gemini,xuzetan/gemini,bgruening/gemini,bw2/gemini,bpow/gemini,heuermh/gemini,bw2/gemini,udp3f/gemini,arq5x/gemini,udp3f/gemini,brentp/gemini,bgruening/gemini,arq5x/gemini,xuzetan/gemini,brentp/gemini,udp3f/gemini,bpow/gemini,udp3f/gemini,heuermh/gemini,xuzetan/gemini,arq5x/gemini,bgruening/gemini,brentp/gemini,bw2/gemini,bpow/gemini
gemini/anno_info.py
gemini/anno_info.py
#!/usr/bin/env python
"""
Store the path for GEMINI data-dir
"""
from gemini.config import read_gemini_config

config = read_gemini_config()
anno_dirname = config["annotation_dir"]
print anno_dirname
mit
Python
9d6a053441505fae600915e24a263de798843fbb
Add test_weapon class
TsvetaKandilarova/Escape-The-Labyrinth
test_weapon.py
test_weapon.py
import unittest

import weapon


class TestWeapon(unittest.TestCase):

    def setUp(self):
        self.w = weapon.Weapon('bow', 30, 1.0, 1)
        self.w2 = weapon.Weapon('bow', 30, 2.0, 1)

    def test_weapon_init(self):
        self.assertEqual('bow', self.w.type)
        self.assertEqual(30, self.w.damage)
        self.assertEqual(1.0, self.w.critical_strike_percent)
        self.assertEqual(1, self.w.tier)

    def test_weapon_init2(self):
        self.assertEqual('bow', self.w2.type)
        self.assertEqual(30, self.w2.damage)
        self.assertEqual(0.0, self.w2.critical_strike_percent)
        self.assertEqual(1, self.w2.tier)

    def test_weapon_init_with_incorrect_argument(self):
        self.assertEqual(0.0, self.w2.critical_strike_percent)

    def test_weapon_to_string(self):
        self.assertEqual('bow\n30 damage\n100% critical strike percent',
                         str(self.w))

    def test_critical_hit(self):
        self.assertTrue(self.w.critical_hit())
        self.assertFalse(self.w2.critical_hit())


if __name__ == '__main__':
    unittest.main()
mit
Python
7a4b4a116a10f389f6d14321547fa1966b262c0d
Add Hacker News
kitsook/newssum,kitsook/newssum,kitsook/newssum
sources/misc.py
sources/misc.py
# -*- coding: utf-8 -*-

# Copyright (c) 2020 Clarence Ho (clarenceho at gmail dot com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import re
import datetime
from lxml import html
from lxml import etree
import traceback

from logger import logger
from fetcher import read_http_page

from .base import BaseSource
from .base import RSSBase
from .base import RDFBase


class HackerNews(BaseSource):

    def get_id(self):
        return 'hackernews'

    def get_desc(self):
        return 'Hacker News'

    def get_articles(self):
        # Although the source is in RSS, the daily items are consolidated as CDATA.
        # Parse and break them down instead of using RSSBase
        rss_url = 'http://www.daemonology.net/hn-daily/index.rss'

        resultList = []
        try:
            doc = html.document_fromstring(read_http_page(rss_url))
            for item in doc.xpath('//rss/channel/item'):
                title = item.xpath('title')[0].text if len(item.xpath('title')) > 0 else 'Daily Hacker News'
                resultList.append(self.create_section(title))

                description = item.xpath('description')[0] if len(item.xpath('description')) > 0 else None
                if description is not None:
                    for article in description.xpath('ul/li/span[@class="storylink"]/a'):
                        if article.text and article.get('href'):
                            resultList.append(self.create_article(article.text.strip(), article.get('href')))

        except Exception as e:
            logger.exception('Problem processing Hacker News: ' + str(e))
            logger.exception(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))

        return resultList
mit
Python
cd48829eb08df62d8222128b33a7c00b9ca2ed8a
Add files via upload
dewuem/python-bioinf
interpro_go_extraction_direct.py
interpro_go_extraction_direct.py
#!/usr/bin/env python2

# Daniel Elsner
# 26.09.2016
# Take the GO ID directly from Interproscan, without the need of previous cutting and grepping.
# Input: The interproscan-output.tsv file

import sys

with open(sys.argv[1], 'r') as readfile:
    id_list_content = list(readfile)

# make a dict, this prevents duplicate entries and makes access easy
outdict = {}

for i in range(len(id_list_content)):
    if "GO" in id_list_content[i]:
        # only if there is a GO entry, otherwise there is nothing to split
        inputs = id_list_content[i].split('\t')
        # from the entry line, get the Gene Name and the GO IDs
        p, j = inputs[0], inputs[13]
        # create a set, this spares us from checking for duplicates and just keeps everything once
        outdict[p] = set()
    else:
        # if there is no GO entry, pass the line
        pass

for i in range(len(id_list_content)):
    if "GO" in id_list_content[i]:
        # only if there is a GO entry, otherwise there is nothing to split
        inputs = id_list_content[i].split('\t')
        # from the entry line, get the Gene Name and the GO IDs
        p, j = inputs[0], inputs[13]
        if '|' in str(j):
            # individual GOs are separated by "|"; add each of them to the set,
            # which automatically checks whether it is already there
            for n in str(j).split('|'):
                outdict[p].add(n.strip())
        else:
            outdict[p].add(str(j.strip()))
    else:
        # if there is no GO entry, pass the line
        pass

for i in range(len(outdict)):
    print str(outdict.keys()[i]) + "\t" + ', '.join(outdict.values()[i])
mit
Python
ed578177781ff1d4aeb0b7abb7d5f11fc5a7c626
Create copy of WeakList and set it to raise exception instead of removing item from list
PatrikValkovic/grammpy
grammpy/WeakList.py
grammpy/WeakList.py
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 12:11
:Licence GNUv3
Part of grammpy

Original implementation: https://github.com/apieum/weakreflist
"""

import weakref
from .exceptions import TreeDeletedException

__all__ = ["WeakList"]


def is_slice(index):
    return isinstance(index, slice)


class WeakList(list):
    def __init__(self, items=list()):
        list.__init__(self, self._refs(items))

    def value(self, item):
        if isinstance(item, weakref.ReferenceType):
            if item() is None:
                raise TreeDeletedException()
            return item()
        return item

    def ref(self, item):
        try:
            item = weakref.ref(item)
        finally:
            return item

    def __contains__(self, item):
        return list.__contains__(self, self.ref(item))

    def __getitem__(self, index):
        items = list.__getitem__(self, index)
        return type(self)(self._values(items)) if is_slice(index) else self.value(items)

    def __setitem__(self, index, item):
        items = self._refs(item) if is_slice(index) else self.ref(item)
        return list.__setitem__(self, index, items)

    def __iter__(self):
        return iter(self[index] for index in range(len(self)))

    def __reversed__(self):
        reversed_self = type(self)(self)
        reversed_self.reverse()
        return reversed_self

    def append(self, item):
        list.append(self, self.ref(item))

    def remove(self, item):
        return list.remove(self, self.ref(item))

    def remove_all(self, item):
        item = self.ref(item)
        while list.__contains__(self, item):
            list.remove(self, item)

    def index(self, item, *args):
        # list.index only accepts start/stop positionally, so forward them as-is
        return list.index(self, self.ref(item), *args)

    def count(self, item):
        return list.count(self, self.ref(item))

    def pop(self, index=-1):
        return self.value(list.pop(self, self.ref(index)))

    def insert(self, index, item):
        return list.insert(self, index, self.ref(item))

    def extend(self, items):
        return list.extend(self, self._refs(items))

    def __iadd__(self, other):
        return list.__iadd__(self, self._refs(other))

    def _refs(self, items):
        return map(self.ref, items)

    def _values(self, items):
        return map(self.value, items)

    def _sort_key(self, key=None):
        return self.value if key is None else lambda item: key(self.value(item))

    def sort(self, *, key=None, reverse=False):
        return list.sort(self, key=self._sort_key(key), reverse=reverse)
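A minimal sketch of the changed behaviour (editorial, not part of the commit): once a referent has been garbage-collected, reading it raises TreeDeletedException instead of the item being dropped from the list.

import gc

class Node(object):
    pass

wl = WeakList()
n = Node()
wl.append(n)
assert wl[0] is n      # referent still alive

del n
gc.collect()
wl[0]                  # raises TreeDeletedException now that the referent is gone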
mit
Python
ee859881af0633d4d2d88015c907cfa856516dbe
Create TwoSum II for Lint
Chasego/codi,Chasego/codi,cc13ny/Allin,cc13ny/algo,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/cod,cc13ny/Allin,Chasego/codirit,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/codi,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,cc13ny/Allin,cc13ny/algo,Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/cod,Chasego/cod
lintcode/000-000-Two-Sum-II/TwoSumII.py
lintcode/000-000-Two-Sum-II/TwoSumII.py
class Solution:
    # @param nums, an array of integer
    # @param target, an integer
    # @return an integer
    def twoSum2(self, nums, target):
        # Write your code here
        nums.sort()
        i, j = 0, len(nums) - 1
        res = 0
        while i < j:
            if nums[i] + nums[j] <= target:
                i += 1
            else:
                res += j - i
                j -= 1
        return res
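A quick worked check (editorial, not part of the commit): after sorting, whenever nums[i] + nums[j] > target, every index between i and j also pairs with j above the target, which is exactly what res += j - i counts.

s = Solution()
# sorted: [1, 1, 2, 45, 46, 46]; the pairs summing above 47 are
# (2,46) twice, (45,46) twice and (46,46), so the answer is 5
print(s.twoSum2([1, 1, 2, 45, 46, 46], 47))  # 5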
mit
Python
52e8a378d8a31989c9d93ef83eabbe6df339f915
Add data migration to add category components for VPC.
opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur
src/waldur_mastermind/marketplace/migrations/0083_offering_component.py
src/waldur_mastermind/marketplace/migrations/0083_offering_component.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations

from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE


def create_category_components(apps, schema_editor):
    CATEGORY_TITLE = 'Private clouds'

    Category = apps.get_model('marketplace', 'Category')
    CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
    OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')

    try:
        vpc_category = Category.objects.get(title=CATEGORY_TITLE)
    except Category.DoesNotExist:
        return

    storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
        category=vpc_category,
        type=STORAGE_TYPE,
        name='Storage',
        measured_unit='GB'
    )

    ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
        category=vpc_category,
        type=RAM_TYPE,
        name='RAM',
        measured_unit='GB'
    )

    cores_cc, _ = CategoryComponent.objects.get_or_create(
        category=vpc_category,
        type=CORES_TYPE,
        name='Cores',
        measured_unit='cores'
    )

    components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
    components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
    components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
    components.filter(type=CORES_TYPE).update(parent=cores_cc)


class Migration(migrations.Migration):

    dependencies = [
        ('marketplace', '0082_orderitem_activated'),
    ]

    operations = [
        migrations.RunPython(create_category_components),
    ]
mit
Python
df0772b3ae02ff0180f18410cf4350b493db9cb4
Create fold_stereonet_fisher_mean.py
mlaloux/QGIS_structural_geology
fold_stereonet_fisher_mean.py
fold_stereonet_fisher_mean.py
#Definition of inputs and outputs
#==================================
##[Mes scripts GEOL]=group
##entree=vector
##dip_dir=field entree
##dip=field entree

#Algorithm body
#==================================
from qgis.core import *
from apsg import *

layer = processing.getObject(entree)
dipdir = layer.fieldNameIndex(dip_dir)
dip = layer.fieldNameIndex(dip)

if layer.selectedFeatureCount():
    print "ok", layer.selectedFeatureCount()
    g= Group([Vec3(Fol(elem.attributes()[dipdir],elem.attributes()[dip])) for elem in layer.selectedFeatures()],name='plis')
else:
    g= Group([Vec3(Fol(elem.attributes()[dipdir],elem.attributes()[dip])) for elem in layer.getFeatures()],name='plis')

# mean vector
resultat= "mean vector: " + str(int(round(g.R.aslin.dd[1]))) + " - " + str(int(round(g.R.aslin.dd[0])))

s = StereoNet()
s.line(g.aslin, 'b.',markersize=18)
s.line(g.R.aslin,'g^',markersize=18)
s.cone(g.R.aslin, g.fisher_stats['a95'], 'r')
s.cone(g.R.aslin, g.fisher_stats['csd'], 'k')
a = s.ax
a.set_title(resultat, y=1.06, size=14, color='g')
s.show()
apache-2.0
Python
8ce4b91c9f1eca911809bc2e8c315ea24eac10ae
Add scheduler example
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
schedule.py
schedule.py
import schedule
import time

def job():
    print("I'm working...")

schedule.every(10).minutes.do(job)
schedule.every().hour.do(job)
schedule.every().day.at("10:30").do(job)
schedule.every(5).to(10).minutes.do(job)
schedule.every().monday.do(job)
schedule.every().wednesday.at("13:15").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
mit
Python
9ab752bc96c1ad8d6e718cbf87f247aba4ab76a9
Create MiddleCharacter.py
JLJTECH/TutorialTesting
Edabit/MiddleCharacter.py
Edabit/MiddleCharacter.py
#!/usr/bin/env python3

'''
Create a function that takes a string and returns the middle character(s). With conditions.
'''

def get_middle(word):
    if len(word) <= 2:
        return word
    elif len(word) % 2 == 0:
        return word[(len(word) // 2) - 1] + word[(len(word) // 2)]
    else:
        return word[(len(word) // 2)]

#Alternative Solutions

def get_middle(word):
    return word[(len(word)-1)//2:(len(word)+2)//2]

def get_middle(word):
    while len(word) > 2:
        word = word[1:-1]
    return word
mit
Python
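All three variants above agree: strings of length two or less come back unchanged, even lengths yield the middle pair, odd lengths the single middle character. An illustrative check:

for word in ('a', 'ab', 'abc', 'abcd'):
    print(get_middle(word))  # a, ab, b, bc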
4e7a074fdfb0b5df300c2b4f1c2109ffb47e89cc
Create t3_muptiplot.py
jxjdzmyy/battery
t3_muptiplot.py
t3_muptiplot.py
'''
Created on Aug 20, 2017

@author: rob
'''
# Learn about API authentication here: https://plot.ly/python/getting-started
# Find your api_key here: https://plot.ly/settings/api
import plotly.plotly as py
import plotly.graph_objs as go
import plotly

import numpy as np
import pandas as pd

#py.sign_in('matterphiz', '3eu3YK0Rjn56EzoOWvgx')
#Streaming API Tokens:g5sptjri5x

dates = pd.date_range('20160101', periods=60)
print(dates)

df = pd.DataFrame(np.random.rand(60, 4), index=dates, columns=list('ABCD'))
# DataFrame must be capitalized; the lowercase name does not exist
# np.random.rand(60,4) generates a random 60x4 matrix with elements between 0 and 1
# index=dates uses the date elements of dates as the index
# columns=list('ABCD') names the columns A, B, C, D
print(df)

trace1 = go.Scatter(
    x=df.index,
    y=df['A'],
    name='yaxis1 data'
)
trace2 = go.Scatter(
    x=df.index,
    y=10*df['B'],
    name='yaxis2 data',
    yaxis='y2'
)
trace3 = go.Scatter(
    x=df.index,
    y=100*df['C'],
    name='yaxis3 data',
    yaxis='y3'
)
trace4 = go.Scatter(
    x=df.index,
    y=1000*df['D'],
    name='yaxis4 data',
    yaxis='y4'
)
data = [trace1, trace2, trace3, trace4]
layout = go.Layout(
    title='Multi-axis data demo',
    width=1280,
    xaxis=dict(
        # axis domains are fractions of the plot width, not data values;
        # [0.3, 0.7] leaves room for the free y-axes at positions 0.15 and 0.85
        domain=[0.3, 0.7]
    ),
    yaxis=dict(
        title='yaxis title',
        titlefont=dict(
            color='#1f77b4'
        ),
        tickfont=dict(
            color='#1f77b4'
        )
    ),
    yaxis2=dict(
        title='yaxis2 title',
        titlefont=dict(
            color='#ff7f0e'
        ),
        tickfont=dict(
            color='#ff7f0e'
        ),
        anchor='free',
        overlaying='y',
        side='left',
        position=0.15
    ),
    yaxis3=dict(
        title='yaxis3 title',
        titlefont=dict(
            color='#d62728'
        ),
        tickfont=dict(
            color='#d62728'
        ),
        anchor='x',
        overlaying='y',
        side='right'
    ),
    yaxis4=dict(
        title='yaxis4 title',
        titlefont=dict(
            color='#9467bd'
        ),
        tickfont=dict(
            color='#9467bd'
        ),
        anchor='free',
        overlaying='y',
        side='right',
        position=0.85
    )
)
fig = go.Figure(data=data, layout=layout)
#plot_url = py.plot(fig, filename='multiple-axes-multiple')
plot_url = plotly.offline.plot(fig)
epl-1.0
Python
2e7058a9b48154ad205b6f53e07a224574a2e125
add command-line script to fix missing exposure times in MMIRS data
MMTObservatory/mmtwfs
scripts/fix_mmirs_exposure_time.py
scripts/fix_mmirs_exposure_time.py
#!/usr/bin/env python

import sys
import math
import argparse
from datetime import datetime
from pathlib import Path

from astropy.io import fits

import logging
log = logging.getLogger('Fix MMIRS')
log.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)


def main():
    parser = argparse.ArgumentParser(description='Utility for fixing missing exposure times in MMIRS WFS images.')
    parser.add_argument(
        'rootdir',
        metavar="<WFS data directory>",
        help="Directory containing MMIRS WFS data to fix.",
        default="."
    )
    parser.add_argument(
        '--dryrun',
        help="Calculate new exposure times, but don't rewrite files.",
        action="store_true"
    )

    args = parser.parse_args()

    rootdir = Path(args.rootdir)
    files = sorted(list(rootdir.glob("mmirs*.fits")))

    if len(files) < 1:
        log.error(f"No MMIRS WFS data found in {str(rootdir)}")
        return

    timedict = {}
    for f in files:
        with fits.open(f) as hdulist:
            hdr = hdulist[-1].header
            data = hdulist[-1].data
        timedict[str(f)] = hdr['DATE-OBS']

    log.debug(timedict)

    sec = 0.
    for i in range(0, len(files)):
        if i < len(files)-1:
            t1 = datetime.strptime(timedict[str(files[i])], "%Y-%m-%dT%H:%M:%S")
            t2 = datetime.strptime(timedict[str(files[i+1])], "%Y-%m-%dT%H:%M:%S")
        else:  # handle last file
            t1 = datetime.strptime(timedict[str(files[i-1])], "%Y-%m-%dT%H:%M:%S")
            t2 = datetime.strptime(timedict[str(files[i])], "%Y-%m-%dT%H:%M:%S")
        diff = t2 - t1

        # exposure times are almost always in multiples of 5 sec unless the exposures are very short
        diff_sec = 5 * math.floor(diff.seconds/5)

        # mmirs wfs exposures should almost never be more than 3 min during normal operations.
        # large gaps are assumed to be the end of a track so 200 seems a good cutoff to reject
        # those and use the previous time diff instead.
        if diff_sec < 200:
            sec = diff_sec

        f = files[i]
        with fits.open(f) as hdulist:
            changed = False
            for h in hdulist:
                if 'EXPTIME' in h.header:
                    if h.header['EXPTIME'] == 0.0:
                        if args.dryrun:
                            log.info(f"DRYRUN -- Setting EXPTIME to {sec} in {str(f)}..")
                        else:
                            log.info(f"Setting EXPTIME to {sec} in {str(f)}..")
                            h.header['EXPTIME'] = sec
                            changed = True
                    else:
                        log.info(f"EXPTIME already set to {h.header['EXPTIME']} for {str(f)}")
            if changed and not args.dryrun:
                hdulist.writeto(f, overwrite=True)


if __name__ == "__main__":
    main()
bsd-3-clause
Python
0cff0d69f0d2f52f950be37f95c8f261a9741ae7
Create KAKAO_DATA_PREPARE_NEW.py
thkim107/sim
KAKAO_DATA_PREPARE_NEW.py
KAKAO_DATA_PREPARE_NEW.py
import h5py
from scipy.spatial import distance
import scipy.misc
import numpy as np

path = './Desktop/COVER_SONG/chroma_data_training/CP_1000ms_training_s2113_d2113_170106223452.h5'
f1 = h5py.File(path)
datasetNames = [n for n in f1.keys()]
X = f1['X']
idxDis_train = f1['idxDis_train']
idxDis_validate = f1['idxDis_validate']
idxSim_train = f1['idxSim_train']
idxSim_validate = f1['idxSim_validate']

def oti(cover1, cover2, chroma_dim):
    cover1_mean = np.sum(cover1, axis=0)/np.max(np.sum(cover1, axis=0))
    cover2_mean = np.sum(cover2, axis=0)/np.max(np.sum(cover2, axis=0))
    dist_store = np.zeros(chroma_dim)
    for i in range(0, chroma_dim):
        cover2_mean_shifted = np.roll(cover2_mean, i)
        dist = np.dot(cover1_mean, cover2_mean_shifted)
        dist_store[i] = dist
    oti = np.argmax(dist_store)
    cover2_shifted = np.roll(cover2, oti, axis=1)
    return cover1, cover2_shifted

def simple_matrix(X, Y):
    XX = oti(X, Y, 12)[0]
    YY = oti(X, Y, 12)[1]
    M = [[0 for col in range(180)] for row in range(180)]
    for i in range(180):
        for j in range(180):
            M[i][j] = distance.euclidean(XX[i,:], YY[j,:])
    return np.asarray(M)

# np.shape(idxSim_train)[0]
for i in range(np.shape(idxSim_train)[0]):
    a = [idxSim_train[i][0], idxSim_train[i][1]]
    scipy.misc.imsave('./Desktop/KAKAO_ALL_PAIR_TRAIN/'+'{:0=4}'.format((int)(min(a)))+'_'+'{:0=4}'.format((int)(max(a)))+'_S.jpg', simple_matrix(X[min(a)-1], X[max(a)-1]))
    print((str)(i)+'th complete')

# np.shape(idxDis_train)[0]
for i in range(np.shape(idxDis_train)[0]):
    a = [idxDis_train[i][0], idxDis_train[i][1]]
    scipy.misc.imsave('./Desktop/KAKAO_ALL_PAIR_TRAIN/'+'{:0=4}'.format((int)(min(a)))+'_'+'{:0=4}'.format((int)(max(a)))+'_D.jpg', simple_matrix(X[min(a)-1], X[max(a)-1]))
    print((str)(i)+'th complete')

# 1175 x 1175 pair (180 by 180 matrix) complete
mit
Python
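The nested loop in simple_matrix computes a 180x180 Euclidean distance matrix one scalar call at a time. scipy ships a vectorized pairwise-distance routine that does the same work in a single call; a minimal sketch reusing the oti() helper from the record above (the 180-frame cut matches the loops there):

from scipy.spatial.distance import cdist

def simple_matrix_fast(X, Y):
    # align the second chroma sequence with OTI, exactly as above
    XX, YY = oti(X, Y, 12)
    # one vectorized call replaces 180*180 distance.euclidean() calls
    return cdist(XX[:180], YY[:180], metric='euclidean')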
3de9ab07b67bd37e418cba16318aa813326793bb
Create createREFgenomesForPhasing.py
evodify/phasing-genomes
createREFgenomesForPhasing.py
createREFgenomesForPhasing.py
mit
Python
c87779ed6e0163503c01efd3a3913b547954d73d
Create convcsv.py
pythonbag/scripts,pythonbag/scripts
convcsv.py
convcsv.py
#!/usr/bin/python
#
#  convert spreadsheet data, removing multiple spaces
#

import os, sys, getopt, shutil, glob, re, traceback, json, csv

def handle_exception():
    traceback.print_exc()
    os._exit(1)

def addRow(lst, row):
    key = row[9]
    if key in lst:
        setlst = lst[key]
        setlst.append(row)
    else:
        setlst = [row]
    lst[key] = setlst
    return lst

def getRow(filename):
    try:
        lst = {}
        with open(filename, "rb") as csvfile:
            rdr = csv.reader(csvfile, delimiter=',', quotechar='"')
            for row in rdr:
                hdr = row
                break
            for row in rdr:
                # re.sub works on strings, not lists, so squeeze the spaces field by field
                row = [re.sub(r"\s{2,}", " ", field) for field in row]
                key = row[1].lower()
                if "almaden" in key:
                    lst = addRow(lst, row)
                elif "san jose" in key:
                    lst = addRow(lst, row)
                elif "arc" in key:
                    lst = addRow(lst, row)
            csvfile.close()
        return lst
    except:
        traceback.print_exc()

#
# argv[0] = NAME
# argv[1] = IP
#
def main(argv):
    try:
        if len(argv) < 1:
            print '{"STATUS":"FAIL", "MSG":"MISSING ARGS" }'
            os._exit(2)
        lst = getRow(argv[0])
        for name in lst:
            #print name
            machines = lst[name]
            for machine in machines:
                print machine[9]+","+machine[13]+","+machine[11]+","+machine[12]
                break
            for machine in machines:
                print " "+machine[3]+","+machine[2]
        #print lst
    except:
        handle_exception()

if __name__ == "__main__":
    main(sys.argv[1:])
apache-2.0
Python
1c81643eaed91b4171a4e68699d930e5ef3688db
Add negative API tests for policy validation
openstack/senlin,openstack/senlin,stackforge/senlin,stackforge/senlin,openstack/senlin
senlin/tests/tempest/api/policies/test_policy_validate_negative.py
senlin/tests/tempest/api/policies/test_policy_validate_negative.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test

from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants


class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):

    @test.attr(type=['negative'])
    @decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
    def test_policy_validate_with_empty_body(self):
        params = {
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)

    @test.attr(type=['negative'])
    @decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
    def test_policy_validate_no_spec(self):
        params = {
            'policy': {
            }
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)

    @test.attr(type=['negative'])
    @decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
    def test_policy_validate_policy_type_incorrect(self):
        spec = copy.deepcopy(constants.spec_scaling_policy)
        spec['type'] = 'senlin.policy.bogus'
        params = {
            'policy': {
                'spec': spec
            }
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)

    @test.attr(type=['negative'])
    @decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
    def test_policy_validate_spec_validation_failed(self):
        spec = copy.deepcopy(constants.spec_scaling_policy)
        spec['properties']['bogus'] = 'foo'
        params = {
            'policy': {
                'spec': spec
            }
        }
        # Verify badrequest exception(400) is raised.
        self.assertRaises(exceptions.BadRequest,
                          self.client.validate_obj,
                          'policies', params)
apache-2.0
Python
f040351dd3397ba7297b69b2468b2b37589c0d8f
Add task to get stats about files
lutris/website,lutris/website,lutris/website,lutris/website
games/management/commands/get_installer_urls.py
games/management/commands/get_installer_urls.py
import json
from collections import defaultdict

from django.core.management.base import BaseCommand

from common.util import load_yaml
from games import models


class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        self.stdout.write("Installer stats\n")
        installers = models.Installer.objects.all()
        url_stats = defaultdict(list)
        for installer in installers:
            slug = installer.slug
            installer_content = load_yaml(installer.content)
            try:
                files = installer_content.get("files", [])
            except AttributeError:
                print("Deleting installer %s" % installer)
                installer.delete()
                continue
            if files is None:
                print("Deleting installer %s" % installer)
                installer.delete()
                continue
            for url_dict in files:
                fileid = next(iter(url_dict))
                try:
                    url = url_dict[fileid]
                except TypeError:
                    print("Deleting installer %s" % installer)
                    installer.delete()
                    continue
                if isinstance(url, str):
                    if url.startswith("N/A"):
                        continue
                    url_stats[url].append(slug)
                elif isinstance(url, dict):
                    if url["url"].startswith("N/A"):
                        continue
                    url_stats[url["url"]].append(slug)

        with open("installer-files.json", "w") as installer_files:
            json.dump(url_stats, installer_files, indent=2)
agpl-3.0
Python
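The command writes a JSON object mapping each file URL to the installer slugs that reference it. An illustrative reader for that output — the URL and slugs in the comment are invented, not taken from lutris data:

import json

with open("installer-files.json") as fp:
    url_stats = json.load(fp)

# e.g. {"https://example.com/game-setup.exe": ["game-standalone", "game-gog"]}
for url, slugs in sorted(url_stats.items(), key=lambda kv: -len(kv[1])):
    print(len(slugs), url)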
cff5035ad469adc46ed9cf446bb95d9a1e07bd77
Fix inline template
DMOJ/site,monouno/site,monouno/site,DMOJ/site,monouno/site,Phoenix1369/site,DMOJ/site,Phoenix1369/site,Minkov/site,DMOJ/site,Minkov/site,Minkov/site,Minkov/site,Phoenix1369/site,Phoenix1369/site,monouno/site,monouno/site
judge/templatetags/smart_math.py
judge/templatetags/smart_math.py
from HTMLParser import HTMLParser
from django.template import Library
from django.conf import settings
import re

register = Library()

MATHTEX_CGI = 'http://www.forkosh.com/mathtex.cgi'  #settings.get('MATHTEX_CGI', 'http://www.forkosh.com/mathtex.cgi')

inlinemath = re.compile(r'~(.*?)~|\\\((.*?)\\\)')


def inline_template(match):
    math = match.group(1) or match.group(2)
    return r'''
<span>
    <img class="tex-image" src="%s?\textstyle %s"/>
    <span class="tex-text" style="display:none">\( %s \)</span>
</span>
''' % (MATHTEX_CGI, math, math)


displaymath = re.compile(r'\$\$(.*?)\$\$|\\\[(.*?)\\\]')


def display_template(match):
    math = match.group(1) or match.group(2)
    return r'''
<span>
    <img class="tex-image" src="%s?\displaystyle %s" alt="%s"/>
    <div class="tex-text" style="display:none">\[ %s \]</div>
</span>
''' % (MATHTEX_CGI, math, math, math)


class MathHTMLParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.new_page = []
        self.data_buffer = []

    def purge_buffer(self):
        if self.data_buffer:
            buffer = ''.join(self.data_buffer)
            buffer = inlinemath.sub(inline_template, buffer)
            buffer = displaymath.sub(display_template, buffer)
            self.new_page.append(buffer)
            del self.data_buffer[:]

    def handle_starttag(self, tag, attrs):
        self.purge_buffer()
        self.new_page.append('<%s%s>' % (tag, ' '.join([''] + ['%s="%s"' % p for p in attrs])))

    def handle_endtag(self, tag):
        self.purge_buffer()
        self.new_page.append('</%s>' % tag)

    def handle_data(self, data):
        self.data_buffer.append(data)

    def handle_entityref(self, name):
        self.data_buffer.append('&%s;' % name)

    def handle_charref(self, name):
        self.data_buffer.append('&#%s;' % name)


@register.filter(name='smart_math', is_safe=True)
def math(page):
    parser = MathHTMLParser()
    parser.feed(page)
    return ''.join(parser.new_page)
from HTMLParser import HTMLParser
from django.template import Library
from django.conf import settings
import re

register = Library()

MATHTEX_CGI = 'http://www.forkosh.com/mathtex.cgi'  #settings.get('MATHTEX_CGI', 'http://www.forkosh.com/mathtex.cgi')

inlinemath = re.compile(r'~(.*?)~|\\\((.*?)\\\)')


def inline_template(match):
    math = match.group(1) or match.group(2)
    return r'''
<span>
    <img src="%s?\textstyle %s"/>
    <span style="display:none">\( %s \)</span>
</span>
''' % (MATHTEX_CGI, math, math)


displaymath = re.compile(r'\$\$(.*?)\$\$|\\\[(.*?)\\\]')


def display_template(match):
    math = match.group(1) or match.group(2)
    return r'''
<span>
    <img class="tex-image" src="%s?\displaystyle %s" alt="%s"/>
    <div class="tex-text" style="display:none">\[ %s \]</div>
</span>
''' % (MATHTEX_CGI, math, math, math)


class MathHTMLParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.new_page = []
        self.data_buffer = []

    def purge_buffer(self):
        if self.data_buffer:
            buffer = ''.join(self.data_buffer)
            buffer = inlinemath.sub(inline_template, buffer)
            buffer = displaymath.sub(display_template, buffer)
            self.new_page.append(buffer)
            del self.data_buffer[:]

    def handle_starttag(self, tag, attrs):
        self.purge_buffer()
        self.new_page.append('<%s%s>' % (tag, ' '.join([''] + ['%s="%s"' % p for p in attrs])))

    def handle_endtag(self, tag):
        self.purge_buffer()
        self.new_page.append('</%s>' % tag)

    def handle_data(self, data):
        self.data_buffer.append(data)

    def handle_entityref(self, name):
        self.data_buffer.append('&%s;' % name)

    def handle_charref(self, name):
        self.data_buffer.append('&#%s;' % name)


@register.filter(name='smart_math', is_safe=True)
def math(page):
    parser = MathHTMLParser()
    parser.feed(page)
    return ''.join(parser.new_page)
agpl-3.0
Python
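The filter's inline regex can be exercised without Django; a small illustrative run (the sample sentence is invented) showing how the first or second capture group carries the math, exactly as inline_template reads it:

import re

inlinemath = re.compile(r'~(.*?)~|\\\((.*?)\\\)')
sample = 'Euler: ~e^{i\\pi}+1=0~ holds.'
print(inlinemath.sub(lambda m: '[MATH: %s]' % (m.group(1) or m.group(2)), sample))
# -> Euler: [MATH: e^{i\pi}+1=0] holds.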
12445164d5a7651ddcc381f5e602577d8372fe6a
Add is_eq_size script
vladimirgamalian/pictools
is_eq_size.py
is_eq_size.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list


@click.command()
@click.argument('path', type=click.Path(exists=True))
def is_eq_size(path):
    """
    Test all pictures in folder (recursive) for size equality.
    """
    files = get_file_list(path)
    sizes = [Image.open(f).size for f in files]
    if all(s == sizes[0] for s in sizes):
        print 'all pictures have same size'
    else:
        print 'not all pictures have same size'


if __name__ == '__main__':
    is_eq_size()
mit
Python
ad052e71145296897c1510752c0f3403b9cb45a4
add 1st py file
wuchengang/PythonLearing,wuchengang/PythonLearing
hello.py
hello.py
print('Hello, Python!')
name = input()
print(name)
apache-2.0
Python
73f2260e0e5ae3534f13664063808abbe73b1d72
add a new extractor, from json files
scorphus/dotfilesetal,scorphus/dotfilesetal,scorphus/dotfilesetal,scorphus/dotfilesetal,scorphus/dotfilesetal,scorphus/dotfilesetal
bin/extract_json.py
bin/extract_json.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-

# Copyright 2015 Pablo Santiago Blum de Aguiar <[email protected]>. All rights
# reserved. Use of this source code is governed by Apache License, Version 2.0,
# that can be found on https://opensource.org/licenses/Apache-2.0

import json
import sys


def main(argv):
    '''Usage: extract_json.py <json-file> <key>[.<key>...]'''
    def usage(json_dict=None):
        print main.__doc__
        if json_dict:
            print 'Available keys:\n  {}'.format(
                '\n  '.join(sorted(json_dict.keys()))
            )

    if len(argv) < 1:
        usage()
        return 1

    if not sys.stdin.isatty():
        try:
            json_dict = json.load(sys.stdin)
        except Exception as e:
            print 'Could not read from STDIN: {}'.format(e)
            return 2
        keys = [x for x in argv[1].split('.')] if len(argv) > 1 else []
    else:
        with open(sys.argv[1]) as json_file:
            try:
                json_dict = json.load(json_file)
            except Exception as e:
                print 'Could not read {}: {}'.format(sys.argv[1], e)
                return 2
        keys = [x for x in argv[2].split('.')] if len(argv) > 2 else []

    if not keys:
        usage(json_dict)
        return 3

    for key in keys:
        try:
            idx = int(key)
            key = idx
        except:
            pass
        try:
            json_dict = json_dict[key]
        except:
            json_dict = ''

    print(json_dict.encode('utf8') if json_dict else '')


if __name__ == '__main__':
    status = main(sys.argv)
    sys.exit(status)
apache-2.0
Python
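Since the script reads either a file argument or stdin, the pipe form can be emulated in-process; a hedged sketch assuming the record's main() is importable and running under Python 2, as its print statements imply (the JSON payload and key path are invented):

import sys
from io import BytesIO

# equivalent of: echo '{"servers": [{"host": "db1"}]}' | ./extract_json.py servers.0.host
sys.stdin = BytesIO(b'{"servers": [{"host": "db1"}]}')  # BytesIO.isatty() is False
main(['extract_json.py', 'servers.0.host'])  # prints: db1

Keys are split on '.', and purely numeric components are used as list indexes.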
81a38564379af16f4ea2d64572e517a6657f4450
add first test for NormalIndPower and normal_power
Averroes/statsmodels,statsmodels/statsmodels,bavardage/statsmodels,jseabold/statsmodels,rgommers/statsmodels,huongttlan/statsmodels,edhuckle/statsmodels,rgommers/statsmodels,nguyentu1602/statsmodels,nvoron23/statsmodels,hlin117/statsmodels,astocko/statsmodels,huongttlan/statsmodels,DonBeo/statsmodels,yarikoptic/pystatsmodels,wdurhamh/statsmodels,kiyoto/statsmodels,wzbozon/statsmodels,astocko/statsmodels,wdurhamh/statsmodels,edhuckle/statsmodels,bashtage/statsmodels,adammenges/statsmodels,bert9bert/statsmodels,saketkc/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,saketkc/statsmodels,bzero/statsmodels,alekz112/statsmodels,josef-pkt/statsmodels,waynenilsen/statsmodels,musically-ut/statsmodels,josef-pkt/statsmodels,ChadFulton/statsmodels,detrout/debian-statsmodels,yl565/statsmodels,bashtage/statsmodels,wdurhamh/statsmodels,gef756/statsmodels,kiyoto/statsmodels,bert9bert/statsmodels,nguyentu1602/statsmodels,wwf5067/statsmodels,wkfwkf/statsmodels,jstoxrocky/statsmodels,cbmoore/statsmodels,bavardage/statsmodels,nguyentu1602/statsmodels,bsipocz/statsmodels,yl565/statsmodels,bzero/statsmodels,wkfwkf/statsmodels,adammenges/statsmodels,YihaoLu/statsmodels,wwf5067/statsmodels,waynenilsen/statsmodels,jseabold/statsmodels,saketkc/statsmodels,bavardage/statsmodels,wzbozon/statsmodels,kiyoto/statsmodels,ChadFulton/statsmodels,wkfwkf/statsmodels,kiyoto/statsmodels,detrout/debian-statsmodels,bashtage/statsmodels,huongttlan/statsmodels,rgommers/statsmodels,bashtage/statsmodels,yarikoptic/pystatsmodels,kiyoto/statsmodels,statsmodels/statsmodels,gef756/statsmodels,wzbozon/statsmodels,detrout/debian-statsmodels,Averroes/statsmodels,wdurhamh/statsmodels,edhuckle/statsmodels,wwf5067/statsmodels,cbmoore/statsmodels,musically-ut/statsmodels,bert9bert/statsmodels,hlin117/statsmodels,DonBeo/statsmodels,phobson/statsmodels,yarikoptic/pystatsmodels,hainm/statsmodels,yl565/statsmodels,DonBeo/statsmodels,alekz112/statsmodels,bashtage/statsmodels,adammenges/statsmodels,nguyentu1602/statsmodels,Averroes/statsmodels,jstoxrocky/statsmodels,rgommers/statsmodels,wkfwkf/statsmodels,yl565/statsmodels,saketkc/statsmodels,hlin117/statsmodels,jstoxrocky/statsmodels,cbmoore/statsmodels,nvoron23/statsmodels,bzero/statsmodels,jseabold/statsmodels,phobson/statsmodels,wwf5067/statsmodels,gef756/statsmodels,saketkc/statsmodels,DonBeo/statsmodels,bashtage/statsmodels,YihaoLu/statsmodels,josef-pkt/statsmodels,wkfwkf/statsmodels,astocko/statsmodels,waynenilsen/statsmodels,bavardage/statsmodels,cbmoore/statsmodels,ChadFulton/statsmodels,jseabold/statsmodels,hainm/statsmodels,statsmodels/statsmodels,phobson/statsmodels,phobson/statsmodels,adammenges/statsmodels,hainm/statsmodels,jstoxrocky/statsmodels,nvoron23/statsmodels,waynenilsen/statsmodels,YihaoLu/statsmodels,statsmodels/statsmodels,edhuckle/statsmodels,bzero/statsmodels,edhuckle/statsmodels,josef-pkt/statsmodels,astocko/statsmodels,detrout/debian-statsmodels,bsipocz/statsmodels,rgommers/statsmodels,bert9bert/statsmodels,ChadFulton/statsmodels,bsipocz/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,gef756/statsmodels,musically-ut/statsmodels,bsipocz/statsmodels,wzbozon/statsmodels,hainm/statsmodels,gef756/statsmodels,DonBeo/statsmodels,hlin117/statsmodels,musically-ut/statsmodels,Averroes/statsmodels,wdurhamh/statsmodels,YihaoLu/statsmodels,bavardage/statsmodels,alekz112/statsmodels,bert9bert/statsmodels,cbmoore/statsmodels,YihaoLu/statsmodels,alekz112/statsmodels,wzbozon/statsmodels,nvoron23/statsmodels,bzero/statsmodels,josef-pkt/statsmodels,
nvoron23/statsmodels,phobson/statsmodels,ChadFulton/statsmodels,huongttlan/statsmodels,yl565/statsmodels
statsmodels/stats/tests/test_power.py
statsmodels/stats/tests/test_power.py
# -*- coding: utf-8 -*-
"""Tests for statistical power calculations

Note:
    test for ttest power are in test_weightstats.py
    tests for chisquare power are in test_gof.py

Created on Sat Mar 09 08:44:49 2013

Author: Josef Perktold
"""

import numpy as np
from numpy.testing import assert_almost_equal

import statsmodels.stats.power as smp


def test_normal_power_explicit():
    # a few initial test cases for NormalIndPower
    sigma = 1
    d = 0.3
    nobs = 80
    alpha = 0.05

    res1 = smp.normal_power(d, nobs/2., 0.05)
    res2 = smp.NormalIndPower().power(d, nobs, 0.05)
    res3 = smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80, alpha=0.05, beta=None)
    res_R = 0.475100870572638
    assert_almost_equal(res1, res_R, decimal=13)
    assert_almost_equal(res2, res_R, decimal=13)
    assert_almost_equal(res3, res_R, decimal=13)

    norm_pow = smp.normal_power(-0.01, nobs/2., 0.05)
    norm_pow_R = 0.05045832927039234
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=13)

    norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05, alternative="1s")
    norm_pow_R = 0.056869534873146124
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=13)

    # Note: negative effect size is same as switching one-sided alternative
    # TODO: should I switch to larger/smaller instead of "one-sided" options
    norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05, alternative="1s")
    norm_pow_R = 0.0438089705093578
    #value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
    assert_almost_equal(norm_pow, norm_pow_R, decimal=13)
bsd-3-clause
Python
f24c8376847b0226f3d3f674af2f568367f15234
add data structure for parens problem
clair3st/code-katas
src/linked_list.py
src/linked_list.py
"""Singly-Linked List in Python.""" class Node(object): """docstring for LinkedList.""" def __init__(self, data, next_item=None): """Init for instance of a node.""" self.data = data self.next_item = next_item class LinkedList(object): """Class for head of Linked List.""" def __init__(self, data=None): """Initialize the head node.""" self.head = None if data: try: for item in data: self.push(item) except TypeError: self.head = Node(data) def push(self, data=None): """Create new node in front of head.""" new_head = Node(data, self.head) self.head = new_head def pop(self): """Remove the first value off the head of the list and return it.""" if self.head is None: raise IndexError('Cannot pop from an empty list.') new_head = self.head.next_item old_head = self.head.data self.head = new_head return old_head def size(self): """Count the objects in linked list.""" count = 0 curr = self.head while curr: count += 1 curr = curr.next_item return count def search(self, val): """Iterate through the linked list to find instance containing val.""" curr = self.head result = None try: while val != curr.data: curr = curr.next_item else: result = curr except AttributeError: pass return result def remove(self, node): """Remove a given node in the list.""" curr = self.head previous = None while curr: if curr == node: break previous = curr curr = previous.next_item if previous is None: self.head = curr.next_item else: previous.next_item = curr.next_item def display(self): """Return a string of the linked list.""" curr = self.head return_tuple = () while curr: return_tuple = return_tuple + (curr.data, ) curr = curr.next_item return return_tuple
mit
Python
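A quick usage sketch for the list above — values and expected outputs are illustrative:

ll = LinkedList([1, 2, 3])   # pushes in order, so the head ends up at 3
ll.push(4)                   # 4 -> 3 -> 2 -> 1
print(ll.display())          # (4, 3, 2, 1)
print(ll.size())             # 4
node = ll.search(2)          # the Node holding 2
ll.remove(node)              # 4 -> 3 -> 1
print(ll.pop())              # 4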
9992a4ff90156a1c5678303530c2feeaecf700d6
Create a_deco.py
DaivdZhang/LittleProject
src/misc/a_deco.py
src/misc/a_deco.py
import os
import sys
import linecache


def trace(func):
    """
    A trace decorator

    from: https://zhuanlan.zhihu.com/p/20175869
    :param func:
    :return:
    """
    def globaltrace(frame, why, arg):
        if why == "call":
            return localtrace
        return None

    def localtrace(frame, why, arg):
        if why == "line":
            filename = frame.f_code.co_filename
            line_no = frame.f_lineno
            b_name = os.path.basename(filename)
            tmp = linecache.getline(filename, line_no)
            print("{0}({1}):{2}".format(b_name, line_no, tmp), end='')
        return localtrace

    def _func(*args, **kwargs):
        sys.settrace(globaltrace)
        result = func(*args, **kwargs)
        sys.settrace(None)
        return result

    return _func


@trace
def foo(i):
    string = "Hello world!"
    print(string)
    print(string[i])
    os.system("cls")


if __name__ == "__main__":
    foo(-1)
mit
Python
463502a251111199da130e508929a35b2f126f4e
Add columns to User model
byanofsky/bookmarks,byanofsky/bookmarks,byanofsky/bookmarks
bookmarks/models.py
bookmarks/models.py
from sqlalchemy import Column, Integer, String

from bookmarks.database import Base


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String(50), unique=True, nullable=False)
    name = Column(String(120))
    email = Column(String(256), unique=True, nullable=False)

    def __init__(self, name=None, username=None, email=None):
        self.username = username
        self.name = name
        self.email = email

    def __repr__(self):
        return '<User %r>' % (self.name)
apache-2.0
Python
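A hedged usage sketch: the session handling below follows the usual SQLAlchemy declarative pattern, and it assumes bookmarks.database also exposes a scoped session named db_session, which the record does not show:

from bookmarks.database import db_session  # assumed helper, not shown above
from bookmarks.models import User

user = User(name='Ada Lovelace', username='ada', email='[email protected]')
db_session.add(user)
db_session.commit()
print(user)  # <User 'Ada Lovelace'>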
0c77666c259ba78899863bbbe482a857102c19be
add settings module
HackerEarth/he-sdk-python
hackerearth/settings.py
hackerearth/settings.py
# v3 API endpoints of HackerEarth Code Checker API
COMPILE_API_ENDPOINT = 'https://api.hackerearth.com/v3/code/compile'
RUN_API_ENDPOINT = 'https://api.hackerearth.com/v3/code/run'

# Max run time of a program in seconds
RUN_TIME_UPPER_LIMIT = 5

# Max memory consumption allowed for a program
MEMORY_UPPER_LIMIT = 1024*256

# please keep this secret
CLIENT_SECRET = ''
mit
Python
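A sketch of how these constants are typically consumed; the payload field names (client_secret, source, lang, time_limit, memory_limit) follow HackerEarth's v3 docs as recalled here and should be treated as assumptions, not a verified API contract:

import requests

from hackerearth import settings

payload = {
    'client_secret': settings.CLIENT_SECRET,
    'source': 'print("hello")',
    'lang': 'PYTHON',
    'time_limit': settings.RUN_TIME_UPPER_LIMIT,
    'memory_limit': settings.MEMORY_UPPER_LIMIT,
}
response = requests.post(settings.RUN_API_ENDPOINT, data=payload)
print(response.json())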
36af113eb363ddf25f96ab53e41db0ea7f3bb481
add a python script to generate user weibo file from weibo.txt
anphoenix/data_model,anphoenix/data_model
src/main/python/data_aggregation.py
src/main/python/data_aggregation.py
import sys, os

def generateData(inputData, outputDir, userLimit):
    print "Generate person weibo to folder: " + outputDir
    if not os.path.isdir(outputDir):
        os.mkdir(outputDir)
        print 'Directory created at: ' + outputDir

    currentID = ""
    userNum = 0
    outputFile = None
    l = inputData.readline()
    while l:
        line = l.strip()
        if line:
            fields = line.split("\t")
            if len(fields) < 6:
                print "Broken line found: " + line
                l = inputData.readline()
                continue
            if fields[1] != currentID:
                userNum += 1
                if userNum > userLimit:
                    break
                print "Find weibo for " + str(userNum) + " user: " + fields[1]
                currentID = fields[1]
                fileName = outputDir + "/" + currentID
                print "Create a new file: " + fileName
                outputFile = file(fileName, "w")
            outputFile.write(fields[5] + "\n")
        l = inputData.readline()
    print "Generated user weibo for " + str(userNum - 1) + " users"

def usage():
    print "Two parameters are required to run the script: input file and output folder\n"
    print "One parameter is optional: the number of users to generate; by default all the user weibo data in the input file is generated\n"

if __name__ == "__main__":
    if len(sys.argv) < 3:
        # Expect at least two arguments: the input data file and output folder
        usage()
        sys.exit(2)
    try:
        inputData = file(sys.argv[1], "r")
    except IOError:
        sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv[1])  # was an undefined name 'arg'
        sys.exit(1)
    userCount = sys.maxint
    if len(sys.argv) >= 4:
        userCount = int(sys.argv[3])
        print "Generate weibo user: " + str(userCount)
    generateData(inputData, sys.argv[2], userCount)
apache-2.0
Python
8c78679bc9875c698f639a0c45a5208b43162f4e
comment obsolete ff prefs script.
CarlFK/veyepar,xfxf/veyepar,yoe/veyepar,yoe/veyepar,EricSchles/veyepar,yoe/veyepar,EricSchles/veyepar,CarlFK/veyepar,xfxf/veyepar,xfxf/veyepar,yoe/veyepar,CarlFK/veyepar,yoe/veyepar,EricSchles/veyepar,EricSchles/veyepar,xfxf/veyepar,xfxf/veyepar,CarlFK/veyepar,EricSchles/veyepar,CarlFK/veyepar
setup/nodes/review/set_ff_prefs.py
setup/nodes/review/set_ff_prefs.py
#!/usr/bin/python

"""
Allow a web page to access local files.
This makes it easier to preview title screens and video files.

FF stores profiles in ~/.mozilla/firefox/profiles.ini
FF settings are set by creating a .js file that sets things on startup

1. count number of FF profiles. If more than 1, give up.
2. get profile dir
3. create user.js that sets custom settings.
"""

import os
import ConfigParser

home_dir = os.path.expanduser('~')
print "home dir:", home_dir

profiles_path = os.path.join(home_dir, ".mozilla", "firefox", "profiles.ini")
print "profiles_path:", profiles_path

# read ini file
config = ConfigParser.RawConfigParser()
config.read([profiles_path])

profiles = [s for s in config.sections() if s != 'General']

if len(profiles) > 1:
    print "more than one profile, you fix it."
    print profiles
else:
    d = dict(config.items(profiles[0]))
    settings_path = os.path.join(home_dir, ".mozilla", "firefox", d['path'], "user.js")
    config = """
user_pref("capability.policy.policynames", "localfilelinks");
user_pref("capability.policy.localfilelinks.sites", "http://localhost:8080","http://veyepar.nextdayvideo.com:8080");
user_pref("capability.policy.localfilelinks.checkloaduri.enabled", "allAccess");
"""
    print "writing to", settings_path
    open(settings_path, 'w').write(config)
#!/usr/bin/python

import os
import ConfigParser

home_dir = os.path.expanduser('~')
print "home dir:", home_dir

profiles_path = os.path.join(home_dir, ".mozilla", "firefox", "profiles.ini")
print "profiles_path:", profiles_path

# read ini file
config = ConfigParser.RawConfigParser()
config.read([profiles_path])

profiles = [s for s in config.sections() if s != 'General']

if len(profiles) > 1:
    print "more than one profile, you fix it."
    print profiles
else:
    d = dict(config.items(profiles[0]))
    settings_path = os.path.join(home_dir, ".mozilla", "firefox", d['path'], "user.js")
    config = """
user_pref("capability.policy.policynames", "localfilelinks");
user_pref("capability.policy.localfilelinks.sites", "http://localhost:8080","http://veyepar.nextdayvideo.com:8080");
user_pref("capability.policy.localfilelinks.checkloaduri.enabled", "allAccess");
"""
    print "writing to", settings_path
    open(settings_path, 'w').write(config)
mit
Python
bb11ab050fe9a7bb0ffe83419eb0e87390f7deac
Add registration method for TB
hopshadoop/hops-util-py,hopshadoop/hops-util-py
hopsutil/tensorboard.py
hopsutil/tensorboard.py
""" Utility functions to retrieve information about available services and setting up security for the Hops platform. These utils facilitates development by hiding complexity for programs interacting with Hops services. """ import socket import subprocess import os import hdfs def register(logdir): #find free port s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('',0)) addr, port = s.getsockname() s.close() #let tb bind to port subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"]) tb_url = "http://{0}:{1}".format(addr, port) #dump tb host:port to hdfs hops_user = os.environ["USER"]; hops_user_split = hops_user.split("__"); project = hops_user_split[0]; hdfs_handle = hdfs.get() hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
apache-2.0
Python
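A minimal usage sketch for the helper above; the log directory is illustrative, and the Hops conventions it relies on (PYSPARK_PYTHON set, USER of the form project__user) come from the record itself:

from hopsutil import tensorboard

# spawns TensorBoard on a free port and records its URL under the
# project's Resources dataset for other tools to pick up
tensorboard.register("hdfs:///Projects/demo/Logs/tensorboard")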