commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
7cd1c65b77eb474f67f1e194ceeb7bcde2d2bdb9 | Create wsgi.py | KrishMunot/NGeO,KrishMunot/NGeO,KrishMunot/NGeO | NGeO/NGeO/wsgi.py | NGeO/NGeO/wsgi.py | """
WSGI config for NGeO project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Point Django at the project settings before building the application object.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NGeO.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| mit | Python |
|
a7f1565efbdfa20d4d97d90a688b78da51533113 | Add new package: ycsb (#17788) | LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/ycsb/package.py | var/spack/repos/builtin/packages/ycsb/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ycsb(Package):
    """Yahoo! Cloud Serving Benchmark."""

    homepage = "https://research.yahoo.com/news/yahoo-cloud-serving-benchmark/"
    url = "https://github.com/brianfrankcooper/YCSB/archive/0.17.0.tar.gz"
    git = "https://github.com/brianfrankcooper/YCSB.git"

    version('0.17.0', sha256='5dd1a3d4dd7ac336eadccc83b097c811e142cfe1b23fc278f247054a1892c0e0')
    version('0.16.0', sha256='4296fd5e90d7d6d7dfcbad90039ddf16e785706a07f99c1c8a06e6ee06440f71')
    version('0.15.0', sha256='50b83c11f1a2f19f45e3cc6781f952c69944d1221dfec72169c3587802fc7fbb')
    version('0.14.0', sha256='456bcc9fa3d5d66d76fffa9cec34afd4528d9f02aa8a8d1135f511650516d5cb')
    version('0.13.0', sha256='21cb8078a0fe2d8d909145744ca15848dbb6757e98a7fdc97fb4049f82f4afbc')

    depends_on('maven', type='build')
    depends_on('java@8', type=('build', 'run'))
    depends_on('mongodb-async-driver', type='build')

    def install(self, spec, prefix):
        # Build with Maven, after registering the mongodb-async-driver jar
        # (provided by Spack, not by Maven Central) in the local Maven repo.
        mvn = which('mvn')
        # Path of the prebuilt driver jar inside its Spack install prefix.
        jar_name = 'target/mongodb-async-driver-' + \
            spec['mongodb-async-driver'].version.string + '.jar'
        path = join_path(self.spec['mongodb-async-driver'].prefix, jar_name)
        # install:install-file puts the jar where 'mvn package' can resolve it.
        mvn('install:install-file', '-Dfile={0}'.format(path),
            '-DgroupId=com.allanbank', '-DartifactId=mongodb-async-driver',
            '-Dversion=2.0.1', '-Dpackaging=jar')
        mvn('package', '-DskipTests')
        install_tree('.', prefix)
| lgpl-2.1 | Python |
|
b7baf1e53f24bb96a0b09e9305f5f1e562cf3547 | Create analog_tester.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/moz4r/analog_tester.py | home/moz4r/analog_tester.py | arduino = Runtime.createAndStart("arduino","Arduino")
arduino.setBoardNano()
arduino.connect("COM6")
arduino.setAref("DEFAULT")

# Callback invoked by MyRobotLab with an array of pin readings; print each value.
def publishPin(pins):
    for pin in range(0, len(pins)):print(pins[pin].value)

# Route the Arduino service's publishPinArray events to the callback above.
arduino.addListener("publishPinArray","python","publishPin")
#arduino.enablePin(pinAddress, rate)
#analog pin range are 14-18 on uno, 54-70 on mega
#rate is the number of polling / sec
arduino.enablePin(14, 1)
| apache-2.0 | Python |
|
17956f008eabcca80dbacdb20e92b819b87d0f57 | Create homework-1-cryptanalysis.py | JohnDvorak/general-cryptanalysis | homework-1-cryptanalysis.py | homework-1-cryptanalysis.py | #! usr/bin/env python3
# Monoalphabetic-substitution ciphertext under analysis
# (spacing and punctuation already removed; 401 characters).
ORIGINAL_CIPHERTEXT = "\
NAGQNXIIZAGBGIIYXQOMQUGQUZAXTNGMYXQGTTASNISQO\
AMFGZAGEZVOOGUZAGIGMTAMQUTZYMXQGUMCMYZDECMLWS\
RVQYVIEASVQUTXLMQQSZTZMYZZAGDMOMXQSQMPVMYYESR\
WQSNIGUOGZAGEAMZGZSAVQZXLMQAMVIZAGDMQUVYOGZAG\
DQSDSYGQSDSYGLMQXGQUVYGZSBGMYZAGBYVQZSRZAGBSS\
WTZAMZIXGSVZSQZAGUGTWTMRVIIZAYGGTLSYGSRTGFGYM\
IXQTVIZTSRBISZZGUCMOGTMQUTLYMNISRTISFGQIENSYW\
ZAMZZAGEAMFGSRRGYGUDGXMDTXLWMQUZXYGUDSYGZAMQM\
QEZAYMIIVCSQZAGNSSUTZMLWTNSYWXQONGMYXGUIE"
# Cipher-letter -> plaintext-letter substitution table, recovered by hand
# from the frequency / digraph / trigraph statistics printed below.
translate_dict = {
    'A' : 'h',
    'B' : 'b',
    'C' : 'p',
    'D' : 'm',
    'E' : 'y',
    'F' : 'v',
    'G' : 'e',
    'H' : ' ', # H is not present in the ciphertext
    'I' : 'l',
    'J' : ' ', # J is not present in the ciphertext
    'K' : ' ', # H is not present in the ciphertext
    'L' : 'c',
    'M' : 'a',
    'N' : 'w',
    'O' : 'g',
    'P' : 'q', # Only used once
    'Q' : 'n',
    'R' : 'f',
    'S' : 'o',
    'T' : 's',
    'U' : 'd',
    'V' : 'u',
    'W' : 'k',
    'X' : 'i',
    'Y' : 'r',
    'Z' : 't'
}
def print_letter_distribution(ciphertext):
    """
    Print the letter distribution of the ciphertext, most frequent first.
    """
    # Tally occurrences; dict insertion order (first appearance) also fixes
    # the tie-break order of the sorted listing below.
    tally = {}
    for symbol in ciphertext:
        tally[symbol] = tally.get(symbol, 0) + 1

    # Report how many distinct symbols appear.
    print("Unique characters in ciphertext:", len(tally), '\n')

    # 401 characters of ciphertext, so count / 4.01 is a percentage.
    for symbol in sorted(tally, key=tally.get, reverse=True):
        print(symbol, 'has a frequency of', tally[symbol] / 4.01)
    print()
def translate(ciphertext):
    """
    Translate the ciphertext using the module-level
    translate_dict as a substitution table.
    """
    # Map every symbol through the substitution table and join the result.
    return ''.join(translate_dict[symbol] for symbol in ciphertext)
def count_digraphs(ciphertext):
    """
    Count and print the most frequent 2-letter
    combinations in the ciphertext.
    """
    # Adjacent pairs: zip the text against itself shifted by one position.
    tally = {}
    for left, right in zip(ciphertext, ciphertext[1:]):
        pair = left + right
        tally[pair] = tally.get(pair, 0) + 1

    print('2-letter sequences:')
    # Only pairs seen more than 5 times are frequent enough to report.
    for pair in sorted(tally, key=tally.get, reverse=True):
        if tally[pair] > 5:
            print(pair, tally[pair])
    print()
def count_trigraphs(ciphertext):
    """
    Count and print the most frequent
    3-letter combinations in ciphertext.
    """
    # Zip the ciphertext with an offset to get 3char elements
    three_letter_set = [x+y+z for x,y,z in zip(*[ciphertext[i:]
                                                 for i in range(3)])]
    trigraph_frequency = dict()
    for trigraph in three_letter_set:
        if trigraph not in trigraph_frequency:
            trigraph_frequency[trigraph] = 1
        else:
            trigraph_frequency[trigraph] += 1
    print('3 letter sequences:')
    # Only trigraphs seen more than 3 times are frequent enough to report.
    for trigraph in sorted(trigraph_frequency,
                           key=trigraph_frequency.get,
                           reverse=True):
        if trigraph_frequency[trigraph] > 3:
            print(trigraph, trigraph_frequency[trigraph])
    print()
# Driver: dump the statistics used to break the cipher, then the recovered text.
print('Original ciphertext:\n', ORIGINAL_CIPHERTEXT, '\n')
print("Total lengths of ciphertext:", len(ORIGINAL_CIPHERTEXT))
print_letter_distribution(ORIGINAL_CIPHERTEXT)
count_digraphs(ORIGINAL_CIPHERTEXT)
count_trigraphs(ORIGINAL_CIPHERTEXT)
print('Translation:\n', translate(ORIGINAL_CIPHERTEXT))
| mit | Python |
|
5e6cfc84a4b34a292281ea466bf11facb680e72b | initialize radix sort file | miracode/data-structures | radix_sort.py | radix_sort.py | def radix_sort(array):
"""
Sorts an array of numbers using the least signficant digit radix algorithm.
"""
if __name__ == '__main__':
print radix_sort.func_doc
| mit | Python |
|
3e54e311a747b1e032384c7a74a8ed9aeafe1e8d | Fix constructor for Bernoulli node | SalemAmeen/bayespy,fivejjs/bayespy,jluttine/bayespy,bayespy/bayespy | bayespy/inference/vmp/nodes/bernoulli.py | bayespy/inference/vmp/nodes/bernoulli.py | ######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
A module for the Bernoulli distribution node
"""
import numpy as np
from .binomial import (BinomialMoments,
BinomialDistribution,
Binomial)
class BernoulliMoments(BinomialMoments):
    """
    Class for the moments of Bernoulli variables.
    """

    def __init__(self):
        # A Bernoulli variable is a binomial with a single trial (N=1).
        super().__init__(1)
class BernoulliDistribution(BinomialDistribution):
    """
    Class for the VMP formulas of Bernoulli variables.
    """

    def __init__(self):
        # Fix the binomial trial count at 1 for the Bernoulli special case.
        super().__init__(1)
class Bernoulli(Binomial):
    """
    Node for Bernoulli random variables.
    """

    _moments = BernoulliMoments()
    _distribution = BernoulliDistribution()

    @classmethod
    def _constructor(cls, p, **kwargs):
        """
        Constructs distribution and moments objects.
        """
        # Coerce the success-probability parent into the moments
        # representation expected by the (only) parent slot.
        p = cls._ensure_moments(p, cls._parent_moments[0])
        parents = [p]
        return ( parents,
                 kwargs,
                 ( (), ),
                 cls._total_plates(kwargs.get('plates'),
                                   cls._distribution.plates_from_parent(0, p.plates)),
                 cls._distribution,
                 cls._moments,
                 cls._parent_moments)

    def show(self):
        """
        Print the distribution using standard parameterization.
        """
        # phi[0] is the logit natural parameter; invert with the sigmoid.
        p = 1 / (1 + np.exp(-self.phi[0]))
        print("%s ~ Bernoulli(p)" % self.name)
        print(" p = ")
        print(p)
| ######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
A module for the Bernoulli distribution node
"""
import numpy as np
from .binomial import (BinomialMoments,
BinomialDistribution,
Binomial)
class BernoulliMoments(BinomialMoments):
    """
    Class for the moments of Bernoulli variables.
    """

    def __init__(self):
        # A Bernoulli variable is a binomial with a single trial (N=1).
        super().__init__(1)
class BernoulliDistribution(BinomialDistribution):
    """
    Class for the VMP formulas of Bernoulli variables.
    """

    def __init__(self):
        # Fix the binomial trial count at 1 for the Bernoulli special case.
        super().__init__(1)
class Bernoulli(Binomial):
    """
    Node for Bernoulli random variables.
    """

    _moments = BernoulliMoments()
    _distribution = BernoulliDistribution()

    def __init__(self, p, **kwargs):
        # Delegate to Binomial with the trial count fixed at 1.
        super().__init__(1, p, **kwargs)

    def show(self):
        """
        Print the distribution using standard parameterization.
        """
        # phi[0] is the logit natural parameter; invert with the sigmoid.
        p = 1 / (1 + np.exp(-self.phi[0]))
        print("%s ~ Bernoulli(p)" % self.name)
        print(" p = ")
        print(p)
| mit | Python |
0a16a2002e1247ad87a877de6aa85bb0844dc9c4 | tag tweaks | luetgendorf/Espruino,wilberforce/Espruino,wilberforce/Espruino,luetgendorf/Espruino,wilberforce/Espruino,lancernet/Espruino,wilberforce/Espruino,luetgendorf/Espruino,lancernet/Espruino,wilberforce/Espruino,lancernet/Espruino,luetgendorf/Espruino,lancernet/Espruino,lancernet/Espruino,luetgendorf/Espruino,luetgendorf/Espruino,wilberforce/Espruino,wilberforce/Espruino,luetgendorf/Espruino,lancernet/Espruino,lancernet/Espruino | boards/NRF51TAG.py | boards/NRF51TAG.py | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
# Board description consumed by the Espruino build/documentation scripts.
info = {
    'name' : "nRF51 Tag",
    'link' : [ "" ],
    'default_console' : "EV_BLUETOOTH",
    # 'default_console_tx' : "D15",
    # 'default_console_rx' : "D17",
    # 'default_console_baudrate' : "9600",
    'variables' : 150,
    'binary_name' : 'espruino_%v_nrf51tag.bin',
    'build' : {
        'defines' : [
            'USE_BLUETOOTH'
        ]
    }
};

# MCU capabilities and flash/RAM layout.
chip = {
    'part' : "NRF51822",
    'family' : "NRF51",
    'package' : "QFN48",
    'ram' : 16,
    'flash' : 256,
    'speed' : 16,
    'usart' : 1,
    'spi' : 1,
    'i2c' : 1,
    'adc' : 1,
    'dac' : 0,
    # If using DFU bootloader, it sits at 0x3C000 - 0x40000 (0x40000 is end of flash)
    # Might want to change 256 -> 240 in the code below
    'saved_code' : {
        'address' : ((256 - 3 - 16) * 1024),
        'page_size' : 1024,
        'pages' : 3,
        'flash_available' : (256 - 108 - 16 - 3) # total flash pages - softdevice - bootloader - saved code
    }
};

# No on-board LEDs/buttons are declared for this board.
devices = {
    # 'LED1' : { 'pin' : 'D22' },
    # 'LED2' : { 'pin' : 'D21' },
    # 'LED3' : { 'pin' : 'D23' }
};
def get_pins():
    """Return the board's pin list: 32 GPIOs plus the low-frequency crystal pins."""
    pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
    pinutils.findpin(pins, "PD27", True)["functions"]["XL1"]=0;
    pinutils.findpin(pins, "PD26", True)["functions"]["XL2"]=0;
    #The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work.
    return pins
| #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
# Board description consumed by the Espruino build/documentation scripts
# (pre-change version: serial console, larger variable pool).
info = {
    'name' : "nRF51 Tag",
    'link' : [ "" ],
    'default_console' : "EV_SERIAL1",
    'default_console_tx' : "D15",
    'default_console_rx' : "D17",
    'default_console_baudrate' : "9600",
    'variables' : 310,
    'binary_name' : 'espruino_%v_nrf51tag.bin',
    'build' : {
        'defines' : [
            'USE_BLUETOOTH'
        ]
    }
};

# MCU capabilities and flash/RAM layout.
chip = {
    'part' : "NRF51822",
    'family' : "NRF51",
    'package' : "QFN48",
    'ram' : 16,
    'flash' : 256,
    'speed' : 16,
    'usart' : 1,
    'spi' : 1,
    'i2c' : 1,
    'adc' : 1,
    'dac' : 0,
    # If using DFU bootloader, it sits at 0x3C000 - 0x40000 (0x40000 is end of flash)
    # Might want to change 256 -> 240 in the code below
    'saved_code' : {
        'address' : ((256 - 3) * 1024),
        'page_size' : 1024,
        'pages' : 3,
        'flash_available' : (256 - 108 - 16) # total flash pages - softdevice - bootloader
    }
};

# No on-board LEDs/buttons are declared for this board.
devices = {
    # 'LED1' : { 'pin' : 'D22' },
    # 'LED2' : { 'pin' : 'D21' },
    # 'LED3' : { 'pin' : 'D23' }
};
def get_pins():
    """Return the board's pin list: 32 GPIOs plus the low-frequency crystal pins."""
    pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
    pinutils.findpin(pins, "PD27", True)["functions"]["XL1"]=0;
    pinutils.findpin(pins, "PD26", True)["functions"]["XL2"]=0;
    #The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work.
    return pins
| mpl-2.0 | Python |
e96832e16a6e5746faeaf647c6cd681f1d2f9bca | Create break_fracmorse.py | jameslyons/python_cryptanalysis,jameslyons/python_cryptanalysis,jameslyons/python_cryptanalysis | break_fracmorse.py | break_fracmorse.py | # usage: python break_fracmorse.py 'CIPHERTEXTMESSAGE'
# ideally you'll want 200 or so characters to reliably decrypt, shorter will often work but not as reliably.
import random
from ngram_score import ngram_score
import re
import sys
from pycipher import FracMorse
#ctext = FracMorse('PQRSTUVWXYZABCDEFGHIJKLMNO').encipher("He has not been returned to sea because of his affection for caregivers.The waitress pointed to the lunch menu, but the oldest living ex-major leaguer had no use for it")
# Quadgram statistics computed over Fractionated-Morse-enciphered text;
# scoring with these (not English stats) is what makes the simple-sub attack work.
fitness = ngram_score('fmorse_quadgrams.txt') # load our quadgram model
# Helper: map any integer onto an uppercase letter (wraps modulo 26).
def i2a(i):
    return chr(ord('A') + (i % 26))
# Decipher text with the substitution cipher under the given key
# (key is a 26-element list: key[i] is the ciphertext letter for plaintext i2a(i)).
def sub_decipher(text, key):
    # Invert the key once: invkey[cipher_index] -> plaintext letter.
    invkey = [i2a(key.index(i)) for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
    decoded = []
    for ch in text:
        if ch.isalpha():
            decoded.append(invkey[ord(ch.upper()) - ord('A')])
        else:
            # Non-letters pass through unchanged.
            decoded.append(ch)
    return ''.join(decoded)
# This code is just the simple substitution cipher cracking code, it works perfectly for fractionated morse as
# long as you use fractioned morse statistics instead of english statistics.
def break_simplesub(ctext,startkey=None):
    ''' perform hill-climbing with a single start. This function may have to be called many times
    to break a substitution cipher. '''
    # make sure ciphertext has all spacing/punc removed and is uppercase
    ctext = re.sub('[^A-Z]','',ctext.upper())
    parentkey,parentscore = startkey or list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'),-99e99
    # With no starting key, begin the climb from a random permutation.
    if not startkey: random.shuffle(parentkey)
    parentscore = fitness.score(sub_decipher(ctext,parentkey))
    count = 0
    # Stop after 1000 consecutive swaps with no score improvement.
    while count < 1000:
        a = random.randint(0,25)
        b = random.randint(0,25)
        child = parentkey[:]
        # swap two characters in the child
        child[a],child[b] = child[b],child[a]
        score = fitness.score(sub_decipher(ctext,child))
        # if the child was better, replace the parent with it
        if score > parentscore:
            parentscore, parentkey = score, child[:]
            count = 0 # reset the counter
        count += 1
    return parentscore, parentkey
# NOTE: Python 2 script (print statements below).
# Strip the command-line ciphertext down to the alphabet the cipher uses.
ctext = sys.argv[1]
ctext = re.sub(r'[^A-Z ]','',ctext.upper())
# Seed the search once from the identity key, then restart from random keys.
maxscore, maxkey = break_simplesub(ctext,list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'))
print str(maxscore),'simplesub key:',''.join(maxkey), 'decrypt: ',sub_decipher(ctext,maxkey)
# 1000 random restarts; keep the best-scoring key seen so far.
for i in range(1000):
    score, key = break_simplesub(ctext)
    if score > maxscore:
        maxscore,maxkey = score,key[:]
print str(maxscore),'FractionatedMorse key:',''.join(maxkey), 'decrypt: ',FracMorse(maxkey).decipher(ctext)
| mit | Python |
|
decf4b1916a421fe996a31feb131b7ed9e4e3c36 | Add a simple benchmark script | abhimanyuma/ml-with-py | numpy-benchmark-one.py | numpy-benchmark-one.py | import timeit
# Time three ways of computing the dot product of 0..999 with itself,
# 10000 repetitions each: pure Python, naive numpy sum, and numpy dot.
# NOTE(review): 'xrange' in the first snippet is Python 2 only — under
# Python 3 this statement raises NameError inside timeit; confirm the
# intended interpreter.
normal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)
naive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)
good_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)

print("Normal Python: %f sec"%normal_py_sec)
print("Naive Numpy : %f sec"%naive_np_sec)
print("Good Numpy : %f sec"%good_np_sec)
| unlicense | Python |
|
cb517a2cd1dea12fadf4f72147fecf0105cbd717 | include missing Message | cocodelabs/api.palaverapp.com,cocodelabs/api.palaverapp.com | palaverapi/message.py | palaverapi/message.py | # Adapted from https://github.com/kylef/irctk/blob/master/irctk/message.py
from typing import List, Optional
class Message:
    """A parsed IRC protocol line: optional prefix, command, and parameters."""

    @classmethod
    def parse(cls, string: str) -> 'Message':
        """Parse a raw IRC line into a Message (IRCv3 message tags are discarded)."""
        prefix = None
        parameters = []

        # Message tags ("@key=value;... ") are stripped, not stored.
        if string.startswith('@'):
            _, string = string[1:].split(' ', 1)

        # A leading ":prefix " names the message source.
        if string.startswith(':'):
            prefix, string = string.split(' ', 1)
            prefix = prefix[1:]

        # The command is the first space-delimited word (or the whole rest).
        command, _, string = string.partition(' ')

        # Middle parameters are space-separated; a ":"-led parameter
        # swallows the remainder of the line (the trailing parameter).
        while string:
            if string.startswith(':'):
                parameters.append(string[1:])
                break
            parameter, _, string = string.partition(' ')
            parameters.append(parameter)

        return cls(prefix, command, parameters)

    def __init__(
        self,
        prefix: str = None,
        command: str = '',
        parameters: List[str] = None,
    ):
        self.prefix = prefix
        self.command = command
        self.parameters = parameters or []

    def get(self, index: int) -> Optional[str]:
        """Return parameter *index*, or None when it is past the end."""
        if index < len(self.parameters):
            return self.parameters[index]
        return None
| bsd-3-clause | Python |
|
7aaf42a7b129ba5b9548db0b2a71a095246aeac9 | Add Py3 compatibility support helpers | paramiko/paramiko,Automatic/paramiko,rcorrieri/paramiko,redixin/paramiko,thisch/paramiko,davidbistolas/paramiko,reaperhulk/paramiko,anadigi/paramiko,thusoy/paramiko,varunarya10/paramiko,mhdaimi/paramiko,mirrorcoder/paramiko,fvicente/paramiko,ameily/paramiko,esc/paramiko,zarr12steven/paramiko,torkil/paramiko,digitalquacks/paramiko,remram44/paramiko,dorianpula/paramiko,CptLemming/paramiko,jaraco/paramiko,zpzgone/paramiko,selboo/paramiko,dlitz/paramiko,jorik041/paramiko,toby82/paramiko,SebastianDeiss/paramiko | paramiko/py3compat.py | paramiko/py3compat.py | import sys
__all__ = ['PY3', 'string_types', 'integer_types', 'text_type', 'bytes_type', 'long', 'input', 'bytestring', 'byte_ord', 'byte_chr', 'byte_mask', 'b', 'u', 'StringIO', 'BytesIO', 'is_callable', 'MAXSIZE', 'next']

# True when running under Python 3; selects which shim set below is defined.
PY3 = sys.version_info[0] >= 3

if PY3:
    import collections
    import struct

    # On Python 3 the "unicode-ish" aliases all map onto the native types.
    string_types = str
    integer_types = int
    text_type = str
    bytes_type = bytes
    long = int
    input = input

    def bytestring(s):
        # Already bytes-clean on Python 3; no conversion needed.
        return s

    def byte_ord(c):
        # Iterating bytes yields ints on Python 3, so this is the identity.
        assert isinstance(c, int)
        return c

    def byte_chr(c):
        # Single-byte bytes object from an int 0-255.
        assert isinstance(c, int)
        return struct.pack('B', c)

    def byte_mask(c, mask):
        # Mask a byte value and return it as a single-byte bytes object.
        assert isinstance(c, int)
        return struct.pack('B', c & mask)

    def b(s, encoding='utf8'):
        """cast unicode or bytes to bytes"""
        if isinstance(s, bytes):
            return s
        elif isinstance(s, str):
            return s.encode(encoding)
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)

    def u(s, encoding='utf8'):
        """cast bytes or unicode to unicode"""
        if isinstance(s, bytes):
            return s.decode(encoding)
        elif isinstance(s, str):
            return s
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)

    import io
    StringIO = io.StringIO # NOQA
    BytesIO = io.BytesIO # NOQA

    def is_callable(c):
        return isinstance(c, collections.Callable)

    def get_next(c):
        # Return the iterator's advance method (renamed __next__ in py3).
        return c.__next__

    next = next

    MAXSIZE = sys.maxsize # NOQA
else:
    # Python 2 branch: names below only resolve at runtime under py2.
    string_types = basestring
    integer_types = (int, long)
    text_type = unicode
    bytes_type = str
    long = long
    input = raw_input

    def bytestring(s): # NOQA
        # Coerce unicode to UTF-8 encoded str (py2 bytes).
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    byte_ord = ord # NOQA
    byte_chr = chr # NOQA

    def byte_mask(c, mask):
        return chr(ord(c) & mask)

    def b(s, encoding='utf8'): # NOQA
        """cast unicode or bytes to bytes"""
        if isinstance(s, str):
            return s
        elif isinstance(s, unicode):
            return s.encode(encoding)
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)

    def u(s, encoding='utf8'): # NOQA
        """cast bytes or unicode to unicode"""
        if isinstance(s, str):
            return s.decode(encoding)
        elif isinstance(s, unicode):
            return s
        else:
            raise TypeError("Expected unicode or bytes, got %r" % s)

    # Prefer the C-accelerated StringIO when available.
    try:
        import cStringIO
        StringIO = cStringIO.StringIO # NOQA
    except ImportError:
        import StringIO
        StringIO = StringIO.StringIO # NOQA

    BytesIO = StringIO

    def is_callable(c): # NOQA
        return callable(c)

    def get_next(c): # NOQA
        return c.next

    def next(c):
        return c.next()

    # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
    class X(object):
        def __len__(self):
            return 1 << 31

    # Probe len() overflow to detect a 32- vs 64-bit Py_ssize_t.
    try:
        len(X())
    except OverflowError:
        # 32-bit
        MAXSIZE = int((1 << 31) - 1) # NOQA
    else:
        # 64-bit
        MAXSIZE = int((1 << 63) - 1) # NOQA
    del X
| lgpl-2.1 | Python |
|
6db9688d7c078c8cf8d1b17305e89bb680a46e53 | Create lc1001.py | FiveEye/ProblemSet,FiveEye/ProblemSet | LeetCode/lc1001.py | LeetCode/lc1001.py | xs = {}
# Module-level indexes over the still-lit lamps (xs, defined above, maps rows):
# ys: lamps per column, fs: lamps per "/" diagonal (x+y),
# ss: lamps per "\" diagonal (x-y), grid: set of lit y's keyed by x.
ys = {}
fs = {}
ss = {}
grid = {}
# Ensure *xs* has a (possibly empty) set bucket for key *x*.
def checkdic(xs, x):
    xs.setdefault(x, set())
# Return 1 when key *x* is missing or has an empty bucket in *xs*, else 0.
def checkempty(xs, x):
    return 0 if xs.get(x) else 1
# Turn lamp (x, y) off: delete it from the grid and from every index,
# doing nothing if it is not currently lit.
def remove(x, y):
    if x not in grid or y not in grid[x]:
        return
    grid[x].remove(y)
    lamp = (x, y)
    for index, key in ((xs, x), (ys, y), (fs, x + y), (ss, x - y)):
        index[key].remove(lamp)
class Solution:
    def gridIllumination(self, N: int, lamps: List[List[int]], queries: List[List[int]]) -> List[int]:
        """
        LeetCode 1001: for each query cell report 1 if its row, column or
        either diagonal holds a lit lamp, then switch off the 3x3
        neighbourhood of the query cell.
        """
        # Reset the module-level indexes for this call.
        global xs, ys, fs, ss, grid
        xs = {}
        ys = {}
        fs = {}
        ss = {}
        grid = {}
        n = len(lamps)
        # Index every lamp by row, column and both diagonals.
        for l in lamps:
            x = l[0]
            y = l[1]
            if x not in grid:
                grid[x] = {y}
            else:
                grid[x].add(y)
            checkdic(xs, x)
            checkdic(ys, y)
            checkdic(fs, x + y)
            checkdic(ss, x - y)
            xs[x].add((x,y))
            ys[y].add((x,y))
            fs[x+y].add((x,y))
            ss[x-y].add((x,y))
        ans = []
        for q in queries:
            x = q[0]
            y = q[1]
            # Dark only when all four lines through (x, y) are lamp-free.
            tmp = checkempty(xs, x) and checkempty(ys, y) and checkempty(fs, x+y) and checkempty(ss, x-y)
            if tmp:
                ans.append(0)
            else:
                ans.append(1)
            # Switch off the query cell and its eight neighbours.
            remove(x, y-1)
            remove(x, y)
            remove(x, y+1)
            remove(x-1, y-1)
            remove(x-1, y)
            remove(x-1, y+1)
            remove(x+1, y-1)
            remove(x+1, y)
            remove(x+1, y+1)
        return ans
| mit | Python |
|
b7762c1b8bc987ed1b72ba0db0dbf47894c2e931 | add score | xiahei/Daily_scripts,x1ah/Daily_scripts,x1ah/Daily_scripts,xiahei/Daily_scripts,x1ah/Daily_scripts,xiahei/Daily_scripts,x1ah/Daily_scripts,x1ah/Daily_scripts | StuScore/Score.py | StuScore/Score.py | #!/usr/bin/env python
# coding:utf-8
from bs4 import BeautifulSoup
import requests
import re
import sys
# Python 2 only: reload(sys) re-exposes setdefaultencoding, then force UTF-8
# so the Chinese labels/strings below can be handled without explicit decoding.
reload(sys)
sys.setdefaultencoding('utf-8')
def login(username, pswd='0'):
    '''
    Log in to the academic-affairs site by replaying its login form POST.
    :param username: student number
    :param pswd: password (the site default appears to be '0')
    :return: the logged-in requests session on success, otherwise False
    '''
    login_url = 'http://219.242.68.33/Login.aspx'
    # ASP.NET WebForms hidden fields captured from the login page; the
    # __VIEWSTATE/__EVENTVALIDATION blobs are constants of that page version.
    from_data = {
        "ToolkitScriptManager1_HiddenField": "",
        "__EVENTTARGET": "",
        "__EVENTARGUMENT": "",
        "__VIEWSTATE": "/wEPDwUKMTY0Njg4MjEwM2Rkj+Af8kaVOxsefGZECk5PM6rOOYgs0taVhQxQSxoC298=",
        "__VIEWSTATEGENERATOR": "C2EE9ABB",
        "__EVENTVALIDATION": "/wEWCQKK9JioBQLB2tiHDgK1qbSRCwLB9fLCCQKVwf3jAwL7jJeqDQK2yLNyAoyp3LQNAoLch4YM4/7Gzd6qXWcFlpTQVOKRLsJcEeZ1kj5lh7u9AQrHyms=",
        "txtUser": username,
        "txtPassword": pswd,
        "rbLx": "学生",
        "btnLogin": " 登 录 "
    }
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36"
    }
    s = requests.session()
    response = s.post(url=login_url, data=from_data, headers=header)
    response_text = response.text
    # Success/failure is detected by scraping marker strings in the reply page.
    if response_text.find('个人资料') > 0:
        print '登录成功!'
        return s
    elif response_text.find('密码不正确') > 0:
        print '密码错误...请重试...'
        return False
    else:
        print '登录失败...请重试...'
        return False
def get_ifo(sess):
    '''
    Fetch and print the student's profile using the logged-in session.
    :param sess: requests session returned by login()
    :return: None (the profile fields are printed)
    '''
    ifo_url = 'http://219.242.68.33/xuesheng/xsxx.aspx'
    html = sess.get(ifo_url)
    soup = BeautifulSoup(html.text, 'lxml')
    data = {}
    # Keys are prefixed a.-e. purely so sorted() prints them in this order.
    data['a.姓名'] = soup.find(id="ctl00_ContentPlaceHolder1_lblXm").text
    data['b.身份证号'] = soup.find(id="ctl00_ContentPlaceHolder1_lblSfz").text
    data['c.学号'] = soup.find(id="ctl00_ContentPlaceHolder1_lblXh").text
    data['d.班级'] = soup.find(id="ctl00_ContentPlaceHolder1_className").text
    data['e.院系'] = soup.find(id="ctl00_ContentPlaceHolder1_collegeName").text
    for item in sorted(data):
        print '{0}:{1}{2}'.format(item, '-'*5, data[item])
def get_score(sess, username='', pswd='0'):
    """
    Fetch and display exam scores for the logged-in student.

    Not implemented yet.  The session argument comes first so the signature
    matches the call site in main(), which invokes
    ``get_score(sess, username, pswd)`` — the previous two-parameter stub
    raised TypeError as soon as menu option 1 was chosen.
    """
    pass
def elective(sess, username='', pswd='0'):
    """
    Handle elective-course operations for the logged-in student.

    Not implemented yet.  The session argument comes first so the signature
    matches the call site in main(), which invokes
    ``elective(sess, username, pswd)`` — the previous two-parameter stub
    raised TypeError as soon as menu option 3 was chosen.
    """
    pass
def Quit():
    '''
    Exit: print a farewell message.
    :return: None
    '''
    print 'Quited...'
def main():
    """Interactive entry point: log in, then loop over the text menu."""
    # Menu shown to the user (runtime string, kept verbatim).
    prompt = '''
+===========================+
| [1]查成绩 |
| [2]个人信息 |
| [3]选修课 |
| [4]登录其他账号 |
| [5]安全退出 |
+===========================+
>>> '''
    username = raw_input('学号: ')
    pswd = raw_input('密码: ')
    sess = login(username, pswd)
    if sess:
        choice = True
        while choice:
            # Take only the first character of the user's reply.
            usr_choice = raw_input('\r'+prompt).strip()[0]
            if usr_choice == '1':
                # NOTE(review): called with three args — get_score()'s
                # definition must accept (sess, username, pswd).
                get_score(sess, username, pswd)
            elif usr_choice == '2':
                get_ifo(sess)
            elif usr_choice == '3':
                # NOTE(review): same three-argument call as get_score above.
                elective(sess, username, pswd)
            elif usr_choice == '4':
                # Restart with a different account, then leave this loop.
                main()
                break
            elif usr_choice == '5':
                Quit()
                break
            else:
                print 'Input incorrect..again!'
    else:
        # Login failed: offer one retry.
        cho = raw_input('Cotinue or not [n/y]: ').strip()[0]
        if cho == 'y':
            main()
        else:
            Quit()
| mit | Python |
|
98aee2af9aa3f7dcc75969f1ec3118c40539793e | Add clone of Haskell version | pilona/Utils,pilona/Utils,pilona/Utils | pandoc-include-code.py | pandoc-include-code.py | #! /usr/bin/env python3
from sys import stdout, stderr, exit
import json
def walktransform(tree):
    """
    Walk a Pandoc AST fragment, replacing every CodeBlock carrying an
    ``include`` attribute with a CodeBlock holding the named file's contents.

    Lists are walked element-wise; non-CodeBlock nodes are returned with
    their contents recursively walked (previously they fell through and
    returned None, destroying the AST).  Error paths now call sys.exit with
    a single message argument — the old multi-argument / ``sep=`` calls
    raised TypeError, and a stray breakpoint() has been removed.
    """
    if isinstance(tree, list):
        return [walktransform(subtree)
                for subtree
                in tree]
    if not isinstance(tree, dict):
        exit('Unsupported AST node: {!r}'.format(type(tree)))
    if tree.get('t') != 'CodeBlock':
        # Not a code block: recurse into list contents so code blocks
        # nested under containers (Div, BlockQuote, ...) are still found.
        contents = tree.get('c')
        if isinstance(contents, list):
            return {**tree, 'c': walktransform(contents)}
        return tree
    (_, _, meta, *_), code = tree.get('c', [[None, None, None], ''])
    if code.strip():
        # Including a file into a non-empty block would silently discard
        # the inline code, so treat it as a usage error.
        exit('Code in block:\n' + code)
    includes = [v for k, v in meta if k == 'include']
    if len(includes) > 1:
        exit('Too many includes: {}'.format(', '.join(includes)))
    elif not includes:
        exit('No file to include: {!r}'.format(meta))
    with open(includes[0]) as fp:
        code = fp.read()
    return {
        't': 'CodeBlock',
        'c': [
            [
                '',
                [],
                [
                    # TODO: file type
                ],
            ],
            code
        ],
    }
# TODO: https://github.com/owickstrom/pandoc-include-code#snippets
# TODO: https://github.com/owickstrom/pandoc-include-code#ranges
# TODO: https://github.com/owickstrom/pandoc-include-code#dedent
# TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa
if __name__ == '__main__':
    from argparse import ArgumentParser, FileType

    argument_parser = ArgumentParser()
    argument_parser.add_argument('ast', type=FileType('r'), default='-')
    args = argument_parser.parse_args()

    ast = json.load(args.ast)
    # json.load yields a list, so compare against a list — the original
    # tuple comparison (1, 22) was never equal and warned unconditionally.
    if ast['pandoc-api-version'] != [1, 22]:
        print('Unsupported Pandoc API version',
              '.'.join(map(str, ast['pandoc-api-version'])) + '.',
              'Use at own risk.',
              file=stderr)
    json.dump(walktransform(ast['blocks']), stdout)
| isc | Python |
|
70a6553d9323b3522e492c414b67e76111519368 | Add file to create all files to school census. | DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site | scripts/data_download/school_census/create_all_files.py | scripts/data_download/school_census/create_all_files.py | import os
import commands
import time
import logging
import sys
# Python 2 script: drive create_files.py once per census year 2007-2015.
# Usage: python create_all_files.py en|pt output_path
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
    print "ERROR! Use:\n python scripts/data_download/school_census/create_files.py en/pt output_path\n"
    exit()

# Log next to the output, named after this script's directory.
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)

for year in range(2007, 2016):
    # NOTE(review): the log line mentions higher_education but the command
    # actually run below targets school_census — looks like a copy-paste
    # leftover in the log string; confirm intended wording.
    logging.info("python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
    ret = commands.getoutput("python scripts/data_download/school_census/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
    logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
91b58112f1c83048511fdab09f9aad58351eb991 | add new package (#23573) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-pycocotools/package.py | var/spack/repos/builtin/packages/py-pycocotools/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPycocotools(PythonPackage):
    """Official APIs for the MS-COCO dataset."""

    homepage = "https://github.com/cocodataset/cocoapi"
    pypi = "pycocotools/pycocotools-2.0.2.tar.gz"

    version('2.0.2', sha256='24717a12799b4471c2e54aa210d642e6cd4028826a1d49fcc2b0e3497e041f1a')

    # numpy is 'link'-type as well: the Cython extension compiles against its headers.
    depends_on('python', type=('build', 'link', 'run'))
    depends_on('[email protected]:', type='build')
    depends_on('[email protected]:', type='build')
    depends_on('py-numpy', type=('build', 'link', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 | Python |
|
638ee09f0f2958a955fbad42368ffc6bb2a2688a | Add minimal REST API script based on flask | BioroboticsLab/deeppipeline,BioroboticsLab/bb_pipeline,BioroboticsLab/deeppipeline | pipeline/scripts/bb_pipeline_api.py | pipeline/scripts/bb_pipeline_api.py | #!/usr/bin/env python3
from tempfile import NamedTemporaryFile
import json
from threading import Lock
import numpy as np
from flask import Flask, request
from scipy.misc import imread
from pipeline import Pipeline
from pipeline.objects import Image, Candidates, Saliencies, IDs
from pipeline.pipeline import get_auto_config
app = Flask(__name__)
def init_pipeline():
pipeline = Pipeline([Image],
[Candidates, Saliencies, IDs],
**get_auto_config())
return pipeline
pipeline = init_pipeline()
pipeline_lock = Lock()
def jsonify(instance):
if isinstance(instance, np.ndarray):
return instance.tolist()
return instance
def process_image(image):
with pipeline_lock:
results = pipeline([image])
return json.dumps(dict([(k.__name__, jsonify(v)) for k, v in
results.items()]), ensure_ascii=False)
@app.route('/process', methods=['POST'])
def api_message():
print('Retrieving process request')
if request.headers['Content-Type'] == 'application/octet-stream':
try:
with NamedTemporaryFile(delete=True) as f:
f.write(request.data)
image = imread(f)
return process_image(image)
except Exception as err:
return '{}'.format(err)
else:
return "415 Unsupported Media Type"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=10000)
| apache-2.0 | Python |
|
427caaa998ea03bf80a00aaf90833eb910cf909d | Add migration file | OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain | webapp/apps/taxbrain/migrations/0061_auto_20171220_1859.py | webapp/apps/taxbrain/migrations/0061_auto_20171220_1859.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import webapp.apps.taxbrain.models
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0060_auto_20171219_2153'),
]
operations = [
migrations.AddField(
model_name='taxsaveinputs',
name='ID_RealEstate_crt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='taxsaveinputs',
name='ID_RealEstate_crt_cpi',
field=models.NullBooleanField(default=None),
),
migrations.AddField(
model_name='taxsaveinputs',
name='ID_StateLocalTax_crt',
field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='taxsaveinputs',
name='ID_StateLocalTax_crt_cpi',
field=models.NullBooleanField(default=None),
),
]
| mit | Python |
|
80d75bad57c8be1b08fbb2129bb0511c633446e2 | Create CertPaIT.py | yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti,yeti-platform/yeti | plugins/feeds/public/CertPaIT.py | plugins/feeds/public/CertPaIT.py | import logging
from datetime import datetime, timedelta
from core.observables import Hash
from core.feed import Feed
from core.errors import ObservableValidationError
class CertPaIt(Feed):
default_values = {
"frequency": timedelta(minutes=30),
"name": "CertPaIT",
"source" : "https://infosec.cert-pa.it/analyze/submission.rss",
"description": "This feed contains data from infosec.cert-pa.it",
}
def update(self):
for item in self.update_xml('item', ["title", "link"]):
self.analyze(item)
def analyze(self, item):
md5 = item['title'].replace("MD5: ", "")
context = {}
context['date_added'] = datetime.now()
context['source'] = self.name
context['url'] = item['link']
try:
if md5:
hash_data = Hash.get_or_create(value=md5)
if hash_data.new is True or self.name not in hash_data.sources:
hash_data.add_context(context)
hash_data.add_source(self.name)
except ObservableValidationError as e:
logging.error(e)
| apache-2.0 | Python |
|
90a467a849bb05cd0922ca0808279bf009657150 | Create reverse_words.py | mschruf/python | Google_Code_Jam/2010_Africa/Qualification_Round/B/reverse_words.py | Google_Code_Jam/2010_Africa/Qualification_Round/B/reverse_words.py | #!/usr/bin/python -tt
"""Solves problem B from Google Code Jam Qualification Round Africa 2010
(https://code.google.com/codejam/contest/351101/dashboard#s=p1)
"Reverse Words"
"""
import sys
def main():
"""Reads problem data from stdin and prints answers to stdout.
Args:
None
Returns:
Nothing
"""
lines = sys.stdin.read().splitlines()
num_test_cases = int(lines[0])
test_cases = lines[1:]
assert len(test_cases) == num_test_cases
i = 1
for test_case in test_cases:
words = test_case.split()
words.reverse()
print 'Case #%d:' % (i,), ' '.join(words)
i += 1
if __name__ == '__main__':
main()
| cc0-1.0 | Python |
|
3155a8ab725c1b1535a99229f31008587ceb3e64 | Add continuous finite test | econ-ark/HARK,econ-ark/HARK | HARK/ConsumptionSaving/tests/test_ConsRiskyContribModel.py | HARK/ConsumptionSaving/tests/test_ConsRiskyContribModel.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 10:06:51 2021
@author: Mateo
"""
import unittest
from copy import copy
import numpy as np
from HARK.ConsumptionSaving.ConsRiskyAssetModel import (
RiskyContribConsumerType,
init_riskyContrib
)
class test_(unittest.TestCase):
def setUp(self):
# A set of finite parameters
self.par_finite = init_riskyContrib.copy()
# Four period model
self.par_finite['PermGroFac'] = [2.0, 1.0, 0.1, 1.0]
self.par_finite['PermShkStd'] = [0.1, 0.1, 0.0, 0.0]
self.par_finite['TranShkStd'] = [0.2, 0.2, 0.0, 0.0]
self.par_finite['AdjustPrb'] = [0.5, 0.5, 1.0, 1.0]
self.par_finite['tau'] = [0.1, 0.1, 0.0, 0.0]
self.par_finite['LivPrb'] = [1.0, 1.0, 1.0, 1.0]
self.par_finite['T_cycle'] = 4
self.par_finite['T_retire'] = 0
self.par_finite['T_age'] = 4
# Adjust discounting and returns distribution so that they make sense in a
# 4-period model
self.par_finite['DiscFac'] = 0.95**15
self.par_finite['Rfree'] = 1.03**15
self.par_finite['RiskyAvg'] = 1.08**15 # Average return of the risky asset
self.par_finite['RiskyStd'] = 0.20*np.sqrt(15) # Standard deviation of (log) risky returns
def test_finite_cont_share(self):
cont_params = copy(self.par_finite)
cont_params['DiscreteShareBool'] = False
cont_params['vFuncBool'] = False
fin_cont_agent = RiskyContribConsumerType(**cont_params)
fin_cont_agent.solve()
self.assertAlmostEqual(
fin_cont_agent.solution[0].stageSols['Reb'].DFuncAdj(3,4), -0.87757204
)
self.assertAlmostEqual(
fin_cont_agent.solution[0].stageSols['Sha'].ShareFuncAdj(5,0.1), 0.10846904
)
self.assertAlmostEqual(
fin_cont_agent.solution[0].stageSols['Cns'].cFunc(3,4,0.1), 2.46055802
)
| apache-2.0 | Python |
|
0f6961c10def1f1343c6c31d117e5ca87cefd4b7 | add openvas_vulns migration | asrozar/perception | alembic/versions/506c8e35ba7c_create_openvas_vuln_table.py | alembic/versions/506c8e35ba7c_create_openvas_vuln_table.py | """create openvas_vuln table
Revision ID: 506c8e35ba7c
Revises: 13b7c3d4c802
Create Date: 2017-07-21 12:19:35.711173
"""
from alembic import op
import sqlalchemy as sa
import datetime
def _get_date():
return datetime.datetime.now()
# revision identifiers, used by Alembic.
revision = '506c8e35ba7c'
down_revision = '13b7c3d4c802'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('openvas_vulns',
sa.Column('id', sa.Integer, primary_key=True, nullable=False),
sa.Column('name', sa.Text, unique=True, nullable=False),
sa.Column('created_at', sa.TIMESTAMP(timezone=False), default=_get_date))
def downgrade():
op.drop_table('openvas_vulns')
| mit | Python |
|
6ce0d934cfe8b9e93a833ff1d31915ffd14c643d | add new package (#25526) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-pydantic/package.py | var/spack/repos/builtin/packages/py-pydantic/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPydantic(PythonPackage):
"""Data validation and settings management using Python type hinting."""
homepage = "https://github.com/samuelcolvin/pydantic"
pypi = "pydantic/pydantic-1.8.2.tar.gz"
version('1.8.2', sha256='26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', when='^python@:3.6', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 | Python |
|
f363864f7f6ad9da45cb3053816d500838821a27 | add new package (#27093) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-posterior/package.py | var/spack/repos/builtin/packages/r-posterior/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPosterior(RPackage):
"""Tools for Working with Posterior Distributions.
Provides useful tools for both users and developers of packages for
fitting Bayesian models or working with output from Bayesian models. The
primary goals of the package are to: (a) Efficiently convert between many
different useful formats of draws (samples) from posterior or prior
distributions. (b) Provide consistent methods for operations commonly
performed on draws, for example, subsetting, binding, or mutating draws.
(c) Provide various summaries of draws in convenient formats. (d) Provide
lightweight implementations of state of the art posterior inference
diagnostics. References: Vehtari et al. (2021) <doi:10.1214/20-BA1221>."""
homepage = "https://mc-stan.org/posterior/"
cran = "posterior"
version('1.1.0', sha256='eff6262dbcc1bf18337f535b0c75ba2fe360322e8b170c466e24ed3ee76cf4d2')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-abind', type=('build', 'run'))
depends_on('r-checkmate', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-vctrs', type=('build', 'run'))
depends_on('r-tensora', type=('build', 'run'))
depends_on('r-pillar', type=('build', 'run'))
depends_on('r-distributional', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
| lgpl-2.1 | Python |
|
1596d091183d89c703e67555e81f24722dc0d8a2 | add import script for Chelmsford | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_chelmsford.py | polling_stations/apps/data_collection/management/commands/import_chelmsford.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000070'
addresses_name = 'Democracy_Club__04May2017 (1).tsv'
stations_name = 'Democracy_Club__04May2017 (1).tsv'
elections = ['local.essex.2017-05-04']
csv_delimiter = '\t'
csv_encoding = 'latin-1'
| bsd-3-clause | Python |
|
94e83a48d3700cdc7c9bb6bd9a14860d2665c655 | Add custom roster module | saltstack/salt-pkg-tests,saltstack/salt-pkg-tests,saltstack/salt-pkg-tests | _modules/roster.py | _modules/roster.py | # import python libraries
import logging
# import salt libraries
import salt.utils.files
import salt.utils.yaml
log = logging.getLogger(__name__)
def remove(roster, name):
'''
remove an entry from the salt-ssh roster
'''
with salt.utils.files.fopen(roster, 'r') as conf:
roster_txt = conf.read()
roster_yaml = salt.utils.yaml.safe_load(roster_txt)
try:
del roster_yaml[name]
except KeyError:
log.error('{0} does not exist in roster file {1}'.format(name, roster))
return False
try:
with salt.utils.files.fopen(roster, 'w+') as conf:
salt.utils.yaml.safe_dump(roster_yaml, conf, default_flow_style=False)
except (IOError, OSError):
log.error('Unable to delete {0} from roster file {1}'.format(name, roster))
return False
| apache-2.0 | Python |
|
8a043a2d3a9517c5eb84aea3e9916419f6136e23 | Add tests for IndexAbstractor. | pySUMO/pysumo,pySUMO/pysumo | test/lib/indexabstractor.py | test/lib/indexabstractor.py | """ The PyUnit test framework for the indexabstractor. """
import unittest
from lib import parser
from lib.indexabstractor import *
class indexTestCase(unittest.TestCase):
def setUp(self):
self.sumo = parser.Ontology('data/Merge.kif', name='SUMO')
self.kif = parser.kifparse(self.sumo)
self.indexabstractor = IndexAbstractor()
def test0Normalize(self):
self.assertEqual(normalize('t.erm '), 'term')
self.assertEqual(normalize(' TeRM '), 'term')
self.assertNotEqual(normalize('t erm '), 'term')
def test1BuildIndex(self):
self.indexabstractor.update_index(self.kif)
self.assertEqual(self.indexabstractor.ontologies, {self.sumo})
self.assertEqual(self.indexabstractor.root, self.kif)
assert self.sumo in self.indexabstractor.index
def test2Search(self):
self.maxDiff = None
self.indexabstractor.update_index(self.kif)
self.assertEqual(self.indexabstractor.search('Plasma'),
self.indexabstractor.search('plasma'))
self.assertEqual(self.indexabstractor.search('ValidDeductiveArgument'),
self.indexabstractor.search(' valIddedUctiVeargument '))
self.assertNotEqual(self.indexabstractor.search('ValidDeductiveArgument'),
self.indexabstractor.search('InvalidDeductiveArgument'))
result = self.indexabstractor.search(' ContentbearingObJect')
assert self.sumo in result
definition = result[self.sumo]
self.assertEqual(sorted(definition),
sorted(['( relatedInternalConcept ContentBearingObject containsInformation )',
'( subclass ContentBearingObject CorpuscularObject )',
'( subclass ContentBearingObject ContentBearingPhysical )',
'( documentation ContentBearingObject EnglishLanguage "Any &%SelfConnectedObject that expressescontent. This content may be a &%Proposition, e.g. when the &%ContentBearingObjectis a &%Sentence or &%Text, or it may be a representation of an abstract orphysical object, as with an &%Icon, a &%Word or a &%Phrase." )']))
indexTestSuit = unittest.makeSuite(indexTestCase, 'test')
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(indexTestSuit)
| bsd-2-clause | Python |
|
859d1031bc61cd4466953cbc7a5e282abff35e50 | Create database.py | deepak223098/Data_Science_Python | database.py | database.py | bsd-3-clause | Python |
||
c8d57138240e87c802b84cf0b2b01efd01c80e41 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/easy/angry_professor/py/solution.py | hackerrank/algorithms/implementation/easy/angry_professor/py/solution.py | #!/bin/python3
import sys
def isClassCancelled(arrivalTimes, cancellationThreshold):
count = 0
for arrivalTime in arrivalTimes:
if arrivalTime <= 0:
count += 1
return count < cancellationThreshold
t = int(input())
for a0 in range(t):
n, k = map(int, input().split())
a = tuple(map(int, input().split()))
if isClassCancelled(a, k):
print('YES')
else:
print('NO')
| mit | Python |
|
02183bdcd1b3e4109568f5077a6074573bbd8bf9 | Add send_unsent.py. | ProgVal/site-enseigner | send_unsent.py | send_unsent.py | #!/usr/bin/env python2
# -*- coding: utf8 -*-
import smtplib
import enseigner.model as model
import enseigner.emails as emails
mails = model.Mail.all_unsent()
yesno = raw_input(u'Envoyer %d mails ? ' % len(mails))
if yesno != 'yes':
exit(0)
sender = emails.Sender()
errors = []
for mail in mails:
try:
sender.send(mail.recipient, mail.subject, mail.content)
except smtplib.SMTPException as e:
errors.append((mail, e))
else:
mail.set_sent()
print(repr(errors))
with open('/tmp/enseigner_errors.txt', 'a') as fd:
for error in errors:
fd.write('\n\n')
fd.write(repr(error))
| mit | Python |
|
eee8b3e96f7b0c9f24e7c43483bb6d74bd8a490a | add proto | mylokin/servy | servy/proto.py | servy/proto.py | import json
class Response(object):
@classmethod
def encode(cls, content):
return json.dumps(content)
@classmethod
def decode(cls, content):
return json.loads(content)
class Request(object):
@classmethod
def encode(cls, proc, args, kw):
return json.dumps({
'proc': proc,
'args': args,
'kw': kw,
})
@classmethod
def decode(cls, content):
content = json.loads(content)
return (
content['proc'],
content['args'],
content['kw'],
)
| mit | Python |
|
6f8d2e724f4aafb6b8295b8b0a1f915d5f21fa38 | fix script | EGP-CIG-REU/dealii,nicolacavallini/dealii,lue/dealii,ibkim11/dealii,sriharisundar/dealii,angelrca/dealii,spco/dealii,naliboff/dealii,gpitton/dealii,flow123d/dealii,YongYang86/dealii,flow123d/dealii,msteigemann/dealii,sriharisundar/dealii,adamkosik/dealii,sairajat/dealii,nicolacavallini/dealii,Arezou-gh/dealii,Arezou-gh/dealii,mac-a/dealii,shakirbsm/dealii,jperryhouts/dealii,lue/dealii,shakirbsm/dealii,gpitton/dealii,kalj/dealii,mac-a/dealii,maieneuro/dealii,EGP-CIG-REU/dealii,ibkim11/dealii,jperryhouts/dealii,rrgrove6/dealii,EGP-CIG-REU/dealii,pesser/dealii,natashasharma/dealii,maieneuro/dealii,adamkosik/dealii,natashasharma/dealii,Arezou-gh/dealii,sriharisundar/dealii,lpolster/dealii,spco/dealii,mac-a/dealii,lpolster/dealii,YongYang86/dealii,EGP-CIG-REU/dealii,YongYang86/dealii,pesser/dealii,EGP-CIG-REU/dealii,msteigemann/dealii,ESeNonFossiIo/dealii,sriharisundar/dealii,natashasharma/dealii,pesser/dealii,ibkim11/dealii,jperryhouts/dealii,Arezou-gh/dealii,andreamola/dealii,lpolster/dealii,kalj/dealii,ibkim11/dealii,ESeNonFossiIo/dealii,andreamola/dealii,johntfoster/dealii,rrgrove6/dealii,nicolacavallini/dealii,spco/dealii,naliboff/dealii,gpitton/dealii,danshapero/dealii,mac-a/dealii,YongYang86/dealii,johntfoster/dealii,danshapero/dealii,lue/dealii,rrgrove6/dealii,mtezzele/dealii,msteigemann/dealii,danshapero/dealii,spco/dealii,kalj/dealii,johntfoster/dealii,naliboff/dealii,ESeNonFossiIo/dealii,gpitton/dealii,mtezzele/dealii,sairajat/dealii,YongYang86/dealii,rrgrove6/dealii,gpitton/dealii,flow123d/dealii,danshapero/dealii,kalj/dealii,pesser/dealii,mac-a/dealii,flow123d/dealii,mac-a/dealii,adamkosik/dealii,gpitton/dealii,maieneuro/dealii,adamkosik/dealii,andreamola/dealii,flow123d/dealii,YongYang86/dealii,msteigemann/dealii,angelrca/dealii,lpolster/dealii,johntfoster/dealii,angelrca/dealii,maieneuro/dealii,sairajat/dealii,JaeryunYim/dealii,YongYang86/dealii,naliboff/dealii,johntfoster/dealii,msteigemann/dealii,mt
ezzele/dealii,shakirbsm/dealii,natashasharma/dealii,spco/dealii,ibkim11/dealii,mtezzele/dealii,maieneuro/dealii,maieneuro/dealii,natashasharma/dealii,JaeryunYim/dealii,gpitton/dealii,maieneuro/dealii,naliboff/dealii,mtezzele/dealii,ibkim11/dealii,jperryhouts/dealii,shakirbsm/dealii,lue/dealii,shakirbsm/dealii,natashasharma/dealii,pesser/dealii,ESeNonFossiIo/dealii,kalj/dealii,angelrca/dealii,andreamola/dealii,lue/dealii,rrgrove6/dealii,johntfoster/dealii,adamkosik/dealii,mtezzele/dealii,nicolacavallini/dealii,jperryhouts/dealii,lue/dealii,ESeNonFossiIo/dealii,Arezou-gh/dealii,shakirbsm/dealii,andreamola/dealii,naliboff/dealii,rrgrove6/dealii,spco/dealii,andreamola/dealii,adamkosik/dealii,mtezzele/dealii,sriharisundar/dealii,naliboff/dealii,Arezou-gh/dealii,JaeryunYim/dealii,Arezou-gh/dealii,sairajat/dealii,JaeryunYim/dealii,ESeNonFossiIo/dealii,EGP-CIG-REU/dealii,ibkim11/dealii,andreamola/dealii,JaeryunYim/dealii,mac-a/dealii,ESeNonFossiIo/dealii,kalj/dealii,kalj/dealii,lpolster/dealii,danshapero/dealii,msteigemann/dealii,angelrca/dealii,danshapero/dealii,nicolacavallini/dealii,lue/dealii,sairajat/dealii,flow123d/dealii,angelrca/dealii,JaeryunYim/dealii,johntfoster/dealii,flow123d/dealii,shakirbsm/dealii,adamkosik/dealii,spco/dealii,nicolacavallini/dealii,jperryhouts/dealii,msteigemann/dealii,sriharisundar/dealii,nicolacavallini/dealii,sairajat/dealii,JaeryunYim/dealii,pesser/dealii,pesser/dealii,lpolster/dealii,danshapero/dealii,sairajat/dealii,jperryhouts/dealii,angelrca/dealii,EGP-CIG-REU/dealii,sriharisundar/dealii,rrgrove6/dealii,lpolster/dealii,natashasharma/dealii | tests/scripts/makereport.py | tests/scripts/makereport.py | # accepts 0,1, or 2 arguments. If a string starting with a number is handed in, it is assumed to be a subdirectory of the current directory to run on. If not specified, the newest build is used. Any other string is taken as the branch name for this test (or treated as mainline). Order of the arguments does not matter.
# for questions: Timo Heister
import xml.etree.ElementTree as ET
import glob
import sys
from datetime import datetime
import subprocess
class Group:
def __init__(self, name):
self.name = name
self.n_tests = 0
self.n_fail = 0
self.fail = []
self.fail_text = {}
class Revision:
def __init__(self):
self.groups = {}
self.number = -1
self.name = ''
self.n_tests = 0
self.n_fail = 0
branch=''
args=sys.argv
args.pop(0)
dirname=""
while len(args)>0:
if args[0].startswith("20"): #I hope this script is not used in the year 2100
dirname=args[0].replace('/','')
else:
branch=args[0].replace('/','')+'/'
args.pop(0)
if dirname=="":
n=glob.glob("*/Build.xml")
n.sort(reverse=True)
dirname = n[0].replace('/Build.xml','')
if len(glob.glob(dirname+'/Update.xml'))>0:
#new format
tree = ET.parse(dirname+'/Update.xml')
name = tree.getroot().find('BuildName').text
number = tree.getroot().find('Revision').text
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
else:
#old format
tree = ET.parse(dirname+'/Notes.xml')
name = tree.getroot().attrib['BuildName']
number = name.split('-')[-1]
number = number[1:]
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
header = "Revision: %s"%number + "\n"
header += "Date: %s"%(date.strftime("%Y %j %F %U-%w")) + '\n'
id = subprocess.check_output(["id","-un"])+'@'+subprocess.check_output(["hostname"])
id=id.replace('\n','')
header += "Id: %s"%id
#now Test.xml:
tree = ET.parse(dirname+'/Test.xml')
root = tree.getroot()
testing = root.find('Testing')
tests={}
for test in testing.findall("Test"):
status = test.attrib['Status']
fail=False
if status=="failed": fail=True
name = test.find('Name').text
group = name.split('/')[0]
if fail:
line = "%s 3 %s%s"%(date,branch,name)
else:
line = "%s + %s%s"%(date,branch,name)
if group not in tests: tests[group]=[]
tests[group].append( line )
for g in sorted(tests):
group = tests[g]
print header
for l in group:
print l
| # accepts 0,1, or 2 arguments. If a string starting with a number is handed in, it is assumed to be a subdirectory of the current directory to run on. If not specified, the newest build is used. Any other string is taken as the branch name for this test (or treated as mainline). Order of the arguments does not matter.
# for questions: Timo Heister
import xml.etree.ElementTree as ET
import glob
import sys
from datetime import datetime
import subprocess
class Group:
def __init__(self, name):
self.name = name
self.n_tests = 0
self.n_fail = 0
self.fail = []
self.fail_text = {}
class Revision:
def __init__(self):
self.groups = {}
self.number = -1
self.name = ''
self.n_tests = 0
self.n_fail = 0
branch=''
args=sys.argv
args.pop(0)
dirname=""
while len(args)>0:
if args[0].startswith("20"): #I hope this script is not used in the year 2100
dirname=args[0].replace('/','')
else:
branch=args[0].replace('/','')+'/'
args.pop(0)
if len(glob.glob(dirname+'/Update.xml'))>0:
#new format
tree = ET.parse(dirname+'/Update.xml')
name = tree.getroot().find('BuildName').text
number = tree.getroot().find('Revision').text
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
else:
#old format
tree = ET.parse(dirname+'/Notes.xml')
name = tree.getroot().attrib['BuildName']
number = name.split('-')[-1]
number = number[1:]
date = datetime.strptime(dirname,"%Y%m%d-%H%M")
header = "Revision: %s"%number + "\n"
header += "Date: %s"%(date.strftime("%Y %j %F %U-%w")) + '\n'
id = subprocess.check_output(["id","-un"])+'@'+subprocess.check_output(["hostname"])
id=id.replace('\n','')
header += "Id: %s"%id
#now Test.xml:
tree = ET.parse(dirname+'/Test.xml')
root = tree.getroot()
testing = root.find('Testing')
tests={}
for test in testing.findall("Test"):
status = test.attrib['Status']
fail=False
if status=="failed": fail=True
name = test.find('Name').text
group = name.split('/')[0]
if fail:
line = "%s 3 %s%s"%(date,branch,name)
else:
line = "%s + %s%s"%(date,branch,name)
if group not in tests: tests[group]=[]
tests[group].append( line )
for g in sorted(tests):
group = tests[g]
print header
for l in group:
print l
| lgpl-2.1 | Python |
339bb5cd325c7b9c08b8a43994f55bbe1756fbde | validate redirect | djaodjin/djaodjin-signup,smirolo/djaodjin-signup,smirolo/djaodjin-signup,djaodjin/djaodjin-signup,djaodjin/djaodjin-signup,smirolo/djaodjin-signup | signup/auth.py | signup/auth.py | # Copyright (c) 2014, Fortylines LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import urlparse
from django.conf import settings
from django.http.request import split_domain_port, validate_host
def validate_redirect_url(next_url):
"""
Returns the next_url path if next_url matches allowed hosts.
"""
if not next_url:
return None
parts = urlparse.urlparse(next_url)
if parts.netloc:
domain, port = split_domain_port(parts.netloc)
allowed_hosts = ['*'] if settings.DEBUG else settings.ALLOWED_HOSTS
if not (domain and validate_host(domain, allowed_hosts)):
return None
return parts.path
| bsd-2-clause | Python |
|
c6e85e35a090c33bc1d6813dce959c8d47588ae8 | send an email with current IP address | aldebaran1/skytraq | send_Email.py | send_Email.py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 16 12:14:51 2017
@author: smrak
"""
import requests
import urllib3
from datetime import datetime
def getIP():
"""
Sebastijan Mrak:
get & reteurn a public IP address
"""
http = urllib3.PoolManager()
r = http.request('GET', 'http://ip.42.pl/raw')
my_ip = r.data.decode('utf-8')
return my_ip
def send_simple_message(dev_name, ip):
dt = datetime.utcnow()
time = dt.strftime("%d-%m %H:%M")
return requests.post(
"https://api.mailgun.net/v3/sandbox1b5516af304e4d3bbb4ce505c254cbca.mailgun.org/messages",
auth=("api", "key-6e8d2a811ff2ea98114574c72dc988f6"),
data={"from": "Mailgun Sandbox <[email protected]>",
"to": "Sebastijan <[email protected]>",
"subject": "Current IP address for device: " + dev_name,
"text": "IP address at a time "+str(time)+" is: "+str(ip)})
ip = getIP()
#print (ip)
send_simple_message('PC1', ip) | mit | Python |
|
c13968125383581e67804e11bc430391d355145a | Create DataStreamasDisjointIntervals.py | Chasego/codirit,Chasego/codirit,cc13ny/algo,Chasego/codirit,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/cod,Chasego/cod,Chasego/codi,cc13ny/algo,cc13ny/algo,Chasego/codi,Chasego/codi,Chasego/codirit,Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/codi | leetcode/352-Data-Stream-as-Disjoint-Intervals/DataStreamasDisjointIntervals.py | leetcode/352-Data-Stream-as-Disjoint-Intervals/DataStreamasDisjointIntervals.py | # Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class SummaryRanges(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.intervals = []
def addNum(self, val):
"""
:type val: int
:rtype: void
"""
intv = self.intervals
l, r = 0, len(self.intervals) - 1
print val
while l <= r:
m = l + (r - l) / 2
if val < intv[m].start:
r = m - 1
elif val <= intv[m].end:
break
else:
l = m + 1
if l > r:
if 1 <= l < len(intv) and intv[l - 1].end + 1 == val and intv[l].start - 1 == val:
intv[l - 1].end = intv[l].end
intv.pop(l)
elif len(intv) > l and intv[l].start - 1 == val:
intv[l].start = val
elif l > 0 and intv[l - 1].end + 1 == val:
intv[l - 1].end = val
else:
intv.insert(l, Interval(val, val))
def getIntervals(self):
"""
:rtype: List[Interval]
"""
return self.intervals
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
| mit | Python |
|
435220dda7eb928d9d959594d7986136f17da973 | Add actual url patter for #239 | Sinar/popit_ng,Sinar/popit_ng | popit/urls/rooturls.py | popit/urls/rooturls.py | from django.conf.urls import url
from popit.views import *
urlpatterns = [
url(r'^(?P<language>\w{2})', api_root, name="api-root"),
url(r'^$', api_root_all),
]
| agpl-3.0 | Python |
|
f955620fb2cb12f14c38ad196d99ae12d5b9c1ff | add default openflow test settings | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/clearinghouse/defaultsettings/openflowtests.py | src/python/expedient/clearinghouse/defaultsettings/openflowtests.py | '''Contains default settings for the testing environment.
Created on Aug 22, 2010
@author: jnaous
'''
from os.path import join, dirname
PYTHON_DIR = join(dirname(__file__), "../../..")
OM_PROJECT_DIR = join(PYTHON_DIR, "openflow/optin_manager")
CH_PROJECT_DIR = join(PYTHON_DIR, "expedient/clearinghouse")
GCF_DIR = join(PYTHON_DIR, "gcf")
SSL_DIR = join(dirname(__file__), "ssl")
FLOWVISOR_DIR = join(PYTHON_DIR, "../../../../flowvisor")
'''Location of the testing Flowvisor source directory.'''
USE_RANDOM = False
'''Randomize the tests where possible?'''
SITE_IP_ADDR = '192.168.126.128'
'''The IP address of the host where Expedient and the OM are running.'''
OM_PORT = 8443
'''Port on which the Opt-In manager is running.'''
CH_PORT = 443
'''Port on which Expedient is running.'''
PREFIX = ""
FV_CONFIG = 'fv_vm_config.xml'
'''Name of the Flowvisor config file.'''
GCH_PORT = 8001
'''The port on which the GENI Clearinghouse should run.'''
FLOWVISORS = [
dict(
of_port=6633, # openflow port
xmlrpc_port=8080, # XMLRPC port for the flowvisor
username="root", # The username to use to connect to the FV
password='rootpassword', # The password to use to connect to the FV
path=(FLOWVISOR_DIR, FV_CONFIG), # configuration file
),
]
'''Information about where the test flowvisor should run.
This should be a list of dicts with the following keys:
* C{of_port}: The openflow port number the Flowvisor will use.
* C{xmlrpc_port}: The port number for XMLRPC calls to the Flowvisor.
* C{username}: The username to use for accessing the xmlrpc calls.
* C{password}: The password to use for accessing the xmlrpc calls.
* C{path}: The location of the flowvisor config file.
'''
MININET_VMS = [('172.16.77.131', 22)]
'''Information about where the Mininet VM is running.
This should be a list of tuples (IP address, SSH port number)
'''
MININET_SWITCH_TYPE = "user"
'''Type of switch to use. One of "user", "ovsk", "kernel"'''
NUM_EXPERIMENTS = 2
'''Number of Slices to instantiate during testing.'''
NUM_DUMMY_OMS = 3
'''Number of Dummy OMs to use for GAPI tests.'''
NUM_SWITCHES_PER_AGG = 10
'''Number of dummy switches for GAPI tests.'''
NUM_LINKS_PER_AGG = 20
'''Number of dummy links for GAPI tests.'''
NUM_DUMMY_FVS = 1
'''Don't change. Num of Dummy FVs for OM tests.'''
USE_HTTPS = True
'''Run using HTTPS or HTTP to expedient & OM?'''
SHOW_PROCESSES_IN_XTERM = True
'''Don't change. Should forked processes run in an xterm?'''
PAUSE_AFTER_TESTS = False
'''If true, each test will wait for an Enter from the user
before tearing down (useful to look at xterm output).
'''
TIMEOUT = 20
'''Time to wait for processes to run and for communication to work.'''
# basic settings sanity checks
assert(len(FLOWVISORS) == len(MININET_VMS))
| bsd-3-clause | Python |
|
99f454b3fa62cffac922a7b3431e0024e6dfde3d | add data migration script | fedspendingtransparency/data-act-broker-backend,chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend,fedspendingtransparency/data-act-broker-backend,chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | dataactcore/scripts/migrateDataBroker.py | dataactcore/scripts/migrateDataBroker.py | # migrate data using pg_dump and pg_restore
# data copied from tables:
# error_data:
# error_metadata
# file
# job_tracker:
# job
# submission
# job_dependency
# user_manager;
# users
# email_token
# validator:
# appropriation
# award_financial
# award_financial_assistance
# object_class_program_activity
# run on command line: python migrateDataBroker.py
from dataactcore.config import CONFIG_DB
import subprocess
c = 'postgresql://{}:{}@{}/'.format(
CONFIG_DB['username'], CONFIG_DB['password'], CONFIG_DB['host'])
target = '{}data_broker'.format(c)
# error_data
db = 'error_data'
source = '{}{}'.format(c, db)
print('migrating {}'.format(db))
cmd = 'pg_dump -d {} -t error_metadata -t file --data-only --format=c | ' \
'pg_restore -d {} --data-only'.format(source, target)
p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
print('return code = {}\n'.format(p))
# job_tracker
db = 'job_tracker'
source = '{}{}'.format(c, db)
print('migrating {}'.format(db))
cmd = 'pg_dump -d {} -t job_dependency -t job -t submission --data-only --format=c | ' \
'pg_restore -d {} --data-only'.format(source, target)
p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
print('return code = {}\n'.format(p))
# user_manager
db = 'user_manager'
source = '{}{}'.format(c, db)
print('migrating {}'.format(db))
cmd = 'pg_dump -d {} -t users -t email_token --data-only --format=c | ' \
'pg_restore -d {} --data-only'.format(source, target)
p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
print('return code = {}\n'.format(p))
# validation - these tables are larger, so do individually
db = 'validation'
source = '{}{}'.format(c, db)
tables = ['appropriation', 'object_class_program_activity',
'award_financial', 'award_financial_assistance']
for t in tables:
print('migrating {}: {}'.format(db, t))
cmd = 'pg_dump -d {} -t {} --data-only --format=c | ' \
'pg_restore -d {} --data-only'.format(source, t, target)
p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
print('return code = {}\n'.format(p))
| cc0-1.0 | Python |
|
17bbd6d44ec7edd1a079b12a44c283a358b11b92 | add import script for Teignbridge (closes #865) | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_teignbridge.py | polling_stations/apps/data_collection/management/commands/import_teignbridge.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000045'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| bsd-3-clause | Python |
|
c750cbb65541ea32c2f8904c394469a14fa1e82b | add import script for West Dorset | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_west_dorset.py | polling_stations/apps/data_collection/management/commands/import_west_dorset.py | from data_collection.management.commands import BaseXpressDCCsvInconsistentPostcodesImporter
class Command(BaseXpressDCCsvInconsistentPostcodesImporter):
council_id = 'E07000052'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WDDC.TSV'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WDDC.TSV'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
station_postcode_search_fields = [
'polling_place_postcode',
'polling_place_address_4',
'polling_place_address_3',
'polling_place_address_2',
'polling_place_address_1',
]
| bsd-3-clause | Python |
|
3e4ed4d6624ac0db7838e9aeb7a98710f746b2b8 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | hackerrank/algorithms/strings/easy/mars_exploration/py/solution.py | hackerrank/algorithms/strings/easy/mars_exploration/py/solution.py | #!/bin/python3
import sys
def solution(signal):
import itertools
count = 0
for expected, received in zip(itertools.cycle('SOS'), signal):
if expected != received:
count += 1
return count
signal = input().strip()
count = solution(signal)
print(count)
| mit | Python |
|
eb9eb8fd295d8dbba66267e7551f4e6a51687797 | Set db starting point. | glogiotatidis/snippets-service,mozilla/snippets-service,mozmar/snippets-service,glogiotatidis/snippets-service,mozmar/snippets-service,mozilla/snippets-service,mozmar/snippets-service,glogiotatidis/snippets-service,mozilla/snippets-service,glogiotatidis/snippets-service,mozmar/snippets-service,mozilla/snippets-service | snippets/base/migrations/0062_set_asrsnippet_id_autoincrement_starting_point.py | snippets/base/migrations/0062_set_asrsnippet_id_autoincrement_starting_point.py | # Generated by Django 2.1.3 on 2018-11-16 12:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0061_auto_20181116_0810'),
]
operations = [
migrations.RunSQL(['ALTER TABLE base_asrsnippet AUTO_INCREMENT=10500;'], [''])
]
| mpl-2.0 | Python |
|
68e10dcb52f17aca1482112816062ea15e40097b | Create viruscheck.py | kieranjol/IFIscripts | viruscheck.py | viruscheck.py | #!/usr/bin/env python
#Requires ClamAV to be installed
import sys
import subprocess
def clamscan():
scan = subprocess.check_output([
'clamscan',
'-r',
starting_dir
])
print scan
starting_dir = sys.argv[1]
print "Running scan.........."
clamscan()
| mit | Python |
|
70e14187ecd2567894e5e8183341a63835d6839c | Create pldm related specific constants file. | openbmc/openbmc-test-automation,openbmc/openbmc-test-automation | data/pldm_variables.py | data/pldm_variables.py | #!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_TYPE_BASE = '00'
PLDM_TYPE_PLATFORM = '02'
PLDM_TYPE_BIOS = '03'
PLDM_TYPE_OEM = '3F'
PLDM_BASE_CMD = {
'GET_TID': '2',
'GET_PLDM_VERSION': '3',
'GET_PLDM_TYPES': '4',
'GET_PLDM_COMMANDS': '5'}
PLDM_SUCCESS = '00'
PLDM_ERROR = '01'
PLDM_ERROR_INVALID_DATA = '02'
PLDM_ERROR_INVALID_LENGTH = '03'
PLDM_ERROR_NOT_READY = '04'
PLDM_ERROR_UNSUPPORTED_PLDM_CMD = '05'
PLDM_ERROR_INVALID_PLDM_TYPE = '20'
BIOS_TABLE_UNAVAILABLE = '83',
INVALID_BIOS_TABLE_DATA_INTEGRITY_CHECK = '84',
INVALID_BIOS_TABLE_TYPE = '85'
PLDM_BIOS_CMD = {
'GET_BIOS_TABLE': '01',
'SET_BIOS_ATTRIBUTE_CURRENT_VALUE': '07',
'GET_BIOS_ATTRIBUTE_CURRENT_VALUE_BY_HANDLE': '08',
'GET_DATE_TIME': '0c'}
PLDM_PLATFORM_CMD = {
'SET_STATE_EFFECTER_STATES': '39',
'GET_PDR': '51'}
PLDM_PDR_TYPES = {
'STATE_EFFECTER_PDR': '11'}
# PLDM OEM related variables.
PLDM_FILEIO_CMD = {
'GET_FILE_TABLE': '1',
'READ_FILE': '4',
'WRITE_FILE': '5',
'READ_FILE_INTO_MEMORY': '6',
'WRITE_FILE_FROM_MEMORY': '7'}
PLDM_FILEIO_COMPLETION_CODES = {
'INVALID_FILE_HANDLE': '80',
'DATA_OUT_OF_RANGE': '81',
'INVALID_READ_LENGTH': '82',
'INVALID_WRITE_LENGTH': '83',
'FILE_TABLE_UNAVAILABLE': '84',
'INVALID_FILE_TABLE_TYPE': '85'}
| apache-2.0 | Python |
|
8ff3b74df83055068b1f8abe05e8ce186ab6eb18 | implement strStr with KMP. Kana我喜欢你啊!!! | sureleo/leetcode,sureleo/leetcode,lsingal/leetcode,lsingal/leetcode,lsingal/leetcode,sureleo/leetcode | python/string/ImplementstrStr.py | python/string/ImplementstrStr.py | #KMP algorithm. can't get it. Just a simple implementation of
#0. https://www.youtube.com/watch?v=2ogqPWJSftE
#and
#1. http://www.cnblogs.com/zuoyuan/p/3698900.html
class Solution:
# @param haystack, a string
# @param needle, a string
# @return an integer
def strStr(self, haystack, needle):
n = len(haystack)
m = len(needle)
if m == 0 or haystack == needle:
return 0
prefix = self.prefixMap(needle)
q = 0
for i in range(0, n):
while q > 0 and needle[q] != haystack[i]:
q = prefix[q - 1]
if needle[q] == haystack[i]:
q = q + 1
if q == m:
return i - m + 1
return -1
def prefixMap(self, needle):
prefix = [0 for i in xrange(len(needle))]
a = 0
for b in xrange(2, len(needle)+1):
while a > 0 and needle[a] != needle[b-1]:
a = prefix[a-1]
if needle[a] == needle[b-1]:
a += 1
prefix[b-1] = a
return prefix
if __name__ == "__main__":
solution = Solution()
print solution.strStr("mississippi", "pi")
print solution.strStr("a", "a")
| mit | Python |
|
8b42b0825d5cbb6becef9669b43a2c8229ea8642 | Add script to remove unpaired fasta entries. | konrad/kuf_bio_scripts | remove_unpaired_fasta_entries.py | remove_unpaired_fasta_entries.py | #!/usr/bin/env python
"""
Remove unpaired reads from a fasta file.
This script can be used for the case that unpaired reads (e.g. as
reads were removed during quality trimming) in a pair of fasta files
from paired-end sequencing need to be removed.
"""
import argparse
from Bio import SeqIO
from Bio.SeqIO.FastaIO import FastaWriter
parser = argparse.ArgumentParser()
parser.add_argument("fasta_file_to_filter")
parser.add_argument("reference_fasta_file")
parser.add_argument("--output_fasta", default="output.fa")
args = parser.parse_args()
# Read reference file header
reference_headers = {}
for seq_record in SeqIO.parse(args.reference_fasta_file, "fasta"):
reference_headers[seq_record.id.split()[0]] = 1
# Read fasta file to filter and write output
with open(args.output_fasta, 'w') as output_fh:
writer = FastaWriter(output_fh, wrap=0)
writer.write_file(
filter(lambda seq_record: seq_record.id.split()[0] in reference_headers,
SeqIO.parse(args.fasta_file_to_filter, "fasta")))
| isc | Python |
|
b84af881f800bfad13b5e90379c5f4ec0445239a | Add setup.py. | vasilvv/pymoira | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name = 'pymoira',
version = '1.0',
description = 'Client library for MIT Moira service managment system protocol',
author = 'Victor Vasiliev',
author_email = '[email protected]',
url = 'https://github.com/vasilvv/pymoira',
license = 'MIT',
py_modules = ['pymoira'])
| mit | Python |
|
52d3a5a20c7f1bf4c874e4210fd17753a67d5c71 | Add ID command | TheReverend403/Pyper,TheReverend403/Pyper | commands/cmd_id.py | commands/cmd_id.py | from lib.command import Command
class IdCommand(Command):
name = 'id'
description = 'Returns your user ID, or the ID of the current chat when -c or \'chat\' is passed as an argument.'
def run(self, message, args):
reply = 'Your Telegram ID is {0}'.format(message.from_user.id)
if '-c' or 'chat' in args:
reply = 'This chat\'s ID is {0}'.format(message.chat.id)
self.reply(message, reply)
| agpl-3.0 | Python |
|
9bc26f8a0d2c209fc3e73cd0f267164bfd49fef3 | Update setup.py | moreati/wok,algor512/wok,edunham/wok,vaygr/wok,wummel/wok,mythmon/wok,wummel/wok,edunham/wok,matt-garman/wok,edunham/wok,ngokevin/wok,gchriz/wok,ngokevin/wok,matt-garman/wok,matt-garman/wok,Avaren/wok,jneves/wok,abbgrade/wok,chrplace/wok,abbgrade/wok,algor512/wok,vaygr/wok,jneves/wok,Avaren/wok,Avaren/wok,mythmon/wok,moreati/wok,vaygr/wok,gchriz/wok,chrplace/wok,jneves/wok,moreati/wok,gchriz/wok,algor512/wok,mythmon/wok,chrplace/wok | setup.py | setup.py | #!/usr/bin/env python2
from distutils.core import setup
from wok import version
setup(
name='wok',
version=version.encode("utf8"),
author='Mike Cooper',
author_email='[email protected]',
url='http://wok.mythmon.com',
description='Static site generator',
long_description=
"Wok is a static website generator. It turns a pile of templates, "
"content, and resources (like CSS and images) into a neat stack of "
"plain HTML. You run it on your local computer, and it generates a "
"directory of web files that you can upload to your web server, or "
"serve directly."
download_url="http://wok.mythmon.com/download",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
'Operating System :: POSIX',
'Programming Language :: Python',
]
requires=['pyyaml', 'jinja2', 'Markdown', 'docutils', 'Pygments'],
packages=['wok'],
scripts=['scripts/wok'],
)
| #!/usr/bin/env python2
from distutils.core import setup
from wok import version
setup(name='wok',
version=version.encode("utf8"),
description='Static site generator',
install_requires=['pyyaml', 'jinja2'],
author='Mike Cooper',
author_email='[email protected]',
url='https://www.github.com/mythmon/wok',
packages=['wok'],
scripts=['scripts/wok'],
)
| mit | Python |
8238e0476097af0afed1443391370285dd61d8ca | Add setup.py | althonos/fs.sshfs | setup.py | setup.py | #!/usr/bin/env python
import setuptools
import os
with open(os.path.join('fs', 'sshfs', '__metadata__.py')) as f:
exec(f.read())
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: System :: Filesystems',
]
with open('README.rst', 'rt') as f:
DESCRIPTION = f.read()
with open('requirements.txt') as f:
REQUIREMENTS = f.read().splitlines()
with open(os.path.join('tests', 'requirements.txt')) as f:
TEST_REQUIREMENTS = [l for l in f if not l.startswith('-r')]
TEST_REQUIREMENTS.extend(REQUIREMENTS)
setuptools.setup(
author=__author__,
author_email=__author_email__,
classifiers=CLASSIFIERS,
description="Pyfilesystem2 implementation for SSH/SFTP using paramiko ",
install_requires=REQUIREMENTS,
license=__license__,
long_description=DESCRIPTION,
name='fs.sshfs',
packages=setuptools.find_packages(exclude=("tests",)),
platforms=['any'],
test_suite="tests",
tests_require=TEST_REQUIREMENTS,
url="https://github.com/althonos/fs.sshfs",
version=__version__,
)
| lgpl-2.1 | Python |
|
72416d5bf4308c10bc9b2ab31464ad2853042402 | Use the official package django select2 that finally support py3 | kenjhim/django-accounting,kenjhim/django-accounting,kenjhim/django-accounting,dulaccc/django-accounting,dulaccc/django-accounting,kenjhim/django-accounting,dulaccc/django-accounting,dulaccc/django-accounting | setup.py | setup.py | #!/usr/bin/env python
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in accounting.__init__.py
- Run:
`python setup.py sdist`
`twine upload dist/*`
"""
from setuptools import setup, find_packages
import os
import sys
from accounting import get_version
PROJECT_DIR = os.path.dirname(__file__)
setup(name='django-accounting',
version=get_version().replace(' ', '-'),
url='https://github.com/dulaccc/django-accounting',
author="Pierre Dulac",
author_email="[email protected]",
description="Accounting made accessible for small businesses and "
"sole proprietorships through a simple Django project",
long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
keywords="Accounting, Django, Money, Cashflow",
license='MIT',
platforms=['linux'],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
install_requires=[
'django>=1.7.0,<1.8',
# Used to render the forms
'django-bootstrap3==4.11.0',
# Used to improve the forms
'Django_Select2_Py3>=4.2.1',
# Used for date/time form fields
'django-datetime-widget>=0.9,<1.0',
# Define beautiful tags
'django-classy-tags==0.5.1',
# Internationalization
'Babel>=1.0,<1.4',
# Date utilities
'python-dateutil>=2.2,<2.3',
# Select2
'django-select2>=4.3,<4.4',
],
# See http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Application Frameworks']
)
| #!/usr/bin/env python
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in accounting.__init__.py
- Run:
`python setup.py sdist`
`twine upload dist/*`
"""
from setuptools import setup, find_packages
import os
import sys
from accounting import get_version
PROJECT_DIR = os.path.dirname(__file__)
setup(name='django-accounting',
version=get_version().replace(' ', '-'),
url='https://github.com/dulaccc/django-accounting',
author="Pierre Dulac",
author_email="[email protected]",
description="Accounting made accessible for small businesses and "
"sole proprietorships through a simple Django project",
long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
keywords="Accounting, Django, Money, Cashflow",
license='MIT',
platforms=['linux'],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
install_requires=[
'django>=1.7.0,<1.8',
# Used to render the forms
'django-bootstrap3==4.11.0',
# Used to improve the forms
'Django_Select2_Py3>=4.2.1',
# Used for date/time form fields
'django-datetime-widget>=0.9,<1.0',
# Define beautiful tags
'django-classy-tags==0.5.1',
# Internationalization
'Babel>=1.0,<1.4',
# Date utilities
'python-dateutil>=2.2,<2.3',
],
dependency_links=[
'http://github.com/applegrew/django-select2@python3#egg=Django_Select2_Py3-4.2.1',
],
# See http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Application Frameworks']
)
| mit | Python |
a79fcf2786df38f84b065ff579f83f03c1d5a20b | Add setup.py file | playfire/django-cyclebufferfield | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-cyclebufferfield',
description="Field to manage Django fields in a fixed-size ring buffer.",
version='0.1',
url='http://code.playfire.com/',
author='Playfire.com',
author_email='[email protected]',
license='BSD',
packages=find_packages(),
)
| bsd-3-clause | Python |
|
5d6f52d2b89eda2aa070faafad2fd89eeaf599ec | add setup py | RustoriaRu/SelectelCloudApi | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='selectel_cloud_api',
version='1.0',
packages=find_packages(),
install_requires='selectel_cloud_api',
url='https://github.com/RustoriaRu/SelectelCloudApi',
license='MIT',
author='vir-mir',
keywords='selectel.ru selectel api, cloud',
author_email='[email protected]',
description='api select cloud api',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| mit | Python |
|
ab3728405be94c071c353374735b97f207479c00 | Add setup.py to make an .exe with py2exe | cevad/tmpr | setup.py | setup.py | #!/c/Anaconda/python
from distutils.core import setup
import py2exe
setup(console=["tmpr.py"])
| mit | Python |
|
2e91c826a72e3f240f6d010678d68bab0bab5749 | Add setup.py for packaging | winny-/sirsi | setup.py | setup.py | from setuptools import setup
from sirsi import __version__, __author__
setup(
name='sirsi',
version=__version__,
author=__author__,
author_email='[email protected]',
description='Manage a sirsi enterprise-based library account',
url='https://github.com/-winny/sirsi',
license='MIT',
packages=['sirsi'],
install_requires=[
'argparse==1.2.1',
'beautifulsoup4==4.3.2',
'mechanize==0.2.5',
'python-dateutil==2.2',
'tabulate==0.7.2',
],
)
| mit | Python |
|
76601be760f0aa15637f65164c5e595b218fc2b9 | Add setup.py | tylertreat/gaeutils | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
VERSION = '0.0.1'
setup(
name='gae-utils',
version=VERSION,
packages=find_packages(),
install_requires=[],
include_package_data=True,
zip_safe=False,
maintainer='Tyler Treat',
maintainer_email='[email protected]'
)
| apache-2.0 | Python |
|
609bc6fbd1284c1b769c2e0548f6c65a97d144cd | Add initial attempt at a setup.py file | AndyDeany/pygame-template | setup.py | setup.py | from setuptools import setup
import pygametemplate
setup(
name="pygametemplate",
version=pygametemplate.__version__,
description=pygametemplate.__doc__,
url="https://github.com/AndyDeany/pygame-template",
author=pygametemplate.__author__,
author_email="[email protected]",
packages=["pygametemplate"]
)
| mit | Python |
|
b5b503229789c61af5bb47d6bb587bafb2ada562 | Fix setup.py, bump version. | agoragames/pykafka,dsully/pykafka,xujun10110/pykafka | setup.py | setup.py | #!/usr/bin/env python
"""
# pykafka
pykafka allows you to produce messages to the Kafka distributed publish/subscribe messaging service.
## Requirements
You need to have access to your Kafka instance and be able to connect through
TCP. You can obtain a copy and instructions on how to setup kafka at
https://github.com/kafka-dev/kafka
## Installation
pip install pykafka
## Usage
### Sending a simple message
import kafka
producer = kafka.producer.Producer('test')
message = kafka.message.Message("Foo!")
producer.send(message)
### Sending a sequence of messages
import kafka
producer = kafka.producer.Producer('test')
message1 = kafka.message.Message("Foo!")
message2 = kafka.message.Message("Bar!")
producer.send([message1, message2])
### Batching a bunch of messages using a context manager.
import kafka
producer = kafka.producer.Producer('test')
with producer.batch() as messages:
print "Batching a send of multiple messages.."
messages.append(kafka.message.Message("first message to send")
messages.append(kafka.message.Message("second message to send")
* they will be sent all at once, after the context manager execution.
### Consuming messages one by one
import kafka
consumer = kafka.consumer.Consumer('test')
messages = consumer.consume()
### Consuming messages using a generator loop
import kafka
consumer = kafka.consumer.Consumer('test')
for message in consumer.loop():
print message
Contact:
Please use the GitHub issues: https://github.com/dsully/pykafka/issues
* Inspiried from Alejandro Crosa's kafka-rb: https://github.com/acrosa/kafka-rb
"""
import setuptools
# Don't install deps for development mode.
setuptools.bootstrap_install_from = None
setuptools.setup(
name = 'pykafka',
version = '0.1.1',
license = 'MIT',
long_description = __doc__,
author = "Dan Sully",
author_email = "[email protected]",
url = 'http://github.com/dsully/pykafka',
platforms = 'any',
# What are we packaging up?
packages = setuptools.find_packages('kafka'),
zip_safe = True,
verbose = False,
)
| #!/usr/bin/env python
"""
# pykafka
pykafka allows you to produce messages to the Kafka distributed publish/subscribe messaging service.
## Requirements
You need to have access to your Kafka instance and be able to connect through
TCP. You can obtain a copy and instructions on how to setup kafka at
https://github.com/kafka-dev/kafka
## Installation
pip install pykafka
## Usage
### Sending a simple message
import kafka
producer = kafka.producer.Producer('test')
message = kafka.message.Message("Foo!")
producer.send(message)
### Sending a sequence of messages
import kafka
producer = kafka.producer.Producer('test')
message1 = kafka.message.Message("Foo!")
message2 = kafka.message.Message("Bar!")
producer.send([message1, message2])
### Batching a bunch of messages using a context manager.
import kafka
producer = kafka.producer.Producer('test')
with producer.batch() as messages:
print "Batching a send of multiple messages.."
messages.append(kafka.message.Message("first message to send")
messages.append(kafka.message.Message("second message to send")
* they will be sent all at once, after the context manager execution.
### Consuming messages one by one
import kafka
consumer = kafka.consumer.Consumer('test')
messages = consumer.consume()
### Consuming messages using a generator loop
import kafka
consumer = kafka.consumer.Consumer('test')
for message in consumer.loop():
print message
Contact:
Please use the GitHub issues: https://github.com/dsully/pykafka/issues
* Inspiried from Alejandro Crosa's kafka-rb: https://github.com/acrosa/kafka-rb
"""
import setuptools
# Don't install deps for development mode.
setuptools.bootstrap_install_from = None
setuptools.setup(
name = 'pykafka',
version = '0.1',
license = 'MIT',
long_description = __doc__
author = "Dan Sully",
author_email = "[email protected]",
url = 'http://github.com/dsully/pykafka',
platforms = 'any',
# What are we packaging up?
packages = setuptools.find_packages('kafka'),
zip_safe = True,
verbose = False,
)
| mit | Python |
a1e35b73b5e10a885e78e965242c5b1b6e92aa16 | Add a setup.py file | jcarbaugh/django-wellknown | setup.py | setup.py | from setuptools import setup
setup(
name='wellknown',
version='0.1dev',
packages=['wellknown']
)
| bsd-3-clause | Python |
|
65ecc0145406e7d8e20a281c0e5c04b26208646d | Add a setup.py file. | Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions,hosseinsadeghi/ultracold-ions,Tech-XCorp/ultracold-ions | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'name': 'ultracold-ions',
'description': 'A library for the simulation of ultracold neutral plasmas.',
'author': 'Tech-X Corporation',
'url': 'https://github.com/Tech-XCorp/ultracold-ions',
'download_url': 'https://github.com/Tech-XCorp/ultracold-ions',
'author_email': '[email protected]',
'version': '0.1',
'install_requires': ['numpy','pyopencl','nose'],
'packages': ['uci'],
'scripts': []
}
setup(**config)
| mit | Python |
|
d9d3ae4a1d4007a0aa1dafe09102cb7414c338db | Remove extracting HG revision from setup.py. | zsiciarz/django-markitup,carljm/django-markitup,carljm/django-markitup,carljm/django-markitup,zsiciarz/django-markitup,WimpyAnalytics/django-markitup,zsiciarz/django-markitup,WimpyAnalytics/django-markitup,WimpyAnalytics/django-markitup | setup.py | setup.py | from setuptools import setup
long_description = (open('README.rst').read() +
open('CHANGES.rst').read() +
open('TODO.rst').read())
def _static_files(prefix):
return [prefix+'/'+pattern for pattern in [
'markitup/*.*',
'markitup/sets/*/*.*',
'markitup/sets/*/images/*.png',
'markitup/skins/*/*.*',
'markitup/skins/*/images/*.png',
'markitup/templates/*.*'
]]
setup(
name='django-markitup',
version='2.2.2.post0',
description='Markup handling for Django using the MarkItUp! universal markup editor',
long_description=long_description,
author='Carl Meyer',
author_email='[email protected]',
url='http://bitbucket.org/carljm/django-markitup/',
packages=['markitup', 'markitup.templatetags'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
zip_safe=False,
test_suite='runtests.runtests',
tests_require='Django>=1.3',
package_data={'markitup': ['templates/markitup/*.html'] +
_static_files('static')}
)
| from setuptools import setup
import subprocess
import os.path
try:
# don't get confused if our sdist is unzipped in a subdir of some
# other hg repo
if os.path.isdir('.hg'):
p = subprocess.Popen(['hg', 'parents', r'--template={rev}\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not p.returncode:
fh = open('HGREV', 'wb')
fh.write(p.communicate()[0].splitlines()[0])
fh.close()
except (OSError, IndexError):
pass
try:
hgrev = open('HGREV').read()
except IOError:
hgrev = ''
long_description = (open('README.rst').read() +
open('CHANGES.rst').read() +
open('TODO.rst').read())
def _static_files(prefix):
return [prefix+'/'+pattern for pattern in [
'markitup/*.*',
'markitup/sets/*/*.*',
'markitup/sets/*/images/*.png',
'markitup/skins/*/*.*',
'markitup/skins/*/images/*.png',
'markitup/templates/*.*'
]]
setup(
name='django-markitup',
version='2.2.2.post%s' % hgrev,
description='Markup handling for Django using the MarkItUp! universal markup editor',
long_description=long_description,
author='Carl Meyer',
author_email='[email protected]',
url='http://bitbucket.org/carljm/django-markitup/',
packages=['markitup', 'markitup.templatetags'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
zip_safe=False,
test_suite='runtests.runtests',
tests_require='Django>=1.3',
package_data={'markitup': ['templates/markitup/*.html'] +
_static_files('static')}
)
| bsd-3-clause | Python |
b82dee62e325d83f8aeaede406de24973ee42b42 | Update project url in setup.py | manelvf/closure-linter,google/closure-linter,google/closure-linter,manelvf/closure-linter,google/closure-linter,manelvf/closure-linter | setup.py | setup.py | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='closure_linter',
version='2.3.17',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',
author_email='[email protected]',
url='https://github.com/google/closure-linter',
install_requires=['python-gflags'],
package_dir={'closure_linter': 'closure_linter'},
packages=['closure_linter', 'closure_linter.common'],
entry_points = {
'console_scripts': [
'gjslint = closure_linter.gjslint:main',
'fixjsstyle = closure_linter.fixjsstyle:main'
]
}
)
| #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='closure_linter',
version='2.3.17',
description='Closure Linter',
license='Apache',
author='The Closure Linter Authors',
author_email='[email protected]',
url='http://code.google.com/p/closure-linter',
install_requires=['python-gflags'],
package_dir={'closure_linter': 'closure_linter'},
packages=['closure_linter', 'closure_linter.common'],
entry_points = {
'console_scripts': [
'gjslint = closure_linter.gjslint:main',
'fixjsstyle = closure_linter.fixjsstyle:main'
]
}
)
| apache-2.0 | Python |
45a7a979d687b75851d3901171b826faa965389e | Add setup script | vene/ambra | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='ambra',
version='0.1dev',
description='Temporal prediction by pairwise comparisons',
packages=['ambra'],
)
| bsd-2-clause | Python |
|
20a5ccf55c9292d3c360a34d190e583b84594a37 | Add zeeman energy tests. | fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python | pyoommf/test_zeeman.py | pyoommf/test_zeeman.py | from zeeman import Zeeman
def test_zeeman_mif():
H = (0.1, -0.5, -8.9e6)
zeeman = Zeeman(H)
mif_string = zeeman.get_mif()
lines = mif_string.split('\n')
assert 'Specify Oxs_FixedZeeman {' in lines[0]
assert '{ Oxs_UniformVectorField {' in lines[1]
assert 'vector' in lines[2]
line2 = lines[2].split()
assert float(line2[1][1:]) == H[0]
assert float(line2[2]) == H[1]
assert float(line2[3][0:-1]) == H[2]
def test_zeeman_formatting():
H = (0.1, -0.5, -8.9e6)
zeeman = Zeeman(H)
mif_string = zeeman.get_mif()
assert mif_string[0] == 'S'
assert mif_string[-1] == '\n'
assert mif_string[-2] == '\n'
| bsd-2-clause | Python |
|
331aecb334f4e4ff4c38b4a2b12d3a80d7327de1 | Remove unused URL from setup.py | 5monkeys/mock,Vanuan/mock | setup.py | setup.py | #! /usr/bin/python
# Copyright (C) 2007-2010 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from mock import __version__
from distutils.core import setup
import os
NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
URL = "http://www.voidspace.org.uk/python/mock/"
readme = os.path.join(os.path.dirname(__file__), 'README.txt')
LONG_DESCRIPTION = open(readme).read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
]
AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = '[email protected]'
KEYWORDS = "testing test mock mocking unittest patching stubs fakes doubles".split(' ')
setup(
name=NAME,
version=__version__,
py_modules=MODULES,
# metadata for upload to PyPI
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
) | #! /usr/bin/python
# Copyright (C) 2007-2010 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from mock import __version__
from distutils.core import setup
import os
NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
URL = "http://www.voidspace.org.uk/python/mock/"
'http://www.voidspace.org.uk/downloads/mock-%s.zip' % __version__
readme = os.path.join(os.path.dirname(__file__), 'README.txt')
LONG_DESCRIPTION = open(readme).read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
]
AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = '[email protected]'
KEYWORDS = "testing test mock mocking unittest patching stubs fakes doubles".split(' ')
setup(
name=NAME,
version=__version__,
py_modules=MODULES,
# metadata for upload to PyPI
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
) | bsd-2-clause | Python |
d00f9fd43cfc45747a9479f00db5d67fda658e55 | Add initial distutils configuration | mirek2580/namebench | setup.py | setup.py | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""distutils configuration."""
__author__ = '[email protected] (Thomas Stromberg)'
from namebench import VERSION
from distutils.core import setup
setup(name='namebench',
version=VERSION,
py_modules=['namebench'],
description='DNS service benchmarking tool',
author='Thomas Stromberg',
author_email='[email protected]',
url='http://namebench.googlecode.com/',
packages=('libnamebench',),
platforms=('Any',),
requires=['graphy', 'dnspython', 'jinja2'],
license='Apache 2.0',
scripts=['namebench.py'],
package_data = {'libnamebench': ['data/alexa-top-10000-global.txt',
'templates/ascii.tmpl',
'templates/html.tmpl',
'namebench.cfg']},
# package_data=[('data', ['data/alexa-top-10000-global.txt']),
# ('templates', ['templates/ascii.tmpl',
# 'templates/html.tmpl']),
# ('config', ['namebench.cfg'])]
)
| apache-2.0 | Python |
|
8439263d6ff66e659a8051d3efc0475020048629 | update v.# make tag and set to release | atvKumar/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,tuxnani/open-telugu,tshrinivasan/open-tamil,tuxnani/open-telugu,atvKumar/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,tuxnani/open-telugu,tshrinivasan/open-tamil,Ezhil-Language-Foundation/open-tamil,atvKumar/open-tamil,tuxnani/open-telugu,tshrinivasan/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,atvKumar/open-tamil,atvKumar/open-tamil,arcturusannamalai/open-tamil,atvKumar/open-tamil,tshrinivasan/open-tamil,tshrinivasan/open-tamil,tshrinivasan/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,tshrinivasan/open-tamil,atvKumar/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,tuxnani/open-telugu | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) 2013-2014 முத்தையா அண்ணாமலை
# open-tamil project
from distutils.core import setup
from codecs import open
setup(name='Open-Tamil',
version='0.2.8',
description='Tamil language text processing tools',
author='M. Annamalai, T. Arulalan,',
author_email='[email protected]',
url='https://github.com/arcturusannamalai/open-tamil',
packages=['tamil','transliterate','ngram'],
license='GPLv3',
platforms='PC,Linux,Mac',
classifiers='Natural Language :: Tamil',
long_description=open('README.md','r','UTF-8').read(),
download_url='https://github.com/arcturusannamalai/open-tamil/archive/latest.zip',#pip
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# (C) 2013-2014 முத்தையா அண்ணாமலை
# open-tamil project
from distutils.core import setup
from codecs import open
setup(name='Open-Tamil',
version='0.2.4',
description='Tamil language text processing tools',
author='Muthiah Annamalai',
author_email='[email protected]',
url='https://github.com/arcturusannamalai/open-tamil',
packages=['tamil','transliterate','ngram'],
license='GPLv3',
platforms='PC,Linux,Mac',
classifiers='Natural Language :: Tamil',
long_description=open('README.md','r','UTF-8').read(),
download_url='https://github.com/arcturusannamalai/open-tamil/archive/latest.zip',#pip
)
| mit | Python |
c75ee6a0ee2f542463b5ca8cb81b06a6a6650d4c | Add initial setup file | ctma/consul_kv | setup.py | setup.py | from setuptools import setup
setup(
name='python2-consul',
packages=['python2-consul'],
version='0.0.1',
install_requires=[
'certifi==2017.4.17',
'chardet==3.0.4',
'idna==2.5',
'PyYAML==3.12',
'requests==2.18.1',
'urllib3==1.21.1',
'validators==0.12.0',
'pytest==3.2.2'
]
)
| mit | Python |
|
af2effaf147b8e473f7b9c655842617a91414278 | Upgrade the requirement on taskotron-python-versions to include latest changes in shared functions | fedora-python/portingdb,irushchyshyn/portingdb,irushchyshyn/portingdb,irushchyshyn/portingdb,ari3s/portingdb,irushchyshyn/portingdb,fedora-python/portingdb,ari3s/portingdb,fedora-python/portingdb,ari3s/portingdb,ari3s/portingdb | setup.py | setup.py |
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
super().finalize_options()
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
requires = [
'sqlalchemy >= 1.0, < 2.0',
'PyYAML >= 3.11, < 4.0',
'click >= 3.3, < 7.0',
'flask >= 0.10, < 1.0',
'markdown >= 2.4, < 3.0',
'dogpile.cache >= 0.5.5, < 1.0',
'taskotron-python-versions >= 0.1.dev2',
]
tests_require = ['pytest']
setup_args = dict(
name='portingdb',
version='0.1',
packages=['portingdb'],
url='https://github.com/fedora-python/portingdb',
description="""Database of packages that need Python 3 porting""",
author='Petr Viktorin',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
)
if __name__ == '__main__':
setup(**setup_args)
|
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
super().finalize_options()
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
requires = [
'sqlalchemy >= 1.0, < 2.0',
'PyYAML >= 3.11, < 4.0',
'click >= 3.3, < 7.0',
'flask >= 0.10, < 1.0',
'markdown >= 2.4, < 3.0',
'dogpile.cache >= 0.5.5, < 1.0',
'taskotron-python-versions',
]
tests_require = ['pytest']
setup_args = dict(
name='portingdb',
version='0.1',
packages=['portingdb'],
url='https://github.com/fedora-python/portingdb',
description="""Database of packages that need Python 3 porting""",
author='Petr Viktorin',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
)
if __name__ == '__main__':
setup(**setup_args)
| mit | Python |
47b2e9890a0f3022ffbbf83a6e722b2e77e3443b | Fix dajax setup.py | jayfk/django-dajax,Leonime/django-dajax,jorgebastida/django-dajax,jorgebastida/django-dajax,Leonime/django-dajax,jayfk/django-dajax,jorgebastida/django-dajax | setup.py | setup.py | from distutils.core import setup
setup(
name='django-dajax',
version='0.9',
author='Jorge Bastida',
author_email='[email protected]',
description=('Easy to use library to create asynchronous presentation '
'logic with django and dajaxice'),
url='http://dajaxproject.com',
license='BSD',
packages=['dajax'],
package_data={'dajax': ['static/dajax/*']},
long_description=('dajax is a powerful tool to easily and super-quickly '
'develop asynchronous presentation logic in web '
'applications using python and almost no JS code. It '
'supports up to four of the most popular JS frameworks: '
'jQuery, Prototype, Dojo and mootols.'),
install_requires=[
'django-dajaxice>=0.5'
],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities']
)
| from distutils.core import setup
setup(
name='django-dajax',
version='0.9',
author='Jorge Bastida',
author_email='[email protected]',
description=('Easy to use library to create asynchronous presentation '
'logic with django and dajaxice'),
url='http://dajaxproject.com',
license='BSD',
packages=['dajax'],
package_data={'dajax': ['static/*']},
long_description=('dajax is a powerful tool to easily and super-quickly '
'develop asynchronous presentation logic in web '
'applications using python and almost no JS code. It '
'supports up to four of the most popular JS frameworks: '
'jQuery, Prototype, Dojo and mootols.'),
install_requires=[
'django-dajaxice>=0.5'
],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities']
)
| bsd-3-clause | Python |
330650e7fe7c1a9aa0178812d08af332e927fe98 | add minimal setup.py | librallu/cohorte-herald,librallu/cohorte-herald,librallu/cohorte-herald | setup.py | setup.py | from setuptools import setup
setup(name='Cohorte Micronode',
version='0.9',
description='Cohorte Micronode Repository',
url='https://github.com/librallu/cohorte-herald',
author='Luc Libralesso',
author_email='[email protected]',
license='Apache License 2.0',
packages=[],
zip_safe=False)
| apache-2.0 | Python |
|
a90162a43e4e1817bd818b66e4ad6e377ab8af92 | Update the setup.py version. | florianjacob/pelican,number5/pelican,GiovanniMoretti/pelican,abrahamvarricatt/pelican,gymglish/pelican,crmackay/pelican,Scheirle/pelican,lazycoder-ru/pelican,ingwinlu/pelican,TC01/pelican,ehashman/pelican,jo-tham/pelican,ingwinlu/pelican,talha131/pelican,zackw/pelican,farseerfc/pelican,simonjj/pelican,JeremyMorgan/pelican,catdog2/pelican,deved69/pelican-1,zackw/pelican,lucasplus/pelican,liyonghelpme/myBlog,HyperGroups/pelican,liyonghelpme/myBlog,farseerfc/pelican,iurisilvio/pelican,jvehent/pelican,TC01/pelican,lazycoder-ru/pelican,douglaskastle/pelican,51itclub/pelican,rbarraud/pelican,levanhien8/pelican,simonjj/pelican,eevee/pelican,kernc/pelican,iKevinY/pelican,ehashman/pelican,kernc/pelican,sunzhongwei/pelican,simonjj/pelican,Rogdham/pelican,avaris/pelican,kennethlyn/pelican,Polyconseil/pelican,crmackay/pelican,TC01/pelican,deved69/pelican-1,ls2uper/pelican,jvehent/pelican,btnpushnmunky/pelican,catdog2/pelican,UdeskDeveloper/pelican,Scheirle/pelican,lucasplus/pelican,51itclub/pelican,crmackay/pelican,gymglish/pelican,GiovanniMoretti/pelican,iurisilvio/pelican,number5/pelican,justinmayer/pelican,levanhien8/pelican,11craft/pelican,kennethlyn/pelican,douglaskastle/pelican,gymglish/pelican,karlcow/pelican,HyperGroups/pelican,btnpushnmunky/pelican,number5/pelican,51itclub/pelican,alexras/pelican,koobs/pelican,lazycoder-ru/pelican,janaurka/git-debug-presentiation,jo-tham/pelican,garbas/pelican,koobs/pelican,alexras/pelican,deved69/pelican-1,getpelican/pelican,jimperio/pelican,deanishe/pelican,Rogdham/pelican,eevee/pelican,eevee/pelican,avaris/pelican,fbs/pelican,arty-name/pelican,11craft/pelican,treyhunner/pelican,iKevinY/pelican,catdog2/pelican,liyonghelpme/myBlog,rbarraud/pelican,abrahamvarricatt/pelican,Rogdham/pelican,douglaskastle/pelican,joetboole/pelican,janaurka/git-debug-presentiation,liyonghelpme/myBlog,liyonghelpme/myBlog,goerz/pelican,getpelican/pelican,jimperio/pelican,abrahamvarrica
tt/pelican,jvehent/pelican,UdeskDeveloper/pelican,janaurka/git-debug-presentiation,JeremyMorgan/pelican,ehashman/pelican,Natim/pelican,deanishe/pelican,kennethlyn/pelican,joetboole/pelican,UdeskDeveloper/pelican,Summonee/pelican,joetboole/pelican,kernc/pelican,garbas/pelican,btnpushnmunky/pelican,iurisilvio/pelican,karlcow/pelican,karlcow/pelican,koobs/pelican,goerz/pelican,sunzhongwei/pelican,ionelmc/pelican,goerz/pelican,ls2uper/pelican,zackw/pelican,rbarraud/pelican,alexras/pelican,11craft/pelican,Polyconseil/pelican,HyperGroups/pelican,jimperio/pelican,talha131/pelican,Summonee/pelican,treyhunner/pelican,sunzhongwei/pelican,Scheirle/pelican,Summonee/pelican,ls2uper/pelican,deanishe/pelican,0xMF/pelican,sunzhongwei/pelican,levanhien8/pelican,lucasplus/pelican,treyhunner/pelican,GiovanniMoretti/pelican,garbas/pelican,JeremyMorgan/pelican,florianjacob/pelican,florianjacob/pelican | setup.py | setup.py | from distutils.core import setup
import sys
requires = ['feedgenerator', 'jinja2', 'pygments']
if sys.version_info < (2,7):
requires.append('argparse')
setup(
name = "pelican",
version = '1.2',
url = 'http://hg.lolnet.org/pelican/',
author = 'Alexis Metaireau',
author_email = '[email protected]',
description = "A tool to generate a static blog, with restructured text input files.",
long_description=open('README.rst').read(),
packages = ['pelican'],
package_data = {'pelican': ['themes/templates/*']},
requires = requires,
scripts = ['bin/pelican'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| from distutils.core import setup
import sys
requires = ['feedgenerator', 'jinja2', 'pygments']
if sys.version_info < (2,7):
requires.append('argparse')
setup(
name = "pelican",
version = '1.1.1',
url = 'http://hg.lolnet.org/pelican/',
author = 'Alexis Metaireau',
author_email = '[email protected]',
description = "A tool to generate a static blog, with restructured text input files.",
long_description=open('README.rst').read(),
packages = ['pelican'],
package_data = {'pelican': ['themes/templates/*']},
requires = requires,
scripts = ['bin/pelican'],
classifiers = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| agpl-3.0 | Python |
06c67a7df4e2fd5cbc221f2a9c3f64179af91344 | Add setup.py | alexsilva/django-xadmin,vincent-fei/django-xadmin,taxido/django-xadmin,merlian/django-xadmin,AndyHelix/django-xadmin,hochanh/django-xadmin,zhiqiangYang/django-xadmin,wbcyclist/django-xadmin,alexsilva/django-xadmin,wbcyclist/django-xadmin,t0nyren/django-xadmin,jneight/django-xadmin,tvrcopgg/edm_xadmin,zhiqiangYang/django-xadmin,zhiqiangYang/django-xadmin,jeanmask/opps-admin,Keleir/django-xadmin,sshwsfc/django-xadmin,jneight/django-xadmin,sshwsfc/django-xadmin,Keleir/django-xadmin,iedparis8/django-xadmin,sshwsfc/xadmin,huaishan/django-xadmin,zhiqiangYang/django-xadmin,huaishan/django-xadmin,jneight/django-xadmin,f1aky/xadmin,AndyHelix/django-xadmin,t0nyren/django-xadmin,ly0/xxadmin,t0nyren/django-xadmin,huaishan/django-xadmin,sshwsfc/django-xadmin,taxido/django-xadmin,Keleir/django-xadmin,merlian/django-xadmin,cupen/django-xadmin,vincent-fei/django-xadmin,marguslaak/django-xadmin,sshwsfc/xadmin,vincent-fei/django-xadmin,cupen/django-xadmin,marguslaak/django-xadmin,vincent-fei/django-xadmin,wbcyclist/django-xadmin,pobear/django-xadmin,t0nyren/django-xadmin,sshwsfc/xadmin,taxido/django-xadmin,cgcgbcbc/django-xadmin,hochanh/django-xadmin,huaishan/django-xadmin,iedparis8/django-xadmin,f1aky/xadmin,f1aky/xadmin,Keleir/django-xadmin,tvrcopgg/edm_xadmin,ly0/xxadmin,tvrcopgg/edm_xadmin,ly0/xxadmin,sshwsfc/django-xadmin,AndyHelix/django-xadmin,pobear/django-xadmin,hochanh/django-xadmin,sshwsfc/xadmin,pobear/django-xadmin,pobear/django-xadmin,marguslaak/django-xadmin,merlian/django-xadmin,ly0/xxadmin,tvrcopgg/edm_xadmin,taxido/django-xadmin,iedparis8/django-xadmin,cgcgbcbc/django-xadmin,alexsilva/django-xadmin,alexsilva/django-xadmin,marguslaak/django-xadmin,cupen/django-xadmin,merlian/django-xadmin,hochanh/django-xadmin,f1aky/xadmin,cgcgbcbc/django-xadmin,AndyHelix/django-xadmin,cupen/django-xadmin | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='django-exadmin',
version='0.1.0',
description='New style and free plugin django admin module, UI base bootstrap2.',
author='TM (sshwsfc)',
author_email='[email protected]',
url='http://github.com/sshwsfc/django-exadmin',
download_url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
]
)
| bsd-3-clause | Python |
|
2c874c09e7bf35a0ea6a7a5029c9b17ec5f057af | Fix mongoengine version. | jazzband/django-mongonaut,pydanny/django-mongonaut,jazzband/django-mongonaut,pydanny/django-mongonaut,lchsk/django-mongonaut,jazzband/django-mongonaut,pydanny/django-mongonaut,lchsk/django-mongonaut,lchsk/django-mongonaut | setup.py | setup.py | from setuptools import setup, find_packages
import mongonaut
LONG_DESCRIPTION = open('README.rst').read()
setup(
name='django-mongonaut',
version=mongonaut.__version__,
description="An introspective interface for Django and MongoDB",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='mongodb,django',
author=mongonaut.__author__,
author_email='[email protected]',
url='http://github.com/pydanny/django-mongonaut',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=['mongoengine>=0.5.2'],
zip_safe=False,
)
| from setuptools import setup, find_packages
import mongonaut
LONG_DESCRIPTION = open('README.rst').read()
setup(
name='django-mongonaut',
version=mongonaut.__version__,
description="An introspective interface for Django and MongoDB",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='mongodb,django',
author=mongonaut.__author__,
author_email='[email protected]',
url='http://github.com/pydanny/django-mongonaut',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=['mongoengine==0.5.2'],
zip_safe=False,
)
| mit | Python |
ccbb7e11edc63a128b7006e015539fdabd8f3a7f | Set up frontend for longpolling | c00w/bitHopper,c00w/bitHopper | bitHopper/LongPoll.py | bitHopper/LongPoll.py | from gevent.event import AsyncResult
_event = AsyncResult()
def wait():
"""
Gets the New Block work unit to send to clients
"""
return _event.get()
def trigger(work):
"""
Call to trigger a LP
"""
old = self._event
self._event = event.AsyncResult()
old.set(work)
| mit | Python |
|
34ad457ab831173efd3758af926deb17daf53feb | Add sitemap | uhuramedia/Havel,uhuramedia/Havel | resources/sitemaps.py | resources/sitemaps.py | from django.contrib.sitemaps import Sitemap
from resources.models import Resource
from django.utils import translation
class ResourceSitemap(Sitemap):
def items(self):
return Resource.objects.filter(noindex=False, is_published=True,
language=translation.get_language())
def lastmod(self, obj):
return obj.modified
| bsd-3-clause | Python |
|
67cca3176d1e2b5def3ebbd64f4bd56a8976529b | add res.company file | OCA/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil | l10n_br_sale/res_company.py | l10n_br_sale/res_company.py | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2014 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU Affero General Public License for more details. #
# #
#You should have received a copy of the GNU Affero General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, fields
class ResCompany(models.Model):
_inherit = 'res.company'
sale_fiscal_category_id = fields.Many2one(
'l10n_br_account.fiscal.category', u'Categoria Fiscal Padrão Compras',
domain="[('journal_type', '=', 'sale')]")
| agpl-3.0 | Python |
|
a562aa0ac58b2ee4fec3f9ff0b70a595db4c48ad | add test case for csv validation, with first test already implemented | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | doajtest/unit/test_reapp_csv_validate.py | doajtest/unit/test_reapp_csv_validate.py | from doajtest.helpers import DoajTestCase
from portality.clcsv import ClCsv
from portality import reapplication
from copy import deepcopy
import os
APPLICATION_COL = [
"The Title",
"http://journal.url",
"Alternative Title",
"1234-5678",
"9876-5432",
"The Publisher",
"Society Institution",
"Platform Host Aggregator",
"Contact Name",
"[email protected]",
"[email protected]",
"US",
"Yes",
2,
"GBP",
"Yes",
4,
"USD",
16,
"http://articles.last.year",
"Yes",
"http://waiver.policy",
"LOCKSS, CLOCKSS, A national library, Other",
"Trinity",
"A safe place",
"http://digital.archiving.policy",
"Yes",
"DOI, ARK, Other",
"PURL",
"Yes",
"Yes",
"http://download.stats",
1980,
"HTML, XML, Other",
"Wordperfect",
"word, key",
"EN, FR",
"http://editorial.board",
"Open peer review",
"http://review.process",
"http://aims.scope",
"http://author.instructions",
"Yes",
"http://plagiarism.screening",
8,
"http://oa.statement",
"Yes",
"http://licence.embedded",
"Other",
"CC MY",
"BY, NC",
"http://licence.url",
"Yes",
"Sherpa/Romeo, Other",
"Store it",
"Other",
"Sometimes",
"http://copyright",
"Other",
"Occasionally",
"http://publishing.rights"
]
class TestReAppCsv(DoajTestCase):
def setUp(self):
super(TestReAppCsv, self).setUp()
self._make_valid_csv()
self._random_binary()
def tearDown(self):
super(TestReAppCsv, self).tearDown()
if os.path.exists("valid.csv"):
os.remove("valid.csv")
if os.path.exists("random_binary"):
os.remove("random_binary")
def _make_valid_csv(self):
sheet = ClCsv("valid.csv")
# first column is the questions
qs = reapplication.Suggestion2QuestionXwalk.question_list()
sheet.set_column("", qs)
# add 3 columns of results for testing purposes
c1 = deepcopy(APPLICATION_COL)
c1[0] = "First Title"
c1[3] = "1234-5678"
c1[4] = "9876-5432"
sheet.set_column(c1[3], c1)
c2 = deepcopy(APPLICATION_COL)
c2[0] = "Second Title"
c2[3] = "2345-6789"
c2[4] = "8765-4321"
sheet.set_column(c2[3], c2)
c3 = deepcopy(APPLICATION_COL)
c3[0] = "Third Title"
c3[3] = "3456-7890"
c3[4] = "7654-3210"
sheet.set_column(c3[3], c3)
sheet.save()
def _random_binary(self):
with open('random_binary', 'wb') as fout:
fout.write(os.urandom(1024))
def test_01_open_csv(self):
# first try a valid csv
sheet = reapplication.open_csv("valid.csv")
assert sheet is not None
headers = sheet.headers()
assert headers == ["", "1234-5678", "2345-6789", "3456-7890"], headers
# now try one that won't parse
with self.assertRaises(reapplication.CsvValidationException):
sheet = reapplication.open_csv("random_binary")
def test_02_structure(self):
pass
def test_03_contents(self):
pass
| apache-2.0 | Python |
|
df84cf964214420987c51813b8960ce068223adf | Add request handler | Tiglas/pickup-planner,Tiglas/pickup-planner,Tiglas/pickup-planner,Tiglas/pickup-planner,Tiglas/pickup-planner | request_handler/request_handler.py | request_handler/request_handler.py | #!flask/bin/python
from flask import Flask, jsonify, abort
from flask import make_response
from flask import request
from flask import url_for
import psycopg2 as pg
app = Flask(__name__)
def make_public_request(request):
new_request = {}
new_request['uri'] = url_for('get_requests', request_id=request[0], _external=True)
new_request['source'] = request[1]
new_request['destination'] = request[2]
return new_request
@app.route('/clientapp/requests', methods=['GET'])
def get_requests():
''' Get requests from the database
'''
conn = pg.connect(database="ngot", host="127.0.0.1", port="5432")
cursor = conn.cursor()
cursor.execute("SELECT request_id, source, destination from requests")
rows = list(cursor.fetchall())
cursor.close()
conn.close()
return jsonify({'requests': [make_public_request(req) for req in rows]})
@app.route('/clientapp/vehicle_trips', methods=['GET'])
def get_vehicle_trips():
''' Query the database and return generated vehicle trips
'''
conn = pg.connect(database="ngot", host="127.0.0.1", port="5432")
cursor = conn.cursor()
pg.extensions.register_type(
pg.extensions.new_array_type(
(1017,), 'PICKUP_POINTS[]', pg.STRING))
cursor.execute("SELECT pickup_points from vehicletrips")
rows = cursor.fetchone()
cursor.close()
conn.close()
return jsonify({'vehicle_trips': rows})
@app.route('/clientapp/requests', methods=['POST'])
def create_request():
#if not request.json in request.json:
#abort(404)
conn = pg.connect(database="ngot", host="127.0.0.1", port="5432")
cursor = conn.cursor()
#request_id = request.json['request_id']
source = request.json['source']
destination = request.json['destination']
cursor.execute("INSERT INTO requests (source, destination) values (%s, %s)", (source, destination))
rows = cursor.rowcount
conn.commit()
cursor.close()
conn.close()
return jsonify({'rows': rows}), 201
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
#app.run(debug=True)
| mit | Python |
|
4e54128e5c0b9c762e5f93ae0d8791eeddde2264 | Add JSON serializer | pombredanne/dxr,KiemVM/Mozilla--dxr,pelmers/dxr,pelmers/dxr,bozzmob/dxr,gartung/dxr,KiemVM/Mozilla--dxr,KiemVM/Mozilla--dxr,jay-z007/dxr,nrc/dxr,srenatus/dxr,bozzmob/dxr,jbradberry/dxr,bozzmob/dxr,bozzmob/dxr,gartung/dxr,KiemVM/Mozilla--dxr,KiemVM/Mozilla--dxr,erikrose/dxr,kleintom/dxr,bozzmob/dxr,jay-z007/dxr,kleintom/dxr,srenatus/dxr,jbradberry/dxr,erikrose/dxr,srenatus/dxr,pombredanne/dxr,kleintom/dxr,jay-z007/dxr,jbradberry/dxr,nrc/dxr,jay-z007/dxr,jonasfj/dxr,jbradberry/dxr,kleintom/dxr,jbradberry/dxr,gartung/dxr,erikrose/dxr,nrc/dxr,kleintom/dxr,pombredanne/dxr,jbradberry/dxr,pelmers/dxr,pombredanne/dxr,pombredanne/dxr,jonasfj/dxr,pombredanne/dxr,gartung/dxr,jonasfj/dxr,nrc/dxr,jonasfj/dxr,kleintom/dxr,gartung/dxr,pelmers/dxr,pombredanne/dxr,pelmers/dxr,jonasfj/dxr,kleintom/dxr,pelmers/dxr,KiemVM/Mozilla--dxr,erikrose/dxr,srenatus/dxr,jay-z007/dxr,nrc/dxr,nrc/dxr,gartung/dxr,jay-z007/dxr,srenatus/dxr,erikrose/dxr,jay-z007/dxr,bozzmob/dxr,jbradberry/dxr,srenatus/dxr,pelmers/dxr,gartung/dxr,jonasfj/dxr,bozzmob/dxr | dxr/json.py | dxr/json.py | #!/usr/bin/env python2
class JsonOutput:
    """Incrementally builds a JSON document as a string.

    Values are appended through add(); print_str() returns the accumulated
    content wrapped in a top-level object.  Two fixes over the original:

    * state is per-instance (the original used mutable class attributes,
      so every instance shared one buffer);
    * the " : " key separator is only emitted when there actually is a
      key, so list elements no longer produce invalid JSON like "[ : 1]".
    """

    def __init__(self):
        self.content = ''
        self.need_separator = False

    def open(self):
        """Start a nested object."""
        self.content += '{'
        self.need_separator = False

    def close(self):
        """End a nested object."""
        self.content += '}'
        self.need_separator = True

    def open_list(self):
        """Start a list."""
        self.content += '['
        self.need_separator = False

    def close_list(self):
        """End a list."""
        self.content += ']'
        self.need_separator = True

    def _emit_key(self, key):
        """Write the element separator and, when key is not None, the quoted key."""
        if self.need_separator:
            self.content += ','
        if key is not None:
            self.content += '"' + key + '"' + ' : '

    def key_value(self, key, value, quote_value):
        """Write a scalar, quoted when quote_value is True.  key may be None."""
        self._emit_key(key)
        if quote_value:
            self.content += '"' + value + '"'
        else:
            self.content += value
        self.need_separator = True

    def key_dict(self, key, nested_values):
        """Write a nested object from a dict.  key may be None."""
        self._emit_key(key)
        self.open()
        for subkey in nested_values.keys():
            self.add(subkey, nested_values[subkey])
        self.close()
        self.need_separator = True

    def key_list(self, key, values):
        """Write a list; elements are dispatched through add().  key may be None."""
        self._emit_key(key)
        self.open_list()
        for subvalue in values:
            self.add(None, subvalue)
        self.close_list()
        self.need_separator = True

    def add(self, key, value):
        """Dispatch on value's type: dict, list, int, or stringified scalar."""
        if isinstance(value, dict):
            self.key_dict(key, value)
        elif isinstance(value, list):
            self.key_list(key, value)
        elif isinstance(value, int):
            self.key_value(key, str(value), False)
        else:
            self.key_value(key, str(value), True)

    def print_str(self):
        """Return the accumulated document wrapped in a top-level object."""
        return '{' + self.content + '}'
#if __name__ == '__main__':
# json = JsonOutput()
#
# json.add('foo', 'bar')
# json.add('age', 666)
# json.add('hash', { 'aa': 'bb', 'cc': 'dd', 'zz': [ 1, 3, 5]})
# json.add('list', [1, 2, 3])
# json.add('mixed', [ {'Foo': 'bar', 'Tu': 'ruru' }, { 'lala': 'whee', 'pi': 3 } ])
#
# print json.print_str();
| mit | Python |
|
30eec7bb18285b82a7d67a0a3d9098afc5b9e286 | Create QRfactorization.py | Effective-Quadratures/Effective-Quadratures,psesh/Effective-Quadratures | effective_quadratures/QRfactorization.py | effective_quadratures/QRfactorization.py | # A set of functions just for QR factorization, pivoting and iterative-QR
| lgpl-2.1 | Python |
|
324243dfd61afd8ce244a9a02ffc800c5c73ce55 | Add modified chart with better values | chrisgilmerproj/brewday,chrisgilmerproj/brewday | charts/daniels_designing_great_beers/appendix_two_course_grind_potential_extract_modified.py | charts/daniels_designing_great_beers/appendix_two_course_grind_potential_extract_modified.py |
from brew.utilities import sg_from_dry_basis
"""
Ray Daniels
Designing Great Beers
Appendix 2: Course Grind Potential Extract (modified)
Notes:
The chart appears to have been developed with the moisture content set
to zero (0.0) and the Brew House Efficiency set to 100% (1.0). This
is not typical and the book even states that you should expect moisture
content at around 4.0% and Brew House Efficiency at arount 90.0%.
This version has been modified with more typical values.
"""
def get_chart():
    """Build the coarse-grind potential-extract chart.

    Uses 4% moisture content and 90% brew house efficiency (the typical
    values the book recommends, unlike the original chart's 0% / 100%).

    Returns:
        list of [dry-basis coarse grind %, gravity units, specific gravity]
        rows for DBCG 50.00-75.00 in 1.00 steps, then 76.00-80.00 in
        0.25 steps.
    """
    mc = 4
    bhe = 0.9
    chart = []
    # list(range(...)) + list(range(...)) keeps the original Python 2
    # `range + range` behaviour while also working on Python 3, where
    # range objects cannot be concatenated.
    for dbcg in list(range(5000, 7600, 100)) + list(range(7600, 8025, 25)):
        gu = sg_from_dry_basis(
            dbcg / 100.0,
            moisture_content=mc,
            brew_house_efficiency=bhe)
        sg = 1 + (gu / 1000.0)
        chart.append([round(dbcg / 100.0, 2), round(gu, 2), round(sg, 4)])
    return chart
def print_chart():
    """Print the chart as a tab-separated table with a three-line header."""
    print("DBCG\tGU\t1 lb./gallon")
    print("'As-Is'\t\tYields SG")
    print("-------\t-----\t------------")
    for dbcg, gu, sg in get_chart():
        print("{0:0.2f}\t{1:0.2f}\t{2:0.4f}".format(dbcg, gu, sg))
def main():
    # Entry point: render the whole chart to stdout.
    print_chart()
if __name__ == "__main__":
    main()
| mit | Python |
|
7172d06ced60b2c69b9ac2762019ff95f3fd7da5 | Create twice.py | iwyos13/Robosys2 | twice.py | twice.py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
n = 0
def cb(message):
    """count_up subscriber callback: remember twice the received value.

    The original used C-style ``//`` line comments, which are a syntax
    error in Python; they are replaced with ``#`` comments here.
    """
    # rospy.loginfo(message.data*2)
    global n
    n = message.data * 2
    return n  # returned for testability; ROS ignores callback return values
if __name__ == '__main__':
    rospy.init_node('twice')
    # Re-publish each count_up value doubled (via cb) on the 'twice' topic.
    sub = rospy.Subscriber('count_up', Int32, cb)
    # rospy.spin()   (the original `//`-style comment was a Python syntax error)
    pub = rospy.Publisher('twice', Int32, queue_size=1)
    rate = rospy.Rate(10)  # publish the latest doubled value at 10 Hz
    while not rospy.is_shutdown():
        pub.publish(n)
        rate.sleep()
| bsd-2-clause | Python |
|
a882409ede1898a3b4e2fb4619089b33c1427315 | Add migration | DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo,rdmorganiser/rdmo,rdmorganiser/rdmo | apps/conditions/migrations/0005_empty_relation.py | apps/conditions/migrations/0005_empty_relation.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-08-10 14:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: widens Condition.relation's choices by adding the
    # 'empty' and 'notempty' relations to the existing comparison set.
    dependencies = [
        ('conditions', '0004_condition_title'),
    ]
    operations = [
        migrations.AlterField(
            model_name='condition',
            name='relation',
            field=models.CharField(choices=[('eq', 'is equal to (==)'), ('neq', 'is not equal to (!=)'), ('contains', 'contains'), ('gt', 'is greater than (>)'), ('gte', 'is greater than or equal (>=)'), ('lt', 'is lesser than (<)'), ('lte', 'is lesser than or equal (<=)'), ('empty', 'is empty'), ('notempty', 'is not empty')], max_length=8),
        ),
    ]
| apache-2.0 | Python |
|
673a6ee654d7e540fe9c473904b6d1e326928c58 | Create run_test.py | chohner/staged-recipes,jjhelmus/staged-recipes,kwilcox/staged-recipes,birdsarah/staged-recipes,basnijholt/staged-recipes,scopatz/staged-recipes,NOAA-ORR-ERD/staged-recipes,jochym/staged-recipes,cpaulik/staged-recipes,ocefpaf/staged-recipes,johanneskoester/staged-recipes,shadowwalkersb/staged-recipes,SylvainCorlay/staged-recipes,ceholden/staged-recipes,shadowwalkersb/staged-recipes,jakirkham/staged-recipes,igortg/staged-recipes,isuruf/staged-recipes,pmlandwehr/staged-recipes,mariusvniekerk/staged-recipes,jochym/staged-recipes,igortg/staged-recipes,sannykr/staged-recipes,guillochon/staged-recipes,goanpeca/staged-recipes,hadim/staged-recipes,patricksnape/staged-recipes,petrushy/staged-recipes,barkls/staged-recipes,sodre/staged-recipes,larray-project/staged-recipes,chohner/staged-recipes,kwilcox/staged-recipes,mcs07/staged-recipes,johanneskoester/staged-recipes,NOAA-ORR-ERD/staged-recipes,barkls/staged-recipes,Juanlu001/staged-recipes,asmeurer/staged-recipes,larray-project/staged-recipes,dschreij/staged-recipes,cpaulik/staged-recipes,rvalieris/staged-recipes,ocefpaf/staged-recipes,glemaitre/staged-recipes,Cashalow/staged-recipes,SylvainCorlay/staged-recipes,guillochon/staged-recipes,mcs07/staged-recipes,stuertz/staged-recipes,petrushy/staged-recipes,sodre/staged-recipes,rmcgibbo/staged-recipes,Juanlu001/staged-recipes,basnijholt/staged-recipes,grlee77/staged-recipes,jjhelmus/staged-recipes,jakirkham/staged-recipes,chrisburr/staged-recipes,sannykr/staged-recipes,scopatz/staged-recipes,ReimarBauer/staged-recipes,chrisburr/staged-recipes,asmeurer/staged-recipes,goanpeca/staged-recipes,hadim/staged-recipes,patricksnape/staged-recipes,rmcgibbo/staged-recipes,conda-forge/staged-recipes,ReimarBauer/staged-recipes,isuruf/staged-recipes,synapticarbors/staged-recipes,birdsarah/staged-recipes,rvalieris/staged-recipes,glemaitre/staged-recipes,mariusvniekerk/staged-recipes,ceholden/staged-recipes,grlee77/staged-recipes,
conda-forge/staged-recipes,sodre/staged-recipes,pmlandwehr/staged-recipes,synapticarbors/staged-recipes,Cashalow/staged-recipes,dschreij/staged-recipes,stuertz/staged-recipes | recipes/django-storages/run_test.py | recipes/django-storages/run_test.py | import django
from django.conf import settings
# Minimal standalone Django setup: register `storages` plus the contrib
# apps it depends on, then initialise the app registry.
settings.configure(INSTALLED_APPS=['storages', 'django.contrib.contenttypes', 'django.contrib.auth'])
django.setup()
# The actual smoke test: importing the package must succeed under the
# configured Django settings.
import storages
| bsd-3-clause | Python |
|
87de57c86b5d607b1fa795b46cefb3a722919f72 | add script for testing speed | jcmgray/quijy | scripts/time_quimb.py | scripts/time_quimb.py | import timeit
# ----------------------------- dense dot ----------------------------------- #
setup = """
import quimb
a = quimb.rand_herm(2**4)
b = quimb.rand_herm(2**4)
"""
stmt = """
a @ b
"""
t = timeit.timeit(stmt, setup=setup, number=100000)
print("Small dot".ljust(20) + ": {:.3} sec".format(t))
setup = """
import quimb
a = quimb.rand_herm(2**10)
b = quimb.rand_herm(2**10)
"""
stmt = """
a @ b
"""
t = timeit.timeit(stmt, setup=setup, number=10)
print("Big dot".ljust(20) + ": {:.3} sec".format(t))
# ----------------------------- dense eigsys -------------------------------- #
setup = """
import quimb
mat = quimb.rand_herm(2**4) """
stmt = """
quimb.eigsys(mat) """
t = timeit.timeit(stmt, setup=setup, number=10000)
print("Small eigsys".ljust(20) + ": {:.3} sec".format(t))
setup = """
import quimb
mat = quimb.rand_herm(2**10) """
stmt = """
quimb.eigsys(mat) """
t = timeit.timeit(stmt, setup=setup, number=10)
print("Big eigsys".ljust(20) + ": {:.3} sec".format(t))
# ----------------------------- sparse eigsys ------------------------------- #
setup = """
import quimb
mat = quimb.rand_herm(2**14, sparse=True) """
stmt = """
quimb.seigsys(mat, backend='scipy') """
t = timeit.timeit(stmt, setup=setup, number=10)
print("Scipy seigsys".ljust(20) + ": {:.3} sec".format(t))
setup = """
import quimb
mat = quimb.rand_herm(2**14, sparse=True) """
stmt = """
quimb.seigsys(mat, backend='slepc') """
t = timeit.timeit(stmt, setup=setup, number=10)
print("Slepc seigsys".ljust(20) + ": {:.3} sec".format(t))
setup = """
import quimb
import qdmbl
mat = qdmbl.ham_qd(10, 1, sparse=True) """
stmt = """
quimb.seigsys(mat, sigma=0.01, backend='scipy') """
t = timeit.timeit(stmt, setup=setup, number=10)
print("Scipy seigsys int".ljust(20) + ": {:.3} sec".format(t))
setup = """
import quimb
import qdmbl
mat = qdmbl.ham_qd(10, 1, sparse=True) """
stmt = """
quimb.seigsys(mat, sigma=1, backend='slepc') """
t = timeit.timeit(stmt, setup=setup, number=10)
print("Slepc seigsys int".ljust(20) + ": {:.3} sec".format(t))
| mit | Python |
|
397bc67a5a214a4cad5eef20f3a13c53f90964c5 | Modify tms_nw_svr | irvs/ros_tms,irvs/ros_tms,irvs/ros_tms,irvs/ros_tms,irvs/ros_tms,irvs/ros_tms,irvs/ros_tms,irvs/ros_tms,irvs/ros_tms | scripts/tms_nw_svr.py | scripts/tms_nw_svr.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
import requests
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
import urlparse
def svr_start(port, callback):
    """Start a blocking HTTP server on *port*.

    Each GET request's query string is passed to *callback*; the returned
    iterable of strings becomes the response body (see tms_nw_svr.do_GET).
    """
    def handler(*args):
        # The original instantiated an undefined `CallbackServer` class,
        # which raised NameError on the first request; the handler class
        # defined in this module is `tms_nw_svr`.
        tms_nw_svr(callback, *args)
    server = HTTPServer(('', int(port)), handler)
    server.serve_forever()
class tms_nw_svr(BaseHTTPRequestHandler):
    """HTTP request handler that delegates each GET's query string to a callback."""

    def __init__(self, callback, *args):
        self.callback = callback
        # Must unpack *args: BaseHTTPRequestHandler expects
        # (request, client_address, server) as separate positional
        # arguments; the original passed the whole tuple as one argument,
        # which broke request handling.
        BaseHTTPRequestHandler.__init__(self, *args)

    def do_GET(self):
        """Answer GET: run the callback on the query string and join the
        resulting lines with CRLF into the response body."""
        parsed_path = urlparse.urlparse(self.path)
        query = parsed_path.query
        self.send_response(200)
        self.end_headers()
        result = self.callback(query)
        message = '\r\n'.join(result)
        self.wfile.write(message)
        return
|
fec45cfaee6c5e5d02b6c3979179cdad153d5076 | add ds18b20 rpi implementation to examples | cloud4rpi/cloud4rpi | examples/raspberrypi/platform/ds18b20.py | examples/raspberrypi/platform/ds18b20.py | import os
import re
import subprocess
W1_DEVICES = '/sys/bus/w1/devices/'
W1_SENSOR_PATTERN = re.compile('(10|22|28)-.+', re.IGNORECASE)
def modprobe(module):
    """Load a kernel module; raises CalledProcessError if modprobe fails."""
    command = ['modprobe', module]
    return subprocess.check_call(command)
def init_w1():
    """Load the kernel modules needed for 1-Wire temperature sensors."""
    for module in ('w1-gpio', 'w1-therm'):
        modprobe(module)
def is_w1_sensor(path):
    """True-ish when *path* names a DS18B20-family device entry whose
    w1_slave data file actually exists."""
    match = W1_SENSOR_PATTERN.match(path)
    if not match:
        return match
    return os.path.isfile(sensor_full_path(path))
def sensor_full_path(sensor):
    """Return the absolute path of *sensor*'s w1_slave data file."""
    device_dir = os.path.join(W1_DEVICES, sensor)
    return os.path.join(device_dir, 'w1_slave')
def read_whole_file(path):
    """Return the entire contents of the file at *path* as a string."""
    with open(path, 'r') as handle:
        return handle.read()
class InvalidW1Address(Exception):
    """Raised when a string does not look like a supported 1-Wire address."""

    def __init__(self, address):
        self.address = address  # offending address, kept for handlers
        super(InvalidW1Address, self).__init__()
def guard_against_invalid_address(address):
    """Raise InvalidW1Address unless *address* matches the sensor pattern."""
    if W1_SENSOR_PATTERN.match(address) is None:
        raise InvalidW1Address(address)
class DS18b20(object):
    """A single DS18B20-family 1-Wire temperature sensor."""

    @staticmethod
    def find_all():
        """Return a DS18b20 for every valid sensor directory on the bus."""
        entries = os.listdir(W1_DEVICES)
        return [DS18b20(entry) for entry in entries if is_w1_sensor(entry)]

    def __init__(self, address):
        """Validate and remember the sensor's 1-Wire address."""
        guard_against_invalid_address(address)
        self.address = address

    def read(self):
        """Return the temperature in degrees Celsius, or None when the
        raw reading contains no 't=' temperature token."""
        readings = read_whole_file(sensor_full_path(self.address))
        _, token, tail = readings.partition('t=')
        if not token:
            return None
        return float(tail) / 1000
| mit | Python |
|
ba3643f6e2adc0c5c32134b5ec23403e97663237 | Create vFMCT.py | Jean-Gaby/cropTool | vFMCT.py | vFMCT.py | # -*- coding: utf-8 -*-
"""
@author: Jean-Gabriel JOLLY
"""
from tkinter import *
import PIL
from PIL import Image
import os
global rectangleList  # NOTE(review): `global` at module level is a no-op statement
rectangleList=[]  # canvas item ids of the frames currently drawn
global numberImage, numberRectangle,totalRectangle
numberImage, numberRectangle,totalRectangle = 0,0,0  # shared counters for the mouse callbacks
#Square position
global x1,x2,y1,y2
x1,x2,y1,y2=0,0,0,0  # corners of the frame currently being dragged
#===============
def leftClick(event):
    """Anchor the first corner of a new selection frame at the click point."""
    global x1, y1
    x1, y1 = event.x, event.y
    chaine.configure(text=str(event.x) + " " + str(event.y))
def holdLeftClick(event):
    """Stretch the rubber-band rectangle to follow the dragged pointer."""
    global numberRectangle
    status = "{} {}Frame object number {}".format(event.x, event.y, numberRectangle)
    chaine.configure(text=status)
    cadre.coords(rectangle, x1, y1, event.x, event.y)
def releaseLeftClick(event):
    # Finalize the dragged selection: hide the rubber-band rectangle, draw
    # a permanent one, bump the counters, and crop/save the selected region.
    cadre.coords(rectangle, 0, 0, 0, 0)
    global x2,y2,numberRectangle,rectangleList,totalRectangle
    chaine.configure(text = "Number of frames:" + str(numberRectangle+1))
    x2=event.x
    y2=event.y
    rectangleList.append(cadre.create_rectangle(x1,y1,x2,y2))
    numberRectangle += 1
    totalRectangle += 1
    ####CROPPING PART#####
    # Map the on-screen selection back to original-image pixels by undoing
    # the display scale factor `hpercent`, then save the crop as
    # name<N>.png (N = totalRectangle, matched by rightClick's undo).
    area = (x1/hpercent, y1/hpercent, x2/hpercent, y2/hpercent)
    cropped_img = img.crop(area)
    cropped_img.save('name' + str(totalRectangle) + '.png')
    ######################
def middleClick(event):
    # NOTE(review): increments the frame counter without drawing a frame or
    # saving a crop; a later rightClick will then try to delete a canvas
    # rectangle / file that was never created -- confirm intended behaviour.
    global numberRectangle
    numberRectangle += 1
    # Create and immediately delete a dummy rectangle (apparently to force
    # a canvas refresh).
    id1=cadre.create_rectangle(10,10,12,12)
    cadre.delete(id1)
def rightClick(event):
    # Undo the most recent selection: remove its canvas rectangle, delete
    # the saved crop file, and decrement both counters.
    global rectangleList, numberRectangle, totalRectangle
    if numberRectangle > 0:
        chaine.configure(text = "Erasing frame number ="+str(numberRectangle))
        cadre.delete(rectangleList[len(rectangleList)-1])
        del rectangleList[len(rectangleList)-1]
        # Saved crops are numbered by totalRectangle, so this removes the
        # file written by the matching releaseLeftClick.
        os.remove("name" + str(totalRectangle) + ".png")
        numberRectangle -= 1
        totalRectangle -= 1
    else:
        chaine.configure(text = "Nothing to erase")
fen = Tk()
fen.title('Very Fast Multiple Cropping Tool')
# NOTE(review): the names look swapped -- `height` holds the screen WIDTH
# and `width` the screen HEIGHT; they are only printed at exit, so this is
# harmless, but confirm before reusing them.
height=fen.winfo_screenwidth() #^\/
width=fen.winfo_screenheight() #<>
photo = PhotoImage(file="image3.png")
###DISPLAY RESIZE MODULE###
# Scale the image to fit the display: the target display height is the
# screen width minus 1000 px, and `hpercent` is the display/original scale
# factor (releaseLeftClick divides by it to map clicks back to image pixels).
baseheight = (fen.winfo_screenwidth()-1000) #size of the height of the screen
img = Image.open("image3.png")
hpercent = ((baseheight / float(img.size[1])))
print(hpercent)
wsize = int((float(img.size[0]) * float(hpercent)))
img2 = img.resize((wsize, baseheight), PIL.Image.ANTIALIAS)
###########################
# The resized image is written to disk because PhotoImage loads from a
# file; the temporary file is removed after the main loop exits.
img2.save("temporaryFile.png")
#photo2 = PhotoImage(file="image32bis.png")
photo2 = PhotoImage(file="temporaryFile.png")
cadre = Canvas(fen, width=photo2.width(), height=photo2.height(), bg="light yellow")
cadre.create_image(0, 0, anchor=NW, image=photo2) #BUG
# Mouse bindings: left drag selects a frame, middle bumps the counter,
# right-button release undoes the latest frame.
cadre.bind("<Button-1>", leftClick)
cadre.bind("<B1-Motion>", holdLeftClick)
cadre.bind("<ButtonRelease-1>", releaseLeftClick)
cadre.bind("<Button-2>", middleClick)
cadre.bind("<ButtonRelease-3> ", rightClick)
cadre.pack()
chaine = Label(fen)
chaine.pack()
rectangle=cadre.create_rectangle(0,0,0,0)
fen.mainloop()
os.remove("temporaryFile.png")
# Post-run summary, printed after the window is closed.
print(numberImage)
print(numberRectangle)
print(rectangleList)
print(height)
print(width)
| apache-2.0 | Python |
|
01a659318644ef47cfe0c9ad3c484a974fb31e25 | Create __init__.py | robertclf/FAFT,robertclf/FAFT | __init__.py | __init__.py | bsd-3-clause | Python |
||
cb454d310431700e5ac9883a32f0b36e2e50e0fe | Add a check for keystone expired tokens buildup. | aacole/ursula-monitoring,sivakom/ursula-monitoring,sivakom/ursula-monitoring,blueboxgroup/ursula-monitoring,blueboxgroup/ursula-monitoring,sivakom/ursula-monitoring,aacole/ursula-monitoring,aacole/ursula-monitoring,aacole/ursula-monitoring,sivakom/ursula-monitoring,blueboxgroup/ursula-monitoring,blueboxgroup/ursula-monitoring | sensu/plugins/check-keystone-expired-tokens.py | sensu/plugins/check-keystone-expired-tokens.py | #!/opt/openstack/current/keystone/bin/python
#
# Copyright 2015, Jesse Keating <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'keystone',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from keystone import cli
from keystone.common import environment
from keystone import token
from keystone.common import sql
from oslo.utils import timeutils
WATERMARK=1000
# Monkeypatch the sql Token class to add a method
from keystone.token.persistence.backends.sql import TokenModel
from keystone.token.persistence.backends.sql import Token
def monkeypatch_method(cls):
    """Decorator factory: attach the decorated function to *cls* as a method.

    The function is also returned unchanged, so it stays usable under its
    own name in the defining module.
    """
    def attach(func):
        setattr(cls, func.__name__, func)
        return func
    return attach
@monkeypatch_method(Token)
def list_tokens(self):
    # Attached to the keystone sql Token backend at import time (see the
    # monkeypatch decorator above).  Counts tokens whose expiry is in the
    # past and fails the health check once they exceed WATERMARK.
    session = sql.get_session()
    with session.begin():
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires < now)
        tokens = query.all()
        if len(tokens) > WATERMARK:
            # Non-zero exit signals the monitoring system that the check failed.
            print("Too many expired keystone tokens: %s" % len(tokens))
            sys.exit(1)
# Create a class for listing the tokens and add it to the keystone-manage
# command list
class TokenList(cli.BaseApp):
    """List tokens in the DB"""
    name = "token_list"  # command name as seen by keystone-manage
    @classmethod
    def main(cls):
        # Delegates to the list_tokens method monkeypatched onto the sql
        # Token driver above.
        token_manager = token.persistence.PersistenceManager()
        token_manager.driver.list_tokens()
# Register the new command with keystone-manage's command list.
cli.CMDS.append(TokenList)
# Now do our thing
if __name__ == '__main__':
    environment.use_stdlib()
    # Prefer an in-tree development config when one exists next to the script.
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'keystone.conf')
    config_files = None
    if os.path.exists(dev_conf):
        config_files = [dev_conf]
    # keystone-manage wants a command as a argv, so give it token_list
    sys.argv.append('token_list')
    cli.main(argv=sys.argv, config_files=config_files)
| apache-2.0 | Python |
|
ac40e54d22717fbf1a2444a67198cdba66506df8 | Add test for input setup workflow | architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst | cea/tests/test_inputs_setup_workflow.py | cea/tests/test_inputs_setup_workflow.py | import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
    """End-to-end check of the CEA input-setup (data management) workflow."""
    def setUp(self):
        # Fresh default configuration pointed at a throw-away project dir.
        self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
        self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
    def test_input_setup_workflow(self):
        # Draw the site polygon, then run every input-preparation script in
        # the order a user would: initialize, polygon, zone, surroundings,
        # terrain, streets, archetypes.  NOTE: hits OSM/network services.
        self.config.create_polygon.coordinates = POLYGON_COORDINATES
        self.config.create_polygon.filename = 'site'
        data_initializer.main(self.config)
        create_polygon.main(self.config)
        # TODO: Mock osmnx.create_footprints_download
        zone_helper.main(self.config)
        surroundings_helper.main(self.config)
        terrain_helper.main(self.config)
        streets_helper.main(self.config)
        archetypes_mapper.main(self.config)
if __name__ == '__main__':
    unittest.main()
| mit | Python |
|
8465d9a9b2c30b0b493bdf9ba24a29e39a51c1df | add dbutil to compute archive_begin for HADS sites | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/dbutil/compute_hads_sts.py | scripts/dbutil/compute_hads_sts.py | """Compute the archive start time of a HADS/DCP network"""
from pyiem.network import Table as NetworkTable
import sys
import psycopg2
import datetime
THISYEAR = datetime.datetime.now().year
# Module-level database connections, opened at import time: 'hads' holds
# the per-year raw observation tables, 'mesosite' the station metadata
# that this script updates.
HADSDB = psycopg2.connect(database='hads', host='iemdb')
MESOSITEDB = psycopg2.connect(database='mesosite', host='iemdb')
def do(network, sid):
    """Find the earliest raw observation time for station *sid*.

    Scans the per-year raw<year> tables backwards from the current year
    and stops at the first year with no data, returning the minimum valid
    time seen so far (or None if even the newest year is empty).
    NOTE(review): the *network* argument is currently unused, and the
    cursor is never closed.
    """
    cursor = HADSDB.cursor()
    running = None
    # We work backwards
    for yr in range(THISYEAR, 2001, -1):
        # Table name is interpolated (raw2015, raw2014, ...); yr comes
        # from range() so no untrusted input reaches the SQL text.
        cursor.execute("""
        SELECT min(valid) from raw""" + str(yr) + """
        WHERE station = %s
        """, (sid,))
        minv = cursor.fetchone()[0]
        if minv is None:
            return running
        running = minv
    return running
def main(argv):
    """Go main Go"""
    # argv[1] is the network identifier whose stations get refreshed.
    network = argv[1]
    nt = NetworkTable(network)
    for sid in nt.sts.keys():
        sts = do(network, sid)
        if sts is None:
            continue
        # Only touch mesosite when the archive start is unset or stale.
        if (nt.sts[sid]['archive_begin'] is None or
                nt.sts[sid]['archive_begin'] != sts):
            osts = nt.sts[sid]['archive_begin']
            f = "%Y-%m-%d %H:%M"
            print(("%s [%s] new sts: %s OLD sts: %s"
                   ) % (sid, network, sts.strftime(f),
                        osts.strftime(f) if osts is not None else 'null'))
            cursor = MESOSITEDB.cursor()
            cursor.execute("""UPDATE stations SET archive_begin = %s
                WHERE id = %s and network = %s""", (sts, sid, network))
            cursor.close()
    # One commit for all station updates, after the loop completes.
    MESOSITEDB.commit()
if __name__ == '__main__':
    main(sys.argv)
| mit | Python |
|
582b5c598da5b35032447f0eb7888051b84f844c | Add datetime to fast cache | porduna/appcomposer,morelab/appcomposer,porduna/appcomposer,morelab/appcomposer,morelab/appcomposer,go-lab/appcomposer,go-lab/appcomposer,porduna/appcomposer,morelab/appcomposer,porduna/appcomposer,go-lab/appcomposer,go-lab/appcomposer | alembic/versions/20860ffde766_add_datetime_to_fastcache.py | alembic/versions/20860ffde766_add_datetime_to_fastcache.py | """Add datetime to fastcache
Revision ID: 20860ffde766
Revises: 471e6f7722a7
Create Date: 2015-04-14 07:44:36.507406
"""
# revision identifiers, used by Alembic.
revision = '20860ffde766'
down_revision = '471e6f7722a7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add a (nullable, indexed) datetime column to TranslationFastCaches
    # so cached entries can be aged/queried by time.
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('TranslationFastCaches', sa.Column('datetime', sa.DateTime(), nullable=True))
    op.create_index(u'ix_TranslationFastCaches_datetime', 'TranslationFastCaches', ['datetime'], unique=False)
    ### end Alembic commands ###
def downgrade():
    # Reverse of upgrade(): drop the index first, then the column.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationFastCaches_datetime', table_name='TranslationFastCaches')
    op.drop_column('TranslationFastCaches', 'datetime')
    ### end Alembic commands ###
| bsd-2-clause | Python |
|
f54f427c16b394ff1ea0f55875bfb9d02e7264b0 | add SiD calculator. | cvn001/codonPY | src/get_SiD.py | src/get_SiD.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Introduction: This script is used to calculate similarity index (SiD)
# Created by Xiangchen Li on 2017/3/19 21:15
from collections import defaultdict
from src.global_items import genetic_code
# Codons excluded from the comparison: the three stop codons plus ATG (Met)
# and TGG (Trp), which are encoded by a single codon and therefore carry no
# synonymous-usage information.
EXCLUDED_CODONS = ("TAG", "TAA", "TGA", "ATG", "TGG")


def _read_rscu(rscu_file):
    """Parse a tab-separated RSCU file (one header line) into {codon: RSCU}."""
    rscu_dict = defaultdict()
    with open(rscu_file, 'r') as handle:
        for each_line in handle.readlines()[1:]:
            fields = each_line.strip().split('\t')
            rscu_dict[fields[0]] = float(fields[1])
    return rscu_dict


def get_sid(virus_rscu_file, host_rscu_file):
    """Return the similarity index D(A,B) between virus and host codon usage.

    R(A,B) is defined as the cosine value of the angle included
    between the A and B spatial vectors, and represents the degree of
    similarity between the virus and host overall codon usage patterns.
    D(A,B) represents the potential effect of the overall codon usage
    of the host on that of virus, and its value ranges from 0 to 1.0.
    """
    virus_rscu_dict = _read_rscu(virus_rscu_file)
    host_rscu_dict = _read_rscu(host_rscu_file)
    aa = 0
    bb = 0
    cc = 0
    # The original deleted the excluded codons from the shared, imported
    # genetic_code dict, corrupting it for every other user of the module
    # and making a second call to get_sid() raise KeyError; skip them
    # during iteration instead.
    for codon in genetic_code.keys():
        if codon in EXCLUDED_CODONS:
            continue
        aa += virus_rscu_dict[codon] * host_rscu_dict[codon]
        bb += pow(virus_rscu_dict[codon], 2)
        cc += pow(host_rscu_dict[codon], 2)
    rr = aa / pow(bb * cc, 0.5)  # rr -> R(A,B)
    dd = (1 - rr) / 2  # dd -> D(A,B)
    return dd
| mit | Python |
|
cd9f80c1567c945fe40e02af56433c49c6ddad65 | Create lintcode_二进制求和.py | wangyangkobe/leetcode,wangyangkobe/leetcode,wangyangkobe/leetcode,wangyangkobe/leetcode | lintcode_二进制求和.py | lintcode_二进制求和.py | /**
# http://www.lintcode.com/zh-cn/problem/add-binary/
# Given two binary strings, return their sum as a binary string.
# Example: a = "11", b = "1" -> "100"
# (The original header used a C-style /** */ comment, which is a Python
# syntax error; it is replaced with # comments and translated to English.)
class Solution:
    # @param {string} a a number
    # @param {string} b a number
    # @return {string} the result
    def addBinary(self, a, b):
        """Add two binary strings digit by digit with an explicit carry.

        Digits are processed least-significant first; integer division
        uses // so the carry stays an int on both Python 2 and Python 3
        (the original used /, which yields a float on Python 3 and
        corrupts the result).
        """
        a = a[::-1]
        b = b[::-1]
        result = []
        carry = 0
        for i in range(max(len(a), len(b))):
            digit_a = int(a[i]) if i < len(a) else 0
            digit_b = int(b[i]) if i < len(b) else 0
            total = digit_a + digit_b + carry
            result.append(str(total % 2))
            carry = total // 2
        if carry != 0:
            result.append(str(carry))
        return ''.join(result[::-1])
| mit | Python |
|
65b362985d502440b12efc8a6a49ab0603354fd2 | Add script to count emotional sentences according to LIWC | NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts | liwc_emotional_sentences.py | liwc_emotional_sentences.py | """Count the numbers of annotated entities and emotional sentences in the
corpus that was manually annotated.
Usage: python annotation_statistics.py <dir containing the folia files with
EmbodiedEmotions annotations>
"""
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import sentence, note
import argparse
import os
from collections import Counter
import json
import codecs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the dir containing the '
'FoLiA XML files that should be processed.')
args = parser.parse_args()
dir_name = args.dir_name
act_tag = '{http://ilk.uvt.nl/folia}div'
cur_dir = os.getcwd()
os.chdir(dir_name)
folia_counter = 0
num_sent = 0
num_emotional = 0
stats = Counter()
entity_words = {}
text_stats = {}
emotional_cats = ['liwc-Posemo', 'liwc-Negemo']
print 'Files'
for file_name in os.listdir(dir_name):
folia_counter += 1
print '{}'.format(file_name)
text_id = file_name[0:13]
text_stats[text_id] = Counter()
sents = set()
# load document
context = etree.iterparse(file_name,
events=('start', 'end'),
tag=act_tag,
huge_tree=True)
for event, elem in context:
if event == 'end' and elem.get('class') == 'act':
# load act into memory
act_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = act_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
# some t elements appear to be empty (this is not
# allowed, but it happens). So, check whether there is
# a string to add before adding it.
if sent.t:
if sent.t.string:
s = sent.t.string
# calculate stats only for unique sentences in text
if s and s not in sents:
sents.add(s)
num_sent += 1
entities = sent.find_all('entity')
emotional = False
for entity in entities:
e = entity.attrs.get('class')
if e in emotional_cats:
emotional = True
if emotional:
num_emotional += 1
del context
# clear memory
# results in segmentation fault (for some reason)
#if delete:
# elem.clear()
# while elem.getprevious() is not None:
# del elem.getparent()[0]
# del context
# print stats
print '\nBasic stats'
print '{} sentences in {} files'.format(num_sent, folia_counter)
perc = float(num_emotional)/float(num_sent)*100.0
print '{} emotional sentences ({:.2f}%)'.format(num_emotional, perc)
| apache-2.0 | Python |
|
c910e1898c1e49c60877e092032daebd289c6f31 | add scripts to export from env file to profile | N402/NoahsArk,N402/NoahsArk | scripts/env2profile.py | scripts/env2profile.py | #!/usr/bin/evn python
import os
import re
import sys
line_re = re.compile('(\S+?)\s*?=\s*?(\S+?)$')


def env2profile(env_path, out_path):
    """Convert NAME=value lines from *env_path* into `export NAME=value`
    lines written to *out_path*; lines that do not match are skipped.
    """
    exports = []
    with open(env_path, 'r') as env_file:
        for raw_line in env_file.readlines():
            found = line_re.findall(raw_line)
            if found and len(found[0]) == 2:
                key, val = found[0]
                exports.append('export %s=%s' % (key, val))
    with open(out_path, 'w') as out_file:
        out_file.write('\n'.join(exports))
if __name__ == '__main__':
    # Usage: env2profile.py <env-file> <output-profile>  (Python 2 script;
    # the print statement below is py2-only syntax.)
    if len(sys.argv) == 3:
        _, env_path, out_path = sys.argv
        env2profile(env_path, out_path)
    else:
        print 'Wrong numbers of args'
| mit | Python |
|
8351d98c3036021507a75b65e424d02942f09633 | Add alembic upgrade info | HERA-Team/Monitor_and_Control,HERA-Team/hera_mc,HERA-Team/hera_mc | alembic/versions/3d3c72ecbc0d_add_rtp_task_resource_record_table.py | alembic/versions/3d3c72ecbc0d_add_rtp_task_resource_record_table.py | """Add rtp_task_resource_record table
Revision ID: 3d3c72ecbc0d
Revises: c9a1ff35c6ed
Create Date: 2018-01-20 21:35:16.716477+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3d3c72ecbc0d'
down_revision = 'c9a1ff35c6ed'
branch_labels = None
depends_on = None
def upgrade():
    # Create the table recording per-task compute-resource usage (runtime
    # window, peak memory, average CPU load) for each RTP task run against
    # an observation; keyed by (obsid, task_name) with obsid referencing
    # hera_obs.
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rtp_task_resource_record',
    sa.Column('obsid', sa.BigInteger(), nullable=False),
    sa.Column('task_name', sa.Text(), nullable=False),
    sa.Column('start_time', sa.BigInteger(), nullable=False),
    sa.Column('stop_time', sa.BigInteger(), nullable=False),
    sa.Column('max_memory', sa.Float(), nullable=True),
    sa.Column('avg_cpu_load', sa.Float(), nullable=True),
    sa.ForeignKeyConstraint(['obsid'], ['hera_obs.obsid'], ),
    sa.PrimaryKeyConstraint('obsid', 'task_name')
    )
    # ### end Alembic commands ###
def downgrade():
    # Reverse of upgrade(): drop the resource-record table.
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('rtp_task_resource_record')
    # ### end Alembic commands ###
| bsd-2-clause | Python |
|
843e6f0ccb73a387e151d7f40ef7a2b4fc1597e0 | test getmap | nimral/pathmap | pathmap/test/test_getmap.py | pathmap/test/test_getmap.py | import unittest
from .. getmap import MapDownloader
class TestGetmap(unittest.TestCase):
    # Placeholder test case for getmap.MapDownloader; no tests written yet.
    pass
if __name__ == '__main__':
    unittest.main()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.