commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang |
---|---|---|---|---|---|---|---|---|
7c120c02097bfaa1f494627ac93d6cddf5fb9049 | FIX adding newline for chunks | ecarreras/cutools | cutools/diff/__init__.py | cutools/diff/__init__.py | from hashlib import md5
from clint.textui import puts, colored
def clean_diff(diff):
"""Removes diff header from a diff.
"""
res = []
skip = True
for line in diff.split('\n'):
if line.startswith('diff --git'):
skip = True
if line.startswith('@@ '):
skip = False
if not skip:
res.append(line)
return '\n'.join(res)
def print_diff(diff):
"""Prints colored diff.
"""
for line in diff.split('\n'):
line = unicode(line).encode('utf-8')
if line.startswith('+'):
puts(colored.green(line))
elif line.startswith('-'):
puts(colored.red(line))
else:
puts(line)
def get_chunks(diff):
"""Returns a list with all the chunks in this diff.
"""
diff = clean_diff(diff)
chunk = []
chunks = []
for line in diff.split('\n'):
if not line:
continue
if line.startswith('@@ '):
if chunk:
chunks.append('\n'.join(chunk) + '\n')
chunk = [line]
else:
chunk.append(line)
if chunk:
chunks.append('\n'.join(chunk) + '\n')
return chunks
def get_hashed_chunks(chunks):
chunks_dict = {}
for chunk in chunks:
chunks_dict[md5(unicode(chunk).encode('utf-8')).hexdigest()] = chunk
return chunks_dict
def clean_chunk(chunk):
"""Clean headers from chunk.
"""
return '\n'.join([x[1:] for x in chunk.split('\n')
if x and x[0] not in ('-', '@')])
def chunk_in_text(chunk, text):
"""Checks if chunk is inside text.
"""
chunk = clean_chunk(chunk)
return text.find(chunk) >= 0
| from hashlib import md5
from clint.textui import puts, colored
def clean_diff(diff):
"""Removes diff header from a diff.
"""
res = []
skip = True
for line in diff.split('\n'):
if line.startswith('diff --git'):
skip = True
if line.startswith('@@ '):
skip = False
if not skip:
res.append(line)
return '\n'.join(res)
def print_diff(diff):
"""Prints colored diff.
"""
for line in diff.split('\n'):
line = unicode(line).encode('utf-8')
if line.startswith('+'):
puts(colored.green(line))
elif line.startswith('-'):
puts(colored.red(line))
else:
puts(line)
def get_chunks(diff):
"""Returns a list with all the chunks in this diff.
"""
diff = clean_diff(diff)
chunk = []
chunks = []
for line in diff.split('\n'):
if not line:
continue
if line.startswith('@@ '):
if chunk:
chunks.append('\n'.join(chunk))
chunk = [line]
else:
chunk.append(line)
if chunk:
chunks.append('\n'.join(chunk))
return chunks
def get_hashed_chunks(chunks):
chunks_dict = {}
for chunk in chunks:
chunks_dict[md5(unicode(chunk).encode('utf-8')).hexdigest()] = chunk
return chunks_dict
def clean_chunk(chunk):
"""Clean headers from chunk.
"""
return '\n'.join([x[1:] for x in chunk.split('\n')
if x and x[0] not in ('-', '@')])
def chunk_in_text(chunk, text):
"""Checks if chunk is inside text.
"""
chunk = clean_chunk(chunk)
return text.find(chunk) >= 0
| isc | Python |
c8fa72a130d84d921b23f5973dafb8fa91367381 | Make ip_type a RadioSelect in the PTR form | drkitty/cyder,murrown/cyder,zeeman/cyder,murrown/cyder,OSU-Net/cyder,murrown/cyder,OSU-Net/cyder,akeym/cyder,akeym/cyder,zeeman/cyder,akeym/cyder,murrown/cyder,akeym/cyder,drkitty/cyder,zeeman/cyder,OSU-Net/cyder,zeeman/cyder,drkitty/cyder,OSU-Net/cyder,drkitty/cyder | cyder/cydns/ptr/forms.py | cyder/cydns/ptr/forms.py | from django import forms
from cyder.cydns.forms import DNSForm
from cyder.cydns.ptr.models import PTR
class PTRForm(DNSForm):
def delete_instance(self, instance):
instance.delete()
class Meta:
model = PTR
exclude = ('ip', 'reverse_domain', 'ip_upper',
'ip_lower')
widgets = {'views': forms.CheckboxSelectMultiple,
'ip_type': forms.RadioSelect}
| from django import forms
from cyder.cydns.forms import DNSForm
from cyder.cydns.ptr.models import PTR
class PTRForm(DNSForm):
def delete_instance(self, instance):
instance.delete()
class Meta:
model = PTR
exclude = ('ip', 'reverse_domain', 'ip_upper',
'ip_lower')
widgets = {'views': forms.CheckboxSelectMultiple}
| bsd-3-clause | Python |
e68c85ae4526557efd0d3c1bd45857583d542659 | handle errors in better bibtex | rafaqz/citation.vim | python/citation_vim/zotero/betterbibtex.py | python/citation_vim/zotero/betterbibtex.py | # -*- coding: utf-8 -*-
import os
import shutil
import json
import sqlite3
class betterBibtex(object):
def __init__(self, zotero_path, cache_path):
self.bb_file = os.path.join(zotero_path, 'better-bibtex/db.json')
self.bb_database = os.path.join(zotero_path, 'betterbibtex-lokijs.sqlite')
self.bb_copy = os.path.join(cache_path, 'betterbibtex.sqlite')
bb_data_query = u"""
select lokijs.data
from lokijs
where lokijs.name = "db.json"
"""
def load_citekeys(self):
"""
Loads better-bibtex citekeys if they exist.
"""
# The storage method for betterbibtex keeps changing so we'll try a few.
try:
bb_data = open(self.bb_file).read()
bb_json = json.loads(bb_data)
except:
try:
desc_strings.append(getattr(entry, desc_field))
shutil.copyfile(self.bb_database, self.bb_copy)
conn = sqlite3.connect(self.bb_copy)
cur = conn.cursor()
cur.execute(self.bb_data_query)
bb_data = cur.fetchone()[0]
bb_json = json.loads(bb_data)
except:
return {}
citekeys = {}
try:
for item in bb_json['collections'][0]['data']:
if 'citekey' in item and 'itemID' in item:
citekeys[item['itemID']] = item['citekey']
else:
citekeys[item['itemID']] = ""
except:
return {}
return citekeys
| # -*- coding: utf-8 -*-
import os
import shutil
import json
import sqlite3
class betterBibtex(object):
def __init__(self, zotero_path, cache_path):
self.bb_file = os.path.join(zotero_path, 'better-bibtex/db.json')
self.bb_database = os.path.join(zotero_path, 'betterbibtex-lokijs.sqlite')
self.bb_copy = os.path.join(cache_path, 'betterbibtex.sqlite')
bb_data_query = u"""
select lokijs.data
from lokijs
where lokijs.name = "db.json"
"""
def load_citekeys(self):
"""
Loads better-bibtex citekeys if they exist.
"""
# The storage method for betterbibtex keeps changing so we'll try a few.
try:
bb_data = open(self.bb_file).read()
bb_json = json.loads(bb_data)
except:
try:
desc_strings.append(getattr(entry, desc_field))
shutil.copyfile(self.bb_database, self.bb_copy)
conn = sqlite3.connect(self.bb_copy)
cur = conn.cursor()
cur.execute(self.bb_data_query)
bb_data = cur.fetchone()[0]
bb_json = json.loads(bb_data)
except:
return {}
citekeys = {}
for item in bb_json['collections'][0]['data']:
if 'citekey' in item:
citekeys[item['itemID']] = item['citekey']
else:
citekeys[item['itemID']] = ""
return citekeys
| mit | Python |
cbdbe14365d5caad28fe77d9c2ca1c66cbf783bd | test travis turning off db switch | sdss/marvin,bretthandrews/marvin,albireox/marvin,bretthandrews/marvin,sdss/marvin,bretthandrews/marvin,albireox/marvin,albireox/marvin,sdss/marvin,bretthandrews/marvin,albireox/marvin,sdss/marvin | python/marvin/tests/misc/test_db_switch.py | python/marvin/tests/misc/test_db_switch.py | #!/usr/bin/env python2
# encoding: utf-8
#
# test_db_switch.py
#
# Created by José Sánchez-Gallego on Sep 7, 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
def create_connection(db_name):
"""Creates the connection and import the model classes."""
from marvin.db.DatabaseConnection import DatabaseConnection
database_connection_string = 'postgresql+psycopg2:///{0}'.format(db_name)
db = DatabaseConnection(database_connection_string=database_connection_string)
import marvin.db.models.DataModelClasses as mangaData
return db, mangaData
def perform_query(db, mangaData):
"""Performs a simple query and return the value."""
session = db.Session()
xfocal = session.query(mangaData.Cube.xfocal).filter(
mangaData.Cube.plate == 8485, mangaData.Cube.mangaid == '1-209232').join(
mangaData.PipelineInfo, mangaData.PipelineVersion).filter(
mangaData.PipelineVersion.version == 'v1_5_1').one()
return xfocal
# db_name = 'manga'
# db, mangaData = create_connection(db_name)
# print(perform_query(db, mangaData))
# db_name_copy = 'manga_copy'
# db, mangaData = create_connection(db_name_copy)
# print(perform_query(db, mangaData))
| #!/usr/bin/env python2
# encoding: utf-8
#
# test_db_switch.py
#
# Created by José Sánchez-Gallego on Sep 7, 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
def create_connection(db_name):
"""Creates the connection and import the model classes."""
from marvin.db.DatabaseConnection import DatabaseConnection
database_connection_string = 'postgresql+psycopg2:///{0}'.format(db_name)
db = DatabaseConnection(database_connection_string=database_connection_string)
import marvin.db.models.DataModelClasses as mangaData
return db, mangaData
def perform_query(db, mangaData):
"""Performs a simple query and return the value."""
session = db.Session()
xfocal = session.query(mangaData.Cube.xfocal).filter(
mangaData.Cube.plate == 8485, mangaData.Cube.mangaid == '1-209232').join(
mangaData.PipelineInfo, mangaData.PipelineVersion).filter(
mangaData.PipelineVersion.version == 'v1_5_1').one()
return xfocal
db_name = 'manga'
db, mangaData = create_connection(db_name)
print(perform_query(db, mangaData))
db_name_copy = 'manga_copy'
db, mangaData = create_connection(db_name_copy)
print(perform_query(db, mangaData))
| bsd-3-clause | Python |
c8df75a2112cd8e6a4f929ceac21714b716e46ce | Use the IRC nickname for !twitter if one is not provided. | DASPRiD/DASBiT | dasbit/plugin/twitter.py | dasbit/plugin/twitter.py | from twisted.web.client import getPage
from urllib import urlencode
import json
class Twitter:
def __init__(self, manager):
self.client = manager.client
manager.registerCommand('twitter', 'lookup', 'twitter', '(?P<query>.*?)', self.lookup)
def lookup(self, source, query):
if query.isdigit():
url = 'http://api.twitter.com/1/statuses/show/%s.json' % query
elif len(query) > 0:
url = 'http://api.twitter.com/1/users/show.json?%s' % urlencode({'screen_name' : query})
else:
url = 'http://api.twitter.com/1/users/show.json?%s' % urlencode({'screen_name' : source.prefix['nickname']})
getPage(url).addCallback(self._returnResult, source, query.isdigit())
def _returnResult(self, value, source, isNumericLookup):
try:
data = json.loads(value)
except:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if 'error' in data:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if isNumericLookup:
user = data['user']['screen_name']
text = data['text']
id = data['id_str']
else:
user = data['screen_name']
text = data['status']['text']
id = data['status']['id_str']
url = 'https://twitter.com/#!/%s/status/%s' % (user, id)
self.client.reply(source, '<%s> %s (%s)' % (user, text, url))
| from twisted.web.client import getPage
from urllib import urlencode
import json
class Twitter:
def __init__(self, manager):
self.client = manager.client
manager.registerCommand('twitter', 'lookup', 'twitter', '(?P<query>.*?)', self.lookup)
def lookup(self, source, query):
if query.isdigit():
url = 'http://api.twitter.com/1/statuses/show/%s.json' % query
else:
url = 'http://api.twitter.com/1/users/show.json?%s' % urlencode({'screen_name' : query})
getPage(url).addCallback(self._returnResult, source, query.isdigit())
def _returnResult(self, value, source, isNumericLookup):
try:
data = json.loads(value)
except:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if 'error' in data:
self.client.reply(source, 'An error occured while processing the result', 'notice')
return
if isNumericLookup:
user = data['user']['screen_name']
text = data['text']
id = data['id_str']
else:
user = data['screen_name']
text = data['status']['text']
id = data['status']['id_str']
url = 'https://twitter.com/#!/%s/status/%s' % (user, id)
self.client.reply(source, '<%s> %s (%s)' % (user, text, url))
| bsd-3-clause | Python |
774da53edef30cb2f3c45cc47c63d46f142a4e07 | Use four space indentation, repo_path to arguments | RepoReapers/reaper,RepoReapers/reaper,RepoReapers/reaper,RepoReapers/reaper | score_repo.py | score_repo.py | #!/usr/bin/env python3
import argparse
import importlib
import json
import os
import sys
def load_attribute_plugins(attributes):
for attribute in attributes:
if attribute['enabled']:
try:
attribute['implementation'] = importlib.import_module("attributes.{0}.main".format(attribute['name']))
except ImportError:
print("Failed to load the {0} attribute.".format(attribute['name']))
def process_configuration(config_file):
try:
config = json.load(config_file)
return config
except:
print("Malformatted or missing configuration.")
sys.exit(2)
def repository_path(path_string):
if os.path.exists(path_string):
if os.path.exists("{0}/.git".format(path_string)):
return path_string
else:
raise argparse.ArgumentTypeError("{0} is not a git repository.".format(path_string))
else:
raise argparse.ArgumentTypeError("{0} is not a directory.".format(path_string))
def process_arguments():
parser = argparse.ArgumentParser(description='Calculate the score of a repository.')
parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='config.json', dest='config_file', help='Path to the configuration file.')
parser.add_argument('repository_id', type=int, nargs=1, help='Identifier for a repository as it appears in the GHTorrent database.')
parser.add_argument('repository_path', type=repository_path, nargs=1, help='Path to the repository source code.')
if len(sys.argv) is 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
args = process_arguments()
config = process_configuration(args.config_file)
attributes = config['attributes']
load_attribute_plugins(attributes)
score = 0
for attribute in attributes:
result = attribute['implementation'].run(config.repository_id, config.repository_path, attribute['options'])
score += result * attribute['weight']
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Caught interrupt, exiting.")
sys.exit(1)
| #!/usr/bin/env python3
import argparse
import importlib
import json
import sys
def loadAttributePlugins(attributes):
for attribute in attributes:
if attribute['enabled']:
try:
attribute['implementation'] = importlib.import_module("attributes.{0}.main".format(attribute['name']))
except ImportError:
print("Failed to load the {0} attribute.".format(attribute['name']))
def processConfiguration(config_file):
try:
config = json.load(config_file)
return config
except:
print("Malformatted or missing configuration.")
sys.exit(2)
def processArguments():
parser = argparse.ArgumentParser(description='Calculate the score of a repository.')
parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='config.json', dest='config_file', help='Path to the configuration file.')
parser.add_argument('repository_id', type=int, nargs=1, help='Identifier for a repository as it appears in the GHTorrent database.')
if len(sys.argv) is 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
args = processArguments()
config = processConfiguration(args.config_file)
attributes = config['attributes']
loadAttributePlugins(attributes)
score = 0
for attribute in attributes:
result = attribute['implementation'].run(metadata, repo_path, attribute['options'])
score += result * attribute['weight']
if __name__ == '__main__':
main()
| apache-2.0 | Python |
40bbb06c222d6be1f59d32204a7636eaf023e5d4 | reduce minimal read count per segment to 200 | medvir/SmaltAlign,medvir/SmaltAlign,medvir/SmaltAlign | select_ref.py | select_ref.py | #!/opt/miniconda/bin/python3
import sys
import subprocess
from textwrap import fill
import pandas as pd
from Bio import SeqIO
readfile = sys.argv[1]
allrefs = dict([(s.id.split('_')[0], str(s.seq))
for s in SeqIO.parse('/rv_home/stschmu/Repositories/SmaltAlign/References/flugenomes.fasta', 'fasta')])
# index flugenomes.fasta
cml = 'bwa index /rv_home/stschmu/Repositories/SmaltAlign/References/flugenomes.fasta'
subprocess.call(cml, shell=True)
# align against all genomes
cml = 'bwa mem -t 24 /rv_home/stschmu/Repositories/SmaltAlign/References/flugenomes.fasta %s | samtools view -F 4 > aln.sam' % readfile
subprocess.call(cml, shell=True)
# extract accession number, segment, serotype
cml = 'cut -f 3 aln.sam | cut -d "_" -f 1-3 | tr -d ">" | tr "_" "\t" > ref.tsv'
subprocess.call(cml, shell=True)
# manipulate with pandas to find, for each segment, the sequence with most hits
df = pd.read_table('ref.tsv', names=['accn', 'segment', 'serotype'])
count_ref = df.groupby(['segment', 'accn', 'serotype']).size()
c = count_ref.reset_index(name='counts').sort_values(['segment', 'counts'], ascending=[True, False])
c.to_csv('counts.tsv', index=False, sep='\t')
print(c.groupby('segment').head(3))
for segment in range(1, 9):
counts = c[c['segment'] == segment]
if counts.counts.sum() < 200 or counts.empty:
print(segment, 'not enough')
continue
best_acc = counts.accn.tolist()[0]
print(segment, best_acc)
best_seq = allrefs[best_acc]
with open('segment-%d.fasta' % segment, 'w') as h:
h.write('>segment-%d-%s\n' % (segment, best_acc))
h.write(fill(best_seq, width=80))
| #!/opt/miniconda/bin/python3
import sys
import subprocess
from textwrap import fill
import pandas as pd
from Bio import SeqIO
readfile = sys.argv[1]
allrefs = dict([(s.id.split('_')[0], str(s.seq))
for s in SeqIO.parse('/rv_home/stschmu/Repositories/SmaltAlign/References/flugenomes.fasta', 'fasta')])
# index flugenomes.fasta
cml = 'bwa index /rv_home/stschmu/Repositories/SmaltAlign/References/flugenomes.fasta'
subprocess.call(cml, shell=True)
# align against all genomes
cml = 'bwa mem -t 24 /rv_home/stschmu/Repositories/SmaltAlign/References/flugenomes.fasta %s | samtools view -F 4 > aln.sam' % readfile
subprocess.call(cml, shell=True)
# extract accession number, segment, serotype
cml = 'cut -f 3 aln.sam | cut -d "_" -f 1-3 | tr -d ">" | tr "_" "\t" > ref.tsv'
subprocess.call(cml, shell=True)
# manipulate with pandas to find, for each segment, the sequence with most hits
df = pd.read_table('ref.tsv', names=['accn', 'segment', 'serotype'])
count_ref = df.groupby(['segment', 'accn', 'serotype']).size()
c = count_ref.reset_index(name='counts').sort_values(['segment', 'counts'], ascending=[True, False])
c.to_csv('counts.tsv', index=False, sep='\t')
print(c.groupby('segment').head(3))
for segment in range(1, 9):
counts = c[c['segment'] == segment]
if counts.counts.sum() < 1000 or counts.empty:
print(segment, 'not enough')
continue
best_acc = counts.accn.tolist()[0]
print(segment, best_acc)
best_seq = allrefs[best_acc]
with open('segment-%d.fasta' % segment, 'w') as h:
h.write('>segment-%d-%s\n' % (segment, best_acc))
h.write(fill(best_seq, width=80))
| mit | Python |
47c7cccc674beee06c2d4d6f6f197cb860d33354 | Update bno055.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/Calamity/bno055.py | home/Calamity/bno055.py | arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM11")
bno = Runtime.createAndStart("bno","Bno055")
bno.setController(arduino)
if bno.begin():
while (True):
event = bno.getEvent()
print event.orientation.x
print event.orientation.y
print event.orientation.z
sleep(1)
| arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM11")
bno = Runtime.createAndStart("bno","Bno055")
bno.setController(arduino)
if bno.begin():
event = bno.getEvent()
print event.orientation.x
print event.orientation.y
print event.orientation.z
| apache-2.0 | Python |
59f96d2ca0f3752052d870ef9c7bc5bc21f21e40 | add header | SiLab-Bonn/basil,SiLab-Bonn/basil,MarcoVogt/basil | host/pydaq/HL/tdc_s3.py | host/pydaq/HL/tdc_s3.py | #
# ------------------------------------------------------------
# Copyright (c) SILAB , Physics Institute of Bonn University
# ------------------------------------------------------------
#
# SVN revision information:
# $Rev:: $:
# $Author:: $:
# $Date:: $:
#
from HL.HardwareLayer import HardwareLayer
import struct
import array
class tdc_s3(HardwareLayer):
'''
TDC controller interface
'''
def __init__(self, intf, conf):
HardwareLayer.__init__(self, intf, conf)
'''
Resets the TDC controller module inside the FPGA, base adress zero
'''
def reset(self):
self._intf.write(self._conf['base_addr'], [0])
'''
Initialise the TDC controller module
'''
def init(self):
self.reset()
def set_en(self, enable):
current = self._intf.read(self._conf['base_addr'] + 1, 1)[0]
self._intf.write(self._conf['base_addr'] + 1, [(current & 0xfe) | enable])
def get_en(self):
return True if (self._intf.read(self._conf['base_addr'] + 1, 1)[0] & 0x01) else False
def set_exten(self, enable):
current = self._intf.read(self._conf['base_addr'] + 1, 4)
self._intf.write(self._conf['base_addr'] + 1, [(current[3] & 0xfe) | enable,current[2],current[1],current[0]])
def get_exten(self):
return True if (self._intf.read(self._conf['base_addr'] + 1, 4)[3] & 0x01) else False
| #
# ------------------------------------------------------------
# Copyright (c) SILAB , Physics Institute of Bonn University
# ------------------------------------------------------------
#
# SVN revision information:
# $Rev:: 1 $:
# $Author:: TheresaObermann $:
# $Date:: 2013-10-09 10:58:06 #$:
#
from HL.HardwareLayer import HardwareLayer
import struct
import array
class tdc_s3(HardwareLayer):
'''
TDC controller interface
'''
def __init__(self, intf, conf):
HardwareLayer.__init__(self, intf, conf)
'''
Resets the TDC controller module inside the FPGA, base adress zero
'''
def reset(self):
self._intf.write(self._conf['base_addr'], [0])
'''
Initialise the TDC controller module
'''
def init(self):
self.reset()
def set_en(self, enable):
current = self._intf.read(self._conf['base_addr'] + 1, 1)[0]
self._intf.write(self._conf['base_addr'] + 1, [(current & 0xfe) | enable])
def get_en(self):
return True if (self._intf.read(self._conf['base_addr'] + 1, 1)[0] & 0x01) else False
def set_exten(self, enable):
current = self._intf.read(self._conf['base_addr'] + 1, 4)
self._intf.write(self._conf['base_addr'] + 1, [(current[3] & 0xfe) | enable,current[2],current[1],current[0]])
def get_exten(self):
return True if (self._intf.read(self._conf['base_addr'] + 1, 4)[3] & 0x01) else False
| bsd-3-clause | Python |
f52921e78cc6a8af38df50f0b0ba4d04b15fd768 | fix the import error in db.py | giphub/gip,giphub/gip,giphub/gip | service/db.py | service/db.py | #coding=utf-8
import torndb
import datetime
from constants.errorcode import Errorcode
from util.gip_exception import GipException
class DB(object):
def __init__(self, application):
self.mysql_read = application.mysql_conn_read
self.mysql_write = application.mysql_conn_write
#self.mongo_conn = application.mongo_conn
def sample(self):
try:
sql = ''' select count(1) from tag'''
result = self.mysql_write.query(sql)
except:
pass
finally:
return result[0]
def get_article_by_id(self,id):
try:
sql = ''' select * from article where id =%s limit 1'''%(id)
result = self.mysql_write.query(sql)
except:
pass
finally:
return result[0]
| #coding=utf-8
import torndb
import datetime
from constants.errorcode import Errorcode
from util.lt_exception import LTException
class DB(object):
def __init__(self, application):
self.mysql_read = application.mysql_conn_read
self.mysql_write = application.mysql_conn_write
#self.mongo_conn = application.mongo_conn
def sample(self):
'''
        Example code
'''
try:
sql = ''' select count(1) from tag'''
result = self.mysql_write.query(sql)
except:
            # raise an exception
pass
finally:
return result[0]
def get_article_by_id(self,id):
'''
        Example code
'''
try:
sql = ''' select * from article where id =%s limit 1'''%(id)
result = self.mysql_write.query(sql)
except:
            # raise an exception
pass
finally:
return result[0]
| mit | Python |
0b4b57f90ee3d0fe0af3ba9921adccda784d6301 | Allow to order payment profile by name, type and status. | opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind | src/waldur_mastermind/invoices/filters.py | src/waldur_mastermind/invoices/filters.py | import django_filters
from rest_framework import filters
from waldur_core.core import filters as core_filters
from . import models
class InvoiceFilter(django_filters.FilterSet):
customer = core_filters.URLFilter(
view_name='customer-detail', field_name='customer__uuid'
)
customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
state = django_filters.MultipleChoiceFilter(choices=models.Invoice.States.CHOICES)
o = django_filters.OrderingFilter(fields=(('year', 'month'),))
class Meta:
model = models.Invoice
fields = ('year', 'month')
class PaymentProfileFilter(django_filters.FilterSet):
organization = core_filters.URLFilter(
view_name='customer-detail', field_name='organization__uuid'
)
organization_uuid = django_filters.UUIDFilter(field_name='organization__uuid')
payment_type = django_filters.MultipleChoiceFilter(
choices=models.PaymentType.CHOICES
)
o = django_filters.OrderingFilter(fields=(('name', 'payment_type', 'is_active'),))
class Meta:
model = models.PaymentProfile
fields = []
class PaymentProfileFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
if request.user.is_staff or request.user.is_support:
return queryset
return queryset.filter(is_active=True)
class PaymentFilter(django_filters.FilterSet):
profile = core_filters.URLFilter(
view_name='payment-profile-detail', field_name='profile__uuid'
)
profile_uuid = django_filters.UUIDFilter(field_name='profile__uuid')
class Meta:
model = models.Payment
fields = ['date_of_payment']
| import django_filters
from rest_framework import filters
from waldur_core.core import filters as core_filters
from . import models
class InvoiceFilter(django_filters.FilterSet):
customer = core_filters.URLFilter(
view_name='customer-detail', field_name='customer__uuid'
)
customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
state = django_filters.MultipleChoiceFilter(choices=models.Invoice.States.CHOICES)
o = django_filters.OrderingFilter(fields=(('year', 'month'),))
class Meta:
model = models.Invoice
fields = ('year', 'month')
class PaymentProfileFilter(django_filters.FilterSet):
organization = core_filters.URLFilter(
view_name='customer-detail', field_name='organization__uuid'
)
organization_uuid = django_filters.UUIDFilter(field_name='organization__uuid')
payment_type = django_filters.MultipleChoiceFilter(
choices=models.PaymentType.CHOICES
)
class Meta:
model = models.PaymentProfile
fields = []
class PaymentProfileFilterBackend(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
if request.user.is_staff or request.user.is_support:
return queryset
return queryset.filter(is_active=True)
class PaymentFilter(django_filters.FilterSet):
profile = core_filters.URLFilter(
view_name='payment-profile-detail', field_name='profile__uuid'
)
profile_uuid = django_filters.UUIDFilter(field_name='profile__uuid')
class Meta:
model = models.Payment
fields = ['date_of_payment']
| mit | Python |
af0fbfe74ecaac67fb37f03e01a9aefcd06ce83f | Change default scriptPubKey in coinbase | bitcoinxt/bitcoinxt,dagurval/bitcoinxt,dagurval/bitcoinxt,bitcoinxt/bitcoinxt,bitcoinxt/bitcoinxt,dagurval/bitcoinxt,bitcoinxt/bitcoinxt,dagurval/bitcoinxt,bitcoinxt/bitcoinxt,bitcoinxt/bitcoinxt,dagurval/bitcoinxt,dagurval/bitcoinxt | qa/rpc-tests/test_framework/blocktools.py | qa/rpc-tests/test_framework/blocktools.py | # blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp, OP_TRUE, OP_CHECKSIG
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(chr(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
counter=1
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(heightAdjust = 0, absoluteHeight = None, pubkey = None):
global counter
height = absoluteHeight if absoluteHeight is not None else counter+heightAdjust
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
counter += 1
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50*100000000
halvings = int((height)/150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey != None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, ""))
tx.calc_sha256()
return tx
| # blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(chr(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0, absoluteHeight = None):
global counter
height = absoluteHeight if absoluteHeight is not None else counter+heightAdjust
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
counter += 1
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50*100000000
halvings = int((height)/150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.scriptPubKey = ""
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, ""))
tx.calc_sha256()
return tx
| mit | Python |
4d413d45def838d730806097484d7ccf9d49744f | Fix to test code | zenotech/MyCluster,zenotech/MyCluster | mycluster/test.py | mycluster/test.py |
import mycluster
mycluster.init()
mycluster.create_submit('hybrid:hybrid.q',script_name='test.job',num_tasks=2,
tasks_per_node=2,
my_script='test.bsh',
user_email='[email protected]',
)
mycluster.submit('test.job')
for i in mycluster.job_list():
print i, mycluster.get_job(i).status |
import mycluster
mycluster.create_submit('hybrid:hybrid.q',script_name='test.job',num_tasks=2,
tasks_per_node=2,
my_script='test.bsh',
user_email='[email protected]',
)
mycluster.submit('test.job')
for i in mycluster.job_list():
print i, mycluster.get_job(i).status | bsd-3-clause | Python |
4c29471af61989e852a813999cf37aa9a8acf76d | test anon to /users endpoint | awemulya/fieldsight-kobocat,mainakibui/kobocat,piqoni/onadata,spatialdev/onadata,GeoODK/onadata,smn/onadata,qlands/onadata,jomolinare/kobocat,GeoODK/onadata,spatialdev/onadata,mainakibui/kobocat,mainakibui/kobocat,mainakibui/kobocat,kobotoolbox/kobocat,spatialdev/onadata,hnjamba/onaclone,sounay/flaminggo-test,kobotoolbox/kobocat,smn/onadata,hnjamba/onaclone,hnjamba/onaclone,smn/onadata,hnjamba/onaclone,smn/onadata,qlands/onadata,kobotoolbox/kobocat,jomolinare/kobocat,awemulya/fieldsight-kobocat,jomolinare/kobocat,piqoni/onadata,jomolinare/kobocat,sounay/flaminggo-test,sounay/flaminggo-test,qlands/onadata,sounay/flaminggo-test,spatialdev/onadata,piqoni/onadata,piqoni/onadata,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,GeoODK/onadata,kobotoolbox/kobocat,GeoODK/onadata,qlands/onadata | onadata/apps/api/tests/viewsets/test_user_viewset.py | onadata/apps/api/tests/viewsets/test_user_viewset.py | import json
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
class TestUserViewSet(TestAbstractViewSet):
def setUp(self):
super(self.__class__, self).setUp()
def test_user_list(self):
view = UserViewSet.as_view({'get': 'list'})
request = self.factory.get('/', **self.extra)
response = view(request)
data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
self.assertContains(response, json.dumps(data))
def test_user_list_anon(self):
view = UserViewSet.as_view({'get': 'list'})
request = self.factory.get('/')
response = view(request)
data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
self.assertContains(response, json.dumps(data))
def test_user_get(self):
view = UserViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/', **self.extra)
response = view(request, username='bob')
data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
self.assertContains(response, json.dumps(data))
def test_user_anon_get(self):
view = UserViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/')
response = view(request, username='bob')
data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
self.assertContains(response, json.dumps(data))
| import json
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
class TestUserViewSet(TestAbstractViewSet):
def setUp(self):
super(self.__class__, self).setUp()
def test_user_list(self):
view = UserViewSet.as_view({'get': 'list'})
request = self.factory.get('/', **self.extra)
response = view(request)
data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
self.assertContains(response, json.dumps(data))
def test_user_get(self):
view = UserViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/', **self.extra)
response = view(request, username='bob')
data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
self.assertContains(response, json.dumps(data))
def test_user_anon_get(self):
view = UserViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/')
response = view(request, username='bob')
data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
self.assertContains(response, json.dumps(data))
| bsd-2-clause | Python |
9e30bc38cfa3cb000ab2d84730552d50ea604ac1 | configure wsgi file to use whitenoise | arnaudlimbourg/heroku-libsass-python,arnaudlimbourg/heroku-libsass-python,arnaudlimbourg/heroku-libsass-python | heroku-libsass-python/wsgi.py | heroku-libsass-python/wsgi.py | """
WSGI config for project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = ".settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from configurations.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| """
WSGI config for project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = ".settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | Python |
d6371341c13ffe623755cf89ff03733c111bb994 | change to rga2 | NSLS-II-XPD/ipython_ophyd,NSLS-II-XPD/ipython_ophyd | profile_collection/startup/12-rga.py | profile_collection/startup/12-rga.py | ### This is RGA:2 configured for ExQ new RGA connected at 10.28.2.142 #####
from ophyd import Device, Component as Cpt
class RGA(Device):
startRGA = Cpt(EpicsSignal, 'Cmd:MID_Start-Cmd')
stopRGA = Cpt(EpicsSignal, 'Cmd:ScanAbort-Cmd')
mass1 = Cpt(EpicsSignalRO, 'P:MID1-I')
mass2 = Cpt(EpicsSignalRO, 'P:MID2-I')
mass3 = Cpt(EpicsSignalRO, 'P:MID3-I')
mass4 = Cpt(EpicsSignalRO, 'P:MID4-I')
mass5 = Cpt(EpicsSignalRO, 'P:MID5-I')
mass6 = Cpt(EpicsSignalRO, 'P:MID6-I')
mass7 = Cpt(EpicsSignalRO, 'P:MID7-I')
mass8 = Cpt(EpicsSignalRO, 'P:MID8-I')
mass9 = Cpt(EpicsSignalRO, 'P:MID9-I')
## We don't want the RGA to start and stop by any bluseky plan###
"""
def stage(self):
self.startRGA.put(1)
def unstage(self):
self.stopRGA.put(1)
def describe(self):
res = super().describe()
# This precision should be configured correctly in EPICS.
for key in res:
res[key]['precision'] = 12
return res
"""
rga = RGA('XF:28IDC-VA{RGA:2}',
name='rga',
read_attrs=['mass1', 'mass2', 'mass3', 'mass4','mass5', 'mass6', 'mass7', 'mass8', 'mass9'])
| from ophyd import Device, Component as Cpt
class RGA(Device):
startRGA = Cpt(EpicsSignal, 'Cmd:MID_Start-Cmd')
stopRGA = Cpt(EpicsSignal, 'Cmd:ScanAbort-Cmd')
mass1 = Cpt(EpicsSignalRO, 'P:MID1-I')
mass2 = Cpt(EpicsSignalRO, 'P:MID2-I')
mass3 = Cpt(EpicsSignalRO, 'P:MID3-I')
mass4 = Cpt(EpicsSignalRO, 'P:MID4-I')
mass5 = Cpt(EpicsSignalRO, 'P:MID5-I')
mass6 = Cpt(EpicsSignalRO, 'P:MID6-I')
mass7 = Cpt(EpicsSignalRO, 'P:MID7-I')
mass8 = Cpt(EpicsSignalRO, 'P:MID8-I')
mass9 = Cpt(EpicsSignalRO, 'P:MID9-I')
## We don't want the RGA to start and stop by any bluseky plan###
"""
def stage(self):
self.startRGA.put(1)
def unstage(self):
self.stopRGA.put(1)
def describe(self):
res = super().describe()
# This precision should be configured correctly in EPICS.
for key in res:
res[key]['precision'] = 12
return res
"""
rga = RGA('XF:28IDA-VA{RGA:1}',
name='rga',
read_attrs=['mass1', 'mass2', 'mass3', 'mass4','mass5', 'mass6', 'mass7', 'mass8', 'mass9'])
| bsd-2-clause | Python |
ec46226b0ae5e9d2c29aa07f2ec6749f96a36804 | add str isValidPalindrome | Daetalus/Algorithms | str/string_function.py | str/string_function.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
def reverseStr(input_str, begin, end):
# Pythonic way should be input_str[::-1]
str_list = list(input_str)
while begin < end:
str_list[begin], str_list[end] = str_list[end], str_list[begin]
begin += 1
end -= 1
return ''.join(str_list)
def reveseWord(input_str):
input_str = input_str[::-1]
str_list = input_str.split()
for i in xrange(len(str_list)):
str_list[i] = str_list[i][::-1]
return ' '.join(str_list)
def atoi(str):
# check is None first
if not str:
return 0
# Second, strip str
index = 0
while str[index] == ' ':
index += 1
# Third, check sign
positive = True
if str[index] == '-':
positive = False
index += 1
if str[index] == '+':
index += 1
# loop, get the result
result = 0
for i in xrange(index, len(str)):
# if not a digit, break, return current result
# Question: What about "213k"?
# return 0 or 213?
if not str[i].isdigit():
break
digit = ord(str[i]) - ord('0')
result = result * 10 + digit
# check overflow
if positive:
if result > 2147483647:
return 2147483647
return result
else:
if -result < -2147483648:
return -2147483648
return -result
def isPalindrome(self, s):
start = 0
end = len(s) - 1
while start < end:
while not s[start].isalnum() and start < end:
start += 1
while not s[end].isalnum() and start < end:
end -= 1
if start < end and s[start].lower() != s[end].lower():
return False
end -= 1
start += 1
return True
if __name__ == '__main__':
test = "I am a student."
result = reveseWord(test)
test1 = "World"
result = reveseWord(test1)
# result = reverseStr(test, 0, len(test) - 1)
print(result)
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
def reverseStr(input_str, begin, end):
# Pythonic way should be input_str[::-1]
str_list = list(input_str)
while begin < end:
str_list[begin], str_list[end] = str_list[end], str_list[begin]
begin += 1
end -= 1
return ''.join(str_list)
def reveseWord(input_str):
input_str = input_str[::-1]
str_list = input_str.split()
for i in xrange(len(str_list)):
str_list[i] = str_list[i][::-1]
return ' '.join(str_list)
def atoi(str):
# check is None first
if not str:
return 0
# Second, strip str
index = 0
while str[index] == ' ':
index += 1
# Third, check sign
positive = True
if str[index] == '-':
positive = False
index += 1
if str[index] == '+':
index += 1
# loop, get the result
result = 0
for i in xrange(index, len(str)):
# if not a digit, break, return current result
# Question: What about "213k"?
# return 0 or 213?
if not str[i].isdigit():
break
digit = ord(str[i]) - ord('0')
result = result * 10 + digit
# check overflow
if positive:
if result > 2147483647:
return 2147483647
return result
else:
if -result < -2147483648:
return -2147483648
return -result
if __name__ == '__main__':
test = "I am a student."
result = reveseWord(test)
test1 = "World"
result = reveseWord(test1)
# result = reverseStr(test, 0, len(test) - 1)
print(result)
| unlicense | Python |
39bf1b019897b71a3269e46816f11eefa32de507 | Fix argument order | NoRedInk/elm-ops-tooling,NoRedInk/elm-ops-tooling | elm_package.py | elm_package.py | #! /usr/bin/env python
"""
Load and save elm-package.json safely.
"""
# from typing import Dict, Tuple, IO
import copy
from collections import OrderedDict
import json
def load(fileobj):
# type: (IO[str]) -> Dict
return json.load(fileobj, object_pairs_hook=OrderedDict)
def dump(package, fileobj):
# type: (Dict, IO[str]) -> None
to_save = copy.deepcopy(package)
to_save['dependencies'] = sorted_deps(to_save['dependencies'])
json.dump(to_save, fileobj, sort_keys=False, indent=4, separators=(',', ': '))
def sorted_deps(deps):
# type: (Dict) -> Dict
return OrderedDict(sorted(deps.items()))
def sync_deps(from_deps, to_deps):
# type: (Dict, Dict) -> Tuple[List[str], Dict]
messages = []
result = copy.deepcopy(to_deps)
for (package_name, package_version) in from_deps.items():
if package_name not in to_deps:
result[package_name] = package_version
messages.append('Inserting new package {package_name} at version {package_version}'.format(
package_name=package_name, package_version=package_version)
)
elif to_deps[package_name] != package_version:
result[package_name] = package_version
messages.append('Changing {package_name} from version {other_package_version} to {package_version}'.format(
package_version=package_version, package_name=package_name,
other_package_version=to_deps[package_name])
)
return messages, result
| #! /usr/bin/env python
"""
Load and save elm-package.json safely.
"""
# from typing import Dict, Tuple, IO
import copy
from collections import OrderedDict
import json
def load(fileobj):
# type: (IO[str]) -> Dict
return json.load(fileobj, object_pairs_hook=OrderedDict)
def dump(package, fileobj):
# type: (Dict, IO[str]) -> None
to_save = copy.deepcopy(package)
to_save['dependencies'] = sorted_deps(to_save['dependencies'])
json.dump(to_save, fileobj, sort_keys=False, indent=4, separators=(',', ': '))
def sorted_deps(deps):
# type: (Dict) -> Dict
return OrderedDict(sorted(deps.items()))
def sync_deps(from_deps, to_deps):
# type: (Dict, Dict) -> Tuple[List[str], Dict]
messages = []
result = copy.deepcopy(to_deps)
for (package_name, package_version) in from_deps.items():
if package_name not in to_deps:
result[package_name] = package_version
messages.append('Inserting new package {package_name} at version {package_version}'.format(
package_name=package_name, package_version=package_version)
)
elif to_deps[package_name] != package_version:
result[package_name] = package_version
messages.append('Changing {package_name} from version {package_version} to {other_package_version}'.format(
package_version=package_version, package_name=package_name,
other_package_version=to_deps[package_name])
)
return messages, result
| bsd-3-clause | Python |
552a9e958443ffdff4b28e6e432c09e7d011df6a | Update tesselate_shapes_frame docstring | wheeler-microfluidics/svg_model | svg_model/tesselate.py | svg_model/tesselate.py | # coding: utf-8
import types
import pandas as pd
from .seidel import Triangulator
def tesselate_shapes_frame(df_shapes, shape_i_columns):
'''
Tesselate each shape path into one or more triangles.
Parameters
----------
df_shapes : pandas.DataFrame
Table containing vertices of shapes, one row per vertex, with the *at
least* the following columns:
- ``x``: The x-coordinate of the vertex.
- ``y``: The y-coordinate of the vertex.
shape_i_columns : str or list
Column(s) forming key to differentiate rows/vertices for each distinct
shape.
Returns
-------
pandas.DataFrame
Table where each row corresponds to a triangle vertex, with the following
columns:
- ``shape_i_columns[]``: The shape path index column(s).
- ``triangle_i``: The integer triangle index within each electrode path.
- ``vertex_i``: The integer vertex index within each triangle.
'''
frames = []
if isinstance(shape_i_columns, types.StringType):
shape_i_columns = [shape_i_columns]
for shape_i, df_path in df_shapes.groupby(shape_i_columns):
points_i = df_path[['x', 'y']].values
if (points_i[0] == points_i[-1]).all():
# XXX End point is the same as the start point (do not include it).
points_i = points_i[:-1]
triangulator = Triangulator(points_i)
if not isinstance(shape_i, (types.ListType, types.TupleType)):
shape_i = [shape_i]
for i, triangle_i in enumerate(triangulator.triangles()):
triangle_points_i = [shape_i + [i] + [j, x, y]
for j, (x, y) in enumerate(triangle_i)]
frames.extend(triangle_points_i)
frames = None if not frames else frames
return pd.DataFrame(frames, columns=shape_i_columns +
['triangle_i', 'vertex_i', 'x', 'y'])
| # coding: utf-8
import types
import pandas as pd
from .seidel import Triangulator
def tesselate_shapes_frame(df_shapes, shape_i_columns):
'''
Tesselate each shape path into one or more triangles.
Return `pandas.DataFrame` with columns storing the following fields
for each row (where each row corresponds to a triangle vertex):
- `shape_i_columns`: The shape path index column(s).
- `triangle_i`: The integer triangle index within each electrode path.
- `vertex_i`: The integer vertex index within each triangle.
'''
frames = []
if isinstance(shape_i_columns, types.StringType):
shape_i_columns = [shape_i_columns]
for shape_i, df_path in df_shapes.groupby(shape_i_columns):
points_i = df_path[['x', 'y']].values
if (points_i[0] == points_i[-1]).all():
# XXX End point is the same as the start point (do not include it).
points_i = points_i[:-1]
triangulator = Triangulator(points_i)
if not isinstance(shape_i, (types.ListType, types.TupleType)):
shape_i = [shape_i]
for i, triangle_i in enumerate(triangulator.triangles()):
triangle_points_i = [shape_i + [i] + [j, x, y]
for j, (x, y) in enumerate(triangle_i)]
frames.extend(triangle_points_i)
frames = None if not frames else frames
return pd.DataFrame(frames, columns=shape_i_columns +
['triangle_i', 'vertex_i', 'x', 'y'])
| lgpl-2.1 | Python |
dfee7e1c89df879f187921752485153fd6214445 | Fix typo | ipython/ipython,ipython/ipython | IPython/extensions/cythonmagic.py | IPython/extensions/cythonmagic.py | # -*- coding: utf-8 -*-
"""
The cython magic has been integrated into Cython itself,
which is now released in version 0.21.
cf github `Cython` organisation, `Cython` repo, under the
file `Cython/Build/IpythonMagic.py`
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import print_function
import IPython.utils.version as version
try:
import Cython
except:
Cython = None
try:
from Cython.Build.IpythonMagic import CythonMagics
except :
pass
## still load the magic in IPython 3.x, remove completely in future versions.
def load_ipython_extension(ip):
"""Load the extension in IPython."""
print("""The Cython magic has been moved to the Cython package, hence """)
print("""`%load_ext cythonmagic` is deprecated; please use `%load_ext Cython` instead.""")
if Cython is None or not version.check_version(Cython.__version__, "0.21"):
print("You need Cython version >=0.21 to use the Cython magic")
return
print("""\nThough, because I am nice, I'll still try to load it for you this time.""")
Cython.load_ipython_extension(ip)
| # -*- coding: utf-8 -*-
"""
The cython magic has been integrated into Cython itself,
which is now released in version 0.21.
cf github `Cython` organisation, `Cython` repo, under the
file `Cython/Build/IpythonMagic.py`
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import print_function
import IPython.utils.version as version
try:
import Cython
except:
Cython = None
try:
from Cython.Build.IpythonMagic import CythonMagics
except :
pass
## still load the magic in IPython 3.x, remove completely in future versions.
def load_ipython_extension(ip):
"""Load the extension in IPython."""
print("""The Cython magic has been move to the Cython package, hence """)
print("""`%load_ext cythonmagic` is deprecated; Please use `%load_ext Cython` instead.""")
if Cython is None or not version.check_version(Cython.__version__, "0.21"):
print("You need Cython version >=0.21 to use the Cython magic")
return
print("""\nThough, because I am nice, I'll still try to load it for you this time.""")
Cython.load_ipython_extension(ip)
| bsd-3-clause | Python |
9f56f877705bdc0171c3afddadc6d58fb867cefc | Fix PEP 8 issue. | thaim/ansible,thaim/ansible | test/units/modules/system/test_systemd.py | test/units/modules/system/test_systemd.py | import os
import tempfile
from ansible.compat.tests import unittest
from ansible.modules.system.systemd import parse_systemctl_show
class ParseSystemctlShowTestCase(unittest.TestCase):
def test_simple(self):
lines = [
'Type=simple',
'Restart=no',
'Requires=system.slice sysinit.target',
'Description=Blah blah blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Restart': 'no',
'Requires': 'system.slice sysinit.target',
'Description': 'Blah blah blah',
})
def test_multiline_exec(self):
# This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
lines = [
'Type=simple',
'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description=blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\n'
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description': 'blah',
})
def test_single_line_with_brace(self):
lines = [
'Type=simple',
'Description={ this is confusing',
'Restart=no',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Description': '{ this is confusing',
'Restart': 'no',
})
| import os
import tempfile
from ansible.compat.tests import unittest
from ansible.modules.system.systemd import parse_systemctl_show
class ParseSystemctlShowTestCase(unittest.TestCase):
def test_simple(self):
lines = [
'Type=simple',
'Restart=no',
'Requires=system.slice sysinit.target',
'Description=Blah blah blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Restart': 'no',
'Requires': 'system.slice sysinit.target',
'Description': 'Blah blah blah',
})
def test_multiline_exec(self):
# This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
lines = [
'Type=simple',
'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description=blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\nbar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description': 'blah',
})
def test_single_line_with_brace(self):
lines = [
'Type=simple',
'Description={ this is confusing',
'Restart=no',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Description': '{ this is confusing',
'Restart': 'no',
})
| mit | Python |
52c6efa0a84334522cdd76e1e85ffe6bf601ea02 | Annotate commands/export_single_user.py. | dhcrzf/zulip,eeshangarg/zulip,jphilipsen05/zulip,Galexrt/zulip,tommyip/zulip,calvinleenyc/zulip,jainayush975/zulip,synicalsyntax/zulip,amanharitsh123/zulip,kou/zulip,SmartPeople/zulip,susansls/zulip,AZtheAsian/zulip,jackrzhang/zulip,jomolinare/kobocat,GeoODK/onadata,murrown/cyder,zeeman/cyder,OSU-Net/cyder,akeym/cyder,drkitty/cyder,piqoni/onadata,spatialdev/onadata,smn/onadata,hnjamba/onaclone,sounay/flaminggo-test,qlands/onadata,kobotoolbox/kobocat,awemulya/fieldsight-kobocat,mainakibui/kobocat,dhcrzf/zulip,eeshangarg/zulip,jphilipsen05/zulip,Galexrt/zulip,tommyip/zulip,calvinleenyc/zulip,jainayush975/zulip,synicalsyntax/zulip,amanharitsh123/zulip,kou/zulip,SmartPeople/zulip,susansls/zulip,AZtheAsian/zulip,jackrzhang/zulip,krtkmj/zulip,vikas-parashar/zulip,Jianchun1/zulip,Juanvulcano/zulip,Diptanshu8/zulip,jrowan/zulip,reyha/zulip,j831/zulip,cosmicAsymmetry/zulip,samatdav/zulip,aakash-cr7/zulip,rishig/zulip,hackerkid/zulip,brainwane/zulip,brockwhittaker/zulip,shubhamdhama/zulip,umkay/zulip,zulip/zulip,niftynei/zulip,ahmadassaf/zulip,arpith/zulip,mahim97/zulip,showell/zulip,dawran6/zulip,vaidap/zulip,joyhchen/zulip,souravbadami/zulip,sonali0901/zulip,ryanbackman/zulip,paxapy/zulip,KingxBanana/zulip,timabbott/zulip,sharmaeklavya2/zulip,vabs22/zulip,sup95/zulip,isht3/zulip,zacps/zulip,blaze225/zulip,verma-varsha/zulip,andersk/zulip,rht/zulip,peguin40/zulip,mohsenSy/zulip,dattatreya303/zulip,grave-w-grave/zulip,TigorC/zulip,JPJPJPOPOP/zulip,amyliu345/zulip,PhilSk/zulip,christi3k/zulip,Galexrt/zulip,kou/zulip,mahim97/zulip,zulip/zulip,andersk/zulip,isht3/zulip,tommyip/zulip,punchagan/zulip,ahmadassaf/zulip,ahmadassaf/zulip,isht3/zulip,verma-varsha/zulip,krtkmj/zulip,joyhchen/zulip,dattatreya303/zulip,hackerkid/zulip,hackerkid/zulip,KingxBanana/zulip,dattatreya303/zulip,hackerkid/zulip,aakash-cr7/zulip,kou/zulip,tommyip/zulip,shubhamdhama/zulip,grave-w-grave/zulip,sup95/zulip,punchagan/zulip,timabbott/zulip,brockwhittaker/zulip,sup95/zulip,brainwane/zulip,brainwane/zulip,eeshangarg/zulip,timabbott/zulip,sup95/zulip,amyliu345/zulip,shubhamdhama/zulip,TigorC/zulip,Diptanshu8/zulip,dawran6/zulip,samatdav/zulip,Juanvulcano/zulip,kou/zulip,souravbadami/zulip,mohsenSy/zulip,amyliu345/zulip,calvinleenyc/zulip,brainwane/zulip,tommyip/zulip,rishig/zulip,shubhamdhama/zulip,grave-w-grave/zulip,isht3/zulip,sonali0901/zulip,Jianchun1/zulip,mahim97/zulip,blaze225/zulip,blaze225/zulip,eeshangarg/zulip,verma-varsha/zulip,jackrzhang/zulip,peguin40/zulip,sup95/zulip,JPJPJPOPOP/zulip,Diptanshu8/zulip,niftynei/zulip,TigorC/zulip,showell/zulip,niftynei/zulip,verma-varsha/zulip,j831/zulip,PhilSk/zulip,TigorC/zulip,paxapy/zulip,calvinleenyc/zulip,zulip/zulip,sharmaeklavya2/zulip,reyha/zulip,vabs22/zulip,vabs22/zulip,Diptanshu8/zulip,grave-w-grave/zulip,PhilSk/zulip,eeshangarg/zulip,cosmicAsymmetry/zulip,rht/zulip,niftynei/zulip,niftynei/zulip,krtkmj/zulip,amyliu345/zulip,samatdav/zulip,brockwhittaker/zulip,TigorC/zulip,sharmaeklavya2/zulip,joyhchen/zulip,amyliu345/zulip,JPJPJPOPOP/zulip,Diptanshu8/zulip,isht3/zulip,vabs22/zulip,dattatreya303/zulip,paxapy/zulip,timabbott/zulip,arpith/zulip,rht/zulip,christi3k/zulip,ryanbackman/zulip,zulip/zulip,mahim97/zulip,brockwhittaker/zulip,mohsenSy/zulip,susansls/zulip,grave-w-grave/zulip,ryanbackman/zulip,sharmaeklavya2/zulip,andersk/zulip,dattatreya303/zulip,dattatreya303/zulip,brockwhittaker/zulip,punchagan/zulip,timabbott/zulip,ahmadassaf/zulip,hackerkid/zulip,paxapy/zulip,zacps/zulip,Juanvulcano/zulip,jackrzhang/zulip,calvinleenyc/zulip,umkay/zulip,KingxBanana/zulip,andersk/zulip,vaidap/zulip,showell/zulip,grave-w-grave/zulip,rishig/zulip,Diptanshu8/zulip | zerver/management/commands/export_single_user.py | zerver/management/commands/export_single_user.py
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
import os
import shutil
import subprocess
import tempfile
import ujson
from zerver.lib.export import do_export_user
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Exports message data from a Zulip user
This command exports the message history for a single Zulip user.
Note that this only exports the user's message history and
realm-public metadata needed to understand it; it does nothing
with (for example) any bots owned by the user."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('email', metavar='<email>', type=str,
help="email of user to export")
parser.add_argument('--output',
dest='output_dir',
action="store",
default=None,
help='Directory to write exported data to.')
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
try:
user_profile = get_user_profile_by_email(options["email"])
except UserProfile.DoesNotExist:
raise CommandError("No such user.")
output_dir = options["output_dir"]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="/tmp/zulip-export-")
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Exporting user %s" % (user_profile.email,))
do_export_user(user_profile, output_dir)
print("Finished exporting to %s; tarring" % (output_dir,))
tarball_path = output_dir.rstrip('/') + '.tar.gz'
subprocess.check_call(["tar", "--strip-components=1", "-czf", tarball_path, output_dir])
print("Tarball written to %s" % (tarball_path,))
| from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
import os
import shutil
import subprocess
import tempfile
import ujson
from zerver.lib.export import do_export_user
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Exports message data from a Zulip user
This command exports the message history for a single Zulip user.
Note that this only exports the user's message history and
realm-public metadata needed to understand it; it does nothing
with (for example) any bots owned by the user."""
def add_arguments(self, parser):
parser.add_argument('email', metavar='<email>', type=str,
help="email of user to export")
parser.add_argument('--output',
dest='output_dir',
action="store",
default=None,
help='Directory to write exported data to.')
def handle(self, *args, **options):
try:
user_profile = get_user_profile_by_email(options["email"])
except UserProfile.DoesNotExist:
raise CommandError("No such user.")
output_dir = options["output_dir"]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="/tmp/zulip-export-")
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Exporting user %s" % (user_profile.email,))
do_export_user(user_profile, output_dir)
print("Finished exporting to %s; tarring" % (output_dir,))
tarball_path = output_dir.rstrip('/') + '.tar.gz'
subprocess.check_call(["tar", "--strip-components=1", "-czf", tarball_path, output_dir])
print("Tarball written to %s" % (tarball_path,))
| apache-2.0 | Python |
2a2309bb4f3a8ae231106123855959d44a0e7551 | Fix linter | CartoDB/cartoframes,CartoDB/cartoframes | cartoframes/viz/helpers/size_continuous_layer.py | cartoframes/viz/helpers/size_continuous_layer.py | from __future__ import absolute_import
from ..layer import Layer
def size_continuous_layer(source, value, title='', size=None, color=None):
return Layer(
source,
style={
'point': {
'width': 'ramp(linear(sqrt(${0}), sqrt(globalMin(${0})), sqrt(globalMax(${0}))), {1})'.format(
value, size or [2, 50]),
'color': 'opacity({0}, 0.8)'.format(color or '#F46D43')
},
'line': {
'width': 'ramp(linear(${0}), {1})'.format(value, size or [1, 10]),
'color': 'opacity({0}, 0.8)'.format(color or '#4CC8A3')
}
},
popup={
'hover': {
'title': title or value,
'value': '$' + value
}
},
legend={
'type': {
'point': 'size-continuous-point',
'line': 'size-continuous-line',
'polygon': 'size-continuous-polygon'
},
'title': title or value,
'description': ''
}
)
| from __future__ import absolute_import
from ..layer import Layer
def size_continuous_layer(source, value, title='', size=None, color=None):
return Layer(
source,
style={
'point': {
'width': 'ramp(linear(sqrt(${0}), sqrt(globalMin(${0})), sqrt(globalMax(${0}))), {1})'.format(value, size or [2, 50]),
'color': 'opacity({0}, 0.8)'.format(color or '#F46D43')
},
'line': {
'width': 'ramp(linear(${0}), {1})'.format(value, size or [1, 10]),
'color': 'opacity({0}, 0.8)'.format(color or '#4CC8A3')
}
},
popup={
'hover': {
'title': title or value,
'value': '$' + value
}
},
legend={
'type': {
'point': 'size-continuous-point',
'line': 'size-continuous-line',
'polygon': 'size-continuous-polygon'
},
'title': title or value,
'description': ''
}
)
| bsd-3-clause | Python |
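The wrapped `width` template in the fix above is ordinary `str.format` string-building; expanding it for a hypothetical column shows the expression that was tripping the line-length linter (pure stdlib, no CARTO dependencies):

value, size = 'pop', [2, 50]  # hypothetical column name; default point size range
expr = 'ramp(linear(sqrt(${0}), sqrt(globalMin(${0})), sqrt(globalMax(${0}))), {1})'.format(
    value, size)
print(expr)
# ramp(linear(sqrt($pop), sqrt(globalMin($pop)), sqrt(globalMax($pop))), [2, 50])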
c426ae514227adaf9dd86f6ada6ce05bc76298c2 | Make portal_config fetch config from a URL | EndPointCorp/appctl,EndPointCorp/appctl | catkin/src/portal_config/scripts/serve_config.py | catkin/src/portal_config/scripts/serve_config.py | #!/usr/bin/env python
import rospy
import urllib2
from portal_config.srv import *
# XXX TODO: return an error if the config file isn't valid JSON
class ConfigRequestHandler():
def __init__(self, url):
self.url = url
def get_config(self):
response = urllib2.urlopen(self.url)
return response.read()
def handle_request(self, request):
config = self.get_config()
return PortalConfigResponse(config)
def main():
rospy.init_node('portal_config')
url = rospy.get_param('~url', 'http://lg-head/portal/config.json')
handler = ConfigRequestHandler(url)
s = rospy.Service(
'/portal_config/query',
PortalConfig,
handler.handle_request
)
rospy.spin()
if __name__ == '__main__':
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 smartindent
| #!/usr/bin/env python
import rospy
from portal_config.srv import *
class ConfigRequestHandler():
def __init__(self, url):
self.url = url
def get_config(self):
return '{"foo": "bar"}'
def handle_request(self, request):
config = self.get_config()
return PortalConfigResponse(config)
def main():
rospy.init_node('portal_config')
url = rospy.get_param('~url', 'http://lg-head/portal/config.json')
handler = ConfigRequestHandler(url)
s = rospy.Service(
'/portal_config/query',
PortalConfig,
handler.handle_request
)
rospy.spin()
if __name__ == '__main__':
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| apache-2.0 | Python |
cb96065fcf1f31dfbecfbb064c9414ffbc69217f | Remove all relative imports. We have always been at war with relative imports. | brad/django-localflavor-gb | forms.py | forms.py | """
GB-specific Form helpers
"""
from __future__ import absolute_import
import re
from django.contrib.localflavor.gb.gb_regions import GB_NATIONS_CHOICES, GB_REGION_CHOICES
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class GBPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _(u'Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(GBPostcodeField, self).clean(value)
if value == u'':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.error_messages['invalid'])
return postcode
class GBCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
super(GBCountySelect, self).__init__(attrs, choices=GB_REGION_CHOICES)
class GBNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
super(GBNationSelect, self).__init__(attrs, choices=GB_NATIONS_CHOICES)
| """
GB-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class GBPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _(u'Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(GBPostcodeField, self).clean(value)
if value == u'':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.error_messages['invalid'])
return postcode
class GBCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
from gb_regions import GB_REGION_CHOICES
super(GBCountySelect, self).__init__(attrs, choices=GB_REGION_CHOICES)
class GBNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
from gb_regions import GB_NATIONS_CHOICES
super(GBNationSelect, self).__init__(attrs, choices=GB_NATIONS_CHOICES)
| bsd-3-clause | Python |
c78c6f7e9cc305b96eb35a5a0c8f7353db5a3ed2 | Update _share.py | tago-io/tago-python | tago/account/_share.py | tago/account/_share.py | import requests # Used to make HTTP requests
import os
import json
API_TAGO = os.environ.get('TAGO_SERVER') or 'https://api.tago.io'
def invite(type, ref_id, data, default_options):
data = data if data else {}
if ref_id is None or ref_id == '':
raise ValueError('ref_id must be set')
elif data['email'] is None or data['email'] == '':
raise ValueError('email must be set in data')
return requests.post('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO,type=type,ref_id=ref_id), headers=default_options, data=json.dumps(data)).json()
def edit(type, share_id, data, default_options):
data = data if data else {}
if share_id is None or share_id == '':
raise ValueError('share_id must be set')
return requests.put('{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO,share_id=share_id), headers=default_options, data=json.dumps(data)).json()
def list(type, ref_id, default_options):
if ref_id is None or ref_id == '':
raise ValueError('ref_id must be set')
return requests.get('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO,type=type, ref_id=ref_id), headers=default_options).json()
def remove(type, share_id, default_options):
if share_id is None or share_id == '':
raise ValueError('share_id must be set')
return requests.delete('{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO,share_id=share_id), headers=default_options).json()
| import requests # Used to make HTTP requests
import os
import json
API_TAGO = os.environ.get('TAGO_SERVER') or 'https://api.tago.io'
def invite(type, ref_id, data, default_options):
data = data if data else {}
if ref_id is None or ref_id == '':
raise ValueError('ref_id must be set')
elif data['email'] is None or data['email'] == '':
raise ValueError('email must be set in data')
return requests.post('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO,type=type,ref_id=ref_id), headers=default_options, data=json.dumps(data)).json()
def edit(type, share_id, data, default_options):
data = data if data else {}
if share_id is None or share_id == '':
raise ValueError('share_id must be set')
return requests.put('{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO,share_id=share_id), headers=default_options, data=json.dumps(data)).json()
def list(type, ref_id, default_options):
if ref_id is None or ref_id == '':
raise ValueError('ref_id must be set')
return requests.get('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO,type=type, ref_id=ref_id), headers=default_options).json()
def remove(type, share_id, default_options):
if share_id is None or share_id == '':
raise ValueError('share_id must be set')
return requests.delete('{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO,share_id=share_id), headers=default_options).json()
# Not sure what exports do...
| mit | Python |
e0bebba359bca6498c212e1c1fae3d95d2a046b4 | Fix python scripts src/chrome_frame/tools/test/page_cycler/cf_cycler.py | adobe/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,ropik/chromium,yitian134/chromium,yitian134/chromium,ropik/chromium,gavinp/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,ropik/chromium,adobe/chromium,ropik/chromium,gavinp/chromium,gavinp/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium | chrome_frame/tools/test/page_cycler/cf_cycler.py | chrome_frame/tools/test/page_cycler/cf_cycler.py | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automates IE to visit a list of web sites while running CF in full tab mode.
The page cycler automates IE and navigates it to a series of URLs. It is
designed to be run with Chrome Frame configured to load every URL inside
CF full tab mode.
TODO(robertshield): Make use of the python unittest module as per
review comments.
"""
import optparse
import sys
import time
import win32com.client
import win32gui
def LoadSiteList(path):
"""Loads a list of URLs from |path|.
Expects the URLs to be separated by newlines, with no leading or trailing
whitespace.
Args:
path: The path to a file containing a list of new-line separated URLs.
Returns:
A list of strings, each one a URL.
"""
f = open(path)
urls = f.readlines()
f.close()
return urls
def LaunchIE():
"""Starts up IE, makes it visible and returns the automation object.
Returns:
The IE automation object.
"""
ie = win32com.client.Dispatch("InternetExplorer.Application")
ie.visible = 1
win32gui.SetForegroundWindow(ie.HWND)
return ie
def RunTest(url, ie):
"""Loads |url| into the InternetExplorer.Application instance in |ie|.
Waits for the Document object to be created and then waits for
the document ready state to reach READYSTATE_COMPLETE.
Args:
url: A string containing the url to navigate to.
ie: The IE automation object to navigate.
"""
print "Navigating to " + url
ie.Navigate(url)
timer = 0
READYSTATE_COMPLETE = 4
last_ready_state = -1
for retry in xrange(60):
try:
# TODO(robertshield): Become an event sink instead of polling for
# changes to the ready state.
last_ready_state = ie.Document.ReadyState
if last_ready_state == READYSTATE_COMPLETE:
break
except:
# TODO(robertshield): Find the precise exception related to ie.Document
# being not accessible and handle it here.
print "Unexpected error:", sys.exc_info()[0]
raise
time.sleep(1)
if last_ready_state != READYSTATE_COMPLETE:
print "Timeout waiting for " + url
def main():
parser = optparse.OptionParser()
parser.add_option('-u', '--url_list', default='urllist',
help='The path to the list of URLs')
(opts, args) = parser.parse_args()
urls = LoadSiteList(opts.url_list)
ie = LaunchIE()
for url in urls:
RunTest(url, ie)
time.sleep(1)
ie.visible = 0
ie.Quit()
if __name__ == '__main__':
main()
| # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automates IE to visit a list of web sites while running CF in full tab mode.
The page cycler automates IE and navigates it to a series of URLs. It is
designed to be run with Chrome Frame configured to load every URL inside
CF full tab mode.
TODO(robertshield): Make use of the python unittest module as per
review comments.
"""
import optparse
import sys
import time
import win32com.client
import win32gui
def LoadSiteList(path):
"""Loads a list of URLs from |path|.
Expects the URLs to be separated by newlines, with no leading or trailing
whitespace.
Args:
path: The path to a file containing a list of new-line separated URLs.
Returns:
A list of strings, each one a URL.
"""
f = open(path)
urls = f.readlines()
f.close()
return urls
def LaunchIE():
"""Starts up IE, makes it visible and returns the automation object.
Returns:
The IE automation object.
"""
ie = win32com.client.Dispatch("InternetExplorer.Application")
ie.visible = 1
win32gui.SetForegroundWindow(ie.HWND)
return ie
def RunTest(url, ie):
"""Loads |url| into the InternetExplorer.Application instance in |ie|.
Waits for the Document object to be created and then waits for
the document ready state to reach READYSTATE_COMPLETE.
Args:
url: A string containing the url to navigate to.
ie: The IE automation object to navigate.
"""
print "Navigating to " + url
ie.Navigate(url)
timer = 0
READYSTATE_COMPLETE = 4
last_ready_state = -1
for retry in xrange(60):
try:
# TODO(robertshield): Become an event sink instead of polling for
# changes to the ready state.
last_ready_state = ie.Document.ReadyState
if last_ready_state == READYSTATE_COMPLETE:
break
except:
# TODO(robertshield): Find the precise exception related to ie.Document
# being not accessible and handle it here.
print "Unexpected error:", sys.exc_info()[0]
raise
time.sleep(1)
if last_ready_state != READYSTATE_COMPLETE:
print "Timeout waiting for " + url
def main():
parser = optparse.OptionParser()
parser.add_option('-u', '--url_list', default='urllist',
help='The path to the list of URLs')
(opts, args) = parser.parse_args()
urls = LoadSiteList(opts.url_list)
ie = LaunchIE()
for url in urls:
RunTest(url, ie)
time.sleep(1)
ie.visible = 0
ie.Quit()
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
3575415592fbd215de02e139d95ad5780bccadd2 | Add greeting method that returns given parameter | ttn6ew/cs3240-labdemo | hello.py | hello.py |
def greeting(msg):
print(msg)
if __name__ == "__main__":
greeting('hello')
| print("Hello")
| mit | Python |
f2e8f2ef957a6053345f72889c1048a871988bc0 | Add octario library path to the plugin helper | redhat-openstack/octario,redhat-openstack/octario | ir-plugin/osp_version_name.py | ir-plugin/osp_version_name.py | #!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
sys.path.append('../octario')
from octario.lib.component import Component
import logging
LOG = logging.getLogger("OctarioLogger")
LOG.setLevel(logging.ERROR)
def main(component_path):
cmpnt = Component(component_path)
release = cmpnt.get_rhos_release()
name = cmpnt.get_name()
if release is not None and name is not None:
json_out = {
'plugin': 'iroctario',
'name': name,
'version': release,
}
print(json.dumps(json_out))
if __name__ == "__main__":
"""Helper script used by InfraRed-Octario plugin to discover component
name and OSP release number.
"""
if len(sys.argv) != 2:
LOG.error("Improper number of arguments, passed %d instead of 1" %
int(len(sys.argv)-1))
sys.exit(1)
main(sys.argv[1])
| #!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
from octario.lib.component import Component
import logging
LOG = logging.getLogger("OctarioLogger")
LOG.setLevel(logging.ERROR)
def main(component_path):
cmpnt = Component(component_path)
release = cmpnt.get_rhos_release()
name = cmpnt.get_name()
if release is not None and name is not None:
json_out = {
'plugin': 'iroctario',
'name': name,
'version': release,
}
print(json.dumps(json_out))
if __name__ == "__main__":
"""Helper script used by InfraRed-Octario plugin to discover component
name and OSP release number.
"""
if len(sys.argv) != 2:
LOG.error("Improper number of arguments, passed %d instead of 1" %
int(len(sys.argv)-1))
sys.exit(1)
main(sys.argv[1])
| apache-2.0 | Python |
5a5418a9e5f817c3c3f426f57aeefe800c45cc96 | Implement tuples. | iksteen/jaspyx,ztane/jaspyx | jaspyx/visitor/types.py | jaspyx/visitor/types.py | import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):
def visit_Num(self, node):
self.output(json.dumps(node.n))
def visit_Str(self, node):
self.output(json.dumps(node.s))
def visit_List(self, node):
self.group(node.elts, prefix='[', infix=', ', suffix=']')
visit_Tuple = visit_List
| import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):
def visit_Num(self, node):
self.output(json.dumps(node.n))
def visit_Str(self, node):
self.output(json.dumps(node.s))
def visit_List(self, node):
self.group(node.elts, prefix='[', infix=', ', suffix=']')
| mit | Python |
5cda63163acec59a43c3975f1320b7268dcf337b | Add parameter for log level | opesci/devito,opesci/devito | devito/parameters.py | devito/parameters.py | """The parameters dictionary contains global parameter settings."""
__all__ = ['Parameters', 'parameters']
# Be EXTREMELY careful when writing to a Parameters dictionary
# Read here for reference: http://wiki.c2.com/?GlobalVariablesAreBad
# https://softwareengineering.stackexchange.com/questions/148108/why-is-global-state-so-evil
# If any issues related to global state arise, the following class should
# be made immutable. It shall only be written to at application startup
# and never modified.
class Parameters(dict):
""" A dictionary-like class to hold global configuration parameters for devito
On top of a normal dict, this provides the option to provide callback functions
so that any interested module can be informed when the configuration changes.
"""
def __init__(self, name=None, **kwargs):
self._name = name
self.update_functions = None
for key, value in iteritems(kwargs):
self[key] = value
def __setitem__(self, key, value):
super(Parameters, self).__setitem__(key, value)
# If a Parameters dictionary is being added as a child,
# ask it to tell us when it is updated
if isinstance(value, Parameters):
child_update = lambda x: self._updated(*x)
value.update_functions.push(child_update)
# Tell everyone we've been updated
self._updated(key, value)
def _updated(self, key, value):
""" Call any provided update functions so everyone knows we've been updated
"""
for f in self.update_functions:
f(key, value)
parameters = Parameters()
parameters["log_level"] = 'info'
| """The parameters dictionary contains global parameter settings."""
__all__ = ['Parameters', 'parameters']
# Be EXTREMELY careful when writing to a Parameters dictionary
# Read here for reference: http://wiki.c2.com/?GlobalVariablesAreBad
# If any issues related to global state arise, the following class should
# be made immutable. It shall only be written to at application startup
# and never modified.
class Parameters(dict):
""" A dictionary-like class to hold global configuration parameters for devito
On top of a normal dict, this provides the option to provide callback functions
so that any interested module can be informed when the configuration changes.
"""
def __init__(self, name=None, **kwargs):
self._name = name
self.update_functions = None
for key, value in iteritems(kwargs):
self[key] = value
def __setitem__(self, key, value):
super(Parameters, self).__setitem__(key, value)
# If a Parameters dictionary is being added as a child,
# ask it to tell us when it is updated
if isinstance(value, Parameters):
child_update = lambda x: self._updated(*x)
value.update_functions.push(child_update)
# Tell everyone we've been updated
self._updated(key, value)
def _updated(self, key, value):
""" Call any provided update functions so everyone knows we've been updated
"""
for f in self.update_functions:
f(key, value)
| mit | Python |
8555fc56b72dc86f266055da4b903cda7986654b | Update utils.py to prevent downcasting | zafarali/emdp | emdp/utils.py | emdp/utils.py | import numpy as np
# 1D utilities.
def convert_int_rep_to_onehot(state, vector_size):
s = np.zeros(vector_size)
s[state] = 1
return s
def convert_onehot_to_int(state):
if type(state) is not np.ndarray:
state = np.array(state)
return state.argmax().item()
#
# def xy_to_flatten_state(state, size):
# """Flatten state (x,y) into a one hot vector of size"""
# idx = self.size * state[0] + state[1]
# one_hot = np.zeros(self.size * self.size)
# one_hot[idx] = 1
# return one_hot
#
# def unflatten_state(self, onehot):
# onehot = onehot.reshape(self.size, self.size)
# x = onehot.argmax(0).max()
# y = onehot.argmax(1).max()
# return (x, y)
# def step(self, action):
# """action must be the index of an action"""
# # get the vector representing the next state probabilities:
# current_state_idx = np.argmax(self.current_state)
# next_state_probs = self.P[current_state_idx, action]
# # sample the next state
# sampled_next_state = np.random.choice(np.arange(self.P.shape[0]), p=next_state_probs)
# # observe the reward
# reward = self.r[current_state_idx, action]
# self.current_state = self.convert_int_rep_to_onehot(sampled_next_state)
# # if reward > 0 :print(reward, current_state_idx, action)
# return self.current_state, reward, sampled_next_state == self.P.shape[0] - 1, {}
| import numpy as np
# 1D utilities.
def convert_int_rep_to_onehot(state, vector_size):
s = np.zeros(vector_size)
s[state] = 1
return s
def convert_onehot_to_int(state):
if type(state) is not np.ndarray:
state = np.array(state)
return state.argmax().astype(np.int8)
#
# def xy_to_flatten_state(state, size):
# """Flatten state (x,y) into a one hot vector of size"""
# idx = self.size * state[0] + state[1]
# one_hot = np.zeros(self.size * self.size)
# one_hot[idx] = 1
# return one_hot
#
# def unflatten_state(self, onehot):
# onehot = onehot.reshape(self.size, self.size)
# x = onehot.argmax(0).max()
# y = onehot.argmax(1).max()
# return (x, y)
# def step(self, action):
# """action must be the index of an action"""
# # get the vector representing the next state probabilities:
# current_state_idx = np.argmax(self.current_state)
# next_state_probs = self.P[current_state_idx, action]
# # sample the next state
# sampled_next_state = np.random.choice(np.arange(self.P.shape[0]), p=next_state_probs)
# # observe the reward
# reward = self.r[current_state_idx, action]
# self.current_state = self.convert_int_rep_to_onehot(sampled_next_state)
# # if reward > 0 :print(reward, current_state_idx, action)
# return self.current_state, reward, sampled_next_state == self.P.shape[0] - 1, {}
| mit | Python |
76aa0d680f85298ca66de7bbcd0dbdc2342c9955 | Update Vikidia versions | happy5214/pywikibot-core,Darkdadaah/pywikibot-core,hasteur/g13bot_tools_new,jayvdb/pywikibot-core,hasteur/g13bot_tools_new,happy5214/pywikibot-core,Darkdadaah/pywikibot-core,wikimedia/pywikibot-core,wikimedia/pywikibot-core,npdoty/pywikibot,hasteur/g13bot_tools_new,jayvdb/pywikibot-core,magul/pywikibot-core,magul/pywikibot-core,npdoty/pywikibot,PersianWikipedia/pywikibot-core | pywikibot/families/vikidia_family.py | pywikibot/families/vikidia_family.py | # -*- coding: utf-8 -*-
"""Family module for Vikidia."""
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
class Family(family.SubdomainFamily):
"""Family class for Vikidia."""
name = 'vikidia'
domain = 'vikidia.org'
codes = ['ca', 'de', 'en', 'es', 'eu', 'fr', 'it', 'ru', 'scn']
# Sites we want to edit but not count as real languages
test_codes = ['central', 'test']
def protocol(self, code):
"""Return https as the protocol for this family."""
return "https"
def ignore_certificate_error(self, code):
"""Ignore certificate errors."""
return True # has self-signed certificate for a different domain.
| # -*- coding: utf-8 -*-
"""Family module for Vikidia."""
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
class Family(family.SubdomainFamily):
"""Family class for Vikidia."""
name = 'vikidia'
domain = 'vikidia.org'
codes = ['ca', 'en', 'es', 'eu', 'fr', 'it', 'ru', 'scn']
def protocol(self, code):
"""Return https as the protocol for this family."""
return "https"
def ignore_certificate_error(self, code):
"""Ignore certificate errors."""
return True # has self-signed certificate for a different domain.
| mit | Python |
5138db4353edf7414c79ca8e1e42c73b35313b15 | Remove various now unused interfaces. | faassen/morepath,morepath/morepath,taschini/morepath | morepath/interfaces.py | morepath/interfaces.py | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Interface(object):
__meta__ = ABCMeta
# class IConsumer(Interface):
# """A consumer consumes steps in a stack to find an object.
# """
# @abstractmethod
# def __call__(self, obj, stack, lookup):
# """Returns a boolean meaning that some stack has been consumed,
# an object and the rest of unconsumed stack
# """
class IRoot(Interface):
"""Mark this object as the root.
"""
class IApp(Interface):
"""An application."""
# XXX fill in details
class IConfigAction(Interface):
"""A configuration item.
"""
@abstractmethod
def discriminator(self):
"""Returns an immutable that uniquely identifies this config.
Used for configuration conflict detection.
"""
@abstractmethod
def prepare(self, obj):
"""Prepare action for configuration.
obj - the object being registered
"""
@abstractmethod
def perform(self, obj):
"""Register whatever is being configured.
obj - the object being registered
"""
class ConfigError(Exception):
"""Raised when configuration is bad
"""
class ResolveError(Exception):
"""Raised when path cannot be resolved
"""
class ModelError(ResolveError):
"""Raised when a model cannot be resolved
"""
class ResourceError(ResolveError):
"""Raised when a resource cannot be resolved
"""
class TrajectError(Exception):
"""Raised when path supplied to traject is not allowed.
"""
class LinkError(Exception):
"""Raised when a link cannot be made.
"""
| # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Interface(object):
__meta__ = ABCMeta
class IConsumer(Interface):
"""A consumer consumes steps in a stack to find an object.
"""
@abstractmethod
def __call__(self, obj, stack, lookup):
"""Returns a boolean meaning that some stack has been consumed,
an object and the rest of unconsumed stack
"""
class IResource(Interface):
pass
class IResponse(Interface):
pass
class IResponseFactory(Interface):
"""When called, a Response instance is returned.
"""
@abstractmethod
def __call__(self):
"""Returns a Response instance."""
class ITraject(Interface):
pass
class IInverse(Interface):
"""Marker interface to hook in inverse component in a traject."""
class IRoot(Interface):
"""Mark this object as the root.
"""
class ILookup(Interface):
"""Mark this model as an model that can change the lookup.
"""
class IApp(Interface):
"""An application."""
# XXX fill in details
class IModelBase(Interface):
"""Mark this object as a base of a model.
"""
class IPath(Interface):
"""Get the path for a model."""
class ILink(Interface):
"""Get the hyperlink for a model."""
class IConfigAction(Interface):
"""A configuration item.
"""
@abstractmethod
def discriminator(self):
"""Returns an immutable that uniquely identifies this config.
Used for configuration conflict detection.
"""
@abstractmethod
def prepare(self, obj):
"""Prepare action for configuration.
obj - the object being registered
"""
@abstractmethod
def perform(self, obj):
"""Register whatever is being configured.
obj - the object being registered
"""
class ConfigError(Exception):
"""Raised when configuration is bad
"""
class ResolveError(Exception):
"""Raised when path cannot be resolved
"""
class ModelError(ResolveError):
"""Raised when a model cannot be resolved
"""
class ResourceError(ResolveError):
"""Raised when a resource cannot be resolved
"""
class TrajectError(Exception):
"""Raised when path supplied to traject is not allowed.
"""
class LinkError(Exception):
"""Raised when a link cannot be made.
"""
| bsd-3-clause | Python |
a0791372d7943a785ae55ed31044d0316b53a2ac | Patch release | sbaechler/feincms-elephantblog,matthiask/feincms-elephantblog,michaelkuty/feincms-elephantblog,matthiask/feincms-elephantblog,matthiask/feincms-elephantblog,michaelkuty/feincms-elephantblog,sbaechler/feincms-elephantblog,feincms/feincms-elephantblog,michaelkuty/feincms-elephantblog,sbaechler/feincms-elephantblog,feincms/feincms-elephantblog | elephantblog/__init__.py | elephantblog/__init__.py | from __future__ import absolute_import, unicode_literals
VERSION = (1, 0, 1)
__version__ = '.'.join(map(str, VERSION))
| from __future__ import absolute_import, unicode_literals
VERSION = (1, 0, 0)
__version__ = '.'.join(map(str, VERSION))
| bsd-3-clause | Python |
34a6ccce1d93843d53efb5985ff5bbb7ea063e31 | add force_text a la Django | mitsuhiko/babel,python-babel/babel,mitsuhiko/babel,python-babel/babel,mitsuhiko/babel | babel/_compat.py | babel/_compat.py | import sys
import array
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
binary_type = bytes
string_types = (str,)
integer_types = (int, )
text_to_native = lambda s, enc: s
unichr = chr
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO, BytesIO
import pickle
izip = zip
imap = map
range_type = range
cmp = lambda a, b: (a > b) - (a < b)
array_tobytes = array.array.tobytes
else:
text_type = unicode
binary_type = str
string_types = (str, unicode)
integer_types = (int, long)
text_to_native = lambda s, enc: s.encode(enc)
unichr = unichr
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
from itertools import imap
from itertools import izip
range_type = xrange
cmp = cmp
array_tobytes = array.array.tostring
number_types = integer_types + (float,)
def force_text(s, encoding='utf-8', errors='strict'):
if isinstance(s, text_type):
return s
if isinstance(s, binary_type):
return s.decode(encoding, errors)
return text_type(s)
#
# Since Python 3.3, a fast decimal implementation is already included in the
# standard library. Otherwise use cdecimal when available
#
if sys.version_info[:2] >= (3, 3):
import decimal
else:
try:
import cdecimal as decimal
except ImportError:
import decimal
| import sys
import array
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
unichr = chr
text_to_native = lambda s, enc: s
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO, BytesIO
import pickle
izip = zip
imap = map
range_type = range
cmp = lambda a, b: (a > b) - (a < b)
array_tobytes = array.array.tobytes
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
text_to_native = lambda s, enc: s.encode(enc)
unichr = unichr
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
from itertools import imap
from itertools import izip
range_type = xrange
cmp = cmp
array_tobytes = array.array.tostring
number_types = integer_types + (float,)
#
# Since Python 3.3, a fast decimal implementation is already included in the
# standard library. Otherwise use cdecimal when available
#
if sys.version_info[:2] >= (3, 3):
import decimal
else:
try:
import cdecimal as decimal
except ImportError:
import decimal
| bsd-3-clause | Python |
b65b359402b2f38dad043b1b6d1840f0ef6d8e72 | Fix constants | prozorro-sale/openprocurement.auctions.dgf | openprocurement/auctions/dgf/constants.py | openprocurement/auctions/dgf/constants.py | from datetime import datetime, timedelta
from openprocurement.api.models import TZ, ORA_CODES
def read_json(name):
import os.path
from json import loads
curr_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(curr_dir, name)
with open(file_path) as lang_file:
data = lang_file.read()
return loads(data)
#document types
DOCUMENT_TYPE_OFFLINE = ['x_dgfAssetFamiliarization']
DOCUMENT_TYPE_URL_ONLY = ['virtualDataRoom']
#requiremnt periods
MINIMAL_EXPOSITION_PERIOD = timedelta(days=7)
MINIMAL_PERIOD_FROM_RECTIFICATION_END = timedelta(days=5)
VERIFY_AUCTION_PROTOCOL_TIME = timedelta(days=6)
AWARD_PAYMENT_TIME = timedelta(days=20)
CONTRACT_SIGNING_TIME = timedelta(days=20)
#time constants
DGF_ID_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
DGF_DECISION_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
CLASSIFICATION_PRECISELY_FROM = datetime(2017, 7, 19, tzinfo=TZ)
MINIMAL_EXPOSITION_REQUIRED_FROM = datetime(2017, 11, 17, tzinfo=TZ)
DGF_ADDRESS_REQUIRED_FROM = datetime(2018, 2, 9, tzinfo=TZ)
RECTIFICATION_END_EDITING_AND_VALIDATION_REQUIRED_FROM = datetime(2018, 2, 9, tzinfo=TZ)
#codes
CAVPS_CODES = read_json('cav_ps.json')
CPVS_CODES = read_json('cpvs.json')
ORA_CODES[0:0] = ["UA-IPN", "UA-FIN"]
NUMBER_OF_BIDS_TO_BE_QUALIFIED = 2
#code units
CPV_NON_SPECIFIC_LOCATION_UNITS = ('71', '72', '73', '75', '76', '77', '79', '80', '85', '90', '92', '98')
CAV_NON_SPECIFIC_LOCATION_UNITS = ('07', '08')
| from datetime import datetime, timedelta
from openprocurement.api.models import TZ, ORA_CODES
def read_json(name):
import os.path
from json import loads
curr_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(curr_dir, name)
with open(file_path) as lang_file:
data = lang_file.read()
return loads(data)
#document types
DOCUMENT_TYPE_OFFLINE = ['x_dgfAssetFamiliarization']
DOCUMENT_TYPE_URL_ONLY = ['virtualDataRoom']
#requiremnt periods
MINIMAL_EXPOSITION_PERIOD = timedelta(days=7)
MINIMAL_PERIOD_FROM_RECTIFICATION_END = timedelta(days=5)
VERIFY_AUCTION_PROTOCOL_TIME = timedelta(days=6)
AWARD_PAYMENT_TIME = timedelta(days=20)
CONTRACT_SIGNING_TIME = timedelta(days=20)
#time constants
DGF_ID_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
DGF_DECISION_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
CLASSIFICATION_PRECISELY_FROM = datetime(2017, 7, 19, tzinfo=TZ)
MINIMAL_EXPOSITION_REQUIRED_FROM = datetime(2017, 11, 17, tzinfo=TZ)
DGF_ADDRESS_REQUIRED_FROM = datetime(2018, 2, 9, tzinfo=TZ)
RECTIFICATION_END_EDITING_AND_VALIDATION_REQUIRED_FROM = datetime(2018, 01, 22, tzinfo=TZ)
#codes
CAVPS_CODES = read_json('cav_ps.json')
CPVS_CODES = read_json('cpvs.json')
ORA_CODES[0:0] = ["UA-IPN", "UA-FIN"]
NUMBER_OF_BIDS_TO_BE_QUALIFIED = 2
#code units
CPV_NON_SPECIFIC_LOCATION_UNITS = ('71', '72', '73', '75', '76', '77', '79', '80', '85', '90', '92', '98')
CAV_NON_SPECIFIC_LOCATION_UNITS = ('07', '08')
| apache-2.0 | Python |
2fb1c14f9ad0b72f1f059d7e5e233b8001c2b60b | Update auth tests | MichaelCurrin/twitterverse,MichaelCurrin/twitterverse | app/tests/integration/test_twitter_api.py | app/tests/integration/test_twitter_api.py | # -*- coding: utf-8 -*-
"""
Twitter API test module.
Do requests to the Twitter API using configured credentials. NB. These require
valid tokens for a Twitter dev account, plus a network connection.
"""
from unittest import TestCase
from lib.config import AppConf
from lib.twitter_api import authentication
conf = AppConf()
class TestAuth(TestCase):
def test_generateAppAccessToken(self):
auth = authentication._generateAppAccessToken()
def test_getTweepyConnection(self):
auth = authentication._generateAppAccessToken()
api = authentication._getTweepyConnection(auth)
def test_getAPIConnection(self):
"""
Test that App Access token can be used to connect to Twitter API.
"""
api = authentication.getAPIConnection(userFlow=False)
def test_getAppOnlyConnection(self):
"""
Test App-only token.
"""
api = authentication.getAppOnlyConnection()
| # -*- coding: utf-8 -*-
"""
Twitter API test module.
"""
from unittest import TestCase
from lib.twitter_api import authentication
class TestAuth(TestCase):
def test_generateAppToken(self):
auth = authentication._generateAppAccessToken()
def test_getTweepyConnection(self):
auth = authentication._generateAppAccessToken()
api = authentication._getTweepyConnection(auth)
def test_getAPIConnection(self):
"""
Test App Access token.
"""
api = authentication.getAPIConnection(userFlow=False)
def test_getAppOnlyConnection(self):
"""
Test App-only token.
"""
api = authentication.getAppOnlyConnection()
| mit | Python |
f91fc2a8858c243b62d1a9a369d45216fb15f443 | Change auth selenium test to use wait_element_become_present | jucacrispim/toxicbuild,jucacrispim/toxicbuild,jucacrispim/toxicbuild,jucacrispim/toxicbuild | tests/webui/steps/authentication_steps.py | tests/webui/steps/authentication_steps.py | # -*- coding: utf-8 -*-
import time
from behave import when, then, given
from toxicbuild.ui import settings
from tests.webui.steps.base_steps import ( # noqa f811
given_logged_in_webui, user_sees_main_main_page_login)
# Scenario: Someone try to access a page without being logged.
@when('someone tries to access a waterfall url without being logged')
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'waterfall/some-repo'
browser.get(url)
@then('he sees the login page') # noqa f401
def step_impl(context):
browser = context.browser
def fn():
try:
el = browser.find_element_by_id('inputUsername')
except Exception:
el = None
return el
el = browser.wait_element_become_present(fn)
assert el
# Scenario: Do login
@given('the user is in the login page') # noqa f401
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'login'
browser.get(url)
@when('he inserts "{user_name}" as user name')
def user_inserts_username_login(context, user_name):
browser = context.browser
username_input = browser.find_element_by_id('inputUsername')
username_input.send_keys(user_name)
@when('inserts "{passwd}" as password')
def user_inserts_password_login(context, passwd):
browser = context.browser
passwd_input = browser.find_element_by_id('inputPassword')
passwd_input.send_keys(passwd)
@when('clicks in the login button')
def user_clicks_login_button(context):
browser = context.browser
btn = browser.find_element_by_id('btn-login')
btn.click()
@then('he sees the red warning in the password field')
def user_sees_missing_required_field_warning(context):
browser = context.browser
el = browser.find_element_by_class_name('form-control-error')
assert el
@then('he sees the invalid credentials message')
def user_sees_invalid_credentials_message(context):
browser = context.browser
el = browser.find_element_by_id('login-error-msg-container')
color = el.value_of_css_property('color')
time.sleep(0.5)
assert color != 'rgb(255, 255, 255)'
# Scenario: Do logout
@when('he clicks in the logout link') # noqa f401
def step_impl(context):
browser = context.browser
el = browser.find_element_by_class_name('nav-link')
browser.click(el)
el = browser.find_elements_by_class_name('dropdown-item-logout')[-1]
browser.click(el)
| # -*- coding: utf-8 -*-
import time
from behave import when, then, given
from toxicbuild.ui import settings
from tests.webui.steps.base_steps import ( # noqa f811
given_logged_in_webui, user_sees_main_main_page_login)
# Scenario: Someone try to access a page without being logged.
@when('someone tries to access a waterfall url without being logged')
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'waterfall/some-repo'
browser.get(url)
@then('he sees the login page') # noqa f401
def step_impl(context):
browser = context.browser
el = browser.find_element_by_id('inputUsername')
assert el
# Scenario: Do login
@given('the user is in the login page') # noqa f401
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'login'
browser.get(url)
@when('he inserts "{user_name}" as user name')
def user_inserts_username_login(context, user_name):
browser = context.browser
username_input = browser.find_element_by_id('inputUsername')
username_input.send_keys(user_name)
@when('inserts "{passwd}" as password')
def user_inserts_password_login(context, passwd):
browser = context.browser
passwd_input = browser.find_element_by_id('inputPassword')
passwd_input.send_keys(passwd)
@when('clicks in the login button')
def user_clicks_login_button(context):
browser = context.browser
btn = browser.find_element_by_id('btn-login')
btn.click()
@then('he sees the red warning in the password field')
def user_sees_missing_required_field_warning(context):
browser = context.browser
el = browser.find_element_by_class_name('form-control-error')
assert el
@then('he sees the invalid credentials message')
def user_sees_invalid_credentials_message(context):
browser = context.browser
el = browser.find_element_by_id('login-error-msg-container')
color = el.value_of_css_property('color')
time.sleep(0.5)
assert color != 'rgb(255, 255, 255)'
# Scenario: Do logout
@when('he clicks in the logout link') # noqa f401
def step_impl(context):
browser = context.browser
el = browser.find_element_by_class_name('nav-link')
browser.click(el)
el = browser.find_elements_by_class_name('dropdown-item-logout')[-1]
browser.click(el)
| agpl-3.0 | Python |
1ab8224372a5f839c8f0f74f3cafe7926905a7ec | Update __init__.py | breznak/nupic,breznak/nupic,breznak/nupic | nupic/__init__.py | nupic/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
__version__ = "0.2.4.dev0"
| # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
__version__ = "0.2.4.dev0"
NUPIC_ROOT = os.environ.get('NUPIC', os.path.dirname(os.path.realpath(__file__)))
| agpl-3.0 | Python |
39986540e1ad1c4712405e46b988459f2abbf6e9 | Update for new python | baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite | Communication/testUDP.py | Communication/testUDP.py | # -------------------------------------------------------
import socket, traceback
import time
host = ''
#host = '192.168.201.251'
port = 1234
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind((host, port))
filein = open('saveUDP.txt', 'w')
t0 = time.time()
while time.time()-t0 < 20:
try:
message, address = s.recvfrom(9000)
print(message)
filein.write('%s\n' % (message))
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
filein.close()
# -------------------------------------------------------
| # -------------------------------------------------------
import socket, traceback
import time
host = ''
port = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind((host, port))
filein = open('saveUDP.txt', 'w')
t0 = time.time()
while time.time()-t0 < 20:
try:
message, address = s.recvfrom(9000)
print message
filein.write('%s\n' % (message))
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
filein.close()
# -------------------------------------------------------
| mit | Python |
862c42a8abf0836604f56a9008018f34c405ca13 | update version number | F5Networks/f5-common-python,F5Networks/f5-common-python,wojtek0806/f5-common-python | f5/__init__.py | f5/__init__.py | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.1.6'
| # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.1.5'
| apache-2.0 | Python |
db36e08a81d16463d8c76b896593aaeb91c057a0 | Refactor into get_check_digit_from_checkable_int method | mlibrary/image-conversion-and-validation,mlibrary/image-conversion-and-validation | falcom/luhn.py | falcom/luhn.py | # Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
def get_check_digit_from_checkable_int (number):
return (9 * ((number // 10) + rotate_digit(number % 10))) % 10
def rotate_digit (digit):
if digit > 4:
return (digit * 2) - 9
else:
return digit * 2
def get_check_digit (number = None):
if number:
return get_check_digit_from_checkable_int(int(number))
else:
return None
| # Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
def convert_into_luhn_checkable_int (number):
if number:
return int(number)
else:
return None
def rotate_digit (digit):
if digit > 4:
return (digit * 2) - 9
else:
return digit * 2
def get_check_digit (number = None):
number = convert_into_luhn_checkable_int(number)
if number is None:
return None
return (9 * ((number // 10) + rotate_digit(number % 10))) % 10
| bsd-3-clause | Python |
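For context, the check digit returned above follows the Luhn convention: a full number is valid when its last digit matches the check digit computed over the digits before it. A minimal validator sketch built on the module's own get_check_digit (the two-digit payload 18, whose Luhn check digit is 2, is just an illustrative value):

def is_valid(number):
    # Recompute the check digit from everything but the last digit.
    return get_check_digit(number[:-1]) == int(number[-1])

assert get_check_digit('18') == 2   # double 8 -> 16 -> 7; 7 + 1 = 8; (10 - 8) % 10 = 2
assert is_valid('182')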
29be4cad4ab90fe5d1fc087f0de2e8a575ced40b | Bump version | walkr/nanoservice | nanoservice/version.py | nanoservice/version.py | VERSION = '0.3.1'
| VERSION = '0.3.0'
| mit | Python |
f66e3e965c00c455608dba994575098e1cd246ae | Update request_tracking_codes.py | osantana/correios,olist/correios,solidarium/correios | samples/request_tracking_codes.py | samples/request_tracking_codes.py | # Copyright 2017 Adler Medrado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..correios.client import Correios
from ..correios.models.user import User, Service
def get_tracking_codes(service, quantity):
olist_user = User('Your Company\'s Name', 'Your Company\'s CNPJ')
client = Correios('Your Correio\'s username', 'Your correio\'s password')
tracking_codes = client.request_tracking_codes(olist_user, Service.get(service), quantity=quantity)
print(tracking_codes)
get_tracking_codes('41068', 1) # Request 1 PAC Tracking Code
get_tracking_codes('40068', 1) # Request 1 SEDEX Tracking Code
| from ..correios.client import Correios
from ..correios.models.user import User, Service
def get_tracking_codes(service, quantity):
olist_user = User('Your Company\'s Name', 'Your Company\'s CNPJ')
client = Correios('Your Correio\'s username', 'Your correio\'s password')
tracking_codes = client.request_tracking_codes(olist_user, Service.get(service), quantity=quantity)
print(tracking_codes)
get_tracking_codes('41068', 1) # Request 1 PAC Tracking Code
get_tracking_codes('40068', 1) # Request 1 SEDEX Tracking Code
| apache-2.0 | Python |
f3791ea0ed11d46edf9998b80fd1ddd54d7e9b20 | Bump to pre-release version 1.0.rc2. | appfolio/farcy,appfolio/farcy,appfolio/farcy | farcy/const.py | farcy/const.py | """Constants used throughout Farcy."""
import os
import re
__version__ = '1.0.rc2'
VERSION_STR = 'farcy v{0}'.format(__version__)
CONFIG_DIR = os.path.expanduser('~/.config/farcy')
MD_VERSION_STR = ('[{0}](https://github.com/appfolio/farcy)'
.format(VERSION_STR))
FARCY_COMMENT_START = '_{0}_'.format(MD_VERSION_STR)
NUMBER_RE = re.compile(r'(\d+)')
APPROVAL_PHRASES = [x.strip() for x in """
Amazing
Bravo
Excellent
Great job
Lookin' good
Outstanding work
Perfect
Spectacular
Tremendous
Well done
Wicked awesome
Winning
Wonderful
Wow
You are awesome
You do not miss a thing
""".split('\n') if x.strip()]
STATUS_CONTEXT = 'farcy'
| """Constants used throughout Farcy."""
import os
import re
__version__ = '1.0.rc1'
VERSION_STR = 'farcy v{0}'.format(__version__)
CONFIG_DIR = os.path.expanduser('~/.config/farcy')
MD_VERSION_STR = ('[{0}](https://github.com/appfolio/farcy)'
.format(VERSION_STR))
FARCY_COMMENT_START = '_{0}_'.format(MD_VERSION_STR)
NUMBER_RE = re.compile(r'(\d+)')
APPROVAL_PHRASES = [x.strip() for x in """
Amazing
Bravo
Excellent
Great job
Lookin' good
Outstanding work
Perfect
Spectacular
Tremendous
Well done
Wicked awesome
Winning
Wonderful
Wow
You are awesome
You do not miss a thing
""".split('\n') if x.strip()]
STATUS_CONTEXT = 'farcy'
| bsd-2-clause | Python |
035d5feee8ea0691e5777a7b96c362877bcf01ca | Add logging | ThibF/G-youmus,ThibF/G-youmus | consumerSQS/consumer.py | consumerSQS/consumer.py | import boto3
import logging
from config import config
import answer
import json
import time
import librarian
def work(message):
message = json.loads(message)
print(type(message))
print(message)
return sendToManager(message)
def sendToManager(message):
if("entry" in message):
um = librarian.User_manager(message["entry"][0]["messaging"][0]["sender"]["id"])
try:
um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["text"]))
except Exception as e:
print(e)
um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["attachments"][0]["url"]))
return True
else:
um = librarian.User_manager(int(message["state"]))
um.user_event("MESSAGE",str(message["code"]))
return True
def parrot_work(message):
    try:
msgToSend = "I ear your request :"+str(message["entry"][0]["messaging"][0]["message"]["text"])
idReceiver = message["entry"][0]["messaging"][0]["sender"]["id"]
print (idReceiver+":"+msgToSend)
answer.send_message(msgToSend,idReceiver)
answer.send_message("I will",idReceiver)
return True
except Exception as e:
print(e)
return False
sqs = boto3.resource('sqs',aws_access_key_id = config["access_key"], aws_secret_access_key=config["secret_access_key"], region_name="us-west-2", endpoint_url="https://sqs.us-west-2.amazonaws.com/731910755973/MessagesYouMus.fifo")
while True:
queue = sqs.get_queue_by_name(QueueName='MessagesYouMus.fifo')
for msg in queue.receive_messages():
try:
print("Received ="+str(json.loads(msg.body)))
            status = work(msg.body)  # work() does its own json.loads; passing a parsed dict would raise TypeError
msg.delete()
except Exception as e:
print(e)
time.sleep(0.2)
| import boto3
import logging
from config import config
import answer
import json
import time
import librarian
def work(message):
message = json.loads(message)
print(type(message))
print(message)
return sendToManager(message)
def sendToManager(message):
if("entry" in message):
um = librarian.User_manager(message["entry"][0]["messaging"][0]["sender"]["id"])
try:
um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["text"]))
except Exception as e:
print(e)
um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["attachments"][0]["url"]))
return True
else:
um = librarian.User_manager(int(message["state"]))
um.user_event("MESSAGE",str(message["code"]))
return True
def parrot_work(message):
    try:
msgToSend = "I ear your request :"+str(message["entry"][0]["messaging"][0]["message"]["text"])
idReceiver = message["entry"][0]["messaging"][0]["sender"]["id"]
print (idReceiver+":"+msgToSend)
answer.send_message(msgToSend,idReceiver)
answer.send_message("I will",idReceiver)
return True
except Exception as e:
print(e)
return False
sqs = boto3.resource('sqs',aws_access_key_id = config["access_key"], aws_secret_access_key=config["secret_access_key"], region_name="us-west-2", endpoint_url="https://sqs.us-west-2.amazonaws.com/731910755973/MessagesYouMus.fifo")
while True:
queue = sqs.get_queue_by_name(QueueName='MessagesYouMus.fifo')
for msg in queue.receive_messages():
try:
print("Received ="+str(json.loads(msg.body)))
            status = work(msg.body)  # work() does its own json.loads
msg.delete()
except:
pass
time.sleep(0.2)
| mit | Python |
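The consumer above only drains the queue; the matching producer side is not shown. A minimal sketch of enqueuing a payload with boto3 — the queue name comes from the script, while the default credential chain, the MessageGroupId, and the deduplication id are assumptions (FIFO queues require a group id plus either content-based deduplication or an explicit MessageDeduplicationId):

import json
import uuid

import boto3

sqs = boto3.resource('sqs', region_name='us-west-2')
queue = sqs.get_queue_by_name(QueueName='MessagesYouMus.fifo')
queue.send_message(
    MessageBody=json.dumps({'state': '12345', 'code': 'example-code'}),
    MessageGroupId='youmus',                   # required for FIFO queues
    MessageDeduplicationId=str(uuid.uuid4()),  # or enable content-based dedup
)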
e14b8c6b06c75414f42f730e4c1e1a9208e335b0 | correct shebang | ypnos/sonne,ypnos/sonne,ypnos/sonne | fetch/fetch.py | fetch/fetch.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Greedy climate data fetch
"""
filename = 'wwis.json'
indexurl = 'http://worldweather.wmo.int/en/json/full_city_list.txt'
baseurl = 'http://worldweather.wmo.int/en/json/{0}_en.xml'
guideurl = 'http://worldweather.wmo.int/en/dataguide.html'
notice = 'Please note the guidelines at {0}'
usage = """{0} <index file> [output file]
Data will be downloaded into {1} if no second argument given.
The full index file is available for download at {2}
You can re-run this script to continue downloading in the case of failures."""
from sys import argv
import urllib.request
import csv
import simplejson as json
import time
import sys
def fetch_entry(id):
url = baseurl.format(id)
try:
f = urllib.request.urlopen(url).read()
entry = json.loads(f.decode())
except:
return -1
time.sleep(0.1) # don't DoS
return entry
def nice_entry(entry, country):
data = entry['city']
data['country'] = country
return data
if __name__ == '__main__':
if len(argv) < 2:
print(usage.format(argv[0], filename, indexurl))
exit(1)
print(notice.format(guideurl))
if len(argv) > 2:
filename = argv[2]
data = {}
try:
with open(filename, 'r') as f:
data = json.load(f)
except:
pass
with open(argv[1], 'r', newline='') as f:
reader = csv.reader(f, delimiter=';', quotechar='"')
for row in reader:
if len(row) < 3:
print('?', end='', file=sys.stderr)
continue
if row[0] == 'Country':
continue
key = row[2]
if key in data:
print('✓', end='', file=sys.stderr)
continue
sys.stderr.flush()
entry = fetch_entry(key)
if entry == -1:
print('⚡', end='', file=sys.stderr)
break # bail out, save what we have
print('.', end='', file=sys.stderr)
data[key] = nice_entry(entry, row[0])
print('', file=sys.stderr)
with open(filename, 'w') as f:
json.dump(data, f, sort_keys=True, indent='\t')
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Greedy climate data fetch
"""
filename = 'wwis.json'
indexurl = 'http://worldweather.wmo.int/en/json/full_city_list.txt'
baseurl = 'http://worldweather.wmo.int/en/json/{0}_en.xml'
guideurl = 'http://worldweather.wmo.int/en/dataguide.html'
notice = 'Please note the guidelines at {0}'
usage = """{0} <index file> [output file]
Data will be downloaded into {1} if no second argument given.
The full index file is available for download at {2}
You can re-run this script to continue downloading in the case of failures."""
from sys import argv
import urllib.request
import csv
import simplejson as json
import time
import sys
def fetch_entry(id):
url = baseurl.format(id)
try:
f = urllib.request.urlopen(url).read()
entry = json.loads(f.decode())
except:
return -1
time.sleep(0.1) # don't DoS
return entry
def nice_entry(entry, country):
data = entry['city']
data['country'] = country
return data
if __name__ == '__main__':
if len(argv) < 2:
print(usage.format(argv[0], filename, indexurl))
exit(1)
print(notice.format(guideurl))
if len(argv) > 2:
filename = argv[2]
data = {}
try:
with open(filename, 'r') as f:
data = json.load(f)
except:
pass
with open(argv[1], 'r', newline='') as f:
reader = csv.reader(f, delimiter=';', quotechar='"')
for row in reader:
if len(row) < 3:
print('?', end='', file=sys.stderr)
continue
if row[0] == 'Country':
continue
key = row[2]
if key in data:
print('✓', end='', file=sys.stderr)
continue
sys.stderr.flush()
entry = fetch_entry(key)
if entry == -1:
print('⚡', end='', file=sys.stderr)
break # bail out, save what we have
print('.', end='', file=sys.stderr)
data[key] = nice_entry(entry, row[0])
print('', file=sys.stderr)
with open(filename, 'w') as f:
json.dump(data, f, sort_keys=True, indent='\t')
| agpl-3.0 | Python |
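One caveat with fetch_entry above: the bare except collapses every failure (HTTP errors, malformed JSON, even Ctrl-C) into the same -1. A narrower sketch of the same function, assuming only network and decode errors should trigger the bail-out, and substituting stdlib json for the script's simplejson:

import json
import time
import urllib.error
import urllib.request

baseurl = 'http://worldweather.wmo.int/en/json/{0}_en.xml'  # as in the script

def fetch_entry(id):
    url = baseurl.format(id)
    try:
        raw = urllib.request.urlopen(url).read()
        entry = json.loads(raw.decode())
    except (urllib.error.URLError, ValueError):
        # URLError covers HTTP failures; ValueError covers malformed JSON.
        return -1
    time.sleep(0.1)  # keep the polite throttle
    return entry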
aeca55a5ca5a8b15314cc7bd31a3c89361436318 | Add return type check test | rwhitt2049/trouve,rwhitt2049/nimble | tests/test_pandas_integration.py | tests/test_pandas_integration.py | from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition, sample_period=1)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
self.assertEqual(pd.core.series.Series, type(test_series))
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
self.assertEqual(np.ndarray, type(validation_array))
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition, sample_period=1)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
self.assertEqual(pd.core.series.Series, type(test_series))
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
self.assertEqual(np.ndarray, type(validation_array))
if __name__ == '__main__':
main()
| from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition, sample_period=1)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition, sample_period=1)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
self.asser
if __name__ == '__main__':
main()
| mit | Python |
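One thing worth noting about the tests above: test_series.equals(validation_series) returns a boolean that is discarded, so the series comparison can never fail the test. A sketch of the guarded form, assuming the same fixtures:

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        # Wrap the boolean result in an assertion so a mismatch actually fails.
        self.assertTrue(test_series.equals(validation_series))
        self.assertEqual(pd.core.series.Series, type(test_series))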
7e13edfea2ee0c055f890fba08fa645141cd2f7d | add colourbar | ChristosT/colour-surfaces | helix.py | helix.py | # Create the data.
from numpy import pi, sin, cos, mgrid
[u,v] = mgrid[-5:5:0.01,0:2*pi+0.1:0.1]
a=2
x = u*cos(v)
y = u*sin(v)
z = a*v
K=-a**2/(u**2 +a**2)**2
from mayavi import mlab
s = mlab.mesh(x, y, z,scalars=K)
mlab.colorbar(orientation='horizontal',title='Gaussian Curvature')
mlab.show()
| # Create the data.
from numpy import pi, sin, cos, mgrid
[u,v] = mgrid[-5:5:0.01,0:2*pi+0.1:0.1]
a=2
x = u*cos(v)
y = u*sin(v)
z = a*v
K=-a**2/(u**2 +a**2)**2
from mayavi import mlab
s = mlab.mesh(x, y, z,scalars=K)
mlab.show()
| mit | Python |
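The scalar field K passed to mlab.mesh above is the helicoid's Gaussian curvature in closed form. As a sketch of where it comes from (a standard fundamental-forms computation for the parametrization in the script; nothing is assumed beyond that):

\mathbf{r}(u,v) = (u\cos v,\; u\sin v,\; av), \qquad E = 1,\quad F = 0,\quad G = u^2 + a^2,

e = 0, \qquad f = -\frac{a}{\sqrt{u^2 + a^2}}, \qquad g = 0,

K = \frac{eg - f^2}{EG - F^2} = -\frac{a^2}{(u^2 + a^2)^2}.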
b5146035b7f4ae641a53bb956e9afee62c50c347 | Change cache directory for vendor LST | daq-tools/kotori,daq-tools/kotori,daq-tools/kotori,daq-tools/kotori,daq-tools/kotori,zerotired/kotori,zerotired/kotori,daq-tools/kotori,zerotired/kotori,zerotired/kotori,daq-tools/kotori,zerotired/kotori,zerotired/kotori | kotori/vendor/lst/h2m/util.py | kotori/vendor/lst/h2m/util.py | # -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <[email protected]>
import os
from appdirs import user_cache_dir
from kotori.daq.intercom.c import LibraryAdapter, StructRegistryByID
#from kotori.daq.intercom.cffi_adapter import LibraryAdapterCFFI
def setup_h2m_structs_pyclibrary():
cache_dir = os.path.join(user_cache_dir('kotori'), 'lst')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapter(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
def setup_h2m_structs_cffi():
cache_dir = os.path.join(user_cache_dir('kotori'), 'lst')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapterCFFI(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
setup_h2m_structs = setup_h2m_structs_pyclibrary
| # -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <[email protected]>
import os
from appdirs import user_cache_dir
from kotori.daq.intercom.c import LibraryAdapter, StructRegistryByID
#from kotori.daq.intercom.cffi_adapter import LibraryAdapterCFFI
def setup_h2m_structs_pyclibrary():
cache_dir = user_cache_dir('lst', 'elmyra')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapter(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
def setup_h2m_structs_cffi():
cache_dir = user_cache_dir('lst', 'elmyra')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapterCFFI(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
setup_h2m_structs = setup_h2m_structs_pyclibrary
| agpl-3.0 | Python |
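A small aside on the isdir/makedirs pair duplicated in both helpers above: the check-then-create has a race window if two processes start at once. On Python >= 3.2 it collapses to a single call — a sketch (the project may still target Python 2 here, so this is a suggestion, not a drop-in):

import os
from appdirs import user_cache_dir

cache_dir = os.path.join(user_cache_dir('kotori'), 'lst')
os.makedirs(cache_dir, exist_ok=True)  # no-op if the directory already exists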
e6b11c0c110d0457cc31d7d798a2b35e19a0f56e | fix wrong parser | bcicen/slackn,bcicen/slack-notify | slackn/cli.py | slackn/cli.py | import sys
import logging
from argparse import ArgumentParser
from slackn.core import Queue, Notifier
from slackn.version import version
log = logging.getLogger('slackn')
def get_queue(s):
if ':' in s:
host,port = s.split(':')
else:
host,port = (s, 6379)
return Queue(host,port)
def process():
parser = ArgumentParser(description='slackn_process v%s' % version)
parser.add_argument('--slack-channel',
help='channel to send notifications')
parser.add_argument('--slack-token',
help='channel to send notifications')
parser.add_argument('--redis',
default='127.0.0.1:6379',
help='redis host:port to connect to')
args = parser.parse_args()
queue = get_queue(args.redis)
notifier = Notifier(args.slack_token, args.slack_channel)
for hostname,msgs in queue.dump().items():
notifier.add_host(hostname, msgs)
queue.increment('sent', len(msgs))
notifier.send()
def notify():
common_parser = ArgumentParser(add_help=False)
common_parser.add_argument('--redis',
help='redis host to connect to (127.0.0.1:6379)',
default='127.0.0.1:6379')
parser = ArgumentParser(description='slackn-notify %s' % version,
parents=[common_parser])
subparsers = parser.add_subparsers(description='notification type',
dest='subcommand')
parser_host = subparsers.add_parser('host')
parser_host.add_argument('hostname')
parser_host.add_argument('hoststate')
parser_host.add_argument('hostoutput')
parser_host.add_argument('nagiostype')
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('servicedesc')
parser_service.add_argument('servicestate')
parser_service.add_argument('serviceoutput')
parser_service.add_argument('nagiostype')
args = parser.parse_args()
if not args.subcommand:
print('no notification type provided')
sys.exit(1)
queue = get_queue(args.redis)
notify_args = { k:v for k,v in args.__dict__.items() }
for k in ('redis','subcommand'):
del notify_args[k]
notify_args['type'] = args.subcommand
queue.submit(notify_args)
| import sys
import logging
from argparse import ArgumentParser
from slackn.core import Queue, Notifier
from slackn.version import version
log = logging.getLogger('slackn')
def get_queue(s):
if ':' in s:
host,port = s.split(':')
else:
host,port = (s, 6379)
return Queue(host,port)
def process():
parser = ArgumentParser(description='slackn_process v%s' % version)
parser.add_argument('--slack-channel',
help='channel to send notifications')
parser.add_argument('--slack-token',
help='channel to send notifications')
parser.add_argument('--redis',
default='127.0.0.1:6379',
help='redis host:port to connect to')
args = parser.parse_args()
queue = get_queue(args.redis)
notifier = Notifier(args.slack_token, args.slack_channel)
for hostname,msgs in queue.dump().items():
notifier.add_host(hostname, msgs)
queue.increment('sent', len(msgs))
notifier.send()
def notify():
common_parser = ArgumentParser(add_help=False)
common_parser.add_argument('--redis',
help='redis host to connect to (127.0.0.1:6379)',
default='127.0.0.1:6379')
parser = ArgumentParser(description='slackn-notify %s' % version,
parents=[common_parser])
subparsers = parser.add_subparsers(description='notification type',
dest='subcommand')
parser_host = subparsers.add_parser('host')
parser_host.add_argument('hostname')
parser_host.add_argument('hoststate')
parser_host.add_argument('hostoutput')
parser_host.add_argument('nagiostype')
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('servicedesc')
parser_service.add_argument('servicestate')
parser_service.add_argument('serviceoutput')
parser_host.add_argument('nagiostype')
args = parser.parse_args()
if not args.subcommand:
print('no notification type provided')
sys.exit(1)
queue = get_queue(args.redis)
notify_args = { k:v for k,v in args.__dict__.items() }
for k in ('redis','subcommand'):
del notify_args[k]
notify_args['type'] = args.subcommand
queue.submit(notify_args)
| mit | Python |
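The bug fixed above is subtle because argparse attaches positionals to whichever (sub)parser you name. A self-contained sketch showing why 'nagiostype' must be declared on parser_service for the service subcommand to accept it (the argument values are invented):

from argparse import ArgumentParser

parser = ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand')
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('nagiostype')  # on the right subparser now

args = parser.parse_args(['service', 'web01', 'PROBLEM'])
assert args.subcommand == 'service' and args.nagiostype == 'PROBLEM'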
463d044cfa70de6bde04c380c459274acb71a1b6 | add database | zjuguxi/flask_study,zjuguxi/flask_study,zjuguxi/flask_study | hello.py | hello.py | from flask import Flask, render_template, session, redirect, url_for, flash
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask.ext.sqlalchemy import SQLAlchemy
import os
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), unique = True)
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(64), unique = True, index = True)
def __repr__(self):
return '<User %r>' % self.username
class NameForm(Form):
name = StringField('What\'s your name?', validators = [Required()])
submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods = ['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
flash('Looks like you have changed your name!')
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name'))
if __name__ == '__main__':
manager.run() | from flask import Flask, render_template, session, redirect, url_for, flash
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
class NameForm(Form):
name = StringField('What\'s your name?', validators = [Required()])
submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods = ['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
flash('Looks like you have changed your name!')
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name'))
if __name__ == '__main__':
manager.run() | mit | Python |
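The new models above declare tables but nothing in the module creates them. With Flask-SQLAlchemy of this vintage that is a one-time db.create_all() from a shell — a usage sketch, assuming the module is importable as hello:

from hello import db, Role, User

db.create_all()                       # creates roles/users in data.sqlite
db.session.add_all([Role(name='Admin'), User(username='john')])
db.session.commit()
print(User.query.filter_by(username='john').first())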
9de3dacc7c687bc5e4d11a5a334f5ef5cc4d2f37 | Fix call to genome mapping code | RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/cli/genome_mapping.py | rnacentral_pipeline/cli/genome_mapping.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import click
from rnacentral_pipeline.rnacentral.genome_mapping import urls
from rnacentral_pipeline.rnacentral.genome_mapping import blat
@click.group('genome-mapping')
def cli():
"""
This group of commands deals with figuring out what data to map as well as
parsing the result into a format for loading.
"""
pass
@cli.command('select-hits')
@click.argument('assembly_id')
@click.argument('hits', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def select_hits(assembly_id, hits, output):
blat.write_selected(assembly_id, hits, output)
@cli.command('url-for')
@click.option('--host', default='ensembl')
@click.argument('species')
@click.argument('assembly_id')
@click.argument('output', default='-', type=click.File('w'))
def find_remote_url(species, assembly_id, output, host=None):
url = urls.url_for(species, assembly_id, host=host)
output.write(url)
@cli.command('urls-for')
@click.argument('filename', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def find_remote_urls(filename, output):
urls.write_urls_for(filename, output)
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import click
from rnacentral_pipeline.rnacentral import genome_mapping
@click.group('genome-mapping')
def cli():
"""
This group of commands deals with figuring out what data to map as well as
parsing the result into a format for loading.
"""
pass
@cli.command('select-hits')
@click.argument('assembly_id')
@click.argument('hits', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def select_hits(assembly_id, hits, output):
genome_mapping.blat.write_selected(assembly_id, hits, output)
@cli.command('url-for')
@click.option('--host', default='ensembl')
@click.argument('species')
@click.argument('assembly_id')
@click.argument('output', default='-', type=click.File('w'))
def find_remote_url(species, assembly_id, output, host=None):
url = genome_mapping.urls.url_for(species, assembly_id, host=host)
output.write(url)
@cli.command('urls-for')
@click.argument('filename', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def find_remote_urls(filename, output):
genome_mapping.urls.write_urls_for(filename, output)
| apache-2.0 | Python |
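Click groups like the one above are easy to smoke-test without a shell via click's CliRunner. A sketch, assuming the module path used in this repo; the species and assembly arguments are placeholder values:

from click.testing import CliRunner

from rnacentral_pipeline.cli.genome_mapping import cli

runner = CliRunner()
result = runner.invoke(cli, ['url-for', 'homo_sapiens', 'GRCh38'])
print(result.exit_code, result.output)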
6424edf4186236443ba4ec5a1b2ffcc26de7c695 | add classifications | jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot | fl/__init__.py | fl/__init__.py | # encoding=utf-8
from pupa.scrape import Jurisdiction, Organization
from .votes import FlVoteScraper
from .bills import FlBillScraper
from .people import FlPersonScraper
class Florida(Jurisdiction):
division_id = "ocd-division/country:us/state:fl"
classification = "government"
name = "Florida"
url = "http://myflorida.com"
scrapers = {
# "votes": FlVoteScraper,
"bills": FlBillScraper,
"people": FlPersonScraper,
}
parties = [{'name': 'Republican'},
{'name': 'Democratic'}]
legislative_sessions = [
{'name': '2011 Regular Session', 'identifier': '2011', 'classification': 'primary'},
{'name': '2012 Regular Session', 'identifier': '2012', 'classification': 'primary'},
{'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B', 'classification': 'special'},
{'name': '2013 Regular Session', 'identifier': '2013', 'classification': 'primary'},
{'name': '2014 Regular Session', 'identifier': '2014', 'classification': 'primary'},
{'name': '2014 Special Session A', 'identifier': '2014A', 'classification': 'special'},
{'name': '2015 Regular Session', 'identifier': '2015', 'classification': 'primary'},
{'name': '2015 Special Session A', 'identifier': '2015A', 'classification': 'special'},
{'name': '2015 Special Session B', 'identifier': '2015B', 'classification': 'special'},
{'name': '2016 Regular Session', 'identifier': '2016', 'classification': 'primary'},
]
def get_organizations(self):
legis = Organization(name="Florida Legislature", classification="legislature")
upper = Organization('Florida Senate', classification='upper', parent_id=legis._id)
lower = Organization('Florida House', classification='lower', parent_id=legis._id)
for n in range(1, 41):
upper.add_post(label=str(n), role='Senator')
for n in range(1, 121):
lower.add_post(label=str(n), role='Representative')
yield legis
yield upper
yield lower
| # encoding=utf-8
from pupa.scrape import Jurisdiction, Organization
from .votes import FlVoteScraper
from .bills import FlBillScraper
from .people import FlPersonScraper
class Florida(Jurisdiction):
division_id = "ocd-division/country:us/state:fl"
classification = "government"
name = "Florida"
url = "http://myflorida.com"
scrapers = {
# "votes": FlVoteScraper,
"bills": FlBillScraper,
"people": FlPersonScraper,
}
parties = [{'name': 'Republican'},
{'name': 'Democratic'},
{'name': 'Independent'}]
legislative_sessions = [
{'name': '2011 Regular Session', 'identifier': '2011', },
{'name': '2012 Regular Session', 'identifier': '2012', },
{'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B', },
{'name': '2013 Regular Session', 'identifier': '2013', },
{'name': '2014 Regular Session', 'identifier': '2014', },
{'name': '2014 Special Session A', 'identifier': '2014A', },
{'name': '2015 Regular Session', 'identifier': '2015', },
{'name': '2015 Special Session A', 'identifier': '2015A', },
{'name': '2015 Special Session B', 'identifier': '2015B', },
{'name': '2016 Regular Session', 'identifier': '2016', },
]
def get_organizations(self):
legis = Organization(name="Florida Legislature", classification="legislature")
upper = Organization('Florida Senate', classification='upper', parent_id=legis._id)
lower = Organization('Florida House', classification='lower', parent_id=legis._id)
for n in range(1, 41):
upper.add_post(label=str(n), role='Senator')
for n in range(1, 121):
lower.add_post(label=str(n), role='Representative')
yield legis
yield upper
yield lower
| mit | Python |
40d9ceb14c57c109e8f6371b1a4c677fa33e1669 | Bump base package requirements (#10078) | DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core | snmp/setup.py | snmp/setup.py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'snmp', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=21.1.0'
setup(
name='datadog-snmp',
version=ABOUT['__version__'],
description='The SNMP check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent snmp check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
# The package we're going to ship
packages=['datadog_checks.snmp'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
| # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'snmp', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=15.7.0'
setup(
name='datadog-snmp',
version=ABOUT['__version__'],
description='The SNMP check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent snmp check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
# The package we're going to ship
packages=['datadog_checks.snmp'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
| bsd-3-clause | Python |
0a0ebb7dd3267d727e6af598f6d964cd4d73fd69 | Add TODO for multiple e-mail verification clicks. | SUNET/eduid-signup,SUNET/eduid-signup,SUNET/eduid-signup | eduid_signup/utils.py | eduid_signup/utils.py | from uuid import uuid4
from hashlib import sha256
import datetime
from pyramid.httpexceptions import HTTPInternalServerError
from eduid_signup.i18n import TranslationString as _
from eduid_signup.compat import text_type
def generate_verification_link(request):
code = text_type(uuid4())
link = request.route_url("email_verification_link", code=code)
return (link, code)
def verify_email_code(collection, code):
result = collection.find_and_modify(
{
"code": code,
"verified": False
}, {
"$set": {
"verified": True,
"verified_ts": datetime.utcnow(),
}
},
new=True,
safe=True
)
# XXX need to handle user clicking on confirmation link more than
# once gracefully. Should show page saying that e-mail address was
# already confirmed, but NOT allow user to auth_token login to
# dashboard from that page.
if result is None:
raise HTTPInternalServerError(_("Your email can't be verified now, "
"try it later"))
return True
def check_email_status(db, email):
"""
Check the email registration status.
If the email doesn't exist in database, then return 'new'.
If exists and it hasn't been verified, then return 'not_verified'.
If exists and it has been verified before, then return 'verified'.
"""
email = db.registered.find_one({'email': email})
if not email:
return 'new'
if email.get('verified', False):
return 'verified'
else:
return 'not_verified'
def generate_auth_token(shared_key, email, nonce, timestamp, generator=sha256):
"""
The shared_key is a secret between the two systems
The public word must must go through form POST or GET
"""
return generator("{0}|{1}|{2}|{3}".format(
shared_key, email, nonce, timestamp)).hexdigest()
| from uuid import uuid4
from hashlib import sha256
import datetime
from pyramid.httpexceptions import HTTPInternalServerError
from eduid_signup.i18n import TranslationString as _
from eduid_signup.compat import text_type
def generate_verification_link(request):
code = text_type(uuid4())
link = request.route_url("email_verification_link", code=code)
return (link, code)
def verify_email_code(collection, code):
result = collection.find_and_modify(
{
"code": code,
"verified": False
}, {
"$set": {
"verified": True,
"verified_ts": datetime.utcnow(),
}
},
new=True,
safe=True
)
if result is None:
raise HTTPInternalServerError(_("Your email can't be verified now, "
"try it later"))
return True
def check_email_status(db, email):
"""
Check the email registration status.
If the email doesn't exist in database, then return 'new'.
If exists and it hasn't been verified, then return 'not_verified'.
If exists and it has been verified before, then return 'verified'.
"""
email = db.registered.find_one({'email': email})
if not email:
return 'new'
if email.get('verified', False):
return 'verified'
else:
return 'not_verified'
def generate_auth_token(shared_key, email, nonce, timestamp, generator=sha256):
"""
The shared_key is a secret between the two systems
The public word must must go through form POST or GET
"""
return generator("{0}|{1}|{2}|{3}".format(
shared_key, email, nonce, timestamp)).hexdigest()
| bsd-3-clause | Python |
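A side note on find_and_modify above: pymongo 3 removed it (along with the safe= flag) in favour of find_one_and_update. A sketch of the equivalent call inside verify_email_code, assuming pymongo >= 3:

import datetime

from pymongo import ReturnDocument

result = collection.find_one_and_update(
    {'code': code, 'verified': False},
    {'$set': {'verified': True,
              'verified_ts': datetime.datetime.utcnow()}},
    return_document=ReturnDocument.AFTER,  # equivalent of new=True
)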
f9141964ffa4ed36420b8ba564407c2ca661ac46 | edit on glitter | CptShock/AuroraModules | glitter.py | glitter.py | from willie.module import commands
import random
@commands('glitter')
def ans(bot, trigger):
bot.say("*'-.*\(^O^)/*.-'*") | from willie.module import commands
import random
@commands('glitter')
def ans(bot, trigger):
bot.reply("*'-.*\(^O^)/*.-'*") | mit | Python |
3a83ff315db6f34fb8e656309580060cf708b8a1 | Refactor request body | LWprogramming/Hack-Brown2017 | request.py | request.py | '''
Code adapted from https://westus.dev.cognitive.microsoft.com/docs/services/TextAnalytics.V2.0/operations/56f30ceeeda5650db055a3c9
'''
import http.client, urllib.request, urllib.parse, urllib.error
import json
import script
import numpy as np
def main():
'''
Sends a single POST request with a test bit of text.
'''
    headers = generate_headers()
params = urllib.parse.urlencode({})
sample_text = 'I had a wonderful experience! The rooms were wonderful and the staff were helpful.' # from default given at https://www.microsoft.com/cognitive-services/en-us/text-analytics-api
body = body_from_string_vectors(np.array([sample_text]))
try:
conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
        conn.request("POST", "/text/analytics/v2.0/sentiment?%s" % params, json.dumps(body), headers)  # send real JSON, not a dict repr
response = conn.getresponse()
data = response.read()
print(data) # score is on a scale from 0 to 1, with 0 being the most negative sentiment and 1 being the most positive sentiment. Includes some metadata.
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def body_from_string_vectors(vector):
'''
Takes in a numpy vector of strings, each string representing a separate quote from someone.
'''
body_documents_list = []
for string in vector:
        body_documents_list.append({
            'language': 'en',
            'id': '1',
            'text': string
        })
    body = {
        'documents': body_documents_list
    }
    return body
def generate_headers():
api_key = script.get_api_key()
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': api_key
}
return headers
if __name__ == '__main__':
main()
| '''
Code adapted from https://westus.dev.cognitive.microsoft.com/docs/services/TextAnalytics.V2.0/operations/56f30ceeeda5650db055a3c9
'''
import http.client, urllib.request, urllib.parse, urllib.error
import json
import script
def main():
'''
Sends a single POST request with a test bit of text.
'''
    headers = generate_headers()
params = urllib.parse.urlencode({})
body = {
"documents": [
{
"language": "en",
"id": "1",
"text": "I had a wonderful experience! The rooms were wonderful and the staff were helpful."
}
]
}
try:
conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
        conn.request("POST", "/text/analytics/v2.0/sentiment?%s" % params, json.dumps(body), headers)
response = conn.getresponse()
data = response.read()
print(data) # score is on a scale from 0 to 1, with 0 being the most negative sentiment and 1 being the most positive sentiment. Includes some metadata.
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def generate_headers():
api_key = script.get_api_key()
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': api_key
}
return headers
if __name__ == '__main__':
main()
| mit | Python |
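The http.client plumbing above becomes much shorter with the requests library, whose json= parameter handles serialisation and headers for you. A sketch of the same POST, reusing the module's generate_headers and a body from the helper above (both assumed importable):

import requests

url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'
response = requests.post(url, headers=generate_headers(), json=body)
print(response.json())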
c5d68743bf6392ae5e4c6bd80ed6727bfebf77fd | Solve basic/string2.py | DevilFruit99/GooglePythonClass,DevilFruit99/GooglePythonClass
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
result=s
if len(s)>=3:
if s[-3:]=='ing':
result=result+'ly'
else :
result=result+'ing'
return result
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
notindex=s.find('not')
badindex=s.find('bad')
result=s
if ((notindex>-1) & (badindex>-1))&(notindex<badindex):
result=s.replace(s[notindex:badindex+3],'good')
return result
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
ahalf=int(round(len(a)/2.0))
bhalf=int(round(len(b)/2.0))
afront=a[:ahalf]
aback=a[ahalf:]
bfront=b[:bhalf]
bback=b[bhalf:]
return afront+bfront+aback+bback
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
return
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
return
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
return
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 | Python |
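One portability note on front_back above: the shebang pins Python 2.4, where round(2.5) rounds up, so int(round(len/2.0)) puts the extra character in the front half. Under Python 3's banker's rounding, round(2.5) is 2 and the odd-length case breaks. A version-independent sketch of the split point:

def front_half_len(n):
    # Extra char goes to the front half on any Python version.
    return (n + 1) // 2

assert front_half_len(5) == 3   # 'abcde' -> 'abc' + 'de'
assert front_half_len(4) == 2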
634aa9818875c15c3db0ac0763fc15889936b79e | Add a structure test macro to make test writing easier. | GoogleContainerTools/container-structure-test,GoogleContainerTools/container-structure-test | tests.bzl | tests.bzl | # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for running structure tests."""
load(
"@io_bazel_rules_docker//docker:docker.bzl",
"docker_build",
)
def _impl(ctx):
ext_run_location = ctx.executable._structure_test.short_path
config_location = ctx.file.config.short_path
load_location = ctx.executable.image.short_path
# docker_build rules always generate an image named 'bazel/$package:$name'.
image_name = "bazel/%s:%s" % (ctx.attr.image.label.package, ctx.attr.image.label.name)
# Generate a shell script to execute ext_run with the correct flags.
test_contents = """\
#!/bin/bash
set -ex
# Execute the image loader script.
%s
# Run the tests.
%s \
-i %s \
-c %s""" % (load_location, ext_run_location, image_name, config_location)
ctx.file_action(
output=ctx.outputs.executable,
content=test_contents
)
return struct(runfiles=ctx.runfiles(files = [
ctx.executable._structure_test,
ctx.executable.image,
ctx.file.config] +
ctx.attr.image.files.to_list() +
ctx.attr.image.data_runfiles.files.to_list()
),
)
structure_test = rule(
attrs = {
"_structure_test": attr.label(
default = Label("//structure_tests:ext_run"),
cfg = "target",
allow_files = True,
executable = True,
),
"image": attr.label(
mandatory = True,
executable = True,
cfg = "target",
),
"config": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
},
executable = True,
test = True,
implementation = _impl,
)
def structure_test_with_files(name, image, config, files):
"""A macro for including extra files inside an image before testing it."""
child_image_name = "%s.child_image" % name
docker_build(
name = child_image_name,
base = image,
files = files,
)
structure_test(
name = name,
image = child_image_name,
config = config,
)
| # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for running structure tests."""
def _impl(ctx):
ext_run_location = ctx.executable._structure_test.short_path
config_location = ctx.file.config.short_path
load_location = ctx.executable.image.short_path
# docker_build rules always generate an image named 'bazel/$package:$name'.
image_name = "bazel/%s:%s" % (ctx.attr.image.label.package, ctx.attr.image.label.name)
# Generate a shell script to execute ext_run with the correct flags.
test_contents = """\
#!/bin/bash
set -ex
# Execute the image loader script.
%s
# Run the tests.
%s \
-i %s \
-c %s""" % (load_location, ext_run_location, image_name, config_location)
ctx.file_action(
output=ctx.outputs.executable,
content=test_contents
)
return struct(runfiles=ctx.runfiles(files = [
ctx.executable._structure_test,
ctx.executable.image,
ctx.file.config] +
ctx.attr.image.files.to_list() +
ctx.attr.image.data_runfiles.files.to_list()
),
)
structure_test = rule(
attrs = {
"_structure_test": attr.label(
default = Label("//structure_tests:ext_run"),
cfg = "target",
allow_files = True,
executable = True,
),
"image": attr.label(
mandatory = True,
executable = True,
cfg = "target",
),
"config": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
},
executable = True,
test = True,
implementation = _impl,
)
| apache-2.0 | Python |
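Usage of the new macro from a BUILD file is not shown in the row above; a minimal sketch, with the load path, target names, and file names invented for illustration:

load("//:tests.bzl", "structure_test_with_files")

structure_test_with_files(
    name = "my_image_test",
    image = "//images:my_image",          # a docker_build target
    config = "testdata/structure.yaml",
    files = ["testdata/extra_file.txt"],  # layered on top before testing
)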
f5df42e6049b31b1c147da7160e0595e595c6dbc | Add logging to grade | edx/ease,edx/ease | grade.py | grade.py | #Grader called by pyxserver_wsgi.py
#Loads a grader file, which is a dict containing the prompt of the question,
#a feature extractor object, and a trained model.
#Extracts features and runs trained model on the submission to produce a final score.
#Correctness determined by ratio of score to max possible score.
#Requires aspell to be installed and added to the path.
import sys
import pickle
import os
import numpy
import logging
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from essay_set import EssaySet
#Imports needed to unpickle grader data
import feature_extractor
import sklearn.ensemble
log = logging.getLogger(__name__)
def grade(grader_path,submission,sandbox=None):
log.debug("Grader path: {0}\n Submission: {1}".format(grader_path,submission))
results = {'errors': [],'tests': [],'correct': False,'score': 0, 'feedback' : []}
#Try to find and load the model file
try:
grader_data=pickle.load(file(grader_path,"r"))
except:
results['errors'].append("Could not find a valid model file.")
grader_set=EssaySet(type="test")
#Try to add essays to essay set object
try:
grader_set.add_essay(str(submission),0)
grader_set.update_prompt(str(grader_data['prompt']))
except:
results['errors'].append("Essay could not be added to essay set:{0}".format(submission))
#Try to extract features from submission and assign score via the model
try:
grader_feats=grader_data['extractor'].gen_feats(grader_set)
results['feedback']=grader_data['extractor'].gen_feedback(grader_set)
results['score']=int(grader_data['model'].predict(grader_feats)[0])
    except:
results['errors'].append("Could not extract features and score essay.")
#Determine maximum score and correctness of response
max_score=numpy.max(grader_data['model'].classes_)
if results['score']/float(max_score) >= .66:
results['correct']=True
else:
results['correct']=False
return results
| #Grader called by pyxserver_wsgi.py
#Loads a grader file, which is a dict containing the prompt of the question,
#a feature extractor object, and a trained model.
#Extracts features and runs trained model on the submission to produce a final score.
#Correctness determined by ratio of score to max possible score.
#Requires aspell to be installed and added to the path.
import sys
import pickle
import os
import numpy
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from essay_set import EssaySet
#Imports needed to unpickle grader data
import feature_extractor
import sklearn.ensemble
def grade(grader_path,submission,sandbox=None):
results = {'errors': [],'tests': [],'correct': False,'score': 0, 'feedback' : []}
#Try to find and load the model file
try:
grader_data=pickle.load(file(grader_path,"r"))
except:
results['errors'].append("Could not find a valid model file.")
grader_set=EssaySet(type="test")
#Try to add essays to essay set object
try:
grader_set.add_essay(str(submission),0)
grader_set.update_prompt(str(grader_data['prompt']))
except:
results['errors'].append("Essay could not be added to essay set:{0}".format(submission))
#Try to extract features from submission and assign score via the model
try:
grader_feats=grader_data['extractor'].gen_feats(grader_set)
results['feedback']=grader_data['extractor'].gen_feedback(grader_set)
results['score']=int(grader_data['model'].predict(grader_feats)[0])
except:
results['errors'].append("Could not extract features and score essay.")
#Determine maximum score and correctness of response
max_score=numpy.max(grader_data['model'].classes_)
if results['score']/float(max_score) >= .66:
results['correct']=True
else:
results['correct']=False
return results
| agpl-3.0 | Python |
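The log.debug call added above stays silent unless the embedding application configures the logging tree. A quick sketch for seeing it during a local run; the model path and submission text are placeholders:

import logging

import grade

logging.basicConfig(level=logging.DEBUG)
results = grade.grade('models/grader.p', 'This is my essay submission.')
print(results)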
f96989d067f6fd073d04f96bdf2ae314c9b02d49 | Use request helper function in LayersScraper | kshvmdn/uoft-scrapers,cobalt-uoft/uoft-scrapers,arkon/uoft-scrapers,g3wanghc/uoft-scrapers | uoftscrapers/scrapers/utils/layers.py | uoftscrapers/scrapers/utils/layers.py | import requests
import json
from . import Scraper
class LayersScraper:
"""A superclass for scraping Layers of the UofT Map.
Map is located at http://map.utoronto.ca
"""
host = 'http://map.utoronto.ca/'
@staticmethod
def get_layers_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.logger.info('Retrieving map layers for %s.' % campus.upper())
headers = {'Referer': LayersScraper.host}
data = Scraper.get('%s%s%s' % (
LayersScraper.host,
'data/map/',
campus
), headers=headers, json=True)
return data['layers']
@staticmethod
def get_value(entry, val, number=False):
"""Retrieve the desired value from the parsed response dictionary."""
if val in entry.keys():
return entry[val]
else:
return 0 if number else ''
| import requests
import json
from . import Scraper
class LayersScraper:
"""A superclass for scraping Layers of the UofT Map.
Map is located at http://map.utoronto.ca
"""
host = 'http://map.utoronto.ca/'
s = requests.Session()
@staticmethod
def get_layers_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.logger.info('Retrieving map layers for %s.' % campus.upper())
headers = {
'Referer': LayersScraper.host
}
html = LayersScraper.s.get('%s%s%s' % (
LayersScraper.host,
'data/map/',
campus
), headers=headers).text
data = json.loads(html)
return data['layers']
@staticmethod
def get_value(entry, val, number=False):
"""Retrieve the desired value from the parsed response dictionary."""
if val in entry.keys():
return entry[val]
else:
return 0 if number else ''
| mit | Python |
b747da4fe99372e53850a964f450c7b00a4d81c9 | Add node add/delete, edge del | jwarren116/data-structures | graph.py | graph.py |
class SimpleGraph(object):
"""This is a simple graph program that will allow us
to implement a graph data structure"""
def __init__(self, dict_graph={}):
self.dict_graph = dict_graph
def node(self):
'''return a list of all nodes in the graph'''
return list(self.dict_graph.keys())
def edges(self):
'''return a list of all edges in the graph'''
return [(n1, n2) for n1 in self.dict_graph for n2 in self.dict_graph[n1]]
def add_node(self, n):
'''adds a new node 'n' to the graph'''
if n not in self.dict_graph:
self.dict_graph[n] = []
def add_edge(self, n1, n2):
'''adds a new edge to the graph connecting 'n1' and 'n2',
if either n1 or n2 are not already present in the graph,
they should be added.'''
if n1 in self.dict_graph:
self.dict_graph[n1].append(n2)
else:
self.dict_graph[n1] = [n2]
def del_node(self, n):
'''deletes the node 'n' from the graph,
raises an error if no such node exists'''
try:
del self.dict_graph[n]
except KeyError:
raise ValueError('That node does not exist')
def del_edge(self, n1, n2):
'''deletes the edge connecting 'n1' and 'n2' from the graph,
raises an error if no such edge exists'''
try:
self.dict_graph[n1].remove(n2)
except ValueError:
raise ValueError('That edge does not exist')
def has_node(self, n):
'''True if node 'n' is contained in the graph, False if not.'''
return n in self.dict_graph
def neighbors(self, n):
'''returns the list of all nodes connected to 'n' by edges,
raises an error if n is not in graph'''
def adjacent(self, n1, n2):
'''returns True if there is an edge connecting n1 and n2, False if not,
raises an error if either of the supplied nodes are not in g'''
pass
|
class SimpleGraph(object):
"""This is a simple graph program that will allow us
to implement a graph data structure"""
def __init__(self, dict_graph={}):
self.dict_graph = dict_graph
def node(self):
'''return a list of all nodes in the graph'''
return list(self.dict_graph.keys())
def edges(self):
'''return a list of all edges in the graph'''
return [(n1, n2) for n1 in self.dict_graph for n2 in self.dict_graph[n1]]
def add_node(self, n):
'''adds a new node 'n' to the graph'''
pass
def add_edge(self, n1, n2):
'''adds a new edge to the graph connecting 'n1' and 'n2',
if either n1 or n2 are not already present in the graph,
they should be added.'''
edge = set(edge)
(n1, n2) = tuple(edge)
if n1 in self.dict_graph:
self.dict_graph[n1].append(n2)
else:
self.dict_graph[n1] = [n2]
def del_node(self, n):
'''deletes the node 'n' from the graph,
raises an error if no such node exists'''
pass
def del_edge(self, n1, n2):
'''deletes the edge connecting 'n1' and 'n2' from the graph,
raises an error if no such edge exists'''
pass
def has_node(self, n):
'''True if node 'n' is contained in the graph, False if not.'''
pass
def neighbors(self, n):
'''returns the list of all nodes connected to 'n' by edges,
raises an error if n is not in g'''
pass
def adjacent(self, n1, n2):
'''returns True if there is an edge connecting n1 and n2, False if not,
raises an error if either of the supplied nodes are not in g'''
pass
| mit | Python |
4dbde6b8c33a85508ae9c375fef4d4caabfb4d15 | add function build_valid_filename | encorehu/nlp | nlp/extractors/base.py | nlp/extractors/base.py | import re
class BaseExtractor(object):
def build_valid_filename(self, text):
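# Replace punctuation and whitespace with spaces, then collapse the spaces
# into single hyphens to build a filesystem-friendly name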
dst = text
for x in '\t\n\':;",.[](){}~!@#$%^&*_+-=/<>?':
dst = dst.replace(x, ' ')
dst = dst.replace(' ', '-').replace('--', '-').replace('--', '-')
dst = dst.strip('-')
return dst
def find_between(self, text, s1, s2=None):
if not s1:
raise Exception('s1 is None!')
pos1 = text.find(s1)
if s2 and pos1 != -1:
pos2 = text.find(s2, pos1)
else:
pos2 = -1
if pos2 != -1 and pos2>pos1:
return text[pos1+len(s1):pos2]
else:
return ''
def _extract(self, html):
result =[]
return result
def extract(self, html):
return self._extract(html)
class BaseRegexExtractor(object):
regex = None
def _extract(self, html, regex=None):
result =[]
if regex is None:
regex = self.regex
if regex is None:
return result
p = re.compile(regex)
result = p.findall(html)
return result
def extract(self, html, regex=None):
return self._extract(html, regex=regex)
| import re
class BaseExtractor(object):
def _extract(self, html):
result =[]
return result
def find_between(self, text, s1, s2=None):
if not s1:
raise Exception('s1 is None!')
pos1 = text.find(s1)
if s2 and pos1 != -1:
pos2 = text.find(s2, pos1)
else:
pos2 = -1
if pos2 != -1 and pos2>pos1:
return text[pos1+len(s1):pos2]
else:
return ''
def extract(self, html):
return self._extract(html)
class BaseRegexExtractor(object):
regex = None
def _extract(self, html, regex=None):
result =[]
if regex is None:
regex = self.regex
if regex is None:
return result
p = re.compile(regex)
result = p.findall(html)
return result
def extract(self, html, regex=None):
return self._extract(html, regex=regex)
| mit | Python |
a2837ab778d39e66c6178dae34a3bebdc638061f | fix test | The-Compiler/pytest,nicoddemus/repo-test,ericdill/pytest,jaraco/pytest,etataurov/pytest,Haibo-Wang-ORG/pytest,vodik/pytest,lukas-bednar/pytest,doordash/pytest,chillbear/pytest,JonathonSonesen/pytest,pelme/pytest,bukzor/pytest,omarkohl/pytest,mbirtwell/pytest,inirudebwoy/pytest,vodik/pytest,vmalloc/dessert,untitaker/pytest,flub/pytest,nicoddemus/pytest,hackebrot/pytest,jb098/pytest,hpk42/pytest,mhils/pytest,rouge8/pytest,pfctdayelise/pytest,RonnyPfannschmidt/pytest,Bachmann1234/pytest,pytest-dev/pytest,MengJueM/pytest,ericdill/pytest,icemac/pytest,chiller/pytest,Carreau/pytest,alfredodeza/pytest,Bachmann1234/pytest,mhils/pytest,ionelmc/pytest,userzimmermann/pytest,hunse/pytest,skylarjhdownes/pytest,codewarrior0/pytest,Bjwebb/pytest,rmfitzpatrick/pytest,mdboom/pytest,ropez/pytest,MengJueM/pytest,ionelmc/pytest,The-Compiler/pytest,wfxiang08/pytest,icemac/pytest,Akasurde/pytest,Carreau/pytest,gabrielcnr/pytest,pelme/pytest,lukas-bednar/pytest,eli-b/pytest,ropez/pytest,abusalimov/pytest,ojake/pytest,doordash/pytest,Bjwebb/pytest,oleg-alexandrov/pytest,hunse/pytest,userzimmermann/pytest,davidszotten/pytest,txomon/pytest,jb098/pytest,chiller/pytest,rouge8/pytest,abusalimov/pytest,mdboom/pytest,untitaker/pytest,JonathonSonesen/pytest,nicoddemus/repo-test,Haibo-Wang-ORG/pytest,nicoddemus/pytest,Magicjarvis/py,MichaelAquilina/pytest,mbirtwell/pytest,bubenkoff/pytest,codewarrior0/pytest,gabrielcnr/pytest,markshao/pytest,ddboline/pytest,tareqalayan/pytest,omarkohl/pytest,wfxiang08/pytest,malinoff/pytest,inirudebwoy/pytest,takluyver/pytest,bukzor/pytest,tomviner/pytest,chillbear/pytest,takluyver/pytest,ojake/pytest,hpk42/pytest,oleg-alexandrov/pytest,bubenkoff/pytest,ghostsquad/pytest,pytest-dev/py,tomviner/pytest,ghostsquad/pytest,tgoodlet/pytest | py/test/testing/test_outcome.py | py/test/testing/test_outcome.py |
import py
import marshal
class TestRaises:
def test_raises(self):
py.test.raises(ValueError, "int('qwe')")
def test_raises_exec(self):
py.test.raises(ValueError, "a,x = []")
def test_raises_syntax_error(self):
py.test.raises(SyntaxError, "qwe qwe qwe")
def test_raises_function(self):
py.test.raises(ValueError, int, 'hello')
def test_importorskip():
from py.__.test.outcome import Skipped
try:
sys = py.test.importorskip("sys")
assert sys == py.std.sys
#path = py.test.importorskip("os.path")
#assert path == py.std.os.path
py.test.raises(Skipped, "py.test.importorskip('alskdj')")
py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
path = py.test.importorskip("py", minversion=".".join(py.__version__))
mod = py.std.new.module("hello123")
mod.__version__ = "1.3"
py.test.raises(Skipped, """
py.test.importorskip("hello123", minversion="5.0")
""")
except Skipped:
print py.code.ExceptionInfo()
py.test.fail("spurious skip")
def test_pytest_exit():
try:
py.test.exit("hello")
except:
excinfo = py.code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
|
import py
import marshal
class TestRaises:
def test_raises(self):
py.test.raises(ValueError, "int('qwe')")
def test_raises_exec(self):
py.test.raises(ValueError, "a,x = []")
def test_raises_syntax_error(self):
py.test.raises(SyntaxError, "qwe qwe qwe")
def test_raises_function(self):
py.test.raises(ValueError, int, 'hello')
def test_importorskip():
from py.__.test.outcome import Skipped
try:
sys = py.test.importorskip("sys")
assert sys == py.std.sys
#path = py.test.importorskip("os.path")
#assert path == py.std.os.path
py.test.raises(Skipped, "py.test.importorskip('alskdj')")
py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
path = py.test.importorskip("py", minversion=".".join(py.__version__))
py.test.raises(Skipped, """
py.test.importorskip("py", minversion="5.0")
""")
except Skipped:
print py.code.ExceptionInfo()
py.test.fail("spurious skip")
def test_pytest_exit():
try:
py.test.exit("hello")
except:
excinfo = py.code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
| mit | Python |
38dc94240fdecaa0676921d32f749ca31da94c49 | Add unit tests for similarity graph and density estimation utilities. | CoAxLab/DeBaCl | debacl/test/test_utils.py | debacl/test/test_utils.py | #####################################
## Brian P. Kent
## test_utils.py
## created: 20140529
## updated: 20140712
## Test the DeBaCl utility functions.
#####################################
import unittest
import numpy as np
import scipy.special as spspec
import sys
from debacl import utils as utl
class TestDensityEstimates(unittest.TestCase):
"""
Unit test class for density estimate functions in DeBaCl utilities.
"""
def setUp(self):
# Input parameters
self.r_k = 1.
self.n = 100
self.p = 2
self.k = 5.
# Correct density estimate
unit_ball_volume = np.pi**(self.p/2.) / spspec.gamma(1 + self.p/2.0)
normalizer = self.k / (self.n * unit_ball_volume)
self.fhat = normalizer / (self.r_k**self.p)
def test_knn_density(self):
fhat = utl.knn_density(self.r_k, self.n, self.p, self.k)
self.assertEqual(self.fhat, fhat)
class TestNeighborGraphs(unittest.TestCase):
"""
Unit test class for neighbor graphs.
"""
def setUp(self):
## Make data
n = 5
self.X = np.arange(5).reshape((n, 1))
## Graph parameters
self.k = 3
self.epsilon = 1.01
## Answers
self.knn = {
0: set([0, 1, 2]),
1: set([1, 0, 2]),
2: set([2, 1, 3]),
3: set([3, 2, 4]),
4: set([4, 3, 2])}
self.r_k = np.array([2., 1., 1., 1., 2.])
self.eps_nn = {
0: set([0, 1]),
1: set([1, 0, 2]),
2: set([2, 1, 3]),
3: set([3, 2, 4]),
4: set([4, 3])}
self.edge_list = [(0, 1), (1, 2), (2, 3), (3, 4)]
def test_knn_graph(self):
"""
Test construction of the k-nearest neighbor graph.
"""
knn, r_k = utl.knn_graph(self.X, k=self.k, method='brute-force')
np.testing.assert_array_equal(r_k, self.r_k)
for idx, neighbors in knn.iteritems():
self.assertSetEqual(self.knn[idx], set(neighbors))
def test_epsilon_graph(self):
"""
Test construction of the epsilon-nearest neighbor graph.
"""
eps_nn = utl.epsilon_graph(self.X, self.epsilon)
for idx, neighbors in eps_nn.iteritems():
self.assertSetEqual(self.eps_nn[idx], set(neighbors))
def test_type_conversions(self):
"""
Test conversion between graph representations.
"""
edge_list = utl.adjacency_to_edge_list(self.eps_nn, self_edge=False)
edge_list = sorted([tuple(sorted(x)) for x in edge_list])
for e, ans in zip(edge_list, self.edge_list):
self.assertTupleEqual(e, ans)
| #####################################
## Brian P. Kent
## test_utils.py
## created: 20140529
## updated: 20140529
## Test the DeBaCl utility functions.
#####################################
import unittest
import numpy as np
import scipy.special as spspec
import sys
sys.path.insert(0, '/home/brian/Projects/debacl/DeBaCl/')
from debacl import utils as utl
## Example from the unittest introduction
# class TestSequenceFunctions(unittest.TestCase):
# def setUp(self):
# self.seq = range(10)
# def test_choice(self):
# element = random.choice(self.seq)
# self.assertTrue(element in self.seq)
class TestDensityEstimates(unittest.TestCase):
"""
Unit test class for density estimate functions in DeBaCl utilities.
"""
def setUp(self):
# Input parameters
self.r_k = 1.
self.n = 100
self.p = 2
self.k = 5.
# Correct density estimate
unit_ball_volume = np.pi**(self.p/2.) / spspec.gamma(1 + self.p/2.0)
normalizer = self.k / (self.n * unit_ball_volume)
self.fhat = normalizer / (self.r_k**self.p)
def test_knn_density(self):
fhat = utl.knnDensity(self.r_k, self.n, self.p, self.k)
self.assertEqual(self.fhat, fhat)
class TestNeighborGraphs(unittest.TestCase):
"""
Unit test class for neighbor graphs.
"""
def setUp(self):
pass
def test_knn_graph(self):
pass
def test_epsilon_graph(self):
pass
def test_gaussian_graph(self):
pass
class TestTreeConstructionUtils(unittest.TestCase):
"""
Unit test class for stages of level set tree construction.
"""
def setUp(self):
pass
def test_density_grid(self):
pass
def test_background_assignment(self):
pass | bsd-3-clause | Python |
f0f1fb06896294f2657083aa7a077d852ea8bb4b | add sort order | dictoss/active-task-summary,dictoss/active-task-summary,dictoss/active-task-summary,dictoss/active-task-summary | ats/admin.py | ats/admin.py | from django.contrib import admin
from .models import ProjectWorker
class ProjectWorkerAdmin(admin.ModelAdmin):
list_filter = ['user', 'project', 'job']
ordering = ['user', 'project', 'job']
admin.site.register(ProjectWorker, ProjectWorkerAdmin)
| from django.contrib import admin
from .models import ProjectWorker
class ProjectWorkerAdmin(admin.ModelAdmin):
list_filter = ['user', 'project', 'job']
admin.site.register(ProjectWorker, ProjectWorkerAdmin)
| bsd-2-clause | Python |
6447899ec344d14fbb78b9a2bbbe8b75451f10f2 | Set isolation level to repeatable read | d120/pyophase,d120/pyophase,d120/pyophase,d120/pyophase | pyophase/settings_production.py | pyophase/settings_production.py | """
This is the settings file used in production.
First, it imports all default settings, then overrides respective ones.
Secrets are stored in and imported from an additional file, not set under version control.
"""
from pyophase import settings_secrets as secrets
from .settings import *
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
ALLOWED_HOSTS = ['.fachschaft.informatik.tu-darmstadt.de', '.d120.de']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'NAME': 'pyophase',
'USER': 'pyophase',
'PASSWORD': secrets.DB_PASSWORD,
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
'isolation_level': "repeatable read"
}
}
}
STATIC_URL = '/ophase/static/'
LOGIN_URL = '/ophase/accounts/login/'
MEDIA_URL = '/ophase/media/'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
ADMINS = (('pyophase-dev', '[email protected]'),)
SERVER_EMAIL = "[email protected]"
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mail.d120.de'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'pyophase'
EMAIL_HOST_PASSWORD = secrets.MAIL_PASSWORD
TUID_FORCE_SERVICE_URL = 'https://www.fachschaft.informatik.tu-darmstadt.de/ophase/sso/login/'
FILE_UPLOAD_PERMISSIONS = 0o644
| """
This is the settings file used in production.
First, it imports all default settings, then overrides respective ones.
Secrets are stored in and imported from an additional file, not set under version control.
"""
from pyophase import settings_secrets as secrets
from .settings import *
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
ALLOWED_HOSTS = ['.fachschaft.informatik.tu-darmstadt.de', '.d120.de']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'NAME': 'pyophase',
'USER': 'pyophase',
'PASSWORD': secrets.DB_PASSWORD,
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
}
}
}
STATIC_URL = '/ophase/static/'
LOGIN_URL = '/ophase/accounts/login/'
MEDIA_URL = '/ophase/media/'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
ADMINS = (('pyophase-dev', '[email protected]'),)
SERVER_EMAIL = "[email protected]"
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mail.d120.de'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'pyophase'
EMAIL_HOST_PASSWORD = secrets.MAIL_PASSWORD
TUID_FORCE_SERVICE_URL = 'https://www.fachschaft.informatik.tu-darmstadt.de/ophase/sso/login/'
FILE_UPLOAD_PERMISSIONS = 0o644
| agpl-3.0 | Python |
4839121f90934f7e52e51c05d052d27124680be7 | Remove confusing and useless "\n" | pyQode/pyqode.python,pyQode/pyqode.python,mmolero/pyqode.python,zwadar/pyqode.python | pyqode/python/backend/server.py | pyqode/python/backend/server.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main server script for a pyqode.python backend. You can directly use this
script in your application if it fits your needs or use it as a starting point
for writing your own server.
::
usage: server.py [-h] [-s [SYSPATH [SYSPATH ...]]] port
positional arguments:
port the local tcp port to use to run the server
optional arguments:
-h, --help show this help message and exit
-s [SYSPATH [SYSPATH ...]], --syspath [SYSPATH [SYSPATH ...]]
"""
import argparse
import sys
if __name__ == '__main__':
"""
Server process' entry point
"""
# setup argument parser and parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("port", help="the local tcp port to use to run "
"the server")
parser.add_argument('-s', '--syspath', nargs='*')
args = parser.parse_args()
# add user paths to sys.path
if args.syspath:
for path in args.syspath:
print('append path %s to sys.path' % path)
sys.path.append(path)
from pyqode.core import backend
from pyqode.python.backend.workers import JediCompletionProvider
# setup completion providers
backend.CodeCompletionWorker.providers.append(JediCompletionProvider())
backend.CodeCompletionWorker.providers.append(
backend.DocumentWordsProvider())
# starts the server
backend.serve_forever(args)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main server script for a pyqode.python backend. You can directly use this
script in your application if it fits your needs or use it as a starting point
for writing your own server.
::
usage: server.py [-h] [-s [SYSPATH [SYSPATH ...]]] port
positional arguments:
port the local tcp port to use to run the server
optional arguments:
-h, --help show this help message and exit
-s [SYSPATH [SYSPATH ...]], --syspath [SYSPATH [SYSPATH ...]]
"""
import argparse
import sys
if __name__ == '__main__':
"""
Server process' entry point
"""
# setup argument parser and parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("port", help="the local tcp port to use to run "
"the server")
parser.add_argument('-s', '--syspath', nargs='*')
args = parser.parse_args()
# add user paths to sys.path
if args.syspath:
for path in args.syspath:
print('append path %s to sys.path\n' % path)
sys.path.append(path)
from pyqode.core import backend
from pyqode.python.backend.workers import JediCompletionProvider
# setup completion providers
backend.CodeCompletionWorker.providers.append(JediCompletionProvider())
backend.CodeCompletionWorker.providers.append(
backend.DocumentWordsProvider())
# starts the server
backend.serve_forever(args)
| mit | Python |
31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | Update version | MoiTux/pyramid-request-log | pyramid_request_log/__init__.py | pyramid_request_log/__init__.py | from __future__ import absolute_import
from .config import includeme
__version__ = '0.7'
| from __future__ import absolute_import
from .config import includeme
__version__ = '0.6'
| mit | Python |
791b6720e489353bb5a2b35906dd88f558f26c33 | Handle NotImplementedError | Meisterschueler/ogn-python,glidernet/ogn-python,Meisterschueler/ogn-python,glidernet/ogn-python,Meisterschueler/ogn-python,glidernet/ogn-python,glidernet/ogn-python,Meisterschueler/ogn-python | ogn/gateway/process.py | ogn/gateway/process.py | import logging
from ogn.commands.dbutils import session
from ogn.model import AircraftBeacon, ReceiverBeacon, Location
from ogn.parser import parse, ParseError
logger = logging.getLogger(__name__)
def replace_lonlat_with_wkt(message):
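# Convert the parsed latitude/longitude pair into a WKT location string
# and drop the original fields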
location = Location(message['longitude'], message['latitude'])
message['location_wkt'] = location.to_wkt()
del message['latitude']
del message['longitude']
return message
def message_to_beacon(raw_message, reference_date):
beacon = None
if raw_message[0] != '#':
try:
message = parse(raw_message, reference_date)
if message['aprs_type'] == 'position':
message = replace_lonlat_with_wkt(message)
if message['beacon_type'] == 'aircraft_beacon':
beacon = AircraftBeacon(**message)
elif message['beacon_type'] == 'receiver_beacon':
beacon = ReceiverBeacon(**message)
else:
print("Whoops: what is this: {}".format(message))
except NotImplementedError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error(e)
except ParseError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error('Drop packet, {}'.format(e.message))
except TypeError as e:
logger.error('TypeError: {}'.format(raw_message))
return beacon
def process_beacon(raw_message, reference_date=None):
beacon = message_to_beacon(raw_message, reference_date)
if beacon is not None:
session.add(beacon)
session.commit()
logger.debug('Received message: {}'.format(raw_message))
| import logging
from ogn.commands.dbutils import session
from ogn.model import AircraftBeacon, ReceiverBeacon, Location
from ogn.parser import parse, ParseError
logger = logging.getLogger(__name__)
def replace_lonlat_with_wkt(message):
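# Convert the parsed latitude/longitude pair into a WKT location string
# and drop the original fields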
location = Location(message['longitude'], message['latitude'])
message['location_wkt'] = location.to_wkt()
del message['latitude']
del message['longitude']
return message
def message_to_beacon(raw_message, reference_date):
beacon = None
if raw_message[0] != '#':
try:
message = parse(raw_message, reference_date)
if message['aprs_type'] == 'position':
message = replace_lonlat_with_wkt(message)
if message['beacon_type'] == 'aircraft_beacon':
beacon = AircraftBeacon(**message)
elif message['beacon_type'] == 'receiver_beacon':
beacon = ReceiverBeacon(**message)
else:
print("Whoops: what is this: {}".format(message))
except ParseError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error('Drop packet, {}'.format(e.message))
except TypeError as e:
logger.error('TypeError: {}'.format(raw_message))
return beacon
def process_beacon(raw_message, reference_date=None):
beacon = message_to_beacon(raw_message, reference_date)
if beacon is not None:
session.add(beacon)
session.commit()
logger.debug('Received message: {}'.format(raw_message))
| agpl-3.0 | Python |
5c5f7981905c757cd5a750c2b2d09ea6bc6f1f28 | Add BoolTypeFactory class | thombashi/DataProperty | dataproperty/_factory.py | dataproperty/_factory.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
import abc
import six
from .converter import NopConverterCreator
from .converter import IntegerConverterCreator
from .converter import FloatConverterCreator
from .converter import BoolConverterCreator
from .converter import DateTimeConverterCreator
from ._type_checker_creator import NoneTypeCheckerCreator
from ._type_checker_creator import IntegerTypeCheckerCreator
from ._type_checker_creator import FloatTypeCheckerCreator
from ._type_checker_creator import BoolTypeCheckerCreator
from ._type_checker_creator import DateTimeTypeCheckerCreator
from ._type_checker_creator import InfinityCheckerCreator
from ._type_checker_creator import NanCheckerCreator
@six.add_metaclass(abc.ABCMeta)
class TypeConverterFactoryInterface(object):
"""
Abstract factory class of type converter.
"""
@abc.abstractproperty
def type_checker_factory(self): # pragma: no cover
pass
@abc.abstractproperty
def value_converter_factory(self): # pragma: no cover
pass
class NoneTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NoneTypeCheckerCreator()
@property
def value_converter_factory(self):
return NopConverterCreator()
class IntegerTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return IntegerTypeCheckerCreator()
@property
def value_converter_factory(self):
return IntegerConverterCreator()
class FloatTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return FloatTypeCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class DateTimeTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return DateTimeTypeCheckerCreator()
@property
def value_converter_factory(self):
return DateTimeConverterCreator()
class BoolTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return BoolTypeCheckerCreator()
@property
def value_converter_factory(self):
return BoolConverterCreator()
class InfinityTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return InfinityCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class NanTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NanCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""
from __future__ import absolute_import
import abc
import six
from .converter import NopConverterCreator
from .converter import IntegerConverterCreator
from .converter import FloatConverterCreator
from .converter import DateTimeConverterCreator
from ._type_checker_creator import NoneTypeCheckerCreator
from ._type_checker_creator import IntegerTypeCheckerCreator
from ._type_checker_creator import FloatTypeCheckerCreator
from ._type_checker_creator import DateTimeTypeCheckerCreator
from ._type_checker_creator import InfinityCheckerCreator
from ._type_checker_creator import NanCheckerCreator
@six.add_metaclass(abc.ABCMeta)
class TypeConverterFactoryInterface(object):
"""
Abstract factory class of type converter.
"""
@abc.abstractproperty
def type_checker_factory(self): # pragma: no cover
pass
@abc.abstractproperty
def value_converter_factory(self): # pragma: no cover
pass
class NoneTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NoneTypeCheckerCreator()
@property
def value_converter_factory(self):
return NopConverterCreator()
class IntegerTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return IntegerTypeCheckerCreator()
@property
def value_converter_factory(self):
return IntegerConverterCreator()
class FloatTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return FloatTypeCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class DateTimeTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return DateTimeTypeCheckerCreator()
@property
def value_converter_factory(self):
return DateTimeConverterCreator()
class InfinityTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return InfinityCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class NanTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NanCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
| mit | Python |
27df09cd98d9128d89d9d9d26ee0e89223fbd990 | document idlerpg's external dependencies | zordsdavini/qtile,zordsdavini/qtile | libqtile/widget/idlerpg.py | libqtile/widget/idlerpg.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from .generic_poll_text import GenPollUrl
import datetime
class IdleRPG(GenPollUrl):
"""
A widget for monitoring and displaying IdleRPG stats.
::
# display idlerpg stats for the player 'pants' on freenode's #idlerpg
widget.IdleRPG(url="http://xethron.lolhosting.net/xml.php?player=pants")
Widget requirements: xmltodict_.
.. _xmltodict: https://pypi.org/project/xmltodict/
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('format', 'IdleRPG: {online} TTL: {ttl}', 'Display format'),
('json', False, 'Not json :)'),
('xml', True, 'Is XML :)'),
]
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(IdleRPG.defaults)
def parse(self, body):
formatted = {}
for k, v in body['player'].items():
if k == 'ttl':
formatted[k] = str(datetime.timedelta(seconds=int(v)))
elif k == 'online':
formatted[k] = "online" if v == "1" else "offline"
else:
formatted[k] = v
return self.format.format(**formatted)
| # -*- coding: utf-8 -*-
# Copyright (c) 2016 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from .generic_poll_text import GenPollUrl
import datetime
class IdleRPG(GenPollUrl):
"""
A widget for monitoring and displaying IdleRPG stats.
::
# display idlerpg stats for the player 'pants' on freenode's #idlerpg
widget.IdleRPG(url="http://xethron.lolhosting.net/xml.php?player=pants")
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('format', 'IdleRPG: {online} TTL: {ttl}', 'Display format'),
('json', False, 'Not json :)'),
('xml', True, 'Is XML :)'),
]
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(IdleRPG.defaults)
def parse(self, body):
formatted = {}
for k, v in body['player'].items():
if k == 'ttl':
formatted[k] = str(datetime.timedelta(seconds=int(v)))
elif k == 'online':
formatted[k] = "online" if v == "1" else "offline"
else:
formatted[k] = v
return self.format.format(**formatted)
| mit | Python |
f9f3ca75e8151b1467fddffe390aee6a8fe00259 | Change configuration for wsgi settings | Dev-Cloud-Platform/Dev-Cloud,Dev-Cloud-Platform/Dev-Cloud,Dev-Cloud-Platform/Dev-Cloud,Dev-Cloud-Platform/Dev-Cloud,Dev-Cloud-Platform/Dev-Cloud | dev_cloud/web_service/wsgi.py | dev_cloud/web_service/wsgi.py | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""
WSGI config for web_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
from core.settings.config import ENVIROMENT_PATH
activate_this = ENVIROMENT_PATH
execfile(activate_this, dict(__file__=activate_this))
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.prod")
os.environ["CELERY_LOADER"] = "django"
import djcelery
djcelery.setup_loader()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""
WSGI config for web_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
activate_this = '../.pyenv/bin/activate_this.py '
execfile(activate_this, dict(__file__=activate_this))
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.prod")
os.environ["CELERY_LOADER"] = "django"
import djcelery
djcelery.setup_loader()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| apache-2.0 | Python |
f3724421fa859a5970e66353b6a311aa14b866ec | Add additional spacing to improve readability | boundary/tsi-lab,jdgwartney/tsi-lab,jdgwartney/tsi-lab,jdgwartney/tsi-lab,jdgwartney/tsi-lab,boundary/tsi-lab,boundary/tsi-lab,boundary/tsi-lab | labs/lab-5/ex5-1.log.py | labs/lab-5/ex5-1.log.py | #!/usr/bin/python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from log_utils import follow
if __name__ == '__main__':
# We are expecting two arguments
# The first is the name of the script
# The second is a path to a log file
if len(sys.argv) == 2:
# Open our file for reading
log_file = open(sys.argv[1], "r")
# Create our iterable function
log_lines = follow(log_file)
# Process the lines as they are appended
for line in log_lines:
# Strip out the newline and print the line
print("{0}".format(line.strip()))
else:
# Incorrect number of arguments
# Output usage to standard out
sys.stderr.write("usage: {0} <path>\n".format(os.path.basename(sys.argv[0])))
| #!/usr/bin/python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from log_utils import follow
if __name__ == '__main__':
# We are expecting two arguments
# The first is the name of the script
# The second is a path to a log file
if len(sys.argv) == 2:
# Open our file for reading
log_file = open(sys.argv[1], "r")
# Create our iterable function
log_lines = follow(log_file)
# Process the lines as they are appended
for line in log_lines:
# Strip out the newline and print the line
print("{0}".format(line.strip()))
else:
# Incorrect number of arguments
# Output usage to standard out
sys.stderr.write("usage: {0} <path>\n".format(os.path.basename(sys.argv[0])))
| apache-2.0 | Python |
d40f1fe493ec2c71d84ac84f5dc989c68de321ca | add version option | pinkavaj/batch_isp | batch_isp.py | batch_isp.py | import argparse
from parts import Parts
from pgm_error import PgmError
from operations import Operations
from serial_io import SerialIO
class BatchISP:
def __init__(self):
parser = argparse.ArgumentParser(
description='Linux remake of Atmel\'s BatchISP utility.')
parser.add_argument('-device', type=str, required=True,
help="Device type, ? for list.")
parser.add_argument('-port', type=str,
help="Port/interface to connect.")
parser.add_argument('-hardware', type=str,
help="{ RS232 | TODO }")
parser.add_argument('-version', action='version', version='%(prog)s 0.0.0')
parser.add_argument('-operation', type=str, required=True, nargs='*',
help="... ??? TODO")
self._args = parser.parse_args()
self._parser = parser
def _getIOByHardwareName(self, hardware):
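# Map the given hardware name to a concrete I/O backend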
if hardware == 'RS232':
if self._args.port is None:
raise PrgError("Port not specified for RS232")
return SerialIO(self._args.port)
else:
raise PrgError("Unsupported hardware: %s" % hardware)
def run(self):
if self._args.device == '?':
parts = Parts()
print([part.getName() for part in parts.list()])
return 0
try:
part = Parts().getPartByName(self._args.device)
if not self._args.hardware is None:
hw = self._args.hardware
else:
hw = part.listHardware()
if len(hw) != 1:
raise PrgError("Cannot determine hardware select one of: %s" % hw)
hw = hw[0]
io = self._getIOByHardwareName(hw)
operations = Operations(part, io)
for op in self._args.operation:
print(op)
except PgmError as e:
print(e)
exit(1)
| import argparse
from parts import Parts
from pgm_error import PgmError
from operations import Operations
from serial_io import SerialIO
class BatchISP:
def __init__(self):
parser = argparse.ArgumentParser(
description='Linux remake of Atmel\'s BatchISP utility.')
parser.add_argument('-device', type=str, required=True,
help="Device type, ? for list.")
parser.add_argument('-port', type=str,
help="Port/interface to connect.")
parser.add_argument('-hardware', type=str,
help="{ RS232 | TODO }")
parser.add_argument('-operation', type=str, required=True, nargs='*',
help="... ??? TODO")
self._args = parser.parse_args()
self._parser = parser
def _getIOByHardwareName(self, hardware):
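# Map the given hardware name to a concrete I/O backend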
if hardware == 'RS232':
if self._args.port is None:
raise PrgError("Port not specified for RS232")
return SerialIO(self._args.port)
else:
raise PrgError("Unsupported hardware: %s" % hardware)
def run(self):
if self._args.device == '?':
parts = Parts()
print([part.getName() for part in parts.list()])
return 0
try:
part = Parts().getPartByName(self._args.device)
if not self._args.hardware is None:
hw = self._args.hardware
else:
hw = part.listHardware()
if len(hw) != 1:
raise PrgError("Cannot determine hardware select one of: %s" % hw)
hw = hw[0]
io = self._getIOByHardwareName(hw)
operations = Operations(part, io)
for op in self._args.operation:
print(op)
except PgmError as e:
print(e)
exit(1)
| apache-2.0 | Python |
dbf3af1de0bbbda178e5bbd1ca0473a83d8cb9b3 | test triggering travis | Lenijas/test-travisci,Lenijas/test-travisci,Lenijas/test-travisci | fabre_test.py | fabre_test.py | #!/usr/bin/env python
# coding=UTF-8
import sys
import pytest
sys.exit(0)
| #!/usr/bin/env python
# coding=UTF-8
import sys
sys.exit(0)
| bsd-3-clause | Python |
4cff5b7a14dfda786fef4a869e72095b7d9d83e4 | correct relative import, d'oh | SLACKHA/pyJac,kyleniemeyer/pyJac,SLACKHA/pyJac,kyleniemeyer/pyJac | pyjac/performance_tester/__main__.py | pyjac/performance_tester/__main__.py | import sys
import os
from . import performance_tester as pt
from argparse import ArgumentParser
def main(args=None):
if args is None:
# command line arguments
parser = ArgumentParser(description='performance_tester.py: '
'tests pyJac performance'
)
parser.add_argument('-w', '--working_directory',
type=str,
default='performance',
help='Directory storing the mechanisms / data.'
)
parser.add_argument('-uoo', '--use_old_opt',
action='store_true',
default=False,
required=False,
help='If True, allows performance_tester to use '
'any old optimization files found'
)
parser.add_argument('-nt', '--num_omp_threads',
type=int,
default=12,
required=False,
help='Number of threads to use for OpenMP '
'parallelization of the C codes.'
)
args = parser.parse_args()
pt.performance_tester(os.path.dirname(os.path.abspath(pt.__file__)),
args.working_directory,
args.use_old_opt, args.num_omp_threads
)
if __name__ == '__main__':
sys.exit(main())
| import sys
import os
import .performance_tester as pt
from argparse import ArgumentParser
def main(args=None):
if args is None:
# command line arguments
parser = ArgumentParser(description='performance_tester.py: '
'tests pyJac performance'
)
parser.add_argument('-w', '--working_directory',
type=str,
default='performance',
help='Directory storing the mechanisms / data.'
)
parser.add_argument('-uoo', '--use_old_opt',
action='store_true',
default=False,
required=False,
help='If True, allows performance_tester to use '
'any old optimization files found'
)
parser.add_argument('-nt', '--num_omp_threads',
type=int,
default=12,
required=False,
help='Number of threads to use for OpenMP '
'parallelization of the C codes.'
)
args = parser.parse_args()
pt.performance_tester(os.path.dirname(os.path.abspath(pt.__file__)),
args.working_directory,
args.use_old_opt, args.num_omp_threads
)
if __name__ == '__main__':
sys.exit(main())
| mit | Python |
7596de67f67f5bdc9350067a896dcd4b7b4c7650 | Stop requiring the path of the users file; only require the name. | hawkrives/gobbldygook,hawkrives/gobbldygook,hawkrives/gobbldygook | gobbldygook.py | gobbldygook.py | #!/usr/bin/env python3
import argparse, csv, os
from course import Course, all_courses, all_labs, getCourse
from student import Student
def argument_parse():
parser = argparse.ArgumentParser(description="This program works best if you give it some data. However, we have some example stuff to show you anyway.")
parser.add_argument('-l', "--load", default='example')
parser.add_argument('-f', "--find")
parser.add_argument("--demo")
parser.add_argument("--stress")
parser.add_argument("--debug")
return parser
def parse_filename(fname):
filename = fname.name
filename = filename.split('.')[0] # Remove the extension
filename = filename.split('/')[1] # Remove the path seperator
start_year, end_year, semester = filename.split(sep='-')
if semester == 's1':
semester = "fall"
elif semester == 's2':
semester = "spring"
elif semester == 'ss1':
semester = "summer session 1"
elif semester == 'ss2':
semester = "summer session 2"
return int(filename[0:4]), semester
def load_data(filename):
with open(filename) as infile:
year, semester = parse_filename(infile)
if year not in all_courses:
all_courses[year] = {}
if semester not in all_courses[year]:
all_courses[year][semester] = {}
infile.readline() # Remove the csv header line
csvfile = csv.reader(infile)
for row in csvfile:
tmp = Course(data=row)
if tmp.course_status == 'X':
pass
elif tmp.course_type == "Lab":
all_labs[tmp.id] = tmp
else:
all_courses[tmp.id] = tmp
all_courses[year][tmp.id] = tmp
all_courses[year][semester][tmp.id] = tmp
def read_data():
path = 'data/'
for filename in os.listdir(path):
if filename[0] != '.':
load_data(path + filename)
def main():
parser = argument_parse()
args = parser.parse_args()
read_data()
user = Student(filename='users/'+args.load+'.yaml')
print(user)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse, csv, os
from course import Course, all_courses, all_labs, getCourse
from student import Student
def argument_parse():
parser = argparse.ArgumentParser(description="This program works best if you give it some data. However, we have some example stuff to show you anyway.")
parser.add_argument('-l', "--load", default='users/example.yaml')
parser.add_argument('-f', "--find")
parser.add_argument("--demo")
parser.add_argument("--stress")
parser.add_argument("--debug")
return parser
def parse_filename(fname):
filename = fname.name
filename = filename.split('.')[0] # Remove the extension
filename = filename.split('/')[1] # Remove the path seperator
start_year, end_year, semester = filename.split(sep='-')
if semester == 's1':
semester = "fall"
elif semester == 's2':
semester = "spring"
elif semester == 'ss1':
semester = "summer session 1"
elif semester == 'ss2':
semester = "summer session 2"
return int(filename[0:4]), semester
def load_data(filename):
with open(filename) as infile:
year, semester = parse_filename(infile)
if year not in all_courses:
all_courses[year] = {}
if semester not in all_courses[year]:
all_courses[year][semester] = {}
infile.readline() # Remove the csv header line
csvfile = csv.reader(infile)
for row in csvfile:
tmp = Course(data=row)
if tmp.course_status == 'X':
pass
elif tmp.course_type == "Lab":
all_labs[tmp.id] = tmp
else:
all_courses[tmp.id] = tmp
all_courses[year][tmp.id] = tmp
all_courses[year][semester][tmp.id] = tmp
def read_data():
path = 'data/'
for filename in os.listdir(path):
if filename[0] != '.':
load_data(path + filename)
def main():
parser = argument_parse()
args = parser.parse_args()
read_data()
user = Student(filename=args.load)
print(user)
if __name__ == '__main__':
main()
| agpl-3.0 | Python |
d73c6addf064ba7b78c4874a6affc6bac6dfee1f | Add image feature detection | grenmester/hunt-master,grenmester/hunt-master,grenmester/hunt-master,grenmester/hunt-master,grenmester/hunt-master | image.py | image.py | from __future__ import division
import numpy as np
import cv2
import time, io
from matplotlib import pyplot as plt
from google.cloud import vision
MIN_MATCH_COUNT = 200
# only using match count right now
MIN_MATCH_RATIO = .2
def compare(img1_name, img2_name):
"""
Return whether img1 and img2 differ significantly
Determined through feature matching and comparison
(the number of good matches must be greater than MIN_MATCH_COUNT)
"""
img1 = cv2.imread(img1_name)
img2 = cv2.imread(img2_name)
# Initiate SURF detector
sift = cv2.xfeatures2d.SURF_create()
# find the keypoints and descriptors with SURF
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# count the number of good matches
num_good_matches = 0
for m,n in matches:
if m.distance < 0.7*n.distance:
num_good_matches += 1
print('Number of good features matched: ' + str(num_good_matches))
return num_good_matches>MIN_MATCH_COUNT
def features(img_path,labels=True,logos=True,landmarks=True):
"""
Returns a list of features from an image
Optionally pass a certainty_threshold value to give a threshold in [0,1] on how certain
Google's identification is.
"""
v_c = vision.Client()
with io.open(img_path, 'rb') as image_file:
content = image_file.read()
img = v_c.image(content=content)
output = []
if labels:
labels = [label.description for label in img.detect_labels()]
output += labels
if logos:
logos = [logo.description for logo in img.detect_logos()]
output += logos
if landmarks:
landmarks = [landmark.description for landmark in img.detect_landmarks()]
output += landmarks
return output
| from __future__ import division
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 200
# only using match count right now
MIN_MATCH_RATIO = .2
def compare(img1_name, img2_name):
"""
Return whether img1 and img2 differ significantly
Determined through feature matching and comparison
(the number of good matches must be greater than MIN_MATCH_COUNT)
"""
img1 = cv2.imread(img1_name)
img2 = cv2.imread(img2_name)
# Initiate SURF detector
sift = cv2.xfeatures2d.SURF_create()
# find the keypoints and descriptors with SURF
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# count the number of good matches
num_good_matches = 0
for m,n in matches:
if m.distance < 0.7*n.distance:
num_good_matches += 1
print('Number of good features matched: ' + str(num_good_matches))
return num_good_matches>MIN_MATCH_COUNT
| mit | Python |
377aef17394b2dabd6db7439d3cfcd4e0d54a3c2 | Allow codata tests to be run as script. | jasonmccampbell/scipy-refactor,scipy/scipy-svn,lesserwhirls/scipy-cwt,jasonmccampbell/scipy-refactor,jasonmccampbell/scipy-refactor,lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,lesserwhirls/scipy-cwt,jasonmccampbell/scipy-refactor,scipy/scipy-svn,scipy/scipy-svn,scipy/scipy-svn | scipy/constants/tests/test_codata.py | scipy/constants/tests/test_codata.py |
import warnings
from scipy.constants import find
from numpy.testing import assert_equal, run_module_suite
def test_find():
warnings.simplefilter('ignore', DeprecationWarning)
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of momentum',
'natural unit of momentum in MeV/c',
'natural unit of length',
'natural unit of time']))
if __name__ == "__main__":
run_module_suite()
|
import warnings
from scipy.constants import find
from numpy.testing import assert_equal
def test_find():
warnings.simplefilter('ignore', DeprecationWarning)
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of momentum',
'natural unit of momentum in MeV/c',
'natural unit of length',
'natural unit of time']))
| bsd-3-clause | Python |
e4fbd6f8e13861053a4a29c776ae24b934639fa5 | fix ports on yaml script | DaMSL/K3,DaMSL/K3,yliu120/K3 | tools/scripts/mosaic/gen_yaml.py | tools/scripts/mosaic/gen_yaml.py | #!/usr/bin/env python3
#
# Create a yaml file for running a mosaic file
# Note: *requires pyyaml*
import argparse
import yaml
def address(port):
return ['127.0.0.1', port]
def create_peers(peers):
res = []
for p in peers:
res += [{'addr':address(p[1])}]
return res
def entity(role, port, peers):
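# Build a single peer's config entry: its role, its own address, and the full peer list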
return {'role':role, 'me':address(port), 'peers':create_peers(peers)}
def create_file(num_switches, num_nodes):
peers = []
peers += [('master', 40000)]
peers += [('timer', 40001)]
switch_ports = 50001
for i in range(num_switches):
peers += [('switch', switch_ports + i)]
node_ports = 60001
for i in range(num_nodes):
peers += [('node', node_ports + i)]
# convert to dictionaries
peers2 = []
for p in peers:
peers2 += [entity(p[0], p[1], peers)]
# dump out
print("---")
for i, p in enumerate(peers2):
print(yaml.dump(p, default_flow_style=True))
if i < len(peers2) - 1:
print("---")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--switches", type=int, help="number of switches", dest="num_switches", default=1)
parser.add_argument("-n", "--nodes", type=int, help="number of nodes", dest="num_nodes", default=1)
args = parser.parse_args()
create_file(args.num_switches, args.num_nodes)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
#
# Create a yaml file for running a mosaic file
# Note: *requires pyyaml*
import argparse
import yaml
def address(port):
return ['127.0.0.1', port]
def create_peers(peers):
res = []
for p in peers:
res += [{'addr':address(p[1])}]
return res
def entity(role, port, peers):
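# Build a single peer's config entry: its role, its own address, and the full peer list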
return {'role':role, 'me':address(port), 'peers':create_peers(peers)}
def create_file(num_switches, num_nodes):
peers = []
peers += [('master', 40000)]
peers += [('timer', 40001)]
switch_ports = 50000
for i in range(num_switches):
peers += [('switch', switch_ports + i)]
node_ports = 60000
for i in range(num_nodes):
peers += [('node', node_ports + i)]
# convert to dictionaries
peers2 = []
for p in peers:
peers2 += [entity(p[0], p[1], peers)]
# dump out
print("---")
for i, p in enumerate(peers2):
print(yaml.dump(p, default_flow_style=True))
if i < len(peers2) - 1:
print("---")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--switches", type=int, help="number of switches", dest="num_switches", default=1)
parser.add_argument("-n", "--nodes", type=int, help="number of nodes", dest="num_nodes", default=1)
args = parser.parse_args()
create_file(args.num_switches, args.num_nodes)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
8e664b417d978d040d780dc252418fce087c47f4 | Fix version option | ARMmbed/greentea | src/htrun/htrun.py | src/htrun/htrun.py | #
# Copyright (c) 2021-2022 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Greentea Host Tests Runner."""
from multiprocessing import freeze_support
from htrun import init_host_test_cli_params
from htrun.host_tests_runner.host_test_default import DefaultTestSelector
from htrun.host_tests_toolbox.host_functional import handle_send_break_cmd
def main():
"""Drive command line tool 'htrun' which is using DefaultTestSelector.
1. Create DefaultTestSelector object and pass command line parameters.
2. Call default test execution function run() to start test instrumentation.
"""
freeze_support()
result = 0
cli_params = init_host_test_cli_params()
if cli_params.version: # --version
import pkg_resources # part of setuptools
version = pkg_resources.require("greentea-host")[0].version
print(version)
elif cli_params.send_break_cmd: # -b with -p PORT (and optional -r RESET_TYPE)
handle_send_break_cmd(
port=cli_params.port,
disk=cli_params.disk,
reset_type=cli_params.forced_reset_type,
baudrate=cli_params.baud_rate,
verbose=cli_params.verbose,
)
else:
test_selector = DefaultTestSelector(cli_params)
try:
result = test_selector.execute()
# Ensure we don't return a negative value
if result < 0 or result > 255:
result = 1
except (KeyboardInterrupt, SystemExit):
test_selector.finish()
result = 1
raise
else:
test_selector.finish()
return result
| #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Greentea Host Tests Runner."""
from multiprocessing import freeze_support
from htrun import init_host_test_cli_params
from htrun.host_tests_runner.host_test_default import DefaultTestSelector
from htrun.host_tests_toolbox.host_functional import handle_send_break_cmd
def main():
"""Drive command line tool 'htrun' which is using DefaultTestSelector.
1. Create DefaultTestSelector object and pass command line parameters.
2. Call default test execution function run() to start test instrumentation.
"""
freeze_support()
result = 0
cli_params = init_host_test_cli_params()
if cli_params.version: # --version
import pkg_resources # part of setuptools
version = pkg_resources.require("htrun")[0].version
print(version)
elif cli_params.send_break_cmd: # -b with -p PORT (and optional -r RESET_TYPE)
handle_send_break_cmd(
port=cli_params.port,
disk=cli_params.disk,
reset_type=cli_params.forced_reset_type,
baudrate=cli_params.baud_rate,
verbose=cli_params.verbose,
)
else:
test_selector = DefaultTestSelector(cli_params)
try:
result = test_selector.execute()
# Ensure we don't return a negative value
if result < 0 or result > 255:
result = 1
except (KeyboardInterrupt, SystemExit):
test_selector.finish()
result = 1
raise
else:
test_selector.finish()
return result
| apache-2.0 | Python |
625c70580770b5bb00a64d15e14d15c623db21ee | Update urls.py | bdang2012/taiga-back-casting,Tigerwhit4/taiga-back,xdevelsistemas/taiga-back-community,gam-phon/taiga-back,19kestier/taiga-back,crr0004/taiga-back,taigaio/taiga-back,coopsource/taiga-back,gam-phon/taiga-back,rajiteh/taiga-back,Rademade/taiga-back,astagi/taiga-back,coopsource/taiga-back,EvgeneOskin/taiga-back,coopsource/taiga-back,frt-arch/taiga-back,CoolCloud/taiga-back,dayatz/taiga-back,dayatz/taiga-back,crr0004/taiga-back,gam-phon/taiga-back,Zaneh-/bearded-tribble-back,Rademade/taiga-back,Rademade/taiga-back,dycodedev/taiga-back,CoolCloud/taiga-back,astronaut1712/taiga-back,CoolCloud/taiga-back,rajiteh/taiga-back,gauravjns/taiga-back,seanchen/taiga-back,WALR/taiga-back,WALR/taiga-back,frt-arch/taiga-back,astagi/taiga-back,jeffdwyatt/taiga-back,Tigerwhit4/taiga-back,bdang2012/taiga-back-casting,jeffdwyatt/taiga-back,rajiteh/taiga-back,dycodedev/taiga-back,jeffdwyatt/taiga-back,seanchen/taiga-back,obimod/taiga-back,WALR/taiga-back,Rademade/taiga-back,astronaut1712/taiga-back,crr0004/taiga-back,CMLL/taiga-back,obimod/taiga-back,Tigerwhit4/taiga-back,coopsource/taiga-back,forging2012/taiga-back,forging2012/taiga-back,joshisa/taiga-back,gauravjns/taiga-back,seanchen/taiga-back,bdang2012/taiga-back-casting,xdevelsistemas/taiga-back-community,CMLL/taiga-back,CMLL/taiga-back,astronaut1712/taiga-back,dycodedev/taiga-back,rajiteh/taiga-back,forging2012/taiga-back,CMLL/taiga-back,taigaio/taiga-back,Rademade/taiga-back,dycodedev/taiga-back,joshisa/taiga-back,Zaneh-/bearded-tribble-back,dayatz/taiga-back,crr0004/taiga-back,bdang2012/taiga-back-casting,jeffdwyatt/taiga-back,astagi/taiga-back,EvgeneOskin/taiga-back,EvgeneOskin/taiga-back,seanchen/taiga-back,Tigerwhit4/taiga-back,taigaio/taiga-back,joshisa/taiga-back,gauravjns/taiga-back,obimod/taiga-back,xdevelsistemas/taiga-back-community,Zaneh-/bearded-tribble-back,forging2012/taiga-back,gauravjns/taiga-back,CoolCloud/taiga-back,gam-phon/taiga-back,astagi/taiga-back,frt-arch/taiga-back,joshisa/taiga-back,astronaut1712/taiga-back,19kestier/taiga-back,obimod/taiga-back,EvgeneOskin/taiga-back,19kestier/taiga-back,WALR/taiga-back | taiga/base/utils/urls.py | taiga/base/utils/urls.py | import django_sites as sites
URL_TEMPLATE = "{scheme}://{domain}/{path}"
def build_url(path, scheme="http", domain="localhost"):
return URL_TEMPLATE.format(scheme=scheme, domain=domain, path=path.lstrip("/"))
def is_absolute_url(path):
"""Test wether or not `path` is absolute url."""
return path.startswith("http") or path.startswith("https")
def get_absolute_url(path):
"""Return a path as an absolute url."""
if is_absolute_url(path):
return path
site = sites.get_current()
return build_url(path, scheme=site.scheme, domain=site.domain)
| import django_sites as sites
URL_TEMPLATE = "{scheme}://{domain}/{path}"
def build_url(path, scheme="http", domain="localhost"):
return URL_TEMPLATE.format(scheme=scheme, domain=domain, path=path.lstrip("/"))
def is_absolute_url(path):
"""Test wether or not `path` is absolute url."""
return path.startswith("http")
def get_absolute_url(path):
"""Return a path as an absolute url."""
if is_absolute_url(path):
return path
site = sites.get_current()
return build_url(path, scheme=site.scheme, domain=site.domain)
| agpl-3.0 | Python |
1656cbd6b62690017af810e795b8a23b3907a1fa | bump 1.0.2 | meng89/epubuilder,meng89/epubuilder,meng89/epubuilder | epubuilder/version.py | epubuilder/version.py | # coding=utf-8
__version__ = '1.0.2'
| # coding=utf-8
__version__ = '1.0.1'
| mit | Python |
c8fdcf888f6c34e8396f11b3e7ab3088af59abb6 | Add tests for slice intersection and sanitization. | RaoUmer/distarray,enthought/distarray,enthought/distarray,RaoUmer/distarray | distarray/tests/test_utils.py | distarray/tests/test_utils.py | import unittest
from distarray import utils
from numpy import arange
from numpy.testing import assert_array_equal
class TestMultPartitions(unittest.TestCase):
"""
Test the multiplicative parition code.
"""
def test_both_methods(self):
"""
Do the two methods of computing the multiplicative partitions agree?
"""
for s in [2, 3]:
for n in range(2, 512):
self.assertEqual(utils.mult_partitions(n, s),
utils.create_factors(n, s))
class TestSanitizeIndices(unittest.TestCase):
def test_point(self):
itype, inds = utils.sanitize_indices(1)
self.assertEqual(itype, 'point')
self.assertEqual(inds, (1,))
def test_slice(self):
itype, inds = utils.sanitize_indices(slice(1,10))
self.assertEqual(itype, 'view')
self.assertEqual(inds, (slice(1,10),))
def test_mixed(self):
provided = (5, 3, slice(7, 10, 2), 99, slice(1,10))
itype, inds = utils.sanitize_indices(provided)
self.assertEqual(itype, 'view')
self.assertEqual(inds, provided)
class TestSliceIntersection(unittest.TestCase):
def test_containment(self):
arr = arange(20)
slc = utils.slice_intersection(slice(1,10), slice(2, 4))
assert_array_equal(arr[slc], arr[slice(2, 4, 1)])
def test_overlapping(self):
arr = arange(20)
slc = utils.slice_intersection(slice(1,10), slice(4, 15))
assert_array_equal(arr[slc], arr[slice(4, 10)])
def test_disjoint(self):
arr = arange(20)
slc = utils.slice_intersection(slice(1,10), slice(11, 15))
assert_array_equal(arr[slc], arr[slice(11, 10)])
if __name__ == '__main__':
unittest.main(verbosity=2)
| import unittest
from distarray import utils
class TestMultPartitions(unittest.TestCase):
"""
Test the multiplicative parition code.
"""
def test_both_methods(self):
"""
Do the two methods of computing the multiplicative partitions agree?
"""
for s in [2, 3]:
for n in range(2, 512):
self.assertEqual(utils.mult_partitions(n, s),
utils.create_factors(n, s))
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | Python |
673b123b147b99f49357b02c227b1d34ae653485 | Set up some realistic defaults. | chrisnorman7/pyrts,chrisnorman7/pyrts,chrisnorman7/pyrts | bootstrap.py | bootstrap.py | """This script will bootstrap the database to a minimal level for usage.
You will be left with the following buildings:
* Town Hall (homely).
* Farm (requires Town Hall).
* Stable (requires Farm)
You will be left with the following land features:
* Mine (provides gold)
* Quarry (provides stone)
* Lake (provides water)
* Forest (provides wood)
* Field (provides food)
You will be left with the following mobiles:
* Peasant (provided by Town Hall).
Peasants can build town halls and farms, and can exploit wood, and gold.
* Farmer (provided by Farm)
Farmers can build farms and stables, and can exploit food, water, and wood.
* Scout (provided by Stable)
"""
import os.path
from server.db import BuildingType, FeatureType, MobileType, dump
from server.db.util import _filename as fn
def main():
if os.path.isfile(fn):
return print('Refusing to continue with existing database file.')
town_hall = BuildingType(
name='Town Hall', homely=True, gold=15, wood=30, stone=10
)
farm = BuildingType(
name='Farm', depends=town_hall, gold=5, wood=5, stone=1
)
stable = BuildingType(
name='Stable', depends=farm, wood=30, stone=15, gold=30
)
for thing in (town_hall, farm, stable):
thing.save()
peasant = MobileType(name='Peasant', wood=1, gold=1)
farmer = MobileType(name='Farmer', food=1, water=1)
scout = MobileType(name='Scout', stone=1)
for thing in (peasant, farmer, scout):
thing.save()
peasant.add_building(town_hall)
peasant.add_building(farm)
town_hall.add_recruit(peasant, food=1, water=1, gold=3).save()
farmer.add_building(farm)
farm.add_recruit(farmer, food=2, gold=4, water=2)
stable.add_recruit(scout, food=4, water=5, gold=6)
FeatureType(name='Mine', gold=1).save()
FeatureType(name='Quarry', stone=1).save()
FeatureType(name='Lake', water=1).save()
FeatureType(name='Forest', wood=1).save()
FeatureType(name='Field', food=1).save()
dump()
print('Done.')
if __name__ == '__main__':
main()
| """This script will bootstrap the database to a minimal level for usage.
You will be left with the following buildings:
* Town Hall (homely).
* Farm (requires Town Hall).
* Stable (requires Farm)
You will be left with the following land features:
* Mine (provides gold)
* Quarry (provides stone)
* Lake (provides water)
* Forest (provides wood)
* Field (provides food)
You will be left with the following mobiles:
* Peasant (provided by Town Hall).
Peasants can build town halls and farms, and can exploit wood, and gold.
* Farmer (provided by Farm)
Farmers can build farms and stables, and can exploit food, water, and wood.
* Scout (provided by Stable)
"""
import os.path
from server.db import BuildingType, FeatureType, MobileType, dump
from server.db.util import _filename as fn
def main():
if os.path.isfile(fn):
return print('Refusing to continue with existing database file.')
town_hall = BuildingType(name='Town Hall', homely=True)
farm = BuildingType(name='Farm', depends=town_hall)
stable = BuildingType(name='Stable', depends=farm)
for thing in (town_hall, farm, stable):
thing.save()
peasant = MobileType(name='Peasant', wood=1, gold=1)
peasant.save()
for t in (town_hall, farm):
t.builders.append(peasant)
town_hall.add_recruit(peasant, food=1, water=1, gold=1).save()
farmer = MobileType(name='Farmer', food=1, water=1, wood=1)
farmer.save()
for t in (farm, stable):
t.builders.append(farmer)
farm.add_recruit(farmer, food=2, gold=2, water=2)
scout = MobileType(name='Scout', stone=1)
scout.save()
stable.add_recruit(scout, food=4, water=5, gold=3)
FeatureType(name='Mine', gold=1).save()
FeatureType(name='Quarry', stone=1).save()
FeatureType(name='Lake', water=1).save()
FeatureType(name='Forest', wood=1).save()
FeatureType(name='Field', food=1).save()
dump()
print('Done.')
if __name__ == '__main__':
main()
| mpl-2.0 | Python |
208b6cf99d90494df9a0f6d66a0ea3669ff5fe66 | remove get, add ls and rm | sliceofcode/dogbot,slice/dogbot,slice/dogbot,slice/dogbot,sliceofcode/dogbot | dog/ext/config.py | dog/ext/config.py | import logging
from discord.ext import commands
from dog import Cog
log = logging.getLogger(__name__)
class Config(Cog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.permitted_keys = [
'woof_response'
]
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def config(self, ctx):
""" Manages server-specific configuration for the bot. """
@config.command(name='set')
async def config_set(self, ctx, name: str, value: str):
""" Sets a config field for this server. """
if name not in self.permitted_keys:
await ctx.send('That configuration value is not allowed.')
return
await self.bot.redis.set(f'{ctx.guild.id}:{name}', value)
await ctx.send('\N{OK HAND SIGN}')
@config.command(name='permitted')
async def config_permitted(self, ctx):
""" Views permitted configuration keys. """
await ctx.send(', '.join(self.permitted_keys))
@config.command(name='is_set')
async def config_is_set(self, ctx, name: str):
""" Checks if a configuration key is set. """
is_set = await self.bot.config_is_set(ctx.guild, name)
await ctx.send('Yes, it is set.' if is_set else 'No, it is not set.')
@config.command(name='list', aliases=['ls'])
async def config_list(self, ctx):
""" Lists set configuration keys for this server. """
keys = [k.decode().split(':')[1] for k in await self.bot.redis.keys(f'{ctx.guild.id}:*')]
await ctx.send('Set configuration keys in this server: ' + ', '.join(keys))
@config.command(name='remove', aliases=['rm', 'del'])
async def config_remove(self, ctx, name: str):
""" Removes a config field for this server. """
await self.bot.redis.delete(f'{ctx.guild.id}:{name}')
await ctx.send('\N{OK HAND SIGN}')
def setup(bot):
bot.add_cog(Config(bot))
| import logging
from discord.ext import commands
from dog import Cog
log = logging.getLogger(__name__)
class Config(Cog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.permitted_keys = [
'woof_response'
]
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def config(self, ctx):
""" Manages server-specific configuration for the bot. """
@config.command(name='set')
async def config_set(self, ctx, name: str, value: str):
""" Sets a config field for this server. """
if name not in self.permitted_keys:
await ctx.send('That configuration value is not allowed.')
return
await self.bot.redis.set(f'{ctx.guild.id}:{name}', value)
await ctx.send('\N{OK HAND SIGN}')
@config.command(name='permitted')
async def config_permitted(self, ctx):
""" Views permitted configuration keys. """
await ctx.send(', '.join(self.permitted_keys))
@config.command(name='is_set')
async def config_is_set(self, ctx, name: str):
""" Checks if a configuration key is set. """
is_set = await self.bot.config_is_set(ctx.guild, name)
await ctx.send('Yes, it is set.' if is_set else 'No, it is not set.')
@config.command(name='get')
async def config_get(self, ctx, name: str):
""" Fetches a config field for this server. """
result = await self.bot.redis.get(f'{ctx.guild.id}:{name}')
if result is not None:
result = result.decode()
else:
result = '`<nothing>`'
await ctx.send(f'`{name}`: {result}')
def setup(bot):
bot.add_cog(Config(bot))
| mit | Python |
546f1188444365a365dc1dd7a81c2ffc974cf8b2 | change of documentation in the param vector class | LeonAgmonNacht/Genetic-Algorithm-Firewall-TAU | ParamVector.py | ParamVector.py | class ParamVector(object):
"""
This class represents the vectors that defines a firewall
a ParamVector is a class that represents a vector that defines a firewall.
we have our indicator functions that should get parameters, lets call these functions g1...gn
for each gi we can say that there is a vector (ai1,...,aim) of scalars. so we can represent every firewall Fl
as the sum of fi^gi where fi is a mutate function.
so we can think about a vector of different sized vectors, where every vector i is:
fi, ai1,...aim
"""
# the functions that can be used to mutate a ParamVector, instances of ProbabilityFunction
mutate_functions = []
@staticmethod
def generate_random_data():
"""
creates a ParamVector with random data
:return: an instance of ParamVector that is defined using random data
"""
pass | class ParamVector(object):
"""
This class represents the vectors that defines a firewall
a ParamVector is a class that represents a vector that defines a firewall.
we have our indicator functions that should get parameters, lets call these functions g1...gn
for each gi we can say that there is a vector (ai1,...,aim) of scalars. so we can represent every firewall Fl
as the sum of Fi^gi where Fi is a mutate function.
so we can think about a vector of different sized vectors, where every vector i is:
fi, ai1,...aim
"""
# the functions that can be used to mutate a ParamVector, instances of ProbabilityFunction
mutate_functions = []
@staticmethod
def generate_random_data():
"""
creates a ParamVector with random data
:return: an instance of ParamVector that is defined using random data
"""
pass | mit | Python |
1c7317ea85206541c8d518a3fc6cb338ad6873d3 | Fix requires_auth decorator | norbert/fickle | fickle/api.py | fickle/api.py | import os
from functools import wraps
import flask
from flask import request, json
USERNAME = 'fickle'
def Response(data, status = 200):
body = json.dumps(data)
return flask.Response(body, status = status, mimetype = 'application/json')
def SuccessResponse(dataset_id = None):
return Response({ 'success': True, 'id': dataset_id })
def ErrorResponse(status = 400):
return Response({ 'success': False }, status = status)
def check_auth(username, password):
setting = os.environ.get('FICKLE_PASSWORD')
if setting:
return username == USERNAME and password == setting
else:
return True
def requires_auth(f):
if not bool(os.environ.get('FICKLE_PASSWORD')):
return f
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return ErrorResponse(403)
return f(*args, **kwargs)
return decorated
def API(name, backend):
app = flask.Flask(name)
app.config.from_object(name)
@app.route('/')
@requires_auth
def api_root():
return SuccessResponse(backend.dataset_id)
@app.route('/load', methods=['POST'])
@requires_auth
def api_load():
backend.load(request.json)
return SuccessResponse(backend.dataset_id)
@app.route('/fit', methods=['POST'])
@requires_auth
def api_fit():
if not backend.loaded():
return ErrorResponse()
backend.fit()
return SuccessResponse(backend.dataset_id)
@app.route('/validate', methods=['POST'])
@requires_auth
def api_validate():
if not backend.loaded():
return ErrorResponse()
data = backend.validate()
return Response(data)
@app.route('/predict', methods=['POST'])
@requires_auth
def api_predict():
if not backend.trained():
return ErrorResponse()
data = backend.predict(request.json).tolist()
return Response(data)
return app
| import os
from functools import wraps
import flask
from flask import request, json
USERNAME = 'fickle'
def Response(data, status = 200):
body = json.dumps(data)
return flask.Response(body, status = status, mimetype = 'application/json')
def SuccessResponse(dataset_id = None):
return Response({ 'success': True, 'id': dataset_id })
def ErrorResponse(status = 400):
return Response({ 'success': False }, status = status)
def check_auth(username, password):
setting = os.environ.get('FICKLE_PASSWORD')
if setting:
return username == USERNAME and password == setting
else:
return True
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return ErrorResponse(403)
return f(*args, **kwargs)
return decorated
def API(name, backend):
app = flask.Flask(name)
app.config.from_object(name)
@app.route('/')
@requires_auth
def api_root():
return SuccessResponse(backend.dataset_id)
@app.route('/load', methods=['POST'])
@requires_auth
def api_load():
backend.load(request.json)
return SuccessResponse(backend.dataset_id)
@app.route('/fit', methods=['POST'])
@requires_auth
def api_fit():
if not backend.loaded():
return ErrorResponse()
backend.fit()
return SuccessResponse(backend.dataset_id)
@app.route('/validate', methods=['POST'])
@requires_auth
def api_validate():
if not backend.loaded():
return ErrorResponse()
data = backend.validate()
return Response(data)
@app.route('/predict', methods=['POST'])
@requires_auth
def api_predict():
if not backend.trained():
return ErrorResponse()
data = backend.predict(request.json).tolist()
return Response(data)
return app
| mit | Python |
9437e024b1e1630e06d1b05972eb9049af442be0 | fix bad copy/paste | rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs,rboman/progs | build_all.py | build_all.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
# list of projects
progs = [
{'path': 'apps/EHD', 'travis': True},
{'path': 'apps/fractal/cpp', 'travis': False},
{'path': 'apps/GenMAI', 'travis': True},
{'path': 'apps/md5', 'travis': True},
{'path': 'apps/minibarreTE', 'travis': True},
{'path': 'cmake/findGMM', 'travis': True},
{'path': 'cmake/findMKL', 'travis': False},
{'path': 'langtests/cpp11', 'travis': True},
{'path': 'langtests/exporttpl', 'travis': True},
{'path': 'langtests/singleton', 'travis': True},
{'path': 'metafor/arbre', 'travis': True},
{'path': 'metafor/drmeta', 'travis': True},
{'path': 'metafor/mailsph', 'travis': False},
{'path': 'sandbox/fortran', 'travis': True},
{'path': 'sandbox/fortranc', 'travis': True},
{'path': 'student/dcm1', 'travis': False},
{'path': 'student/dcm2', 'travis': True},
{'path': 'student/lejeune', 'travis': True},
{'path': 'student/mico', 'travis': True},
{'path': 'student/ndh', 'travis': True},
]
def getArgs():
# parse args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--travis", help="run only travis tests",
action="store_true")
args = parser.parse_args()
return args
def build_one(basedir, p):
"""build project 'p'
"""
args = getArgs()
fullpath = os.path.join(basedir, *(p['path'].split('/')))
if(p['travis'] or not args.travis):
print '=> running build.py in', fullpath
os.chdir(fullpath)
subprocess.call(['python', 'build.py'])
def build_all(basedir):
"""build everything in 'basedir'
"""
for p in progs:
build_one(basedir, p)
def rm_builds(basedir):
"""remove all 'build' directories in 'basedir'
"""
import shutil
for path, subdirs, files in os.walk(basedir):
for name in subdirs:
if name == 'build':
fullname = os.path.join(path, name)
print 'removing', fullname
shutil.rmtree(fullname)
if __name__ == "__main__":
basedir = os.path.abspath(os.path.dirname(__file__))
rm_builds(basedir)
build_all(basedir)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
# list of projects
progs = [
{'path': 'apps/EHD', 'travis': True},
{'path': 'apps/fractal/cpp', 'travis': False},
{'path': 'apps/GenMAI', 'travis': True},
{'path': 'apps/md5', 'travis': True},
{'path': 'apps/minibarreTE', 'travis': True},
{'path': 'cmake/findGMM', 'travis': True},
{'path': 'cmake/findMKL', 'travis': False},
{'path': 'langtests/cpp11', 'travis': True},
{'path': 'langtests/exporttpl', 'travis': True},
{'path': 'langtests/singleton', 'travis': True},
{'path': 'metafor/arbre', 'travis': True},
{'path': 'metafor/drmeta', 'travis': True},
{'path': 'metafor/mailsph', 'travis': False},
{'path': 'sandbox/fortran', 'travis': True},
{'path': 'sandbox/fortranc', 'travis': True},
{'path': 'student/dcm1', 'travis': False},
{'path': 'student/dcm2', 'travis': True},
{'path': 'student/lejeune', 'lejeune': True},
{'path': 'student/mico', 'travis': True},
{'path': 'student/ndh', 'travis': True},
]
def getArgs():
# parse args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--travis", help="run only travis tests",
action="store_true")
args = parser.parse_args()
return args
def build_one(basedir, p):
"""build project 'p'
"""
args = getArgs()
fullpath = os.path.join(basedir, *(p['path'].split('/')))
if(p['travis'] or not args.travis):
print '=> running build.py in', fullpath
os.chdir(fullpath)
subprocess.call(['python', 'build.py'])
def build_all(basedir):
"""build everything in 'basedir'
"""
for p in progs:
build_one(basedir, p)
def rm_builds(basedir):
"""remove all 'build' directories in 'basedir'
"""
import shutil
for path, subdirs, files in os.walk(basedir):
for name in subdirs:
if name == 'build':
fullname = os.path.join(path, name)
print 'removing', fullname
shutil.rmtree(fullname)
if __name__ == "__main__":
basedir = os.path.abspath(os.path.dirname(__file__))
rm_builds(basedir)
build_all(basedir)
| apache-2.0 | Python |
a8cb15b1983c48547edfeb53bfb63245f7e7c892 | Revert "log integrations with zabbix through pyzabbix" | globocom/dbaas-zabbix,globocom/dbaas-zabbix | dbaas_zabbix/__init__.py | dbaas_zabbix/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from dbaas_zabbix.dbaas_api import DatabaseAsAServiceApi
from dbaas_zabbix.provider_factory import ProviderFactory
from pyzabbix import ZabbixAPI
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
credentials = kwargs['credentials']
del kwargs['databaseinfra']
del kwargs['credentials']
zabbix_api = ZabbixAPI
if kwargs.get('zabbix_api'):
zabbix_api = kwargs.get('zabbix_api')
del kwargs['zabbix_api']
dbaas_api = DatabaseAsAServiceApi(databaseinfra, credentials)
return ProviderFactory.factory(dbaas_api, zabbix_api=zabbix_api, **kwargs)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
from dbaas_zabbix.dbaas_api import DatabaseAsAServiceApi
from dbaas_zabbix.provider_factory import ProviderFactory
from pyzabbix import ZabbixAPI
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.DEBUG)
log = logging.getLogger('pyzabbix')
log.addHandler(stream)
log.setLevel(logging.DEBUG)
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
credentials = kwargs['credentials']
del kwargs['databaseinfra']
del kwargs['credentials']
zabbix_api = ZabbixAPI
if kwargs.get('zabbix_api'):
zabbix_api = kwargs.get('zabbix_api')
del kwargs['zabbix_api']
dbaas_api = DatabaseAsAServiceApi(databaseinfra, credentials)
return ProviderFactory.factory(dbaas_api, zabbix_api=zabbix_api, **kwargs)
| bsd-3-clause | Python |
b3c1b3b66d1c720172e731d1bfc44cfb44c992a3 | Revert of [Android] Re-enable content_browsertests on main waterfall. (https://codereview.chromium.org/132403005/) | dushu1203/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,ChromiumWebApps/chromium,anirudhSK/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,Just-D/chromium-1,fujunwei/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,Chilledheart/chromium,anirudhSK/chromium,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,ChromiumWebApps/chromium,anirudhSK/chromium,M4sse/chromium.src,anirudhSK/chromium,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,ltilve/chromium,patrickm/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,dednal/chromium.src,dushu1203/chromium.src,anirudhSK/chromium,dednal/chromium.src,hgl888/chromium-crosswalk-efl,dednal/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,axinging/chromium-crosswalk,anirudhSK/chromium,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,ltilve/chromium,patrickm/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,M4sse/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,Chilledheart/chromium,dushu1203/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,ondra-novak/chromium.src,dushu1203/chromium.src,Just-D/chromium-1,jaruba/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,dushu1203/chromium.src,ltilve/chromium,littlstar/chromium.src,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,jaruba/chromium.src,patrickm/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,littlstar/chromium.src,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Jonekee/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk | build/android/pylib/gtest/gtest_config.py | build/android/pylib/gtest/gtest_config.py |
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'content_browsertests',
'content_gl_tests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'android_webview_unittests',
'base_unittests',
'cc_unittests',
'components_unittests',
'content_unittests',
'gl_tests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'webkit_unit_tests',
'breakpad_unittests',
'sandbox_linux_unittests',
]
WEBRTC_CHROMIUM_TEST_SUITES = [
'content_browsertests',
]
WEBRTC_NATIVE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'neteq_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_engine_core_unittests',
'voice_engine_unittests',
]
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'content_gl_tests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'android_webview_unittests',
'base_unittests',
'cc_unittests',
'components_unittests',
'content_unittests',
'gl_tests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'webkit_unit_tests',
'breakpad_unittests',
'sandbox_linux_unittests',
'content_browsertests',
]
WEBRTC_CHROMIUM_TEST_SUITES = [
'content_browsertests',
]
WEBRTC_NATIVE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'neteq_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_engine_core_unittests',
'voice_engine_unittests',
]
| bsd-3-clause | Python |
3812403655153e86a8b0e1ac68c9b15e69d6a4e3 | Update BUILD_OSS to 4770. | fcitx/mozc,fcitx/mozc,google/mozc,fcitx/mozc,google/mozc,fcitx/mozc,google/mozc,fcitx/mozc,google/mozc,google/mozc | src/data/version/mozc_version_template.bzl | src/data/version/mozc_version_template.bzl | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 28
# BUILD number used for the OSS version.
BUILD_OSS = 4770
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 11
| # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 28
# BUILD number used for the OSS version.
BUILD_OSS = 4750
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 11
| bsd-3-clause | Python |
b6d747599661f3ce19b4d2f6ea9f80ec9839a2d8 | Update couchm.reactor.py | Anton04/MQTT-Stage,Anton04/MQTT-Stage | resources/reactors/couchm.reactor.py | resources/reactors/couchm.reactor.py | #!/usr/bin/python
import argparse
import mosquitto
#from pushover import PushoverClient
import os, sys
import urllib2
import json, base64
import ConfigParser
#Posting data to couchDB
def post(doc):
global config
url = 'http://%(server)s/%(database)s/_design/energy_data/_update/measurement' % config
# print url
request = urllib2.Request(url, data=json.dumps(doc))
auth = base64.encodestring('%(user)s:%(password)s' % config).replace('\n', '')
request.add_header('Authorization', 'Basic ' + auth)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'POST'
urllib2.urlopen(request, timeout=1)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='host', default="localhost", help='MQTT host send results to')
parser.add_argument('-t', dest='topic', default="", help='MQTT topic to process')
parser.add_argument('-m', dest='message', default="", help='MQTT message to process')
args = parser.parse_args()
#Where am I
path = os.path.abspath(os.path.dirname(sys.argv[0]))
#Load config file...
ConfigFile = path + "/couchm.cfg"
try:
f = open(ConfigFile,"r")
f.close()
except:
print "Please provide a valid config file! In the same folder as the couchDB script!"
exit(1)
#Read config file.
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.read(ConfigFile)
#Load basic config.
config = {}
config["user"] = config.get("CouchDB","user")
config["password"] = config.get("CouchDB","password")
config["server"] = config.get("CouchDB","server")
config["database"] = config.get("CouchDB","database")
source = config.get("CouchM","source")
if args.message[0] == '"':
args.message = args.message[1:]
if args.message[-1] == '"':
args.message = args.message[:-1]
data = json.loads(args.message)
#Post data to couchm
post({
"source": source,
"timestamp": str(data["time"]),
"ElectricPower": str(data["value"]),
"ElectricEnergy": str(0),
"PowerThreshold": str(1),
"ElectricPowerUnoccupied": "0",
"ElectricEnergyOccupied": "0",
"ElectricEnergyUnoccupied": "0"
})
| #!/usr/bin/python
import argparse
import mosquitto
#from pushover import PushoverClient
import os, sys
import urllib2
import json, base64
#Posting data to couchDB
def post(doc):
global config
url = 'http://%(server)s/%(database)s/_design/energy_data/_update/measurement' % config
# print url
request = urllib2.Request(url, data=json.dumps(doc))
auth = base64.encodestring('%(user)s:%(password)s' % config).replace('\n', '')
request.add_header('Authorization', 'Basic ' + auth)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'POST'
urllib2.urlopen(request, timeout=1)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='host', default="localhost", help='MQTT host send results to')
parser.add_argument('-t', dest='topic', default="", help='MQTT topic to process')
parser.add_argument('-m', dest='message', default="", help='MQTT message to process')
args = parser.parse_args()
#Where am I
path = os.path.abspath(os.path.dirname(sys.argv[0]))
#Load config file...
ConfigFile = path + "/couchm.cfg"
try:
f = open(ConfigFile,"r")
f.close()
except:
print "Please provide a valid config file! In the same folder as the couchDB script!"
exit(1)
#Read config file.
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.read(ConfigFile)
#Load basic config.
config = {}
config["user"] = config.get("CouchDB","user")
config["password"] = config.get("CouchDB","password")
config["server"] = config.get("CouchDB","server")
config["database"] = config.get("CouchDB","database")
source = config.get("CouchM","source")
if args.message[0] == '"':
args.message = args.message[1:]
if args.message[-1] == '"':
args.message = args.message[:-1]
data = json.loads(args.message)
#Post data to couchm
post({
"source": source,
"timestamp": str(data["time"]),
"ElectricPower": str(data["value"]),
"ElectricEnergy": str(0),
"PowerThreshold": str(1),
"ElectricPowerUnoccupied": "0",
"ElectricEnergyOccupied": "0",
"ElectricEnergyUnoccupied": "0"
})
| mit | Python |
7a2fd7bbdaed3ffda3cb8740d38e5f3e88dd8ce8 | add name for the thread | Impactstory/oadoi,Impactstory/oadoi,Impactstory/sherlockoa,Impactstory/oadoi,Impactstory/sherlockoa | update.py | update.py | from time import time
from app import db
import argparse
from jobs import update_registry
from util import elapsed
# needs to be imported so the definitions get loaded into the registry
import jobs_defs
"""
examples of calling this:
# update everything
python update.py Person.refresh --limit 10 --chunk 5 --rq
# update one thing not using rq
python update.py Package.test --id 0000-1111-2222-3333
"""
def parse_update_optional_args(parser):
# just for updating lots
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many jobs to do")
parser.add_argument('--chunk', "-ch", nargs="?", default=10, type=int, help="how many to take off db at once")
parser.add_argument('--after', nargs="?", type=str, help="minimum id or id start, ie 0000-0001")
parser.add_argument('--rq', action="store_true", default=False, help="do jobs in this thread")
parser.add_argument('--order', action="store_true", default=True, help="order them")
parser.add_argument('--append', action="store_true", default=False, help="append, dont' clear queue")
parser.add_argument('--name', nargs="?", type=str, help="name for the thread")
# just for updating one
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update")
parser.add_argument('--doi', nargs="?", type=str, help="doi of the one thing you want to update")
# parse and run
parsed_args = parser.parse_args()
return parsed_args
def run_update(parsed_args):
update = update_registry.get(parsed_args.fn)
start = time()
#convenience method for handling an doi
if parsed_args.doi:
from publication import Crossref
from util import clean_doi
my_pub = db.session.query(Crossref).filter(Crossref.id==clean_doi(parsed_args.doi)).first()
parsed_args.id = my_pub.id
print u"Got database hit for this doi: {}".format(my_pub.id)
update.run(**vars(parsed_args))
db.session.remove()
print "finished update in {} secconds".format(elapsed(start))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# for everything
parser.add_argument('fn', type=str, help="what function you want to run")
parsed_args = parse_update_optional_args(parser)
run_update(parsed_args)
| from time import time
from app import db
import argparse
from jobs import update_registry
from util import elapsed
# needs to be imported so the definitions get loaded into the registry
import jobs_defs
"""
examples of calling this:
# update everything
python update.py Person.refresh --limit 10 --chunk 5 --rq
# update one thing not using rq
python update.py Package.test --id 0000-1111-2222-3333
"""
def parse_update_optional_args(parser):
# just for updating lots
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many jobs to do")
parser.add_argument('--chunk', "-ch", nargs="?", default=10, type=int, help="how many to take off db at once")
parser.add_argument('--after', nargs="?", type=str, help="minimum id or id start, ie 0000-0001")
parser.add_argument('--rq', action="store_true", default=False, help="do jobs in this thread")
parser.add_argument('--order', action="store_true", default=True, help="order them")
parser.add_argument('--append', action="store_true", default=False, help="append, dont' clear queue")
# just for updating one
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update")
parser.add_argument('--doi', nargs="?", type=str, help="doi of the one thing you want to update")
# parse and run
parsed_args = parser.parse_args()
return parsed_args
def run_update(parsed_args):
update = update_registry.get(parsed_args.fn)
start = time()
#convenience method for handling an doi
if parsed_args.doi:
from publication import Crossref
from util import clean_doi
my_pub = db.session.query(Crossref).filter(Crossref.id==clean_doi(parsed_args.doi)).first()
parsed_args.id = my_pub.id
print u"Got database hit for this doi: {}".format(my_pub.id)
update.run(**vars(parsed_args))
db.session.remove()
print "finished update in {} secconds".format(elapsed(start))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# for everything
parser.add_argument('fn', type=str, help="what function you want to run")
parsed_args = parse_update_optional_args(parser)
run_update(parsed_args)
| mit | Python |
0b54c244e6e4b745a678fe69fc1be7c16850203d | Fix a mistake. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/distutils/example_without_dependency/setup.py | python/distutils/example_without_dependency/setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyNurseryRhymesDemo
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from nursery_rhymes import __version__ as VERSION
from distutils.core import setup
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries']
PACKAGES = ['nursery_rhymes']
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='[email protected]',
maintainer='Jeremie DECOCK',
maintainer_email='[email protected]',
name='nursery_rhymes',
description='A snippet to show how to install a project with setuptools',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# where the package may be downloaded
scripts = ["rowyourboat"],
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyNurseryRhymesDemo
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from nursery_rhymes import __version__ as VERSION
from distutils.core import setup
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries']
PACKAGES = ['nursery_rhymes']
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='[email protected]',
maintainer='Jeremie DECOCK',
maintainer_email='[email protected]',
name='nursery_rhymes',
description='A snippet to show how to install a project with setuptools',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# where the package may be downloaded
scripts = ["rowyourboat"],
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
| mit | Python |
6c11b9cc9b213928e32d883d4f557f7421da6802 | Add kamerstukken to dossier API | openkamer/openkamer,openkamer/openkamer,openkamer/openkamer,openkamer/openkamer | document/api.py | document/api.py | from rest_framework import serializers, viewsets
from document.models import Document, Kamerstuk, Dossier
class DossierSerializer(serializers.HyperlinkedModelSerializer):
documents = serializers.HyperlinkedRelatedField(read_only=True,
view_name='document-detail',
many=True)
kamerstukken = serializers.HyperlinkedRelatedField(read_only=True,
view_name='kamerstuk-detail',
many=True)
class Meta:
model = Dossier
fields = ('id', 'dossier_id', 'title', 'kamerstukken', 'documents')
class DossierViewSet(viewsets.ModelViewSet):
queryset = Dossier.objects.all()
serializer_class = DossierSerializer
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = ('id', 'dossier', 'raw_type', 'raw_title', 'publisher', 'date_published', 'document_url')
class DocumentViewSet(viewsets.ModelViewSet):
queryset = Document.objects.all()
serializer_class = DocumentSerializer
class KamerstukSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Kamerstuk
fields = ('id', 'document', 'id_main', 'id_sub', 'type_short', 'type_long')
class KamerstukViewSet(viewsets.ModelViewSet):
queryset = Kamerstuk.objects.all()
serializer_class = KamerstukSerializer
| from rest_framework import serializers, viewsets
from document.models import Document, Kamerstuk, Dossier
class DossierSerializer(serializers.HyperlinkedModelSerializer):
documents = serializers.HyperlinkedRelatedField(read_only=True,
view_name='document-detail',
many=True)
class Meta:
model = Dossier
fields = ('id', 'dossier_id', 'title', 'documents')
class DossierViewSet(viewsets.ModelViewSet):
queryset = Dossier.objects.all()
serializer_class = DossierSerializer
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = ('id', 'dossier', 'raw_type', 'raw_title', 'publisher', 'date_published', 'document_url')
class DocumentViewSet(viewsets.ModelViewSet):
queryset = Document.objects.all()
serializer_class = DocumentSerializer
class KamerstukSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Kamerstuk
fields = ('id', 'document', 'id_main', 'id_sub', 'type_short', 'type_long')
class KamerstukViewSet(viewsets.ModelViewSet):
queryset = Kamerstuk.objects.all()
serializer_class = KamerstukSerializer
| mit | Python |
51e35e88597d2c34905222cd04a46a2a840c0d92 | Refactor Poly ABC | oneklc/dimod,oneklc/dimod | dimod/core/polysampler.py | dimod/core/polysampler.py | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============================================================================
import abc
from six import add_metaclass
from dimod.core.composite import Composite
from dimod.higherorder.polynomial import BinaryPolynomial
__all__ = 'PolySampler', 'ComposedPolySampler'
@add_metaclass(abc.ABCMeta)
class PolySampler:
"""Sampler supports binary polynomials.
Binary polynomials are an extension of binary quadratic models that allow
higher-order interactions.
"""
@abc.abstractmethod
def sample_poly(self, polynomial, **kwargs):
"""Sample from a higher-order polynomial."""
pass
def sample_hising(self, h, J, **kwargs):
return self.sample_poly(BinaryPolynomial.from_hising(h, J), **kwargs)
def sample_hubo(self, H, **kwargs):
return self.sample_poly(BinaryPolynomial.from_hubo(H), **kwargs)
class ComposedPolySampler(PolySampler, Composite):
pass
| # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============================================================================
import abc
from six import add_metaclass
__all__ = 'PolySampler',
@add_metaclass(abc.ABCMeta)
class PolySampler:
"""Sampler/Composite supports binary polynomials.
Binary polynomials are an extension of binary quadratic models that allow
higher-order interactions.
"""
@abc.abstractmethod
def sample_poly(self, polynomial, **kwargs):
"""Sample from a higher-order polynomial."""
pass
| apache-2.0 | Python |