commit | old_file | new_file | old_contents | new_contents | subject | message | lang | license | repos
---|---|---|---|---|---|---|---|---|---
d537ea32462c7ef46634d1527702c4c4a6d37e1e
|
tests/query_test/test_udfs.py
|
tests/query_test/test_udfs.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestUdfs(ImpalaTestSuite):
    @classmethod
    def get_workload(cls):
        return 'functional-query'

    @classmethod
    def add_test_dimensions(cls):
        super(TestUdfs, cls).add_test_dimensions()
        # UDFs require codegen
        cls.TestMatrix.add_constraint(
            lambda v: v.get_value('exec_option')['disable_codegen'] == False)
        # There is no reason to run these tests using all dimensions.
        cls.TestMatrix.add_constraint(lambda v:\
            v.get_value('table_format').file_format == 'text' and\
            v.get_value('table_format').compression_codec == 'none')

    def test_udfs(self, vector):
        self.run_test_case('QueryTest/udf', vector)
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestUdfs(ImpalaTestSuite):
    @classmethod
    def get_workload(cls):
        return 'functional-query'

    @classmethod
    def add_test_dimensions(cls):
        super(TestUdfs, cls).add_test_dimensions()
        # UDFs require codegen
        cls.TestMatrix.add_constraint(
            lambda v: v.get_value('exec_option')['disable_codegen'] == False)
        # There is no reason to run these tests using all dimensions.
        cls.TestMatrix.add_constraint(lambda v:\
            v.get_value('table_format').file_format == 'text' and\
            v.get_value('table_format').compression_codec == 'none')

    # This must run serially because other tests executing 'invalidate metadata' will nuke
    # all loaded functions.
    # TODO: This can be run in parallel once functions are persisted correctly.
    @pytest.mark.execute_serially
    def test_udfs(self, vector):
        self.run_test_case('QueryTest/udf', vector)
|
Fix UDF test, take two
|
Fix UDF test, take two
Change-Id: I817389d94dab665199d2c1b7365e8ce0d1495c41
Reviewed-on: http://gerrit.ent.cloudera.com:8080/504
Reviewed-by: Skye Wanderman-Milne <[email protected]>
Tested-by: Skye Wanderman-Milne <[email protected]>
|
Python
|
apache-2.0
|
caseyching/Impala,theyaa/Impala,lirui-intel/Impala,ibmsoe/ImpalaPPC,gistic/PublicSpatialImpala,mapr/impala,cgvarela/Impala,rampage644/impala-cut,cchanning/Impala,cloudera/recordservice,caseyching/Impala,grundprinzip/Impala,placrosse/ImpalaToGo,kapilrastogi/Impala,brightchen/Impala,brightchen/Impala,tempbottle/Impala,rampage644/impala-cut,kapilrastogi/Impala,andybab/Impala,gistic/PublicSpatialImpala,bratatidas9/Impala-1,ImpalaToGo/ImpalaToGo,henryr/Impala,scalingdata/Impala,scalingdata/Impala,henryr/Impala,lirui-intel/Impala,tempbottle/Impala,henryr/Impala,placrosse/ImpalaToGo,gerashegalov/Impala,ibmsoe/ImpalaPPC,brightchen/Impala,ImpalaToGo/ImpalaToGo,theyaa/Impala,ImpalaToGo/ImpalaToGo,brightchen/Impala,cloudera/recordservice,XiaominZhang/Impala,gistic/PublicSpatialImpala,rampage644/impala-cut,lirui-intel/Impala,bowlofstew/Impala,cchanning/Impala,AtScaleInc/Impala,cloudera/recordservice,lnliuxing/Impala,lirui-intel/Impala,cloudera/recordservice,gistic/PublicSpatialImpala,cchanning/Impala,ibmsoe/ImpalaPPC,rdblue/Impala,rampage644/impala-cut,theyaa/Impala,cloudera/recordservice,theyaa/Impala,caseyching/Impala,rdblue/Impala,grundprinzip/Impala,scalingdata/Impala,bratatidas9/Impala-1,cgvarela/Impala,cgvarela/Impala,andybab/Impala,lnliuxing/Impala,andybab/Impala,theyaa/Impala,kapilrastogi/Impala,cloudera/recordservice,kapilrastogi/Impala,XiaominZhang/Impala,lnliuxing/Impala,theyaa/Impala,caseyching/Impala,grundprinzip/Impala,tempbottle/Impala,rampage644/impala-cut,gerashegalov/Impala,cchanning/Impala,caseyching/Impala,kapilrastogi/Impala,cgvarela/Impala,caseyching/Impala,rdblue/Impala,rdblue/Impala,ImpalaToGo/ImpalaToGo,lirui-intel/Impala,rdblue/Impala,gerashegalov/Impala,cchanning/Impala,tempbottle/Impala,bowlofstew/Impala,rdblue/Impala,caseyching/Impala,rdblue/Impala,XiaominZhang/Impala,cgvarela/Impala,mapr/impala,bowlofstew/Impala,XiaominZhang/Impala,henryr/Impala,gerashegalov/Impala,cloudera/recordservice,theyaa/Impala,ibmsoe/ImpalaPPC,mapr/impala,grundprinzip/Impala,cchanning/Impala,gistic/PublicSpatialImpala,andybab/Impala,mapr/impala,lnliuxing/Impala,bowlofstew/Impala,bowlofstew/Impala,grundprinzip/Impala,henryr/Impala,bowlofstew/Impala,scalingdata/Impala,lnliuxing/Impala,ImpalaToGo/ImpalaToGo,scalingdata/Impala,gerashegalov/Impala,mapr/impala,bratatidas9/Impala-1,kapilrastogi/Impala,AtScaleInc/Impala,AtScaleInc/Impala,lirui-intel/Impala,grundprinzip/Impala,tempbottle/Impala,bratatidas9/Impala-1,placrosse/ImpalaToGo,gistic/PublicSpatialImpala,AtScaleInc/Impala,XiaominZhang/Impala,henryr/Impala,brightchen/Impala,bratatidas9/Impala-1,bowlofstew/Impala,gerashegalov/Impala,brightchen/Impala,AtScaleInc/Impala,scalingdata/Impala,tempbottle/Impala,lnliuxing/Impala,placrosse/ImpalaToGo,XiaominZhang/Impala,bratatidas9/Impala-1,placrosse/ImpalaToGo,AtScaleInc/Impala,bratatidas9/Impala-1,lnliuxing/Impala,gerashegalov/Impala,ImpalaToGo/ImpalaToGo,ibmsoe/ImpalaPPC,cgvarela/Impala,rampage644/impala-cut,cchanning/Impala,andybab/Impala,brightchen/Impala,cgvarela/Impala,lirui-intel/Impala,kapilrastogi/Impala,andybab/Impala,placrosse/ImpalaToGo,XiaominZhang/Impala,ibmsoe/ImpalaPPC,tempbottle/Impala,ibmsoe/ImpalaPPC
|
454740f2657efa88efa16abdba93dc427bcf4d70
|
run.py
|
run.py
|
from PdfProcessor import *
import argparse
from datetime import datetime
import ConfigParser
import ProcessLogger
parser = argparse.ArgumentParser(description='Processes the pdf and extracts the text')
parser.add_argument('-i','--infile', help='File path of the input pdf file.', required=True)
parser.add_argument('-o','--outdir', help='File name of the output csv file.', required=True)
results = parser.parse_args()
logger = ProcessLogger.getLogger('run')
logger.info("Processing started at %s ", str(datetime.now()))
logger.info("input: %s", results.infile)
logger.info("outdir: %s", results.outdir)
configParser = ConfigParser.RawConfigParser()
configParser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.config'))
pdfProcessor = PDFProcessor(results.infile, results.outdir)
pdfProcessor.setConfigParser(configParser)
if pdfProcessor.isStructured():
    pdfProcessor.extractTextFromStructuredDoc()
else:
    pdfProcessor.extractTextFromScannedDoc()
pdfProcessor.writeStats()
logger.info("Processing ended at %s ", str(datetime.now()));
|
from PdfProcessor import *
import argparse
from datetime import datetime
import ConfigParser
import ProcessLogger
import traceback
parser = argparse.ArgumentParser(description='Processes the pdf and extracts the text')
parser.add_argument('-i','--infile', help='File path of the input pdf file.', required=True)
parser.add_argument('-o','--outdir', help='File name of the output csv file.', required=True)
results = parser.parse_args()
try:
    logger = ProcessLogger.getLogger('run')
    logger.info("Processing started at %s ", str(datetime.now()))
    logger.info("input: %s", results.infile)
    logger.info("outdir: %s", results.outdir)
    configParser = ConfigParser.RawConfigParser()
    configParser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.config'))
    pdfProcessor = PDFProcessor(results.infile, results.outdir)
    pdfProcessor.setConfigParser(configParser)
    if pdfProcessor.isStructured():
        pdfProcessor.extractTextFromStructuredDoc()
    else:
        pdfProcessor.extractTextFromScannedDoc()
    pdfProcessor.writeStats()
except OSError as e:
    logger.error("OSError: %s [%s] in %s", e.strerror, e.errno, e.filename);
    logger.debug(traceback.format_exception(*sys.exc_info()))
except Exception as e:
    logger.error("Exception: %s ", e);
    logger.debug(traceback.format_exception(*sys.exc_info()))
logger.info("Processing ended at %s ", str(datetime.now()));
|
Add try catch to capture all the exceptions that might generate anywhere todo: need to capture exceptions in specific places and raise them to log from the main catch
|
Add try catch to capture all the exceptions that might generate anywhere
todo: need to capture exceptions in specific places and raise them to log from the main catch
|
Python
|
mit
|
manishgs/pdf-processor,anjesh/pdf-processor,manishgs/pdf-processor,anjesh/pdf-processor
|
70b4be757d671bc86876b4568632bb6fe6064001
|
admin_interface/templatetags/admin_interface_tags.py
|
admin_interface/templatetags/admin_interface_tags.py
|
# -*- coding: utf-8 -*-
from django import template
from admin_interface.models import Theme
register = template.Library()
@register.assignment_tag(takes_context = True)
def get_admin_interface_theme(context):
    theme = None
    request = context.get('request', None)
    if request:
        theme = getattr(request, 'admin_interface_theme', None)
    if not theme:
        theme = Theme.get_active_theme()
        if request:
            request.admin_interface_theme = theme
    return theme
|
# -*- coding: utf-8 -*-
from django import template
from admin_interface.models import Theme
register = template.Library()
@register.simple_tag(takes_context = True)
def get_admin_interface_theme(context):
    theme = None
    request = context.get('request', None)
    if request:
        theme = getattr(request, 'admin_interface_theme', None)
    if not theme:
        theme = Theme.get_active_theme()
        if request:
            request.admin_interface_theme = theme
    return theme
|
Fix a Django deprecation warning
|
Fix a Django deprecation warning
Replace deprecated assignment_tag:
`python/lib/python3.5/site-packages/admin_interface/templatetags/admin_interface_tags.py:11: RemovedInDjango20Warning: assignment_tag() is deprecated. Use simple_tag() instead`
|
Python
|
mit
|
fabiocaccamo/django-admin-interface,fabiocaccamo/django-admin-interface,fabiocaccamo/django-admin-interface
|
b37814280dc06dbf8aefec4490f6b73a47f05c1a
|
custom_fixers/fix_alt_unicode.py
|
custom_fixers/fix_alt_unicode.py
|
# Taken from jinja2. Thanks, Armin Ronacher.
# See also http://lucumr.pocoo.org/2010/2/11/porting-to-python-3-a-guide
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, BlankLine
class FixAltUnicode(fixer_base.BaseFix):
    PATTERN = """
    func=funcdef< 'def' name='__unicode__'
    parameters< '(' NAME ')' > any+ >
    """

    def transform(self, node, results):
        name = results['name']
        name.replace(Name('__str__', prefix=name.prefix))
|
# Taken from jinja2. Thanks, Armin Ronacher.
# See also http://lucumr.pocoo.org/2010/2/11/porting-to-python-3-a-guide
from lib2to3 import fixer_base
class FixAltUnicode(fixer_base.BaseFix):
    PATTERN = "'__unicode__'"

    def transform(self, node, results):
        new = node.clone()
        new.value = '__str__'
        return new
|
Simplify python3 unicode fixer and make it replace all occurrences of __unicode__ with __str__.
|
Simplify python3 unicode fixer and make it replace all occurrences of __unicode__ with __str__.
|
Python
|
mit
|
live-clones/pybtex
|
eb4cda636a0b0ceb5312b161e97ae5f8376c9f8e
|
indra/tests/test_biolookup_client.py
|
indra/tests/test_biolookup_client.py
|
from indra.databases import biolookup_client
def test_lookup_curie():
    curie = 'pubchem.compound:40976'
    res = biolookup_client.lookup_curie(curie)
    assert res['name'] == '(17R)-13-ethyl-17-ethynyl-17-hydroxy-11-' \
        'methylidene-2,6,7,8,9,10,12,14,15,16-decahydro-1H-' \
        'cyclopenta[a]phenanthren-3-one', res

def test_lookup():
    res = biolookup_client.lookup('FPLX', 'ERK')
    assert res['name'] == 'ERK', res

def test_get_name():
    res = biolookup_client.get_name('CHEBI', 'CHEBI:408174')
    assert res == 'arformoterol', res
|
from indra.databases import biolookup_client
def test_lookup_curie():
    curie = 'pubchem.compound:40976'
    res = biolookup_client.lookup_curie(curie)
    assert res['name'] == '(17R)-13-ethyl-17-ethynyl-17-hydroxy-11-' \
        'methylidene-2,6,7,8,9,10,12,14,15,16-decahydro-1H-' \
        'cyclopenta[a]phenanthren-3-one', res

def test_lookup():
    res = biolookup_client.lookup('HGNC', '1097')
    assert res['name'] == 'BRAF', res

def test_get_name():
    res = biolookup_client.get_name('CHEBI', 'CHEBI:408174')
    assert res == 'arformoterol', res
|
Change biolookup test to work around service bug
|
Change biolookup test to work around service bug
|
Python
|
bsd-2-clause
|
johnbachman/indra,bgyori/indra,johnbachman/indra,bgyori/indra,bgyori/indra,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/indra
|
c535c22884dbb0df227d4ad142e4d4515415ca29
|
tests/backends/gstreamer_test.py
|
tests/backends/gstreamer_test.py
|
import unittest
import os
from mopidy.models import Playlist, Track
from mopidy.backends.gstreamer import GStreamerBackend
from tests.backends.base import (BasePlaybackControllerTest,
    BaseCurrentPlaylistControllerTest)
folder = os.path.dirname(__file__)
folder = os.path.join(folder, '..', 'data')
folder = os.path.abspath(folder)
song = os.path.join(folder, 'song%s.mp3')
song = 'file://' + song
# FIXME can be switched to generic test
class GStreamerCurrentPlaylistHandlerTest(BaseCurrentPlaylistControllerTest, unittest.TestCase):
    tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
    backend_class = GStreamerBackend

class GStreamerPlaybackControllerTest(BasePlaybackControllerTest, unittest.TestCase):
    tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
    backend_class = GStreamerBackend

if __name__ == '__main__':
    unittest.main()
|
import unittest
import os
from mopidy.models import Playlist, Track
from mopidy.backends.gstreamer import GStreamerBackend
from tests.backends.base import (BasePlaybackControllerTest,
    BaseCurrentPlaylistControllerTest)
folder = os.path.dirname(__file__)
folder = os.path.join(folder, '..', 'data')
folder = os.path.abspath(folder)
song = os.path.join(folder, 'song%s.wav')
song = 'file://' + song
# FIXME can be switched to generic test
class GStreamerCurrentPlaylistHandlerTest(BaseCurrentPlaylistControllerTest, unittest.TestCase):
    tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
    backend_class = GStreamerBackend

class GStreamerPlaybackControllerTest(BasePlaybackControllerTest, unittest.TestCase):
    tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]
    backend_class = GStreamerBackend

if __name__ == '__main__':
    unittest.main()
|
Switch to wav test files for gstreamer tests
|
Switch to wav test files for gstreamer tests
|
Python
|
apache-2.0
|
dbrgn/mopidy,jcass77/mopidy,woutervanwijk/mopidy,jcass77/mopidy,tkem/mopidy,kingosticks/mopidy,ali/mopidy,diandiankan/mopidy,hkariti/mopidy,bacontext/mopidy,rawdlite/mopidy,jcass77/mopidy,bacontext/mopidy,pacificIT/mopidy,ZenithDK/mopidy,adamcik/mopidy,mopidy/mopidy,dbrgn/mopidy,mokieyue/mopidy,hkariti/mopidy,woutervanwijk/mopidy,kingosticks/mopidy,SuperStarPL/mopidy,tkem/mopidy,tkem/mopidy,adamcik/mopidy,jodal/mopidy,dbrgn/mopidy,swak/mopidy,diandiankan/mopidy,ZenithDK/mopidy,bencevans/mopidy,vrs01/mopidy,adamcik/mopidy,jodal/mopidy,priestd09/mopidy,glogiotatidis/mopidy,ali/mopidy,diandiankan/mopidy,bencevans/mopidy,kingosticks/mopidy,bencevans/mopidy,swak/mopidy,quartz55/mopidy,ali/mopidy,ZenithDK/mopidy,jmarsik/mopidy,priestd09/mopidy,jmarsik/mopidy,SuperStarPL/mopidy,liamw9534/mopidy,ZenithDK/mopidy,liamw9534/mopidy,quartz55/mopidy,SuperStarPL/mopidy,jmarsik/mopidy,rawdlite/mopidy,priestd09/mopidy,tkem/mopidy,hkariti/mopidy,rawdlite/mopidy,mopidy/mopidy,swak/mopidy,quartz55/mopidy,abarisain/mopidy,bacontext/mopidy,jmarsik/mopidy,pacificIT/mopidy,pacificIT/mopidy,abarisain/mopidy,hkariti/mopidy,mopidy/mopidy,bacontext/mopidy,glogiotatidis/mopidy,vrs01/mopidy,vrs01/mopidy,mokieyue/mopidy,glogiotatidis/mopidy,diandiankan/mopidy,dbrgn/mopidy,pacificIT/mopidy,rawdlite/mopidy,mokieyue/mopidy,swak/mopidy,SuperStarPL/mopidy,glogiotatidis/mopidy,jodal/mopidy,mokieyue/mopidy,vrs01/mopidy,quartz55/mopidy,bencevans/mopidy,ali/mopidy
|
eac2f296e855f92d040321edee943ad5f8a8fb39
|
nodeconductor/events/views.py
|
nodeconductor/events/views.py
|
from rest_framework import generics, response
from nodeconductor.events import elasticsearch_client
class EventListView(generics.GenericAPIView):
    def list(self, request, *args, **kwargs):
        order_by = request.GET.get('o', '-@timestamp')
        elasticsearch_list = elasticsearch_client.ElasticsearchResultList(user=request.user, sort=order_by)
        page = self.paginate_queryset(elasticsearch_list)
        if page is not None:
            return self.get_paginated_response(page)
        return response.Response(elasticsearch_list)

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
|
from rest_framework import generics, response
from nodeconductor.events import elasticsearch_client
class EventListView(generics.GenericAPIView):
    def list(self, request, *args, **kwargs):
        order_by = request.GET.get('o', '-@timestamp')
        event_types = request.GET.getlist('event_type')
        search_text = request.GET.get('search_text')
        elasticsearch_list = elasticsearch_client.ElasticsearchResultList(
            user=request.user, sort=order_by, event_types=event_types, search_text=search_text)
        page = self.paginate_queryset(elasticsearch_list)
        if page is not None:
            return self.get_paginated_response(page)
        return response.Response(elasticsearch_list)

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
|
Add filtering to view (nc-463)
|
Add filtering to view (nc-463)
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
72e948719145579eb7dfb9385b921f8eb6ea1384
|
tests/v4/conftest.py
|
tests/v4/conftest.py
|
from .context import tohu
from tohu.v4.primitive_generators import *
from tohu.v4.derived_generators import *
__all__ = ['EXEMPLAR_GENERATORS', 'EXEMPLAR_PRIMITIVE_GENERATORS', 'EXEMPLAR_DERIVED_GENERATORS']
def add(x, y):
    return x + y

EXEMPLAR_PRIMITIVE_GENERATORS = [
    Constant("quux"),
    Integer(100, 200),
    HashDigest(length=6),
    FakerGenerator(method="name"),
    IterateOver('abcdefghijklmnopqrstuvwxyz'),
    SelectOne('abcdefghijklmnopqrstuvwxyz'),
    SelectOne('abcde', p=[0.1, 0.05, 0.7, 0.03, 0.12]),
    Timestamp(date='2018-01-01'),
]

EXEMPLAR_DERIVED_GENERATORS = [
    Apply(add, Integer(100, 200), Integer(300, 400)),
    Apply(add, Apply(add, Integer(100, 200), Integer(300, 400)), Apply(add, Integer(500, 600), Integer(700, 800))),
]
EXEMPLAR_CUSTOM_GENERATORS = []
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
|
from .context import tohu
from tohu.v4.primitive_generators import *
from tohu.v4.derived_generators import *
__all__ = ['EXEMPLAR_GENERATORS', 'EXEMPLAR_PRIMITIVE_GENERATORS', 'EXEMPLAR_DERIVED_GENERATORS']
def add(x, y):
    return x + y

EXEMPLAR_PRIMITIVE_GENERATORS = [
    Boolean(p=0.3),
    Constant("quux"),
    FakerGenerator(method="name"),
    Float(12.34, 56.78),
    HashDigest(length=6),
    Integer(100, 200),
    IterateOver('abcdefghijklmnopqrstuvwxyz'),
    SelectOne('abcdefghijklmnopqrstuvwxyz'),
    SelectOne('abcde', p=[0.1, 0.05, 0.7, 0.03, 0.12]),
    Timestamp(date='2018-01-01'),
]

EXEMPLAR_DERIVED_GENERATORS = [
    Apply(add, Integer(100, 200), Integer(300, 400)),
    Apply(add, Apply(add, Integer(100, 200), Integer(300, 400)), Apply(add, Integer(500, 600), Integer(700, 800))),
]
EXEMPLAR_CUSTOM_GENERATORS = []
EXEMPLAR_GENERATORS = EXEMPLAR_PRIMITIVE_GENERATORS + EXEMPLAR_DERIVED_GENERATORS + EXEMPLAR_CUSTOM_GENERATORS
|
Add more exemplar primitive generators
|
Add more exemplar primitive generators
|
Python
|
mit
|
maxalbert/tohu
|
12d22221df5786caee510cc167c9ef29f9155488
|
var/www/cgi-bin/abundanceConf.py
|
var/www/cgi-bin/abundanceConf.py
|
#!/home/daniel/Software/anaconda3/bin/python
# Import modules for CGI handling
import cgi, cgitb
from abundanceDriver import abundancedriver
from emailSender import sendEmail
def cgi2dict(form):
    """Convert the form from cgi.FieldStorage to a python dictionary"""
    params = {}
    for key in form.keys():
        params[key] = form[key].value
    return params

def abundance(form):
    """Create the configuration file for running the abundance driver"""
    # Make the StarMe_ares.cfg
    fout = '/tmp/linelist.moog {Teff} {logg} {feh} {vt}'.format(**form)
    with open('/tmp/StarMe_abundance.cfg', 'w') as f:
        f.writelines(fout+'\n')
    abundancedriver('/tmp/StarMe_abundance.cfg')

if __name__ == '__main__':
    # Enable debugging
    import os
    os.system('touch /tmp/test1')
    cgitb.enable()
    form = cgi.FieldStorage()
    # Run ARES for one or several line lists
    formDict = cgi2dict(form)
    abundance(formDict)
    sendEmail(to=formDict['email'], driver='abundances', data='/tmp/abundances.dat')
    # Show the finished html page
    print "Content-type: text/html\n\n"
    with open('../html/finish.html', 'r') as lines:
        for line in lines:
            print line
|
#!/home/daniel/Software/anaconda3/bin/python
# Import modules for CGI handling
import cgi, cgitb
from abundanceDriver import abundancedriver
from emailSender import sendEmail
def cgi2dict(form):
    """Convert the form from cgi.FieldStorage to a python dictionary"""
    params = {}
    for key in form.keys():
        params[key] = form[key].value
    return params

def abundance(form):
    """Create the configuration file for running the abundance driver"""
    # Make the StarMe_ares.cfg
    fout = '/tmp/linelist.moog {Teff} {logg} {feh} {vt}'.format(**form)
    with open('/tmp/StarMe_abundance.cfg', 'w') as f:
        f.writelines(fout+'\n')
    abundancedriver('/tmp/StarMe_abundance.cfg')

if __name__ == '__main__':
    # Enable debugging
    cgitb.enable()
    form = cgi.FieldStorage()
    # Run ARES for one or several line lists
    formDict = cgi2dict(form)
    abundance(formDict)
    sendEmail(to=formDict['email'], driver='abundances', data='/tmp/abundresults.dat')
    # Show the finished html page
    print "Content-type: text/html\n\n"
    with open('../html/finish.html', 'r') as lines:
        for line in lines:
            print line
|
Correct name of output file
|
Correct name of output file
|
Python
|
mit
|
DanielAndreasen/FASMA-web,DanielAndreasen/FASMA-web,DanielAndreasen/FASMA-web,DanielAndreasen/FASMA-web
|
24b8e2f7440926d6d1c384a7289dfb5d1124e82f
|
opps/core/admin/__init__.py
|
opps/core/admin/__init__.py
|
# -*- coding: utf-8 -*-
from opps.core.admin.channel import *
from opps.core.admin.profile import *
from opps.core.admin.source import *
|
# -*- coding: utf-8 -*-
from opps.core.admin.article import *
from opps.core.admin.channel import *
from opps.core.admin.profile import *
from opps.core.admin.source import *
|
Add article on core admin
|
Add article on core admin
|
Python
|
mit
|
YACOWS/opps,opps/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,opps/opps,jeanmask/opps,opps/opps,williamroot/opps
|
ea416504c287bc5a3716289b57ebfd15bb770b9d
|
sql/branch.py
|
sql/branch.py
|
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
participants = []
with open('./sql/emails.txt') as f:
    emails = [line.rstrip() for line in f]

participants = db.all("""
    SELECT p.*::participants
    FROM participants p
    WHERE email_address IN %s
""", (tuple(emails), ))

for p in participants:
    p.queue_email('double_emails')
|
import sys
from gratipay import wireup
env = wireup.env()
db = wireup.db(env)
# Temporary, will fill with actual values when running script
email_txt = """
[email protected]
[email protected]
"""
emails = [email.strip() for email in email_txt.split()]
assert len(emails) == 176
participants = []
participants = db.all("""
    SELECT p.*::participants
    FROM participants p
    WHERE email_address IN %s
""", (tuple(emails), ))

for p in participants:
    p.queue_email('double_emails')
print("Done")
sys.exit()
|
Use a string instead of a file
|
Use a string instead of a file
(so that I can redirect input to heroku from a local file)
|
Python
|
mit
|
eXcomm/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,studio666/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com
|
34035c4b272e9271834c531990c404940eee8633
|
apps/votes/models.py
|
apps/votes/models.py
|
from django.db import models
from meps.models import MEP
class Proposal(models.Model):
    id = models.CharField(max_length=63, primary_key=True)
    title = models.CharField(max_length=255, unique=True)

class SubProposal(models.Model):
    datetime = models.DateTimeField()
    subject = models.CharField(max_length=255)
    part = models.CharField(max_length=255)
    description = models.CharField(max_length=511)
    weight = models.IntegerField(null=True)
    vote = models.ForeignKey(Proposal)
    recommendation = models.CharField(max_length=15, choices=((u'against', u'against'), (u'for', u'for')), null=True)

class Vote(models.Model):
    choice = models.CharField(max_length=15, choices=((u'for', u'for'), (u'against', u'against'), (u'abstention', u'abstention')))
    name = models.CharField(max_length=127)
    mep = models.ForeignKey(MEP)
|
from django.db import models
from meps.models import MEP
class Proposal(models.Model):
    id = models.CharField(max_length=63, primary_key=True)
    title = models.CharField(max_length=255, unique=True)

class SubProposal(models.Model):
    datetime = models.DateTimeField()
    subject = models.CharField(max_length=255)
    part = models.CharField(max_length=255)
    description = models.CharField(max_length=511)
    weight = models.IntegerField(null=True)
    vote = models.ForeignKey(Proposal)
    recommendation = models.CharField(max_length=15, choices=((u'against', u'against'), (u'for', u'for')), null=True)

class Vote(models.Model):
    choice = models.CharField(max_length=15, choices=((u'for', u'for'), (u'against', u'against'), (u'abstention', u'abstention')))
    name = models.CharField(max_length=127)
    sub_proposal = models.ForeignKey(SubProposal)
    mep = models.ForeignKey(MEP)
|
Add a link between vote and subproposal
|
[enh] Add a link between vote and subproposal
|
Python
|
agpl-3.0
|
yohanboniface/memopol-core,yohanboniface/memopol-core,yohanboniface/memopol-core
|
496481e3bd6392a44788fadc7cf517fc36143e96
|
contrib/plugins/w3cdate.py
|
contrib/plugins/w3cdate.py
|
"""
Add a 'w3cdate' key to every entry -- this contains the date in ISO8601 format
WARNING: you must have PyXML installed as part of your python installation
in order for this plugin to work
Place this plugin early in your load_plugins list, so that the w3cdate will
be available to subsequent plugins
"""
__author__ = "Ted Leung <[email protected]>"
__version__ = "$Id:"
__copyright__ = "Copyright (c) 2003 Ted Leung"
__license__ = "Python"
import xml.utils.iso8601
import time
def cb_prepare(args):
    request = args["request"]
    form = request.getHttp()['form']
    config = request.getConfiguration()
    data = request.getData()
    entry_list = data['entry_list']

    for i in range(len(entry_list)):
        entry = entry_list[i]
        t = entry['timetuple']
        # adjust for daylight savings time
        t = t[0],t[1],t[2],t[3]+time.localtime()[-1],t[4],t[5],t[6],t[7],t[8]
        entry['w3cdate'] = xml.utils.iso8601.ctime(time.mktime(t))
|
"""
Add a 'w3cdate' key to every entry -- this contains the date in ISO8601 format
WARNING: you must have PyXML installed as part of your python installation
in order for this plugin to work
Place this plugin early in your load_plugins list, so that the w3cdate will
be available to subsequent plugins
"""
__author__ = "Ted Leung <[email protected]>"
__version__ = "$Id:"
__copyright__ = "Copyright (c) 2003 Ted Leung"
__license__ = "Python"
import xml.utils.iso8601
import time
from Pyblosxom import tools
def cb_story(args):
    request = tools.get_registry()["request"]
    data = request.getData()
    entry_list = data['entry_list']

    for i in range(len(entry_list)):
        entry = entry_list[i]
        t = entry['timetuple']
        # adjust for daylight savings time
        tzoffset = 0
        if time.timezone != 0:
            tzoffset = time.altzone
        entry['w3cdate'] = xml.utils.iso8601.tostring(time.mktime(t),tzoffset)
|
Change to cb_story, clean up TZ handling some more
|
Change to cb_story, clean up TZ handling some more
|
Python
|
mit
|
willkg/douglas,daitangio/pyblosxom,willkg/douglas,daitangio/pyblosxom
|
e5e83b75e250ee3c6d8084e23ee777d519293cb6
|
swprobe/__init__.py
|
swprobe/__init__.py
|
# Copyright (c) 2012 Spil Games
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version_info = (0 , 3, 0)
version = __version__ = ".".join(map(str, version_info))
|
# Copyright (c) 2012 Spil Games
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version_info = (0 , 3, 1)
version = __version__ = ".".join(map(str, version_info))
|
Fix for keystone / swift 1.8.0
|
Fix for keystone / swift 1.8.0
|
Python
|
apache-2.0
|
spilgames/swprobe,spilgames/swprobe
|
9fa55bc43a3f83a57318799ba8b9f2769676bd44
|
test/test_flvlib.py
|
test/test_flvlib.py
|
import unittest
import test_primitives, test_astypes, test_helpers
def get_suite():
    modules = (test_primitives, test_astypes, test_helpers)
    suites = [unittest.TestLoader().loadTestsFromModule(module) for
              module in modules]
    return unittest.TestSuite(suites)

def main():
    unittest.TextTestRunner(verbosity=2).run(get_suite())

if __name__ == "__main__":
    main()
|
import unittest
import test_primitives, test_astypes, test_helpers, test_tags
def get_suite():
    modules = (test_primitives, test_astypes, test_helpers, test_tags)
    suites = [unittest.TestLoader().loadTestsFromModule(module) for
              module in modules]
    return unittest.TestSuite(suites)

def main():
    unittest.TextTestRunner(verbosity=2).run(get_suite())

if __name__ == "__main__":
    main()
|
Include the tags module tests in the full library testsuite.
|
Include the tags module tests in the full library testsuite.
|
Python
|
mit
|
wulczer/flvlib
|
47c2936e65d00a08896b4e60060ff737b7a2f675
|
app/tests/workstations_tests/test_migrations.py
|
app/tests/workstations_tests/test_migrations.py
|
import pytest
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
@pytest.mark.django_db(transaction=True)
def test_workstation_group_migration():
    executor = MigrationExecutor(connection)
    app = "workstations"
    migrate_from = [(app, "0001_initial")]
    migrate_to = [(app, "0004_auto_20190813_1302")]
    executor.migrate(migrate_from)
    old_apps = executor.loader.project_state(migrate_from).apps

    Workstation = old_apps.get_model(app, "Workstation")
    old_ws = Workstation.objects.create(title="foo")
    assert not hasattr(old_ws, "editors_group")
    assert not hasattr(old_ws, "users_group")

    # Reload
    executor.loader.build_graph()
    # Migrate forwards
    executor.migrate(migrate_to)
    new_apps = executor.loader.project_state(migrate_to).apps

    Workstation = new_apps.get_model(app, "Workstation")
    new_ws = Workstation.objects.get(title="foo")
    assert new_ws.editors_group
    assert new_ws.users_group
    assert new_ws.slug == old_ws.slug
    assert new_ws.title == old_ws.title
|
import pytest
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from guardian.shortcuts import get_perms
from grandchallenge.workstations.models import Workstation
from tests.factories import UserFactory
@pytest.mark.django_db(transaction=True)
def test_workstation_group_migration():
    executor = MigrationExecutor(connection)
    app = "workstations"
    migrate_from = [(app, "0001_initial")]
    migrate_to = [(app, "0004_auto_20190813_1302")]
    executor.migrate(migrate_from)
    old_apps = executor.loader.project_state(migrate_from).apps

    user = UserFactory()

    OldWorkstation = old_apps.get_model(app, "Workstation")
    old_ws = OldWorkstation.objects.create(title="foo")
    assert not hasattr(old_ws, "editors_group")
    assert not hasattr(old_ws, "users_group")

    # Reload
    executor.loader.build_graph()
    # Migrate forwards
    executor.migrate(migrate_to)

    new_ws = Workstation.objects.get(title="foo")
    new_ws.add_user(user=user)
    assert new_ws.editors_group
    assert new_ws.users_group
    assert new_ws.slug == old_ws.slug
    assert new_ws.title == old_ws.title
    assert "view_workstation" in get_perms(user, new_ws)
|
Check that the permission migrations work
|
Check that the permission migrations work
|
Python
|
apache-2.0
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
2e18e05659e9ba88f2fcce77259792f84b25e5fa
|
_pydevd_frame_eval/pydevd_frame_eval_main.py
|
_pydevd_frame_eval/pydevd_frame_eval_main.py
|
import os
import sys
IS_PY36_OR_OLDER = False
if (sys.version_info[0] == 3 and sys.version_info[1] >= 6) or sys.version_info[0] > 3:
    IS_PY36_OR_OLDER = True

set_frame_eval = None
stop_frame_eval = None

if IS_PY36_OR_OLDER:
    try:
        from _pydevd_frame_eval.pydevd_frame_evaluator import frame_eval_func, stop_frame_eval
    except ImportError:
        from _pydev_bundle.pydev_monkey import log_error_once

        dirname = os.path.dirname(__file__)
        log_error_once("warning: Debugger speedups for Python 3.6 not found. Run '\"%s\" \"%s\" build_ext --inplace' to build." % (
            sys.executable, os.path.join(dirname, 'setup.py')))
|
import os
import sys
IS_PY36_OR_OLDER = False
if (sys.version_info[0] == 3 and sys.version_info[1] >= 6) or sys.version_info[0] > 3:
    IS_PY36_OR_OLDER = True

set_frame_eval = None
stop_frame_eval = None

use_frame_eval = os.environ.get('PYDEVD_USE_FRAME_EVAL', None)
if use_frame_eval == 'NO':
    frame_eval_func, stop_frame_eval = None, None
else:
    if IS_PY36_OR_OLDER:
        try:
            from _pydevd_frame_eval.pydevd_frame_evaluator import frame_eval_func, stop_frame_eval
        except ImportError:
            from _pydev_bundle.pydev_monkey import log_error_once

            dirname = os.path.dirname(__file__)
            log_error_once("warning: Debugger speedups for Python 3.6 not found. Run '\"%s\" \"%s\" build_ext --inplace' to build." % (
                sys.executable, os.path.join(dirname, 'setup.py')))
|
Add ability to disable frame evaluation
|
Add ability to disable frame evaluation
(cherry picked from commit 6cd89d0)
|
Python
|
epl-1.0
|
Elizaveta239/PyDev.Debugger,Elizaveta239/PyDev.Debugger,fabioz/PyDev.Debugger,fabioz/PyDev.Debugger,fabioz/PyDev.Debugger,Elizaveta239/PyDev.Debugger,Elizaveta239/PyDev.Debugger,fabioz/PyDev.Debugger,Elizaveta239/PyDev.Debugger,fabioz/PyDev.Debugger
|
bda88dfe6e0a2f16f0c3be74a42cf8783aae1d9e
|
django_enum_js/views.py
|
django_enum_js/views.py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.safestring import mark_safe
from django_enum_js import enum_wrapper
def enums_js(request):
    enums = enum_wrapper.get_json_formatted_enums()
    return render_to_response('django_enum_js/enums_js.tpl', { 'enums': mark_safe(enums), }, context_instance=RequestContext(request), mimetype='application/javascript')
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.safestring import mark_safe
from django_enum_js import enum_wrapper
def enums_js(request):
    enums = enum_wrapper.get_json_formatted_enums()
    return render_to_response('django_enum_js/enums_js.tpl', { 'enums': mark_safe(enums), }, context_instance=RequestContext(request), content_type='application/javascript')
|
Fix to support django v1.7
|
Fix to support django v1.7
|
Python
|
mit
|
leifdenby/django_enum_js
|
54296c607b735ce06b3420efecb312f52876e012
|
django_react_templatetags/context_processors.py
|
django_react_templatetags/context_processors.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def react_context_processor(request):
"""Expose a global list of react components to be processed"""
print("react_context_processor is no longer required.")
return {
'REACT_COMPONENTS': [],
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
def react_context_processor(request):
"""Expose a global list of react components to be processed"""
warnings.warn(
"react_context_processor is no longer required.", DeprecationWarning
)
return {
'REACT_COMPONENTS': [],
}
|
Replace warning message with deprecation warning
|
Replace warning message with deprecation warning
|
Python
|
mit
|
Frojd/django-react-templatetags,Frojd/django-react-templatetags,Frojd/django-react-templatetags
|
1fed9f26010f24af14abff9444862ed0861adb63
|
thinglang/runner.py
|
thinglang/runner.py
|
from thinglang.execution.execution import ExecutionEngine
from thinglang.lexer.lexer import lexer
from thinglang.parser.parser import parse
def run(source):
    if not source:
        raise ValueError('Got empty source')

    source = source.strip().replace(' ' * 4, '\t')
    lexical_groups = list(lexer(source))
    root_node = parse(lexical_groups)

    with ExecutionEngine(root_node) as engine:
        engine.execute()
        return engine.results()
|
from thinglang.execution.execution import ExecutionEngine
from thinglang.lexer.lexer import lexer
from thinglang.parser.parser import parse
from thinglang.parser.simplifier import simplify
def run(source):
    if not source:
        raise ValueError('Source cannot be empty')

    source = source.strip().replace(' ' * 4, '\t')
    lexical_groups = list(lexer(source))
    tree = parse(lexical_groups)
    root_node = simplify(tree)

    with ExecutionEngine(root_node) as engine:
        engine.execute()
        return engine.results()
|
Add simplification between parsing and execution
|
Add simplification between parsing and execution
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
6138f02896bc865a98480be36300bf670a6defa8
|
plugin/complete_database.py
|
plugin/complete_database.py
|
import vim
import re
import json
from os import path
current = vim.eval("expand('%:p')")
ccd = vim.eval("l:ccd")
opts = []
with open(ccd) as database:
    data = json.load(database)
    for d in data:
        # hax for headers
        fmatch = re.search(r'(.*)\.(\w+)$', current)
        dmatch = re.search(r'(.*)\.(\w+)$', d['file'])
        if fmatch.group(1) == dmatch.group(1):
            for result in re.finditer(r'-D\s*[^\s]+', d['command']):
                opts.append(result.group(0))
            for result in re.finditer(r'-isystem\s*[^\s]+', d['command']):
                opts.append(result.group(0))
            for result in re.finditer(r'-I\s*([^\s]+)', d['command']):
                opts.append('-I' + path.join(d['directory'], result.group(1)))
            break
vim.command("let l:clang_options = '" + ' '.join(opts) + "'")
|
import vim
import re
import json
from os import path
curr_file = vim.eval("expand('%:p')")
curr_file_noext = path.splitext(curr_file)[0]
ccd = vim.eval("l:ccd")
opts = []
with open(ccd) as database:
    # Search for the right entry in the database matching file names
    for d in json.load(database):
        # This is an entry without a file attribute
        if 'file' not in d:
            continue
        # This entry is about a different file. We consider file names
        # without extension to handle header files which do not have
        # an entry in the database.
        d_file_noext = path.splitext(d['file'])[0]
        if d_file_noext != curr_file_noext:
            continue
        for result in re.finditer(r'-D\s*[^\s]+', d['command']):
            opts.append(result.group(0))
        for result in re.finditer(r'-isystem\s*[^\s]+', d['command']):
            opts.append(result.group(0))
        for result in re.finditer(r'-I\s*([^\s]+)', d['command']):
            opts.append('-I' + path.join(d['directory'], result.group(1)))
        break
vim.command("let l:clang_options = '" + ' '.join(opts) + "'")
|
Replace re by os.path utils
|
compdb: Replace re by os.path utils
Instead of using regular expressions to drop file name ending use
os.path.splitext().
|
Python
|
isc
|
justmao945/vim-clang,justmao945/vim-clang
|
0fe7cd8cf316dc6d4ef547d733b634de64fc768c
|
dbaas/dbaas_services/analyzing/admin/analyze.py
|
dbaas/dbaas_services/analyzing/admin/analyze.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from dbaas_services.analyzing.service import AnalyzeRepositoryService
from dbaas_services.analyzing.forms import AnalyzeRepositoryForm
class AnalyzeRepositoryAdmin(admin.DjangoServicesAdmin):
    form = AnalyzeRepositoryForm
    service_class = AnalyzeRepositoryService
    search_fields = ("database_name", "engine_name",
                     "environment_name", "instance_name", "databaseinfra_name")
    list_filter = ("analyzed_at", "memory_alarm", "cpu_alarm", "volume_alarm")
    list_display = ("analyzed_at", "databaseinfra_name", "database_name", "engine_name",
                    "environment_name", "instance_name", "cpu_alarm",
                    "memory_alarm", "volume_alarm")
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from dbaas_services.analyzing.service import AnalyzeRepositoryService
from dbaas_services.analyzing.forms import AnalyzeRepositoryForm
class AnalyzeRepositoryAdmin(admin.DjangoServicesAdmin):
    form = AnalyzeRepositoryForm
    service_class = AnalyzeRepositoryService
    search_fields = ("database_name", "engine_name",
                     "environment_name", "instance_name", "databaseinfra_name")
    list_filter = ("analyzed_at", "memory_alarm", "cpu_alarm", "volume_alarm", "engine_name",
                   "environment_name", "databaseinfra_name")
    list_display = ("analyzed_at", "databaseinfra_name", "database_name", "engine_name",
                    "environment_name", "instance_name", "cpu_alarm",
                    "memory_alarm", "volume_alarm")
|
Add more options on filters
|
Add more options on filters
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
ecf71bd004d99b679936e07453f5a938e19f71dc
|
megalist_dataflow/setup.py
|
megalist_dataflow/setup.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
setuptools.setup(
    name='megalist_dataflow',
    version='0.1',
    author='Alvaro Stivi',
    author_email='[email protected]',
    url='https://cse.googlesource.com/solutions/megalist',
    install_requires=['googleads==20.0.0', 'google-api-python-client==1.7.9',
                      'bloom-filter==1.3', 'google-cloud-core==1.0.2',
                      'google-cloud-datastore==1.9.0'],
    packages=setuptools.find_packages(),
)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
setuptools.setup(
    name='megalist_dataflow',
    version='0.1',
    author='Alvaro Stivi',
    author_email='[email protected]',
    url='https://cse.googlesource.com/solutions/megalist',
    install_requires=['googleads==20.0.0', 'google-api-python-client==1.7.9',
                      'bloom-filter==1.3', 'google-cloud-core==1.0.2',
                      'google-cloud-datastore==1.9.0, aiohttp==3.6.2'],
    packages=setuptools.find_packages(),
)
|
Add aiohttp as a execution requirement
|
Add aiohttp as a execution requirement
|
Python
|
apache-2.0
|
google/megalista,google/megalista
|
2d09314ab58bb766372dc6e263fb17428b1fd3cd
|
doc/pool_scripts/cats.py
|
doc/pool_scripts/cats.py
|
import os
import photomosaic.flickr
import photomosaic as pm
if not os.path.isfile('~/pools/cats/pool.json'):
    FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
    pm.set_options(flickr_api_key=FLICKR_API_KEY)
    photomosaic.flickr.from_search('cats', '~/pools/cats/')
pool = pm.make_pool('~/pools/cats/*.jpg')
pm.export_pool(pool, '~/pools/cats/pool.json') # save color analysis for future reuse
|
import os
import photomosaic.flickr
import photomosaic as pm
if not os.path.isfile(os.path.expanduser('~/pools/cats/pool.json')):
    FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
    pm.set_options(flickr_api_key=FLICKR_API_KEY)
    photomosaic.flickr.from_search('cats', '~/pools/cats/')
pool = pm.make_pool('~/pools/cats/*.jpg')
pm.export_pool(pool, '~/pools/cats/pool.json') # save color analysis for future reuse
|
Fix check for existing pools.
|
BLD: Fix check for existing pools.
|
Python
|
bsd-3-clause
|
danielballan/photomosaic
|
4b545d2e72080537672bb4ebb990708cad678344
|
entrypoint.py
|
entrypoint.py
|
#!/usr/bin/python3
#
# Define containerized environment for running Diosix on Qemu
#
# On Google Cloud Run: Creates HTTP server on port 8080
# or whatever was specified using the PORT system variable.
# Outputs via the HTTP port. This requires K_SERVICE to be set.
#
# On all other environments: Log to stdout
#
# syntax: entrypoint.py <command>
#
# Author: Chris Williams <[email protected]>
#
import os
import sys
global command_result
from flask import Flask
if __name__ == "__main__":
    if not os.environ.get('K_SERVICE'):
        print('Running locally')
        stream = os.popen('. $HOME/.cargo/env && cd /build/diosix && {}'.format(' '.join(sys.argv[1:])))
        output = stream.read()
        output
    else:
        print('Running HTTP service {} {} {} for Google Cloud', os.environ.get('K_SERVICE'), os.environ.get('K_REVISION'), os.environ.get('K_CONFIGURATION'))
        app = Flask(__name__)

        @app.route('/')
        def ContainerService():
            return 'Container built. Use docker images and docker run in the Google Cloud shell to run this container.\n'

        app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
|
#!/usr/bin/python3
#
# Define containerized environment for running Diosix on Qemu
#
# On Google Cloud Run: Creates HTTP server on port 8080
# or whatever was specified using the PORT system variable.
# Outputs via the HTTP port. This requires K_SERVICE to be set.
#
# On all other environments: Log to stdout
#
# syntax: entrypoint.py <command>
#
# Author: Chris Williams <[email protected]>
#
import os
import sys
global command_result
from flask import Flask
if __name__ == "__main__":
    if not os.environ.get('K_SERVICE'):
        print('Running locally')
        os.system('. $HOME/.cargo/env && cd /build/diosix && {}'.format(' '.join(sys.argv[1:])))
    else:
        print('Running HTTP service {} {} {} for Google Cloud', os.environ.get('K_SERVICE'), os.environ.get('K_REVISION'), os.environ.get('K_CONFIGURATION'))
        app = Flask(__name__)

        @app.route('/')
        def ContainerService():
            return 'Container built. Use docker images and docker run in the Google Cloud shell to run this container.\n'

        app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
|
Debug Google Cloud Run support
|
Debug Google Cloud Run support
|
Python
|
mit
|
diodesign/diosix
|
4a25286506cc8e50b5e1225b12015f4d0da3ccfc
|
smbackend/urls.py
|
smbackend/urls.py
|
from django.conf.urls import patterns, include, url
from services.api import all_views as services_views
from services.api import AccessibilityRuleView
from observations.api import views as observations_views
from rest_framework import routers
from observations.views import obtain_auth_token
from munigeo.api import all_views as munigeo_views
# from django.contrib import admin
# admin.autodiscover()
router = routers.DefaultRouter()
registered_api_views = set()
for view in services_views + munigeo_views + observations_views:
    kwargs = {}
    if view['name'] in registered_api_views:
        continue
    else:
        registered_api_views.add(view['name'])
    if 'base_name' in view:
        kwargs['base_name'] = view['base_name']
    router.register(view['name'], view['class'], **kwargs)

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'smbackend.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # url(r'^', include(v1_api.urls)),
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^open311/', 'services.views.post_service_request', name='services'),
    url(r'^v1/', include(router.urls)),
    url(r'^api-token-auth/', obtain_auth_token)
)
|
from django.conf.urls import patterns, include, url
from services.api import all_views as services_views
from services.api import AccessibilityRuleView
from observations.api import views as observations_views
from rest_framework import routers
from observations.views import obtain_auth_token
from munigeo.api import all_views as munigeo_views
# from django.contrib import admin
# admin.autodiscover()
router = routers.DefaultRouter()
registered_api_views = set()
for view in services_views + munigeo_views + observations_views:
    kwargs = {}
    if view['name'] in registered_api_views:
        continue
    else:
        registered_api_views.add(view['name'])
    if 'base_name' in view:
        kwargs['base_name'] = view['base_name']
    router.register(view['name'], view['class'], **kwargs)

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'smbackend.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # url(r'^', include(v1_api.urls)),
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^open311/', 'services.views.post_service_request', name='services'),
    url(r'^v1/', include(router.urls)),
    url(r'^v1/api-token-auth/', obtain_auth_token)
)
|
Put api token auth endpoint under v1.
|
Put api token auth endpoint under v1.
|
Python
|
agpl-3.0
|
City-of-Helsinki/smbackend,City-of-Helsinki/smbackend
|
7d69bcc6474d954b311251bf077750e0418170cb
|
button.py
|
button.py
|
import RPi.GPIO as GPIO
import time
import os
from optparse import OptionParser
# Parse input arguments
parser = OptionParser()
parser.add_option("-t", "--testGPIO", action="store_true", help="Test GPIO connection, does not call the JS script.")
# The option --pin sets the Input Pin for your Button
# It default to GPIO24 or HardwarePin 19
parser.add_option("-p", "--pin", dest="pin", help="GPIO pin to use. If not provided it defaults to HardwarePin 19.", default=19)
(options, args) = parser.parse_args()
testingGPIO = options.testGPIO != None
buttonPin = options.pin
#sets GPIO Mode to use Hardware Pin Layout
GPIO.setmode(GPIO.BCM)
#sets GPIO Pin to INPUT mode with a Pull Down Resistor
GPIO.setup(buttonPin,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if(testingGPIO):
    print "Press the connected button. If you are pressing but you do not see any further output then....there is something wrong with the connection."

while True:
    #waits for Pin Input and then exectures the script below
    if (GPIO.input(buttonPin)):
        if (testingGPIO):
            print "PIN " + buttonPing + " works correctly."
            continue
        #the script that will be executed (as root)
        os.system("node /home/pi/guest-password-printer/index.js")
|
import RPi.GPIO as GPIO
import time
import os
from optparse import OptionParser
# Parse input arguments
parser = OptionParser()
parser.add_option("-t", "--testGPIO", action="store_true", help="Test GPIO connection, does not call the JS script.")
# The option --pin sets the Input Pin for your Button
# It default to GPIO24 or HardwarePin 19
parser.add_option("-p", "--pin", dest="pin", help="GPIO pin to use. If not provided it defaults to HardwarePin 19.", default=19)
(options, args) = parser.parse_args()
testingGPIO = options.testGPIO != None
buttonPin = options.pin
#sets GPIO Mode to use Hardware Pin Layout
GPIO.setmode(GPIO.BCM)
#sets GPIO Pin to INPUT mode with a Pull Down Resistor
GPIO.setup(buttonPin,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
if(testingGPIO):
    print "Press the connected button. If you are pressing but you do not see any further output then....there is something wrong with the connection."

while True:
    #waits for Pin Input and then exectures the script below
    if (GPIO.input(buttonPin)):
        if (testingGPIO):
            print "PIN " + buttonPin + " works correctly."
            continue
        #the script that will be executed (as root)
        os.system("node index.js")
|
Fix typo and execute JS script found in local folder.
|
Fix typo and execute JS script found in local folder.
|
Python
|
mit
|
henne-/guest-password-printer,henne-/guest-password-printer
|
6c095c0e14c084666b9417b4bd269f396804bfab
|
src/ensign/_interfaces.py
|
src/ensign/_interfaces.py
|
# pylint: skip-file
from zope.interface import Attribute, Interface
class IFlag(Interface):
    """
    Flag Interface.
    Any kind of flag must implement this interface.
    """

    TYPE = Attribute("""Flag type""")
    store = Attribute("""Flag storage backend""")
    name = Attribute("""Flag name""")
    value = Attribute("""Flag value""")
    active = Attribute("""Flag activity indicator""")
    info = Attribute("""Flag descriptive information""")

    def create(name, store, **kwargs):
        """
        Create a new flag with the given name and, optionally, extra data,
        persisted in the given store.
        """

    def _check():
        """
        Check whether the flag current value means the feature is active.
        """

class IStorage(Interface):
    """
    Storage Interface.
    Any kind of backing storage for flags must implement this interface.
    """

    def create(name, type, **kwargs):
        """Create a new flag."""

    def exists(name):
        """Check if the flag exists in the store."""

    def load(name, type):
        """Load a value."""

    def store(name, value, type):
        """Store a value."""

    def used(name):
        """Get last used date."""

    def info(name):
        """Get flag descriptive information."""
|
# pylint: skip-file
from zope.interface import Attribute, Interface
class IFlag(Interface):
    """
    Flag Interface.
    Any kind of flag must implement this interface.
    """

    TYPE = Attribute("""Flag type""")
    store = Attribute("""Flag storage backend""")
    name = Attribute("""Flag name""")
    value = Attribute("""Flag value""")
    active = Attribute("""Flag activity indicator""")
    info = Attribute("""Flag descriptive information""")

    def create(name, store, **kwargs):
        """
        Create a new flag with the given name and, optionally, extra data,
        persisted in the given store.
        """

    def all(store):
        """
        Retrieve all flags in the store.
        """

    def _check():
        """
        Check whether the flag current value means the feature is active.
        """

class IStorage(Interface):
    """
    Storage Interface.
    Any kind of backing storage for flags must implement this interface.
    """

    def create(name, type, **kwargs):
        """Create a new flag."""

    def exists(name):
        """Check if the flag exists in the store."""

    def load(name, type):
        """Load a value."""

    def store(name, value, type):
        """Store a value."""

    def used(name):
        """Get last used date."""

    def info(name):
        """Get flag descriptive information."""

    def all():
        """Get all flags."""
|
Update interface with the latest changes in functionality.
|
Update interface with the latest changes in functionality.
|
Python
|
isc
|
bolsote/py-cd-talk,bolsote/py-cd-talk
|
f535228e38f33263289f28d46e910ccb0a98a381
|
tournamentcontrol/competition/constants.py
|
tournamentcontrol/competition/constants.py
|
import pytz
from dateutil.rrule import DAILY, WEEKLY
from django.utils.translation import ugettext_lazy as _
GENDER_CHOICES = (
    ('M', _('Male')),
    ('F', _('Female')),
    ('X', _('Unspecified')),
)

SEASON_MODE_CHOICES = (
    (WEEKLY, _("Season")),
    (DAILY, _("Tournament")),
)

WIN_LOSE = {
    'W': _("Winner"),
    'L': _("Loser"),
}
###################
# TIME ZONE NAMES #
###################
"""
Ideally this would be a better list for the specific uses of the site in
question. For example, it is perhaps much easier to list just the Australian
time zones for sites deployed for Australian customers.
This is also implemented in touchtechnology.common.forms and should probably
be moved and better leveraged in future release.
See https://bitbucket.org/touchtechnology/common/issue/16/
"""
PYTZ_TIME_ZONE_CHOICES = [('\x20Standard', (('UTC', 'UTC'), ('GMT', 'GMT')))]
for iso, name in pytz.country_names.items():
    values = sorted(pytz.country_timezones.get(iso, []))
    names = [s.rsplit("/", 1)[1].replace("_", " ") for s in values]
    PYTZ_TIME_ZONE_CHOICES.append((name, zip(values, names)))
PYTZ_TIME_ZONE_CHOICES.sort()
|
import pytz
from dateutil.rrule import DAILY, WEEKLY
from django.utils.translation import ugettext_lazy as _
GENDER_CHOICES = (
    ('M', _('Male')),
    ('F', _('Female')),
    ('X', _('Unspecified')),
)

SEASON_MODE_CHOICES = (
    (WEEKLY, _("Season")),
    (DAILY, _("Tournament")),
)

WIN_LOSE = {
    'W': _("Winner"),
    'L': _("Loser"),
}
###################
# TIME ZONE NAMES #
###################
"""
Ideally this would be a better list for the specific uses of the site in
question. For example, it is perhaps much easier to list just the Australian
time zones for sites deployed for Australian customers.
This is also implemented in touchtechnology.common.forms and should probably
be moved and better leveraged in future release.
See https://bitbucket.org/touchtechnology/common/issue/16/
"""
PYTZ_TIME_ZONE_CHOICES = [('\x20Standard', (('UTC', 'UTC'), ('GMT', 'GMT')))]
for iso, name in pytz.country_names.items():
values = sorted(pytz.country_timezones.get(iso, []))
names = [s.rsplit("/", 1)[1].replace("_", " ") for s in values]
PYTZ_TIME_ZONE_CHOICES.append((name, [each for each in zip(values, names)]))
PYTZ_TIME_ZONE_CHOICES.sort()
|
Use list comprehension to evaluate PYTZ_TIME_ZONE_CHOICES
|
Use list comprehension to evaluate PYTZ_TIME_ZONE_CHOICES
During Python 3 conversion this must have been missed.
|
Python
|
bsd-3-clause
|
goodtune/vitriolic,goodtune/vitriolic,goodtune/vitriolic,goodtune/vitriolic
|
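Why the change above matters: in Python 3, zip() returns a one-shot iterator, so storing it directly in PYTZ_TIME_ZONE_CHOICES leaves the choices empty after the first pass over them (for example, the first form render). A minimal demonstration; note the committed list comprehension is equivalent to calling list() on the zip object:
pairs = zip(['a', 'b'], ['A', 'B'])
print(list(pairs))  # [('a', 'A'), ('b', 'B')]
print(list(pairs))  # [] -- the iterator is already exhausted
pairs = list(zip(['a', 'b'], ['A', 'B']))  # materialize once
print(list(pairs))  # [('a', 'A'), ('b', 'B')], on every pass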
c0787c468e1b71d7e9db93b5f5990ae9bb506d82
|
pystruct/datasets/dataset_loaders.py
|
pystruct/datasets/dataset_loaders.py
|
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is omitted from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
data_file = open(join(module_path, 'scene.pickle'))
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
data_file = open(join(module_path, 'snakes.pickle'))
return cPickle.load(data_file)
|
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is omitted from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
data_file = open(join(module_path, 'scene.pickle'),'rb')
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
data_file = open(join(module_path, 'snakes.pickle'),'rb')
return cPickle.load(data_file)
|
FIX other two sample data load for Windows
|
FIX other two sample data load for Windows
|
Python
|
bsd-2-clause
|
massmutual/pystruct,pystruct/pystruct,amueller/pystruct,d-mittal/pystruct,wattlebird/pystruct,pystruct/pystruct,d-mittal/pystruct,wattlebird/pystruct,massmutual/pystruct,amueller/pystruct
|
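The 'rb' flag is the crux of the fix above: with C stdio text mode on Windows, '\r\n' is translated and a 0x1A byte can be treated as end-of-file, corrupting a binary pickle stream. A hedged sketch of the safer pattern (the helper and its context manager are illustrative additions, not the module's code):
import pickle  # the module above uses cPickle, its Python 2 equivalent
def load_pickle(path):
    # Binary mode is required for pickled bytes on every platform.
    with open(path, 'rb') as data_file:
        return pickle.load(data_file)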
f0ef4f5e269d7f2d7fd347e8f458c1c9ce1ffb34
|
mqueue/hooks/redis/__init__.py
|
mqueue/hooks/redis/__init__.py
|
import redis
import time
from mqueue.conf import DOMAIN
from mqueue.hooks.redis import serializer
from mqueue.conf import HOOKS
conf = HOOKS["redis"]
R = redis.StrictRedis(host=conf["host"], port=conf["port"], db=conf["db"])
event_num = int(time.time())
def save(event, conf):
name = DOMAIN+"_event"+str(event_num)
event.request = event.request.replace("\n", "//")
data = serializer.Pack(event)
R.set(name, data)
|
import redis
import time
from mqueue.conf import DOMAIN
from mqueue.hooks.redis import serializer
from mqueue.conf import HOOKS
conf = HOOKS["redis"]
R = redis.StrictRedis(host=conf["host"], port=conf["port"], db=conf["db"])
event_num = int(time.time())
def save(event, conf):
global event_num
global R
name = DOMAIN + "_event" + str(event_num)
event.request = event.request.replace("\n", "//")
data = serializer.Pack(event)
R.set(name, data)
event_num += 1
|
Fix bug in redis hook
|
Fix bug in redis hook
|
Python
|
mit
|
synw/django-mqueue,synw/django-mqueue,synw/django-mqueue
|
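The bug fixed above: without the global statement, the new event_num += 1 would make event_num a local name inside save() and raise UnboundLocalError before the counter could be read. A minimal reproduction of the pitfall:
counter = 0
def bump_broken():
    counter += 1  # UnboundLocalError: the assignment makes 'counter' local
def bump_fixed():
    global counter
    counter += 1
bump_fixed()
print(counter)  # 1
Strictly speaking, global R is redundant in the committed code, since R is only read and never rebound, but it is harmless.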
d5167d8ba1b3107e5ce121eca76b5496bf8d6448
|
qipipe/registration/ants/template.py
|
qipipe/registration/ants/template.py
|
import os
import logging
import envoy
from .ants_error import ANTSError
def create_template(metric, files):
"""
Builds a template from the given image files.
:param metric: the similarity metric
:param files: the image files
:return: the template file name
"""
CMD = "buildtemplateparallel.sh -d 2 -c 2 -j 4 -d 2 -s {metric} -o {output} {files}"
PREFIX = 'reg_'
SUFFIX = 'template.nii.gz'
tmpl = PREFIX + SUFFIX
if os.path.exists(tmpl):
logging.info("Registration template already exists: %s" % tmpl)
return tmpl
cmd = CMD.format(metric=metric.name, output=PREFIX, files=' '.join(files))
logging.info("Building the %s registration template with the following command:" % tmpl)
logging.info(cmd)
r = envoy.run(cmd)
if r.status_code:
logging.error("Build registration template failed with error code %d" % r.status_code)
logging.error(r.std_err)
raise ANTSError("Build registration template unsuccessful; see the log for details")
if not os.path.exists(tmpl):
logging.error("Build registration template was not created.")
raise ANTSError("Build registration template unsuccessful; see the log for details")
logging.info("Built the registration template %s." % tmpl)
return tmpl
|
import os
import logging
import envoy
from .ants_error import ANTSError
def create_template(metric, files):
"""
Builds a template from the given image files.
:param metric: the similarity metric
:param files: the image files
:return: the template file name
"""
CMD = "buildtemplateparallel.sh -d 2 -c 2 -j 4 -d 2 -s {metric} -o {output} {files}"
PREFIX = 'reg_'
SUFFIX = 'template.nii.gz'
tmpl = PREFIX + SUFFIX
if os.path.exists(tmpl):
logging.info("Registration template already exists: %s" % tmpl)
return tmpl
cmd = CMD.format(metric=metric.name, output=PREFIX, files=' '.join(files))
logging.info("Building the %s registration template with the following command:" % tmpl)
logging.info(cmd[:80])
r = envoy.run(cmd)
if r.status_code:
logging.error("Build registration template failed with error code %d" % r.status_code)
logging.error(r.std_err)
raise ANTSError("Build registration template unsuccessful; see the log for details")
if not os.path.exists(tmpl):
logging.error("Build registration template was not created.")
raise ANTSError("Build registration template unsuccessful; see the log for details")
logging.info("Built the registration template %s." % tmpl)
return tmpl
|
Truncate a long log message.
|
Truncate a long log message.
|
Python
|
bsd-2-clause
|
ohsu-qin/qipipe
|
7ee29cfee740d6096fca8379253073077890a54c
|
examples/util/wordcount_redis.py
|
examples/util/wordcount_redis.py
|
from disco.schemes.scheme_redis import redis_output_stream
from disco.worker.task_io import task_output_stream
from disco.core import Job, result_iterator
class WordCount(Job):
reduce_output_stream = (task_output_stream, redis_output_stream)
@staticmethod
def map(line, params):
k, v = line
yield v, 1
@staticmethod
def reduce(iter, params):
from disco.util import kvgroup
for word, counts in kvgroup(sorted(iter)):
yield word, sum(counts)
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print "Usage: python wordcount_redis.py <input redis> <output redis>"
sys.exit(1)
from wordcount_redis import WordCount
job = WordCount()
job.params = {}
job.params['url'] = sys.argv[2]
job.run(input=[sys.argv[1]])
job.wait(show=True)
|
"""
Usage:
python wordcount_redis.py redis://redis_server:6379:0 redis://redis_server:6379:1
The input is read from db 0 and the output is written to db 1. The inputs
should be of the form (key, list_of_values) (they are read from the server with the
lrange command. See the redis documentation for more info).
The output will also be of the form (key, list_of_values). The reason we use
this approach is to unify the mechanism for the intermediate input-outputs
(which must be (key, list_of_values) with the inputs and outputs).
"""
from disco.schemes.scheme_redis import redis_output_stream
from disco.worker.task_io import task_output_stream
from disco.core import Job, result_iterator
class WordCount(Job):
reduce_output_stream = (task_output_stream, redis_output_stream)
@staticmethod
def map(line, params):
k, v = line
yield v, 1
@staticmethod
def reduce(iter, params):
from disco.util import kvgroup
for word, counts in kvgroup(sorted(iter)):
yield word, sum(counts)
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print "Usage: python wordcount_redis.py <input redis> <output redis>"
sys.exit(1)
from wordcount_redis import WordCount
job = WordCount()
job.params = {}
job.params['url'] = sys.argv[2]
job.run(input=[sys.argv[1]])
job.wait(show=True)
|
Add more info to the redis example.
|
examples: Add more info to the redis example.
|
Python
|
bsd-3-clause
|
seabirdzh/disco,discoproject/disco,oldmantaiter/disco,pooya/disco,oldmantaiter/disco,simudream/disco,pombredanne/disco,seabirdzh/disco,discoproject/disco,simudream/disco,discoproject/disco,ErikDubbelboer/disco,mozilla/disco,mozilla/disco,pombredanne/disco,ErikDubbelboer/disco,mwilliams3/disco,pooya/disco,mwilliams3/disco,pombredanne/disco,beni55/disco,pombredanne/disco,pooya/disco,mwilliams3/disco,pombredanne/disco,beni55/disco,seabirdzh/disco,ktkt2009/disco,ktkt2009/disco,discoproject/disco,seabirdzh/disco,seabirdzh/disco,mwilliams3/disco,beni55/disco,beni55/disco,simudream/disco,ktkt2009/disco,ErikDubbelboer/disco,ktkt2009/disco,mwilliams3/disco,simudream/disco,oldmantaiter/disco,mozilla/disco,ErikDubbelboer/disco,simudream/disco,mozilla/disco,beni55/disco,ErikDubbelboer/disco,pooya/disco,ktkt2009/disco,discoproject/disco,oldmantaiter/disco,oldmantaiter/disco
|
1c939a99e377ff1dfe037c47dd99f635d3cb0a1f
|
polling_stations/apps/data_collection/management/commands/import_cotswold.py
|
polling_stations/apps/data_collection/management/commands/import_cotswold.py
|
from data_collection.management.commands import BaseXpressWebLookupCsvImporter
class Command(BaseXpressWebLookupCsvImporter):
council_id = 'E07000079'
addresses_name = 'CotswoldPropertyPostCodePollingStationWebLookup-2017-03-27.TSV'
stations_name = 'CotswoldPropertyPostCodePollingStationWebLookup-2017-03-27.TSV'
elections = [
'local.gloucestershire.2017-05-04',
'parl.2017-06-08'
]
csv_delimiter = '\t'
|
from data_collection.management.commands import BaseXpressWebLookupCsvImporter
class Command(BaseXpressWebLookupCsvImporter):
council_id = 'E07000079'
addresses_name = 'CotswoldPropertyPostCodePollingStationWebLookup-2017-03-27.TSV'
stations_name = 'CotswoldPropertyPostCodePollingStationWebLookup-2017-03-27.TSV'
elections = [
'local.gloucestershire.2017-05-04',
#'parl.2017-06-08'
]
csv_delimiter = '\t'
|
Remove Cotswold election id (update expected)
|
Remove Cotswold election id (update expected)
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
|
ad3554ae58f65a295ac94c131d8193e0b2e7e6f8
|
termsuggester/word2vec.py
|
termsuggester/word2vec.py
|
from gensim.models import Word2Vec
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class Word2VecSuggester():
def __init__(self, modelfile):
try:
self.model = Word2Vec.load(modelfile)
logger.info('Load Word2Vec model "{}"'.format(modelfile))
except IOError:
logger.warn('Unable to load Word2Vec model "{}"'.format(modelfile))
logger.warn('Was the train_word2vec script run?')
self.model = None
def suggest_terms(self, query_word):
if self.model is not None:
results = self.model.most_similar(positive=[query_word],
negative=[])
suggestions = {}
for word, weight in results:
suggestions[word] = weight
return suggestions
else:
return {}
|
from gensim.models import Word2Vec
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class Word2VecSuggester():
def __init__(self, modelfile):
try:
self.model = Word2Vec.load(modelfile)
logger.info('Load Word2Vec model "{}"'.format(modelfile))
except IOError:
logger.warn('Unable to load Word2Vec model "{}"'.format(modelfile))
logger.warn('Was the train_word2vec script run?')
self.model = None
def suggest_terms(self, query_word):
# TODO: make the number of terms returned a parameter of the function
if self.model is not None:
results = self.model.most_similar(positive=[query_word],
negative=[], topn=10)
suggestions = {}
for word, weight in results:
suggestions[word] = weight
return suggestions
else:
return {}
|
Add reminder to look at the number of terms returned
|
Add reminder to look at the number of terms returned
|
Python
|
apache-2.0
|
nlesc-sherlock/concept-search,nlesc-sherlock/concept-search,nlesc-sherlock/concept-search,nlesc-sherlock/concept-search
|
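In gensim, most_similar() already defaults to topn=10, so the explicit argument mainly documents intent; the TODO could be resolved by threading the value through. A sketch under that assumption (the extra parameter is illustrative, not the project's API):
def suggest_terms(self, query_word, topn=10):
    if self.model is None:
        return {}
    results = self.model.most_similar(positive=[query_word], topn=topn)
    # most_similar returns (word, similarity) pairs, highest similarity first
    return {word: weight for word, weight in results}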
73a375a3adb140c270444e886b3df842e0b28a86
|
numpy/core/tests/test_print.py
|
numpy/core/tests/test_print.py
|
import numpy as np
from numpy.testing import *
def check_float_type(tp):
for x in [0, 1,-1, 1e10, 1e20] :
assert_equal(str(tp(x)), str(float(x)))
def test_float_types():
""" Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float and np.longdouble aren't the same as the
python float precision.
"""
for t in [np.float, np.double, np.longdouble] :
yield check_float_type, t
def check_complex_type(tp):
for x in [0, 1,-1, 1e10, 1e20] :
assert_equal(str(tp(x)), str(complex(x)))
assert_equal(str(tp(x*1j)), str(complex(x*1j)))
assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)))
def test_complex_types():
"""Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float and np.longdouble aren't the same as the
python float precision.
"""
for t in [np.cfloat, np.cdouble, np.clongdouble] :
yield check_complex_type, t
if __name__ == "__main__":
run_module_suite()
|
import numpy as np
from numpy.testing import *
def check_float_type(tp):
for x in [0, 1,-1, 1e10, 1e20] :
assert_equal(str(tp(x)), str(float(x)))
def test_float_types():
""" Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float and np.longdouble aren't the same as the
python float precision.
"""
for t in [np.float32, np.double, np.longdouble] :
yield check_float_type, t
def check_complex_type(tp):
for x in [0, 1,-1, 1e10, 1e20] :
assert_equal(str(tp(x)), str(complex(x)))
assert_equal(str(tp(x*1j)), str(complex(x*1j)))
assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)))
def test_complex_types():
"""Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float and np.longdouble aren't the same as the
python float precision.
"""
for t in [np.complex64, np.cdouble, np.clongdouble] :
yield check_complex_type, t
if __name__ == "__main__":
run_module_suite()
|
Fix formatting tests: cfloat and cdouble as well as np.float and np.double are the same; make sure we test 4 bytes float.
|
Fix formatting tests: cfloat and cdouble as well as np.float and np.double are the same; make sure we test 4 bytes float.
|
Python
|
bsd-3-clause
|
chiffa/numpy,gfyoung/numpy,ekalosak/numpy,hainm/numpy,sigma-random/numpy,GrimDerp/numpy,sonnyhu/numpy,embray/numpy,GaZ3ll3/numpy,nbeaver/numpy,dimasad/numpy,gmcastil/numpy,musically-ut/numpy,kirillzhuravlev/numpy,simongibbons/numpy,ContinuumIO/numpy,simongibbons/numpy,mindw/numpy,ogrisel/numpy,rgommers/numpy,pelson/numpy,grlee77/numpy,mwiebe/numpy,mindw/numpy,SiccarPoint/numpy,pizzathief/numpy,AustereCuriosity/numpy,shoyer/numpy,jankoslavic/numpy,maniteja123/numpy,Yusa95/numpy,pdebuyl/numpy,dch312/numpy,jakirkham/numpy,mindw/numpy,cowlicks/numpy,behzadnouri/numpy,andsor/numpy,andsor/numpy,charris/numpy,joferkington/numpy,ewmoore/numpy,behzadnouri/numpy,brandon-rhodes/numpy,Eric89GXL/numpy,dwf/numpy,MSeifert04/numpy,rajathkumarmp/numpy,dato-code/numpy,dwillmer/numpy,Srisai85/numpy,matthew-brett/numpy,CMartelLML/numpy,ewmoore/numpy,behzadnouri/numpy,bertrand-l/numpy,NextThought/pypy-numpy,Linkid/numpy,felipebetancur/numpy,jakirkham/numpy,mhvk/numpy,sigma-random/numpy,rmcgibbo/numpy,KaelChen/numpy,charris/numpy,mathdd/numpy,yiakwy/numpy,empeeu/numpy,pelson/numpy,cjermain/numpy,ddasilva/numpy,mwiebe/numpy,GrimDerp/numpy,kirillzhuravlev/numpy,pbrod/numpy,rajathkumarmp/numpy,tynn/numpy,dch312/numpy,jakirkham/numpy,mindw/numpy,stuarteberg/numpy,Yusa95/numpy,pdebuyl/numpy,dwf/numpy,MSeifert04/numpy,GaZ3ll3/numpy,astrofrog/numpy,dimasad/numpy,dato-code/numpy,jankoslavic/numpy,ewmoore/numpy,dwillmer/numpy,Srisai85/numpy,bmorris3/numpy,sinhrks/numpy,jorisvandenbossche/numpy,jakirkham/numpy,rhythmsosad/numpy,cjermain/numpy,jorisvandenbossche/numpy,SiccarPoint/numpy,pelson/numpy,nguyentu1602/numpy,Dapid/numpy,SunghanKim/numpy,bmorris3/numpy,ESSS/numpy,seberg/numpy,shoyer/numpy,hainm/numpy,trankmichael/numpy,moreati/numpy,grlee77/numpy,moreati/numpy,tynn/numpy,b-carter/numpy,anntzer/numpy,GrimDerp/numpy,dch312/numpy,ajdawson/numpy,behzadnouri/numpy,WarrenWeckesser/numpy,dwillmer/numpy,endolith/numpy,ekalosak/numpy,njase/numpy,dwf/numpy,drasmuss/numpy,seberg/numpy,chatcannon/numpy,cowlicks/numpy,njase/numpy,stefanv/numpy,has2k1/numpy,Linkid/numpy,yiakwy/numpy,madphysicist/numpy,tdsmith/numpy,mattip/numpy,bmorris3/numpy,sinhrks/numpy,leifdenby/numpy,numpy/numpy-refactor,rherault-insa/numpy,kiwifb/numpy,larsmans/numpy,MSeifert04/numpy,astrofrog/numpy,rajathkumarmp/numpy,dimasad/numpy,MichaelAquilina/numpy,mortada/numpy,mathdd/numpy,MichaelAquilina/numpy,MSeifert04/numpy,andsor/numpy,pdebuyl/numpy,gfyoung/numpy,naritta/numpy,pizzathief/numpy,ekalosak/numpy,numpy/numpy-refactor,hainm/numpy,bringingheavendown/numpy,rmcgibbo/numpy,anntzer/numpy,rhythmsosad/numpy,argriffing/numpy,embray/numpy,Yusa95/numpy,ViralLeadership/numpy,embray/numpy,ssanderson/numpy,numpy/numpy,jonathanunderwood/numpy,dimasad/numpy,bringingheavendown/numpy,mattip/numpy,cjermain/numpy,WillieMaddox/numpy,ogrisel/numpy,ewmoore/numpy,rmcgibbo/numpy,rgommers/numpy,BMJHayward/numpy,jschueller/numpy,charris/numpy,Srisai85/numpy,Eric89GXL/numpy,stefanv/numpy,jorisvandenbossche/numpy,anntzer/numpy,embray/numpy,utke1/numpy,sinhrks/numpy,shoyer/numpy,stuarteberg/numpy,ChristopherHogan/numpy,abalkin/numpy,simongibbons/numpy,cowlicks/numpy,numpy/numpy-refactor,sonnyhu/numpy,kirillzhuravlev/numpy,bertrand-l/numpy,ogrisel/numpy,mortada/numpy,jschueller/numpy,skwbc/numpy,pyparallel/numpy,GrimDerp/numpy,ViralLeadership/numpy,ogrisel/numpy,skymanaditya1/numpy,ddasilva/numpy,abalkin/numpy,bertrand-l/numpy,astrofrog/numpy,madphysicist/numpy,embray/numpy,rhythmsosad/numpy,Anwesh43/numpy,skymanaditya1/numpy,sonnyhu/numpy,madphysicist/numpy,AustereCuriosity/numpy,chiffa/numpy,ogrisel/numpy,brandon-rhodes/numpy,pbrod/numpy,pelson/numpy,rudimeier/numpy,BabeNovelty/numpy,pizzathief/numpy,MichaelAquilina/numpy,SunghanKim/numpy,larsmans/numpy,sigma-random/numpy,grlee77/numpy,KaelChen/numpy,jakirkham/numpy,Yusa95/numpy,tacaswell/numpy,empeeu/numpy,solarjoe/numpy,BMJHayward/numpy,jakirkham/numpy,pizzathief/numpy,mathdd/numpy,MSeifert04/numpy,matthew-brett/numpy,stefanv/numpy,ahaldane/numpy,grlee77/numpy,SiccarPoint/numpy,drasmuss/numpy,bmorris3/numpy,NextThought/pypy-numpy,rhythmsosad/numpy,naritta/numpy,chatcannon/numpy,Linkid/numpy,numpy/numpy-refactor,drasmuss/numpy,jonathanunderwood/numpy,kirillzhuravlev/numpy,rudimeier/numpy,dato-code/numpy,andsor/numpy,astrofrog/numpy,GaZ3ll3/numpy,felipebetancur/numpy,stefanv/numpy,dimasad/numpy,b-carter/numpy,WillieMaddox/numpy,groutr/numpy,has2k1/numpy,bmorris3/numpy,ewmoore/numpy,jorisvandenbossche/numpy,tdsmith/numpy,sinhrks/numpy,seberg/numpy,joferkington/numpy,pizzathief/numpy,dch312/numpy,Anwesh43/numpy,mingwpy/numpy,jschueller/numpy,WarrenWeckesser/numpy,Eric89GXL/numpy,musically-ut/numpy,argriffing/numpy,cjermain/numpy,numpy/numpy-refactor,mattip/numpy,pbrod/numpy,ChristopherHogan/numpy,tacaswell/numpy,bringingheavendown/numpy,SunghanKim/numpy,seberg/numpy,Anwesh43/numpy,dwillmer/numpy,nguyentu1602/numpy,groutr/numpy,mhvk/numpy,numpy/numpy,MSeifert04/numpy,SunghanKim/numpy,ekalosak/numpy,simongibbons/numpy,Srisai85/numpy,mindw/numpy,githubmlai/numpy,ahaldane/numpy,stuarteberg/numpy,GaZ3ll3/numpy,gfyoung/numpy,hainm/numpy,NextThought/pypy-numpy,rajathkumarmp/numpy,kiwifb/numpy,leifdenby/numpy,MaPePeR/numpy,NextThought/pypy-numpy,endolith/numpy,joferkington/numpy,ahaldane/numpy,Linkid/numpy,jankoslavic/numpy,mattip/numpy,anntzer/numpy,gmcastil/numpy,rudimeier/numpy,BabeNovelty/numpy,ContinuumIO/numpy,jankoslavic/numpy,stefanv/numpy,naritta/numpy,matthew-brett/numpy,mwiebe/numpy,cowlicks/numpy,moreati/numpy,dwillmer/numpy,tdsmith/numpy,GaZ3ll3/numpy,CMartelLML/numpy
|
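The rationale for the fix above: np.float is simply Python's built-in float (a C double) and np.cfloat aliases np.cdouble, so the original lists exercised double precision twice and single precision never. A quick check, assuming a NumPy release old enough to still define these aliases (np.float was removed in NumPy 1.24):
import numpy as np
print(np.dtype(np.float32).itemsize)  # 4 -- the single-precision float
print(np.dtype(np.double).itemsize)   # 8
print(np.float is float)              # True: just the built-in float
print(np.cfloat is np.cdouble)        # True: both name complex128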
9c7660fd63bc1c48a0533e867c7d18faf9d90c03
|
main.py
|
main.py
|
#!/usr/bin/env python
from morss import main
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from morss import main, cgi_wrapper as application
if __name__ == '__main__':
main()
|
Make use thru uwsgi easier
|
Make use thru uwsgi easier
Import the main cgi_wrapper as "application" in main.py
|
Python
|
agpl-3.0
|
pictuga/morss,pictuga/morss,pictuga/morss
|
3cff942af436f16aab2078e6aeedd3073f4a5522
|
flask_roots/extension.py
|
flask_roots/extension.py
|
from __future__ import absolute_import
import os
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.images import Images
# from flask.ext.mail import Mail
from flask.ext.login import LoginManager
from flask.ext.acl import AuthManager
class Roots(object):
def __init__(self, app):
self.extensions = {}
self.init_app(app)
def init_app(self, app):
# Establish two-way links.
self.app = app
app.roots = self
app.extensions['roots'] = self
from .config import setup_config
setup_config(app)
from .logs import setup_logs
setup_logs(app)
from .session import setup_session
setup_session(app)
self.extensions['login_manager'] = LoginManager(app)
self.extensions['auth'] = AuthManager(app)
from .mako import MakoTemplates
self.extensions['mako'] = MakoTemplates(app)
self.extensions['images'] = Images(app)
self.extensions['db'] = db = SQLAlchemy(app)
db.metadata.bind = db.engine # WTF do I need to do this for?!
from .routing import setup_routing
setup_routing(app)
from .errors import setup_errors
setup_errors(app)
|
from __future__ import absolute_import
import os
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.images import Images
from flask.ext.mail import Mail
from flask.ext.login import LoginManager
from flask.ext.acl import AuthManager
class Roots(object):
def __init__(self, app):
self.extensions = {}
self.init_app(app)
def init_app(self, app):
# Establish two-way links.
self.app = app
app.roots = self
app.extensions['roots'] = self
from .config import setup_config
setup_config(app)
from .logs import setup_logs
setup_logs(app)
from .session import setup_session
setup_session(app)
self.extensions['login_manager'] = login = LoginManager(app)
login.user_callback = lambda uid: None
self.extensions['auth'] = AuthManager(app)
from .mako import MakoTemplates
self.extensions['mako'] = MakoTemplates(app)
self.extensions['images'] = Images(app)
self.extensions['db'] = db = SQLAlchemy(app)
db.metadata.bind = db.engine # WTF do I need to do this for?!
self.extensions['mail'] = Mail(app)
from .routing import setup_routing
setup_routing(app)
from .errors import setup_errors
setup_errors(app)
|
Add Flask-Mail, and null user loader
|
Add Flask-Mail, and null user loader
|
Python
|
bsd-3-clause
|
mikeboers/Flask-Roots,mikeboers/Flask-Roots
|
054dc32d30ca9175a6c8b40af52491b8e3a98978
|
heufybot/modules/util/webutils.py
|
heufybot/modules/util/webutils.py
|
from twisted.plugin import IPlugin
from heufybot.moduleinterface import BotModule, IBotModule
from heufybot.utils.logutils import logExceptionTrace
from zope.interface import implements
import re, requests
class WebUtils(BotModule):
implements(IPlugin, IBotModule)
name = "WebUtils"
canDisable = False
def hookBot(self, bot):
self.bot = bot
def actions(self):
return [ ("fetch-url", 1, self.fetchURL) ]
def fetchURL(self, url, params = None, extraHeaders = None):
headers = { "user-agent": "Mozilla/5.0" }
if extraHeaders:
headers.update(extraHeaders)
try:
request = requests.get(url, params=params, headers=headers)
pageType = request.headers["content-type"]
if not re.match("^(text/.*|application/((rss|atom|rdf)\+)?xml(;.*)?|application/(.*)json(;.*)?)$", pageType):
# Make sure we don't download any unwanted things
return None
return request
except requests.RequestException as ex:
logExceptionTrace("Error while fetching from {}: {}".format(url, ex))
return None
webutils = WebUtils()
|
from twisted.plugin import IPlugin
from twisted.python import log
from heufybot.moduleinterface import BotModule, IBotModule
from heufybot.utils.logutils import logExceptionTrace
from zope.interface import implements
import logging, re, requests
class WebUtils(BotModule):
implements(IPlugin, IBotModule)
name = "WebUtils"
canDisable = False
def hookBot(self, bot):
self.bot = bot
def actions(self):
return [ ("fetch-url", 1, self.fetchURL) ]
def fetchURL(self, url, params = None, extraHeaders = None):
headers = { "user-agent": "Mozilla/5.0" }
if extraHeaders:
headers.update(extraHeaders)
try:
request = requests.get(url, params=params, headers=headers)
pageType = request.headers["content-type"]
if not re.match("^(text/.*|application/((rss|atom|rdf)\+)?xml(;.*)?|application/(.*)json(;.*)?)$", pageType):
# Make sure we don't download any unwanted things
return None
log.msg(request.url, level=logging.DEBUG)
return request
except requests.RequestException as ex:
logExceptionTrace("Error while fetching from {}: {}".format(url, ex))
return None
webutils = WebUtils()
|
Debug the URL that's being requested
|
Debug the URL that's being requested
|
Python
|
mit
|
Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot
|
1c9f12f808ffa0f1d4f16ea9f35021a83126243f
|
get-solr-download-url.py
|
get-solr-download-url.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import sys
import requests
# Try to import urljoin from the Python 3 reorganized stdlib first:
try:
from urlparse.parse import urljoin
except ImportError:
from urlparse import urljoin
if len(sys.argv) != 2:
print('Usage: %s SOLR_VERSION' % sys.argv[0], file=sys.stderr)
sys.exit(1)
solr_version = sys.argv[1]
tarball = 'solr-{0}.tgz'.format(solr_version)
dist_path = 'lucene/solr/{0}/{1}'.format(solr_version, tarball)
download_url = urljoin('http://archive.apache.org/dist/', dist_path)
mirror_response = requests.get("http://www.apache.org/dyn/mirrors/mirrors.cgi/%s?asjson=1" % dist_path)
if mirror_response.ok:
mirror_data = mirror_response.json()
download_url = urljoin(mirror_data['preferred'], mirror_data['path_info'])
print(download_url)
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import sys
import requests
# Try to import urljoin from the Python 3 reorganized stdlib first:
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
if len(sys.argv) != 2:
print('Usage: %s SOLR_VERSION' % sys.argv[0], file=sys.stderr)
sys.exit(1)
solr_version = sys.argv[1]
tarball = 'solr-{0}.tgz'.format(solr_version)
dist_path = 'lucene/solr/{0}/{1}'.format(solr_version, tarball)
download_url = urljoin('http://archive.apache.org/dist/', dist_path)
mirror_response = requests.get("http://www.apache.org/dyn/mirrors/mirrors.cgi/%s?asjson=1" % dist_path)
if mirror_response.ok:
mirror_data = mirror_response.json()
download_url = urljoin(mirror_data['preferred'], mirror_data['path_info'])
print(download_url)
|
Update test Solr download script to work with default Python 3
|
Update test Solr download script to work with default Python 3
|
Python
|
bsd-3-clause
|
upayavira/pysolr,mylanium/pysolr,mbeacom/pysolr,shasha79/pysolr,django-searchstack/skisolr,toastdriven/pysolr,CANTUS-Project/pysolr-tornado,toastdriven/pysolr,mylanium/pysolr,mbeacom/pysolr,upayavira/pysolr,django-haystack/pysolr,swistakm/pysolr,CANTUS-Project/pysolr-tornado,swistakm/pysolr,django-haystack/pysolr,rokaka/pysolr,rokaka/pysolr,django-searchstack/skisolr,shasha79/pysolr
|
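The original fallback could never help on Python 3: the correct module is urllib.parse, and since urlparse.parse does not exist anywhere, Python 3 failed both the try branch and the urlparse fallback. The corrected pattern, with a note on urljoin's joining behavior (the Solr version in the example is arbitrary):
try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin      # Python 2
# A base URL ending in '/' joined with a relative path appends cleanly:
print(urljoin('http://archive.apache.org/dist/',
              'lucene/solr/8.0.0/solr-8.0.0.tgz'))
# http://archive.apache.org/dist/lucene/solr/8.0.0/solr-8.0.0.tgz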
0c0a1d0ec480c7df9dd8821d40af7791e46db453
|
tests/lib/test_finance.py
|
tests/lib/test_finance.py
|
# Copyright (c) 2013 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from tests import OldPythonTestCase
__author__ = 'felix_kluge'
from pycroft.lib.finance import create_semester
from pycroft.lib.config import get,config
from pycroft.model.finance import FinanceAccount
from sqlalchemy.orm import backref
from pycroft.model import session
import time
from datetime import date
class Test_010_Semester(OldPythonTestCase):
def test_0010_create_semester_accounts(self):
"""
This test should verify that all semester-related finance-accounts have
been created.
"""
new_semester = create_semester("NewSemesterName", 2500, 1500, date(2013, 9, 1), date(2014, 2, 1))
config._configpath = "../tests/example/test_config.json"
for account in config["finance"]["semester_accounts"]:
for new_account in new_semester.accounts:
if(new_account.tag == account["tag"]):
new_account_equivalent = new_account
compare_account = FinanceAccount(type=account["type"],name=account["name"],semester=new_semester,tag=account["tag"])
self.assertEqual(new_account_equivalent.name, compare_account.name)
self.assertEqual(new_account_equivalent.type, compare_account.type)
|
# Copyright (c) 2013 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from tests import OldPythonTestCase
__author__ = 'felix_kluge'
from pycroft.lib.finance import create_semester, import_csv
from pycroft.lib.config import get, config
from pycroft.model.finance import FinanceAccount, Journal, JournalEntry
from sqlalchemy.orm import backref
from pycroft.model import session
import time
from datetime import date, datetime
class Test_010_Semester(OldPythonTestCase):
def test_0010_create_semester_accounts(self):
"""
This test should verify that all semester-related finance-accounts have
been created.
"""
new_semester = create_semester("NewSemesterName",
2500, 1500,
date(2013, 9, 1),
date(2014, 2, 1))
config._configpath = "../tests/example/test_config.json"
for account in config["finance"]["semester_accounts"]:
new_created_account = FinanceAccount.q.filter(
FinanceAccount.semester == new_semester,
FinanceAccount.tag == account["tag"]).first()
self.assertEqual(new_created_account.name, account["name"])
self.assertEqual(new_created_account.type, account["type"])
session.session.commit()
|
Fix for wrong test: create_semester_accounts
|
Fix for wrong test: create_semester_accounts
refs #448
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
|
169a8612eb06410a5ae7e110227f7bea010d2ba9
|
tests/test_ghostscript.py
|
tests/test_ghostscript.py
|
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(str(stderr), "")
self.assertRegexpMatches(str(stdout), r'9\.\d\d')
|
Make stdout and stderr into strings.
|
Make stdout and stderr into strings.
|
Python
|
mit
|
YPlan/treepoem
|
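One caveat about the fix above: on Python 3, communicate() returns bytes, and str(b'') is "b''" rather than "", so wrapping the values in str() changes their repr instead of decoding them. An arguably more robust sketch (decode() here is a substitute for the committed str() approach, not the project's code):
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
    def test_installed(self):
        process = subprocess.Popen(
            ['gs', '--version'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, stderr = process.communicate()
        # bytes.decode() yields real text instead of the "b'...'" repr
        self.assertEqual(stderr.decode('ascii'), "")
        self.assertRegexpMatches(stdout.decode('ascii'), r'9\.\d\d')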
ba37080645153d66a8ae1c8df10312806999f8ec
|
tests/test_observation.py
|
tests/test_observation.py
|
import unittest
from datetime import datetime
from dateutil.tz import tzutc
from fmi import FMI
class TestObservations(unittest.TestCase):
def test_lappeenranta(self):
now = datetime.now(tz=tzutc())
f = FMI(place='Lappeenranta')
for point in f.observations():
assert point.time < now
assert isinstance(point.temperature, float)
|
import unittest
from datetime import datetime
from dateutil.tz import tzutc
from fmi import FMI
class TestObservations(unittest.TestCase):
def test_lappeenranta(self):
now = datetime.now(tz=tzutc())
f = FMI(place='Lappeenranta')
for point in f.observations():
assert point.time < now
assert isinstance(point.temperature, float)
for point in f.observations(fmisid=101237):
assert point.time < now
assert isinstance(point.temperature, float)
|
Add use of fmisid to tests.
|
Add use of fmisid to tests.
|
Python
|
mit
|
kipe/fmi
|
9369f72c4fe9a544e24f10a1db976589dc013424
|
plinth/modules/sso/__init__.py
|
plinth/modules/sso/__init__.py
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth module to configure Single Sign On services.
"""
from plinth import actions
from django.utils.translation import ugettext_lazy as _
version = 1
is_essential = True
depends = ['security']
name = _('Single Sign On')
managed_packages = ['libapache2-mod-auth-pubtkt', 'openssl', 'python3-openssl']
def setup(helper, old_version=None):
"""Install the required packages"""
helper.install(managed_packages)
actions.superuser_run('auth-pubtkt', ['enable-mod'])
actions.superuser_run('auth-pubtkt', ['create-key-pair'])
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth module to configure Single Sign On services.
"""
from plinth import actions
from django.utils.translation import ugettext_lazy as _
version = 1
is_essential = True
depends = ['security', 'apache']
name = _('Single Sign On')
managed_packages = ['libapache2-mod-auth-pubtkt', 'openssl', 'python3-openssl']
def setup(helper, old_version=None):
"""Install the required packages"""
helper.install(managed_packages)
actions.superuser_run('auth-pubtkt', ['enable-mod'])
actions.superuser_run('auth-pubtkt', ['create-key-pair'])
|
Add dependency on apache module
|
sso: Add dependency on apache module
This ensures that Apache is fully set up before setting up mod-auth-pubtkt.
Signed-off-by: Sunil Mohan Adapa <[email protected]>
Reviewed-by: James Valleroy <[email protected]>
|
Python
|
agpl-3.0
|
kkampardi/Plinth,harry-7/Plinth,kkampardi/Plinth,harry-7/Plinth,harry-7/Plinth,kkampardi/Plinth,kkampardi/Plinth,harry-7/Plinth,kkampardi/Plinth,harry-7/Plinth
|
ee98b5a5c6b82671738bc60e68ea87d838c5400f
|
migrations/0020_change_ds_name_to_non_uniqe.py
|
migrations/0020_change_ds_name_to_non_uniqe.py
|
from redash.models import db
import peewee
from playhouse.migrate import PostgresqlMigrator, migrate
if __name__ == '__main__':
migrator = PostgresqlMigrator(db.database)
with db.database.transaction():
# Change the uniqueness constraint on data source name to be (org, name):
success = False
for index_name in ['unique_name', 'data_sources_name']:
try:
print "Trying to remove data source name uniqueness index with the name: {}".format(index_name)
migrate(migrator.drop_index("data_sources", index_name))
print "Success!"
success = True
break
except peewee.ProgrammingError:
db.close_db(None)
if not success:
print "Failed removing uniqueness constraint on data source name."
print "Please verify its name in the schema, update the migration and run again."
exit(1)
migrate(
migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
)
db.close_db(None)
|
from redash.models import db
import peewee
from playhouse.migrate import PostgresqlMigrator, migrate
if __name__ == '__main__':
migrator = PostgresqlMigrator(db.database)
with db.database.transaction():
# Change the uniqueness constraint on data source name to be (org, name):
# In some cases it's a constraint:
db.database.execute_sql('ALTER TABLE data_sources DROP CONSTRAINT IF EXISTS unique_name')
# In others only an index:
db.database.execute_sql('DROP INDEX IF EXISTS data_sources_name')
migrate(
migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
)
db.close_db(None)
|
Improve the migration for unique data source name
|
Improve the migration for unique data source name
|
Python
|
bsd-2-clause
|
ninneko/redash,EverlyWell/redash,hudl/redash,useabode/redash,pubnative/redash,chriszs/redash,akariv/redash,alexanderlz/redash,easytaxibr/redash,pubnative/redash,amino-data/redash,rockwotj/redash,ninneko/redash,guaguadev/redash,rockwotj/redash,denisov-vlad/redash,imsally/redash,ninneko/redash,useabode/redash,crowdworks/redash,EverlyWell/redash,jmvasquez/redashtest,hudl/redash,denisov-vlad/redash,moritz9/redash,stefanseifert/redash,alexanderlz/redash,alexanderlz/redash,chriszs/redash,rockwotj/redash,M32Media/redash,vishesh92/redash,stefanseifert/redash,easytaxibr/redash,EverlyWell/redash,pubnative/redash,44px/redash,ninneko/redash,hudl/redash,amino-data/redash,M32Media/redash,moritz9/redash,guaguadev/redash,chriszs/redash,chriszs/redash,44px/redash,imsally/redash,EverlyWell/redash,amino-data/redash,44px/redash,guaguadev/redash,guaguadev/redash,denisov-vlad/redash,44px/redash,easytaxibr/redash,hudl/redash,getredash/redash,moritz9/redash,ninneko/redash,imsally/redash,imsally/redash,M32Media/redash,jmvasquez/redashtest,easytaxibr/redash,crowdworks/redash,stefanseifert/redash,akariv/redash,moritz9/redash,pubnative/redash,useabode/redash,getredash/redash,crowdworks/redash,pubnative/redash,vishesh92/redash,jmvasquez/redashtest,amino-data/redash,denisov-vlad/redash,getredash/redash,M32Media/redash,vishesh92/redash,guaguadev/redash,jmvasquez/redashtest,easytaxibr/redash,stefanseifert/redash,useabode/redash,akariv/redash,akariv/redash,akariv/redash,alexanderlz/redash,stefanseifert/redash,getredash/redash,vishesh92/redash,getredash/redash,rockwotj/redash,denisov-vlad/redash,crowdworks/redash,jmvasquez/redashtest
|
ca15e6523bd34e551528dce6c6ee3dcb70cf7806
|
pyinfra/modules/util/files.py
|
pyinfra/modules/util/files.py
|
# pyinfra
# File: pyinfra/modules/util/files.py
# Desc: common functions for handling the filesystem
from types import NoneType
def ensure_mode_int(mode):
# Already an int (/None)?
if isinstance(mode, (int, NoneType)):
return mode
try:
# Try making an int ('700' -> 700)
return int(mode)
except (TypeError, ValueError):
pass
# Return as-is (ie +x which we don't need to normalise, it always gets run)
return mode
def sed_replace(state, filename, line, replace, flags=None):
flags = ''.join(flags) if flags else ''
line = line.replace('/', '\/')
replace = replace.replace('/', '\/')
temp_filename = state.get_temp_filename()
return 'sed "s/{0}/{1}/{2}" {3} > {4} && mv {4} {3}'.format(
line, replace, flags, filename, temp_filename
)
def chmod(target, mode, recursive=False):
return 'chmod {0}{1} {2}'.format(('-R ' if recursive else ''), mode, target)
def chown(target, user, group=None, recursive=False):
command = 'chown'
user_group = None
if user and group:
user_group = '{0}:{1}'.format(user, group)
elif user:
user_group = user
elif group:
command = 'chgrp'
user_group = group
return '{0}{1} {2} {3}'.format(
command,
' -R' if recursive else '',
user_group,
target
)
|
# pyinfra
# File: pyinfra/modules/util/files.py
# Desc: common functions for handling the filesystem
from types import NoneType
def ensure_mode_int(mode):
# Already an int (/None)?
if isinstance(mode, (int, NoneType)):
return mode
try:
# Try making an int ('700' -> 700)
return int(mode)
except (TypeError, ValueError):
pass
# Return as-is (ie +x which we don't need to normalise, it always gets run)
return mode
def sed_replace(state, filename, line, replace, flags=None):
flags = ''.join(flags) if flags else ''
line = line.replace('/', '\/')
replace = replace.replace('/', '\/')
return 'sed -i "s/{0}/{1}/{2}" {3}'.format(
line, replace, flags, filename
)
def chmod(target, mode, recursive=False):
return 'chmod {0}{1} {2}'.format(('-R ' if recursive else ''), mode, target)
def chown(target, user, group=None, recursive=False):
command = 'chown'
user_group = None
if user and group:
user_group = '{0}:{1}'.format(user, group)
elif user:
user_group = user
elif group:
command = 'chgrp'
user_group = group
return '{0}{1} {2} {3}'.format(
command,
' -R' if recursive else '',
user_group,
target
)
|
Use sed inline (unsure why mv was used originally).
|
Use sed inline (unsure why mv was used originally).
|
Python
|
mit
|
Fizzadar/pyinfra,Fizzadar/pyinfra
|
5a4d9255c59be0d5dda8272e0e7ced71822f4d40
|
prime-factors/prime_factors.py
|
prime-factors/prime_factors.py
|
import sieve
def prime_factors(n):
primes = sieve.sieve(n)
factors = []
for p in primes:
while n % p == 0:
factors += [p]
n //= p
return factors
|
def prime_factors(n):
factors = []
factor = 2
while n != 1:
while n % factor == 0:
factors += [factor]
n //= factor
factor += 1
return factors
|
Fix memory issues by just trying every number
|
Fix memory issues by just trying every number
|
Python
|
agpl-3.0
|
CubicComet/exercism-python-solutions
|
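Why trial division needs no sieve: by the time a composite candidate is reached, all of its prime factors have already been divided out of n, so only primes ever divide evenly and the sieve's O(n) memory disappears. Example results, using prime_factors as defined above:
print(prime_factors(12))   # [2, 2, 3]
print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]
print(prime_factors(97))   # [97] -- a prime is its own factorization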
8ea3350c6944946b60732308c912dc240952237c
|
project/settings_production.py
|
project/settings_production.py
|
from .settings import *
# Update SITE infos to use the common port 80 to publish the webapp
SITE_FIXED = {
'name': "Recalbox Manager",
'ip': None, # If 'None' find the ip automatically. Use a string to define another ip/hostname
'port': None, # If 'None' no port is added to hostname, so the server have to be reachable from port 80
}
# Production path to the Recalbox logs file
RECALBOX_LOGFILE_PATH = "/recalbox/share/system/logs"
# Use packaged assets
ASSETS_PACKAGED = True
|
from .settings import *
# Update SITE infos to use the common port 80 to publish the webapp
SITE_FIXED = {
'name': "Recalbox Manager",
'ip': None, # If 'None' find the ip automatically. Use a string to define another ip/hostname
'port': None, # If 'None' no port is added to hostname, so the server have to be reachable from port 80
}
# Production path to the Recalbox logs file
RECALBOX_LOGFILE_PATH = "/root/recalbox.log"
# Use packaged assets
ASSETS_PACKAGED = True
|
Revert "Set the right recalbox.log path"
|
Revert "Set the right recalbox.log path"
|
Python
|
mit
|
recalbox/recalbox-manager,recalbox/recalbox-manager,recalbox/recalbox-manager,sveetch/recalbox-manager,sveetch/recalbox-manager,sveetch/recalbox-manager,sveetch/recalbox-manager,recalbox/recalbox-manager,sveetch/recalbox-manager,recalbox/recalbox-manager
|
fd4688cc899b08253cc50b345bb7e836081783d8
|
bayespy/inference/vmp/nodes/__init__.py
|
bayespy/inference/vmp/nodes/__init__.py
|
######################################################################
# Copyright (C) 2011,2012 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Import some most commonly used nodes
from . import *
from .gaussian import Gaussian, GaussianARD
from .wishart import Wishart
from .gamma import Gamma
from .dirichlet import Dirichlet
from .categorical import Categorical
from .dot import Dot, SumMultiply
from .mixture import Mixture
from .gaussian_markov_chain import GaussianMarkovChain
from .gaussian_markov_chain import VaryingGaussianMarkovChain
from .gaussian_markov_chain import SwitchingGaussianMarkovChain
from .categorical_markov_chain import CategoricalMarkovChain
|
######################################################################
# Copyright (C) 2011,2012 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Import some most commonly used nodes
from . import *
from .binomial import Binomial
from .categorical import Categorical
from .beta import Beta
from .dirichlet import Dirichlet
from .gaussian import Gaussian, GaussianARD
from .wishart import Wishart
from .gamma import Gamma
from .gaussian_markov_chain import GaussianMarkovChain
from .gaussian_markov_chain import VaryingGaussianMarkovChain
from .gaussian_markov_chain import SwitchingGaussianMarkovChain
from .categorical_markov_chain import CategoricalMarkovChain
from .mixture import Mixture
from .dot import Dot
from .dot import SumMultiply
|
Add Beta and Binomial to automatically imported nodes
|
ENH: Add Beta and Binomial to automatically imported nodes
|
Python
|
mit
|
jluttine/bayespy,SalemAmeen/bayespy,fivejjs/bayespy,bayespy/bayespy
|
ed21e865f346b700c48458f22e3d3f1841f63451
|
api/swd6/api/app.py
|
api/swd6/api/app.py
|
import flask
import flask_cors
from sqlalchemy_jsonapi import flaskext as flask_jsonapi
import logging
from swd6.config import CONF
from swd6.db.models import db
logging.basicConfig(level=logging.DEBUG)
app = flask.Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = CONF.db.uri
app.config['SERVER_NAME'] = CONF.api.host
app.logger.setLevel(logging.DEBUG)
flask_cors.CORS(app, origins=CONF.api.cors_hosts)
logging.getLogger('flask_cors').level = logging.DEBUG
db.init_app(app)
api = flask_jsonapi.FlaskJSONAPI(app, db)
|
import flask
import flask_cors
from sqlalchemy_jsonapi import flaskext as flask_jsonapi
import logging
from swd6.config import CONF
from swd6.db.models import db
logging.basicConfig(level=logging.DEBUG)
app = flask.Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = CONF.db.uri
app.config['SERVER_NAME'] = CONF.api.host
app.logger.setLevel(logging.DEBUG)
flask_cors.CORS(app, origins=CONF.api.cors_hosts)
logging.getLogger('flask_cors').level = logging.DEBUG
db.init_app(app)
import json
import uuid
import datetime
import decimal
class JSONAPIEncoder(json.JSONEncoder):
""" JSONEncoder Implementation that allows for UUID and datetime """
def default(self, value):
"""
Handle UUID, datetime, decimal, and callables.
:param value: Value to encode
"""
if isinstance(value, uuid.UUID):
return str(value)
elif isinstance(value, datetime.datetime):
return value.isoformat()
elif isinstance(value, decimal.Decimal):
return str(value)
elif callable(value):
return str(value)
return json.JSONEncoder.default(self, value)
flask_jsonapi.FlaskJSONAPI.json_encoder = JSONAPIEncoder
api = flask_jsonapi.FlaskJSONAPI(app, db)
|
Fix JSON encoder to work with Decimal fields
|
Fix JSON encoder to work with Decimal fields
Need to get a patch upstream, but this is a suitable temporary
workaround.
|
Python
|
apache-2.0
|
jimbobhickville/swd6,jimbobhickville/swd6,jimbobhickville/swd6
|
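A standalone check of the encoder idea (a condensed restatement for illustration, not the project's module):
import json
import uuid
import datetime
import decimal
class JSONAPIEncoder(json.JSONEncoder):
    def default(self, value):
        # UUIDs and Decimals stringify; datetimes become ISO 8601 text.
        if isinstance(value, (uuid.UUID, decimal.Decimal)):
            return str(value)
        if isinstance(value, datetime.datetime):
            return value.isoformat()
        return json.JSONEncoder.default(self, value)
payload = {'price': decimal.Decimal('19.99'),
           'created': datetime.datetime(2017, 1, 1, 12, 0)}
print(json.dumps(payload, cls=JSONAPIEncoder))
# {"price": "19.99", "created": "2017-01-01T12:00:00"}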
e2330caffae04bc31376a2e0f66f0e86ebf92532
|
kNearestNeighbors/howItWorksKNearestNeighbors.py
|
kNearestNeighbors/howItWorksKNearestNeighbors.py
|
# -*- coding: utf-8 -*-
"""K Nearest Neighbors classification for machine learning.
This file demonstrates knowledge of K Nearest Neighbors classification by
building the algorithm from scratch.
The idea of K Nearest Neighbors classification is to best divide and separate
the data based on clustering the data and classifying based on the proximity
to it's K closest neighbors and their classifications.
'Closeness' is measured by the euclidean distance.
dataset is breast cancer data from: http://archive.ics.uci.edu/ml/datasets.html
Example:
$ python howItWorksKNearestNeighbors.py
Todo:
*
"""
from collections import Counter
import numpy as np
# import matplotlib.pyplot as plt
from matplotlib import style
# from math import sqrt
import warnings
style.use('fivethirtyeight')
# hardcoded testdata
dataset = {'k': [[1, 2], [2, 3], [3, 1]], 'r': [[6, 5], [7, 7], [8, 6]]}
new_features = [5, 7]
# [[plt.scatter(ii[0], ii[1], s=100, color=i) for ii in dataset[i]] for i in dataset]
# plt.scatter(new_features[0], new_features[1], s=100)
# plt.show()
def k_nearest_neighbors(data, predict, k=3):
"""Function to calculate k nearest neighbors.
Based on the parameter 'predict' we find the points in the local proximity
of the training data and their label. In a larger dataset it would make
sense to specify a radius to avoid going over all data points each time,
but with the current dataset it does not matter so I avoid it to simplify.
Args:
data (dictionary): a dictionary where the keys are labels and the
values are a list of lists of features.
predict (list): a list of features that we will classify
k (int): an int that is the amount of neighbors to be counted. Should
be an odd number and higher than len(data) to avoid errors.
Returns:
str: The return value. The label that the predicted parameter has.
"""
if len(data) >= k:
warnings.warn('K is set to a value less than total voting groups')
distances = []
for group in data:
for features in data[group]:
# euclidean_distance = np.sqrt(np.sum((np.array(features)-np.array(predict))**2))
euclidean_distance = np.linalg.norm(np.array(features) - np.array(predict)) # faster
distances.append([euclidean_distance, group])
votes = [i[1] for i in sorted(distances)[:k]]
print(Counter(votes).most_common(1))
vote_result = Counter(votes).most_common(1)[0][0]
return vote_result
result = k_nearest_neighbors(dataset, new_features, k=5)
print(result)
|
Add my own K-nearest-neighbor algorithm
|
Add my own K-nearest-neighbor algorithm
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
|
06d0287a8fef0679b281296e6ed76e0b6c803acb
|
sorl/thumbnail/management/commands/thumbnail.py
|
sorl/thumbnail/management/commands/thumbnail.py
|
from django.core.management.base import BaseCommand, CommandError
from sorl.thumbnail.conf import settings
from sorl.thumbnail import default
class Command(BaseCommand):
help = (
u'Handles thumbnails and key value store'
)
args = '[cleanup, clear]'
option_list = BaseCommand.option_list
def handle(self, *labels, **options):
verbosity = int(options.get('verbosity'))
if len(labels) != 1:
raise CommandError('`%s` is not a valid argument' % labels)
label = labels[0]
if label not in ['cleanup', 'clear']:
raise CommandError('`%s` unknown action' % label)
if label == 'cleanup':
if verbosity >= 1:
self.stdout.write("Cleanup thumbnails ... ")
default.kvstore.cleanup()
if verbosity >= 1:
self.stdout.write("[Done]\n")
if label == 'clear':
if verbosity >= 1:
self.stdout.write("Clear the Key Value Store ... ")
default.kvstore.clear()
if verbosity >= 1:
self.stdout.write("[Done]\n")
|
import sys
from django.core.management.base import BaseCommand, CommandError
from sorl.thumbnail import default
class Command(BaseCommand):
help = (
u'Handles thumbnails and key value store'
)
args = '[cleanup, clear]'
option_list = BaseCommand.option_list
def handle(self, *labels, **options):
verbosity = int(options.get('verbosity'))
if not labels:
print self.print_help('thumbnail', '')
sys.exit(1)
if len(labels) != 1:
raise CommandError('`%s` is not a valid argument' % labels)
label = labels[0]
if label not in ['cleanup', 'clear']:
raise CommandError('`%s` unknown action' % label)
if label == 'cleanup':
if verbosity >= 1:
self.stdout.write("Cleanup thumbnails ... ", ending=' ... ')
default.kvstore.cleanup()
if verbosity >= 1:
self.stdout.write("[Done]")
elif label == 'clear':
if verbosity >= 1:
self.stdout.write("Clear the Key Value Store", ending=' ... ')
default.kvstore.clear()
if verbosity >= 1:
self.stdout.write("[Done]")
|
Improve management command to clear or clean kvstore
|
Improve management command to clear or clean kvstore
|
Python
|
bsd-3-clause
|
mariocesar/sorl-thumbnail,perdona/sorl-thumbnail,fdgogogo/sorl-thumbnail,Knotis/sorl-thumbnail,jcupitt/sorl-thumbnail,einvalentin/sorl-thumbnail,fladi/sorl-thumbnail,lampslave/sorl-thumbnail,seedinvest/sorl-thumbnail,guilouro/sorl-thumbnail,TriplePoint-Software/sorl-thumbnail,jazzband/sorl-thumbnail,jcupitt/sorl-thumbnail,lampslave/sorl-thumbnail,Knotis/sorl-thumbnail,mcenirm/sorl-thumbnail,JordanReiter/sorl-thumbnail,fdgogogo/sorl-thumbnail,Knotis/sorl-thumbnail,mariocesar/sorl-thumbnail,Resmin/sorl-thumbnail,leture/sorl-thumbnail,jazzband/sorl-thumbnail,einvalentin/sorl-thumbnail,TriplePoint-Software/sorl-thumbnail,lampslave/sorl-thumbnail,jannon/sorl-thumbnail,Resmin/sorl-thumbnail,MatthewWilkes/sorl-thumbnail,perdona/sorl-thumbnail,einvalentin/sorl-thumbnail,mariocesar/sorl-thumbnail,CGenie/sorl-thumbnail,jannon/sorl-thumbnail,leture/sorl-thumbnail,MatthewWilkes/sorl-thumbnail,guilouro/sorl-thumbnail,gregplaysguitar/sorl-thumbnail,jannon/sorl-thumbnail,mcenirm/sorl-thumbnail,MatthewWilkes/sorl-thumbnail,gregplaysguitar/sorl-thumbnail,guilouro/sorl-thumbnail,seedinvest/sorl-thumbnail,chriscauley/sorl-thumbnail,seedinvest/sorl-thumbnail,chriscauley/sorl-thumbnail,CGenie/sorl-thumbnail,TriplePoint-Software/sorl-thumbnail,perdona/sorl-thumbnail,jcupitt/sorl-thumbnail,jazzband/sorl-thumbnail,JordanReiter/sorl-thumbnail,mcenirm/sorl-thumbnail,leture/sorl-thumbnail,fdgogogo/sorl-thumbnail,chriscauley/sorl-thumbnail,fladi/sorl-thumbnail,JordanReiter/sorl-thumbnail,CGenie/sorl-thumbnail,gregplaysguitar/sorl-thumbnail,Resmin/sorl-thumbnail
|
5c97b9911a2dafde5fd1e4c40cda4e84974eb855
|
assembla/lib.py
|
assembla/lib.py
|
from functools import wraps
class AssemblaObject(object):
"""
Proxies getitem calls (eg: `instance['id']`) to a dictionary `instance.data['id']`.
"""
def __init__(self, data):
self.data = data
def __getitem__(self, key):
return self.data[key]
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def get(self, *args, **kwargs):
return self.data.get(*args, **kwargs)
def assembla_filter(func):
"""
Filters :data for the objects in it which possess attributes equal in
name/value to a key/value in kwargs.
Each key/value combination in kwargs is compared against the object, so
multiple keyword arguments can be passed in to constrain the filtering.
"""
@wraps(func)
def wrapper(class_instance, **kwargs):
results = func(class_instance)
if not kwargs:
return results
else:
return filter(
# Find the objects who have an equal number of matching attr/value
# combinations as `len(kwargs)`
lambda obj: len(kwargs) == len(
filter(
lambda boolean: boolean,
[obj.get(attr_name) == value
for attr_name, value in kwargs.iteritems()]
)
),
results
)
return wrapper
|
from functools import wraps
class AssemblaObject(object):
"""
Proxies getitem calls (eg: `instance['id']`) to a dictionary `instance.data['id']`.
"""
def __init__(self, data):
self.data = data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def get(self, *args, **kwargs):
return self.data.get(*args, **kwargs)
def __repr__(self):
if 'name' in self.data:
return "<%s: %s>" % (type(self).__name__, self.data['name'])
if ('number' in self.data) and ('summary' in self.data):
return "<%s: #%s - %s>" % (type(self).__name__, self.data['number'], self.data['summary'])
return super(AssemblaObject, self).__repr__()
def assembla_filter(func):
"""
Filters :data for the objects in it which possess attributes equal in
name/value to a key/value in kwargs.
Each key/value combination in kwargs is compared against the object, so
multiple keyword arguments can be passed in to constrain the filtering.
"""
@wraps(func)
def wrapper(class_instance, **kwargs):
results = func(class_instance)
if not kwargs:
return results
else:
return filter(
# Find the objects who have an equal number of matching attr/value
# combinations as `len(kwargs)`
lambda obj: len(kwargs) == len(
filter(
lambda boolean: boolean,
[obj.get(attr_name) == value
for attr_name, value in kwargs.iteritems()]
)
),
results
)
return wrapper
|
Allow keys to be set (in anticipation of write commands). Better object __repr__() for spaces and tickets.
|
Allow keys to be set (in anticipation of write commands). Better object __repr__() for spaces and tickets.
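A minimal sketch of the new behaviour, using made-up ticket and space data:

    ticket = AssemblaObject({'number': 42, 'summary': 'Fix login'})
    ticket['priority'] = 1   # now supported via __setitem__
    repr(ticket)             # -> "<AssemblaObject: #42 - Fix login>"

    space = AssemblaObject({'name': 'My Space'})
    repr(space)              # -> "<AssemblaObject: My Space>"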
|
Python
|
mit
|
markfinger/assembla
|
cee2368dac250ef9655a3df9af3188b8abd095dc
|
spec/puzzle/examples/gph/a_basic_puzzle_spec.py
|
spec/puzzle/examples/gph/a_basic_puzzle_spec.py
|
from data import warehouse
from puzzle.examples.gph import a_basic_puzzle
from puzzle.problems import number_problem
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
with description('a_basic_puzzle'):
with before.all:
warehouse.save()
prod_config.init()
self.subject = a_basic_puzzle.get()
with after.all:
prod_config.reset()
warehouse.restore()
with it('parses'):
problems = self.subject.problems()
expect(problems).to(have_len(len(a_basic_puzzle.SOURCE.split('\n')) - 2))
for problem in problems:
expect(problem).to(be_a(number_problem.NumberProblem))
with it('solves first problem'):
expect(self.subject.problem(0).solution).not_to(be_empty)
with it('gets some solutions right'):
solutions = self.subject.solutions()
matches = []
expect(solutions).to(equal([
'decimal +25',
'octal +12',
'sept e nary +1',
'binary +1',
None,
'qui nary +9',
None,
None,
'quaternary +12',
None
]))
|
from data import warehouse
from puzzle.examples.gph import a_basic_puzzle
from puzzle.problems import number_problem
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
with _description('a_basic_puzzle'):
with before.all:
warehouse.save()
prod_config.init()
self.subject = a_basic_puzzle.get()
with after.all:
prod_config.reset()
warehouse.restore()
with it('parses'):
problems = self.subject.problems()
expect(problems).to(have_len(len(a_basic_puzzle.SOURCE.split('\n')) - 2))
for problem in problems:
expect(problem).to(be_a(number_problem.NumberProblem))
with it('solves first problem'):
expect(self.subject.problem(0).solution).not_to(be_empty)
with it('gets some solutions right'):
solutions = self.subject.solutions()
matches = []
expect(solutions).to(equal([
'decimal +25',
'octal +12',
'sept e nary +1',
'binary +1',
None,
'qui nary +9',
None,
None,
'quaternary +12',
None
]))
|
Disable slow test. Not intended to run.
|
Disable slow test. Not intended to run.
|
Python
|
mit
|
PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge
|
07f86c47c58d6266bd4b42c81521001aca072ff1
|
jsonconfigparser/test/__init__.py
|
jsonconfigparser/test/__init__.py
|
import unittest
from jsonconfigparser import JSONConfigParser
class JSONConfigTestCase(unittest.TestCase):
def test_init(self):
JSONConfigParser()
def test_read_string(self):
string = '[section]\n' + \
'foo = "bar"\n'
cf = JSONConfigParser()
cf.read_string(string)
self.assertEqual(cf.get('section', 'foo'), 'bar')
def test_get(self):
cf = JSONConfigParser()
cf.add_section('section')
cf.set('section', 'section', 'set-in-section')
self.assertEqual(cf.get('section', 'section'), 'set-in-section')
cf.set(cf.default_section, 'defaults', 'set-in-defaults')
self.assertEqual(cf.get('section', 'defaults'), 'set-in-defaults')
self.assertEqual(cf.get('section', 'vars',
vars={'vars': 'set-in-vars'}),
'set-in-vars')
self.assertEqual(cf.get('section', 'unset', 'fallback'), 'fallback')
suite = unittest.TestLoader().loadTestsFromTestCase(JSONConfigTestCase)
|
import unittest
from jsonconfigparser import JSONConfigParser
class JSONConfigTestCase(unittest.TestCase):
def test_init(self):
JSONConfigParser()
def test_read_string(self):
string = '[section]\n' + \
'# comment comment\n' + \
'foo = "bar"\n' + \
'\n' + \
'[section2]\n' + \
'bar = "baz"\n'
cf = JSONConfigParser()
cf.read_string(string)
self.assertEqual(cf.get('section', 'foo'), 'bar')
def test_get(self):
cf = JSONConfigParser()
cf.add_section('section')
cf.set('section', 'section', 'set-in-section')
self.assertEqual(cf.get('section', 'section'), 'set-in-section')
cf.set(cf.default_section, 'defaults', 'set-in-defaults')
self.assertEqual(cf.get('section', 'defaults'), 'set-in-defaults')
self.assertEqual(cf.get('section', 'vars',
vars={'vars': 'set-in-vars'}),
'set-in-vars')
self.assertEqual(cf.get('section', 'unset', 'fallback'), 'fallback')
suite = unittest.TestLoader().loadTestsFromTestCase(JSONConfigTestCase)
|
Add some more rubbish to example string
|
Add some more rubbish to example string
|
Python
|
bsd-3-clause
|
bwhmather/json-config-parser
|
e0dac0a621cbeed615553e5c3544f9c49de96eb2
|
metadata/FrostNumberModel/hooks/pre-stage.py
|
metadata/FrostNumberModel/hooks/pre-stage.py
|
"""A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import find_simulation_input_file
from topoflow_utils.hook import assign_parameters
file_list = []
def execute(env):
"""Perform pre-stage tasks for running a component.
Parameters
----------
env : dict
A dict of component parameter values from WMT.
"""
env['end_year'] = long(env['start_year']) + long(env['_run_duration'])
env['fn_out_filename'] = 'frostnumber_output.dat'
assign_parameters(env, file_list)
for fname in file_list:
src = find_simulation_input_file(env[fname])
shutil.copy(src, os.curdir)
|
"""A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import find_simulation_input_file, yaml_dump
from topoflow_utils.hook import assign_parameters
file_list = []
def execute(env):
"""Perform pre-stage tasks for running a component.
Parameters
----------
env : dict
A dict of component parameter values from WMT.
"""
env['end_year'] = long(env['start_year']) + long(env['_run_duration']) - 1
env['fn_out_filename'] = 'frostnumber_output.dat'
assign_parameters(env, file_list)
for fname in file_list:
src = find_simulation_input_file(env[fname])
shutil.copy(src, os.curdir)
yaml_dump('_env.yaml', env)
|
Subtract 1 from model end_year
|
Subtract 1 from model end_year
This matches the behavior of the FrostNumberModel BMI.
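For example (illustrative values only): with start_year = 2000 and _run_duration = 10, the hook now computes end_year = 2000 + 10 - 1 = 2009, so the simulation spans exactly ten years inclusive, as the BMI expects.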
|
Python
|
mit
|
csdms/wmt-metadata
|
9d29061f8520506d798ad75aa296be8dc838aaf7
|
resolwe/elastic/pagination.py
|
resolwe/elastic/pagination.py
|
""".. Ignore pydocstyle D400.
==================
Elastic Paginators
==================
Paginator classes used in Elastic app.
.. autoclass:: resolwe.elastic.pagination.LimitOffsetPostPagination
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from rest_framework.pagination import LimitOffsetPagination, _positive_int
def get_query_param(request, key):
"""Get query parameter uniformly for GET and POST requests."""
value = request.query_params.get(key) or request.data.get(key)
if value is None:
raise KeyError()
return value
class LimitOffsetPostPagination(LimitOffsetPagination):
"""Limit/offset paginator.
This is standard limit/offset paginator from Django REST framework,
with difference that it supports passing ``limit`` and ``offset``
attributes also in the body of the request (not just as query
parameter).
"""
def get_limit(self, request):
"""Return limit parameter."""
if self.limit_query_param:
try:
print(get_query_param(request, self.limit_query_param))
return _positive_int(
get_query_param(request, self.limit_query_param),
strict=True,
cutoff=self.max_limit
)
except (KeyError, ValueError):
pass
return self.default_limit
def get_offset(self, request):
"""Return offset parameter."""
try:
return _positive_int(
get_query_param(request, self.offset_query_param),
)
except (KeyError, ValueError):
return 0
|
""".. Ignore pydocstyle D400.
==================
Elastic Paginators
==================
Paginator classes used in Elastic app.
.. autoclass:: resolwe.elastic.pagination.LimitOffsetPostPagination
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from rest_framework.pagination import LimitOffsetPagination, _positive_int
def get_query_param(request, key):
"""Get query parameter uniformly for GET and POST requests."""
value = request.query_params.get(key) or request.data.get(key)
if value is None:
raise KeyError()
return value
class LimitOffsetPostPagination(LimitOffsetPagination):
"""Limit/offset paginator.
This is standard limit/offset paginator from Django REST framework,
with difference that it supports passing ``limit`` and ``offset``
attributes also in the body of the request (not just as query
parameter).
"""
def get_limit(self, request):
"""Return limit parameter."""
if self.limit_query_param:
try:
return _positive_int(
get_query_param(request, self.limit_query_param),
strict=True,
cutoff=self.max_limit
)
except (KeyError, ValueError):
pass
return self.default_limit
def get_offset(self, request):
"""Return offset parameter."""
try:
return _positive_int(
get_query_param(request, self.offset_query_param),
)
except (KeyError, ValueError):
return 0
|
Remove leftover print call in paginator
|
Remove leftover print call in paginator
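For context, a sketch of the request style this paginator supports (hypothetical endpoint and payload), where limit and offset travel in the POST body rather than the query string:

    import requests  # illustrative client-side call, not part of resolwe

    requests.post('https://example.com/api/search/',
                  json={'query': 'genome', 'limit': 10, 'offset': 20})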
|
Python
|
apache-2.0
|
genialis/resolwe,jberci/resolwe,genialis/resolwe,jberci/resolwe
|
61e4693988c5b89b4a82457181813e7a6e73403b
|
utils/text.py
|
utils/text.py
|
import codecs
from django.core import exceptions
from django.utils import text
import translitcodec
def slugify(model, field, value, validator):
orig_slug = slug = text.slugify(codecs.encode(value, 'translit/long'))[:45]
i = 0
while True:
try:
try:
validator(slug)
except exceptions.ValidationError:
pass
else:
model.objects.get(**{field: slug})
i += 1
slug = orig_slug + '-' + str(i)
except model.DoesNotExist:
return slug
|
import codecs
from django.core import exceptions
from django.utils import text
import translitcodec
def no_validator(arg):
pass
def slugify(model, field, value, validator=no_validator):
orig_slug = slug = text.slugify(codecs.encode(value, 'translit/long'))[:45]
i = 0
while True:
try:
try:
validator(slug)
except exceptions.ValidationError:
pass
else:
model.objects.get(**{field: slug})
i += 1
slug = orig_slug + '-' + str(i)
except model.DoesNotExist:
return slug
|
Fix slugify for use without validator
|
Fix slugify for use without validator
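A short sketch of the fixed call (Group and its slug field are hypothetical names):

    slug = slugify(Group, 'slug', 'Stadtgestalten e.V.')
    # validator now defaults to no_validator, so omitting it no longer raises TypeError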
|
Python
|
agpl-3.0
|
stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten
|
7c74017bc0d76ecb34e3fab44767290f51d98a09
|
humbug/test_settings.py
|
humbug/test_settings.py
|
from settings import *
DATABASES["default"] = {"NAME": "zephyr/tests/zephyrdb.test",
"ENGINE": "django.db.backends.sqlite3",
"OPTIONS": { "timeout": 20, },}
TORNADO_SERVER = 'http://localhost:9983'
|
from settings import *
DATABASES["default"] = {"NAME": "zephyr/tests/zephyrdb.test",
"ENGINE": "django.db.backends.sqlite3",
"OPTIONS": { "timeout": 20, },}
TORNADO_SERVER = 'http://localhost:9983'
# Decrease the get_updates timeout to 1 second.
# This allows CasperJS to proceed quickly to the next test step.
POLL_TIMEOUT = 1000
|
Decrease get_updates timeout for client test suite
|
Decrease get_updates timeout for client test suite
Fixes #475.
(imported from commit d8f908c55f2e519541e5383a742edbf23183539c)
|
Python
|
apache-2.0
|
ryanbackman/zulip,christi3k/zulip,dnmfarrell/zulip,littledogboy/zulip,LAndreas/zulip,timabbott/zulip,hayderimran7/zulip,xuxiao/zulip,deer-hope/zulip,zhaoweigg/zulip,m1ssou/zulip,wdaher/zulip,punchagan/zulip,lfranchi/zulip,kokoar/zulip,voidException/zulip,brainwane/zulip,blaze225/zulip,akuseru/zulip,JPJPJPOPOP/zulip,hayderimran7/zulip,MayB/zulip,ipernet/zulip,AZtheAsian/zulip,gigawhitlocks/zulip,bssrdf/zulip,bitemyapp/zulip,ashwinirudrappa/zulip,proliming/zulip,LeeRisk/zulip,littledogboy/zulip,JanzTam/zulip,developerfm/zulip,pradiptad/zulip,dotcool/zulip,alliejones/zulip,hj3938/zulip,Juanvulcano/zulip,qq1012803704/zulip,yocome/zulip,sharmaeklavya2/zulip,AZtheAsian/zulip,tdr130/zulip,vaidap/zulip,wangdeshui/zulip,sup95/zulip,blaze225/zulip,dnmfarrell/zulip,stamhe/zulip,Drooids/zulip,rht/zulip,suxinde2009/zulip,aakash-cr7/zulip,nicholasbs/zulip,qq1012803704/zulip,bowlofstew/zulip,aakash-cr7/zulip,mdavid/zulip,xuanhan863/zulip,wangdeshui/zulip,bssrdf/zulip,mansilladev/zulip,zofuthan/zulip,jrowan/zulip,mahim97/zulip,pradiptad/zulip,jessedhillon/zulip,zulip/zulip,hustlzp/zulip,nicholasbs/zulip,zhaoweigg/zulip,dotcool/zulip,alliejones/zulip,hj3938/zulip,jerryge/zulip,hustlzp/zulip,mansilladev/zulip,ipernet/zulip,kaiyuanheshang/zulip,zorojean/zulip,xuxiao/zulip,Batterfii/zulip,amanharitsh123/zulip,hackerkid/zulip,sharmaeklavya2/zulip,rishig/zulip,karamcnair/zulip,luyifan/zulip,jainayush975/zulip,hackerkid/zulip,glovebx/zulip,littledogboy/zulip,he15his/zulip,zwily/zulip,ApsOps/zulip,fw1121/zulip,showell/zulip,ufosky-server/zulip,dwrpayne/zulip,hengqujushi/zulip,bluesea/zulip,ikasumiwt/zulip,codeKonami/zulip,Juanvulcano/zulip,dotcool/zulip,tommyip/zulip,umkay/zulip,sup95/zulip,showell/zulip,mdavid/zulip,sonali0901/zulip,thomasboyt/zulip,natanovia/zulip,babbage/zulip,mahim97/zulip,JPJPJPOPOP/zulip,KJin99/zulip,bowlofstew/zulip,synicalsyntax/zulip,dawran6/zulip,timabbott/zulip,zofuthan/zulip,Batterfii/zulip,souravbadami/zulip,Drooids/zulip,wavelets/zulip,verma-varsha/zulip,Galexrt/zulip,atomic-labs/zulip,LeeRisk/zulip,so0k/zulip,swinghu/zulip,armooo/zulip,glovebx/zulip,xuanhan863/zulip,zachallaun/zulip,zulip/zulip,mansilladev/zulip,kaiyuanheshang/zulip,esander91/zulip,DazWorrall/zulip,souravbadami/zulip,kou/zulip,jimmy54/zulip,aliceriot/zulip,jessedhillon/zulip,itnihao/zulip,calvinleenyc/zulip,technicalpickles/zulip,wweiradio/zulip,jimmy54/zulip,joshisa/zulip,firstblade/zulip,avastu/zulip,tbutter/zulip,LeeRisk/zulip,reyha/zulip,KJin99/zulip,vabs22/zulip,j831/zulip,gigawhitlocks/zulip,paxapy/zulip,brainwane/zulip,tbutter/zulip,vakila/zulip,akuseru/zulip,jerryge/zulip,noroot/zulip,easyfmxu/zulip,so0k/zulip,ahmadassaf/zulip,dhcrzf/zulip,PhilSk/zulip,johnny9/zulip,wweiradio/zulip,ericzhou2008/zulip,joshisa/zulip,shubhamdhama/zulip,dxq-git/zulip,itnihao/zulip,arpitpanwar/zulip,tommyip/zulip,kaiyuanheshang/zulip,babbage/zulip,yuvipanda/zulip,avastu/zulip,SmartPeople/zulip,Vallher/zulip,ApsOps/zulip,mansilladev/zulip,KingxBanana/zulip,zhaoweigg/zulip,j831/zulip,isht3/zulip,EasonYi/zulip,tdr130/zulip,Diptanshu8/zulip,niftynei/zulip,ipernet/zulip,Batterfii/zulip,adnanh/zulip,adnanh/zulip,bitemyapp/zulip,dawran6/zulip,KingxBanana/zulip,MariaFaBella85/zulip,peiwei/zulip,praveenaki/zulip,dxq-git/zulip,vakila/zulip,xuxiao/zulip,dhcrzf/zulip,zwily/zulip,hengqujushi/zulip,tbutter/zulip,showell/zulip,bluesea/zulip,SmartPeople/zulip,andersk/zulip,shubhamdhama/zulip,dhcrzf/zulip,PaulPetring/zulip,lfranchi/zulip,bastianh/zulip,mohsenSy/zulip,calvinleenyc/zulip,aps-sids/zulip,luyifan/zulip,technicalpickles/zulip,alliejones/zulip,jrowan/zulip,krtkmj/zulip,DazWorrall/zulip,ikasumiwt/zulip,tiansiyuan/zulip,Drooids/zulip,krtkmj/zulip,yuvipanda/zulip,kou/zulip,eeshangarg/zulip,shrikrishnaholla/zulip,schatt/zulip,he15his/zulip,mdavid/zulip,zachallaun/zulip,gkotian/zulip,he15his/zulip,shaunstanislaus/zulip,Frouk/zulip,rht/zulip,lfranchi/zulip,zulip/zulip,xuxiao/zulip,littledogboy/zulip,Suninus/zulip,hustlzp/zulip,mansilladev/zulip,ipernet/zulip,kaiyuanheshang/zulip,zorojean/zulip,esander91/zulip,DazWorrall/zulip,souravbadami/zulip,kou/zulip,jimmy54/zulip,aliceriot/zulip,jessedhillon/zulip,itnihao/zulip,calvinleenyc/zulip,technicalpickles/zulip,wweiradio/zulip,jimmy54/zulip,joshisa/zulip,firstblade/zulip,avastu/zulip,tbutter/zulip,LeeRisk/zulip,reyha/zulip,KJin99/zulip,zulip/zulip,ufosky-server/zulip,ApsOps/zulip,zacps/zulip,levixie/zulip,LeeRisk/zulip,fw1121/zulip,Gabriel0402/zulip,kou/zulip,Galexrt/zulip,zachallaun/zulip,aps-sids/zulip,tommyip/zulip,vaidap/zulip,themass/zulip,firstblade/zulip,jeffcao/zulip,natanovia/zulip,arpith/zulip,joyhchen/zulip,shaunstanislaus/zulip,bssrdf/zulip,guiquanz/zulip,alliejones/zulip,rht/zulip,sup95/zulip,so0k/zulip,ashwinirudrappa/zulip,arpitpanwar/zulip,brockwhittaker/zulip,joshisa/zulip,j831/zulip,jainayush975/zulip,samatdav/zulip,swinghu/zulip,yocome/zulip,saitodisse/zulip,reyha/zulip,joshisa/zulip,natanovia/zulip,zofuthan/zulip,yocome/zulip,DazWorrall/zulip,udxxabp/zulip,wdaher/zulip,shaunstanislaus/zulip,dotcool/zulip,willingc/zulip,proliming/zulip,JPJPJPOPOP/zulip,gigawhitlocks/zulip,isht3/zulip,swinghu/zulip,showell/zulip,tiansiyuan/zulip,stamhe/zulip,themass/zulip,Frouk/zulip,jonesgithub/zulip,JanzTam/zulip,eastlhu/zulip,avastu/zulip,glovebx/zulip,KJin99/zulip,eeshangarg/zulip,arpitpanwar/zulip,qq1012803704/zulip,johnnygaddarr/zulip,MariaFaBella85/zulip,thomasboyt/zulip,tbutter/zulip,huangkebo/zulip,souravbadami/zulip,qq1012803704/zulip,mansilladev/zulip,shrikrishnaholla/zulip,JanzTam/zulip,ashwinirudrappa/zulip,johnny9/zulip,dawran6/zulip,moria/zulip,jackrzhang/zulip,shaunstanislaus/zulip,he15his/zulip,punchagan/zulip,ericzhou2008/zulip,zachallaun/zulip,saitodisse/zulip,udxxabp/zulip,developerfm/zulip,arpith/zulip,JanzTam/zulip,arpitpanwar/zulip,zwily/zulip,gigawhitlocks/zulip,m1ssou/zulip,kokoar/zulip,RobotCaleb/zulip,amyliu345/zulip,dattatreya303/zulip,johnny9/zulip,Jianchun1/zulip,MayB/zulip,suxinde2009/zulip,m1ssou/zulip,Juanvulcano/zulip,rht/zulip,ericzhou2008/zulip,aliceriot/zulip,jessedhillon/zulip,JPJPJPOPOP/zulip,moria/zulip,johnny9/zulip,andersk/zulip,kokoar/zulip,voidException/zulip,stamhe/zulip,gkotian/zulip,LeeRisk/zulip,lfranchi/zulip,hustlzp/zulip,rishig/zulip,rishig/zulip,timabbott/zulip,natanovia/zulip,pradiptad/zulip,willingc/zulip,amyliu345/zulip,deer-hope/zulip,johnnygaddarr/zulip,developerfm/zulip,alliejones/zulip,DazWorrall/zulip,vikas-parashar/zulip,esander91/zulip,lfranchi/zulip,luyifan/zulip,akuseru/zulip,umkay/zulip,amanharitsh123/zulip,johnnygaddarr/zulip,peguin40/zulip,adnanh/zulip,jphilipsen05/zulip,moria/zulip,levixie/zulip,wangdeshui/zulip,DazWorrall/zulip,joyhchen/zulip,mdavid/zulip,KingxBanana/zulip,yocome/zulip,Gabriel0402/zulip,MayB/zulip,natanovia/zulip,huangkebo/zulip,TigorC/zulip,saitodisse/zulip,tiansiyuan/zulip,gigawhitlocks/zulip,lfranchi/zulip,LAndreas/zulip,rishig/zulip,dwrpayne/zulip,j831/zulip,Vallher/zulip,cosmicAsymmetry/zulip,isht3/zulip,brainwane/zulip,dattatreya303/zulip,sharmaeklavya2/zulip,synicalsyntax/zulip,kokoar/zulip,aliceriot/zulip,zwily/zulip,niftynei/zulip,guiquanz/zulip,SmartPeople/zulip,schatt/zulip,zulip/zulip,deer-hope/zulip,akuseru/zulip,vikas-parashar/zulip,PaulPetring/zulip,dhcrzf/zulip,huangkebo/zulip,xuanhan863/zulip,deer-hope/zulip,Cheppers/zulip,dwrpayne/zulip,Suninus/zulip,udxxabp/zulip,Vallher/zulip,ericzhou2008/zulip,fw1121/zulip,avastu/zulip,amyliu345/zulip,vaidap/zulip,kokoar/zulip,amallia/zulip,seapasulli/zulip,bitemyapp/zulip,Juanvulcano/zulip,atomic-labs/zulip,isht3/zulip,vabs22/zulip,vakila/zulip,zofuthan/zulip,Diptanshu8/zulip,kou/zulip,hj3938/zulip,arpith/zulip,akuseru/zulip,blaze225/zulip,themass/zulip,jainayush975/zulip,zachallaun/zulip,hafeez3000/zulip,kou/zulip,amanharitsh123/zulip,Drooids/zulip,jerryge/zulip,arpith/zulip,technicalpickles/zulip,fw1121/zulip,karamcnair/zulip,wavelets/zulip,nicholasbs/zulip,jeffcao/zulip,wdaher/zulip,huangkebo/zulip,LeeRisk/zulip,voidException/zulip,Frouk/zulip,bowlofstew/zulip,swinghu/zulip,ryanbackman/zulip,luyifan/zulip,sonali0901/zulip,zorojean/zulip,TigorC/zulip,peguin40/zulip,cosmicAsymmetry/zulip,MariaFaBella85/zulip,zulip/zulip,developerfm/zulip,christi3k/zulip,AZtheAsian/zulip,kou/zulip,armooo/zulip,zwily/zulip,jimmy54/zulip,bastianh/zulip,Galexrt/zulip,Suninus/zulip,mdavid/zulip,j831/zulip,he15his/zulip,wweiradio/zulip,JPJPJPOPOP/zulip,jonesgithub/zulip,qq1012803704/zulip,udxxabp/zulip,amallia/zulip,amallia/zulip,shubhamdhama/zulip,hafeez3000/zulip,m1ssou/zulip,kaiyuanheshang/zulip,johnnygaddarr/zulip,bowlofstew/zulip,arpith/zulip,isht3/zulip,m1ssou/zulip,voidException/zulip,timabbott/zulip,peguin40/zulip,Qgap/zulip,amallia/zulip,thomasboyt/zulip,PaulPetring/zulip,niftynei/zulip,ipernet/zulip,itnihao/zulip,reyha/zulip,lfranchi/zulip,RobotCaleb/zulip,verma-varsha/zulip,wdaher/zulip,Qgap/zulip,calvinleenyc/zulip,Frouk/zulip,eeshangarg/zulip,zacps/zulip,niftynei/zulip,arpitpanwar/zulip,hengqujushi/zulip,souravbadami/zulip,LAndreas/zulip,blaze225/zulip,Drooids/zulip,hayderimran7/zulip,zwily/zulip,itnihao/zulip,hayderimran7/zulip,adnanh/zulip,mohsenSy/zulip,jerryge/zulip,esander91/zulip,zhaoweigg/zulip,mahim97/zulip,zachallaun/zulip,EasonYi/zulip,bssrdf/zulip,joshisa/zulip,eeshangarg/zulip,arpith/zulip,ryansnowboarder/zulip,reyha/zulip,dnmfarrell/zulip,dawran6/zulip,hengqujushi/zulip,easyfmxu/zulip,ryansnowboarder/zulip,luyifan/zulip,guiquanz/zulip,christi3k/zulip,karamcnair/zulip,zwily/zulip,zulip/zulip,easyfmxu/zulip,hackerkid/zulip,tiansiyuan/zulip,atomic-labs/zulip,arpitpanwar/zulip,gigawhitlocks/zulip,bitemyapp/zulip,vabs22/zulip,MayB/zulip,ApsOps/zulip,Qgap/zulip,zorojean/zulip,hafeez3000/zulip,Cheppers/zulip,esander91/zulip,pradiptad/zulip,Vallher/zulip,jonesgithub/zulip,PhilSk/zulip,vaidap/zulip,Suninus/zulip,esander91/zulip,tiansiyuan/zulip,hafeez3000/zulip,adnanh/zulip,glovebx/zulip,peguin40/zulip,synicalsyntax/zulip,xuanhan863/zulip,seapasulli/zulip,hafeez3000/zulip,brockwhittaker/zulip,tommyip/zulip,dnmfarrell/zulip,johnnygaddarr/zulip,Galexrt/zulip,joyhchen/zulip,levixie/zulip,natanovia/zulip,TigorC/zulip,armooo/zulip,JanzTam/zulip,krtkmj/zulip,Drooids/zulip,jrowan/zulip,EasonYi/zulip,brockwhittaker/zulip,brainwane/zulip,alliejones/zulip,levixie/zulip,verma-varsha/zulip,gkotian/zulip,thomasboyt/zulip,proliming/zulip,udxxabp/zulip,paxapy/zulip,armooo/zulip,amanharitsh123/zulip,akuseru/zulip,jackrzhang/zulip,seapasulli/zulip,seapasulli/zulip,niftynei/zulip,Jianchun1/zulip,bluesea/zulip,joshisa/zulip,zofuthan/zulip,xuanhan863/zulip,jessedhillon/zulip,synicalsyntax/zulip,atomic-labs/zulip,shaunstanislaus/zulip,hustlzp/zulip,deer-hope/zulip,easyfmxu/zulip,PhilSk/zulip,RobotCaleb/zulip,praveenaki/zulip,proliming/zulip,jerryge/zulip,jerryge/zulip,schatt/zulip,KingxBanana/zulip,bluesea/zulip,kokoar/zulip,gkotian/zulip,MariaFaBella85/zulip,punchagan/zulip,amyliu345/zulip,TigorC/zulip,huangkebo/zulip,seapasulli/zulip,babbage/zulip,moria/zulip,Vallher/zulip,jainayush975/zulip,mdavid/zulip,saitodisse/zulip,ikasumiwt/zulip,hengqujushi/zulip,zacps/zulip,RobotCaleb/zulip,tdr130/zulip,AZtheAsian/zulip,shubhamdhama/zulip,SmartPeople/zulip,wavelets/zulip,swinghu/zulip,dnmfarrell/zulip,dotcool/zulip,voidException/zulip,zulip/zulip,timabbott/zulip,PaulPetring/zulip,schatt/zulip,guiquanz/zulip,bitemyapp/zulip,ericzhou2008/zulip,wangdeshui/zulip,dxq-git/zulip,souravbadami/zulip,mansilladev/zulip,kaiyuanheshang/zulip,JanzTam/zulip,EasonYi/zulip,developerfm/zulip,thomasboyt/zulip,zhaoweigg/zulip,Diptanshu8/zulip,joyhchen/zulip,zorojean/zulip,peiwei/zulip,ApsOps/zulip,hustlzp/zulip,glovebx/zulip,zorojean/zulip,hj3938/zulip,ikasumiwt/zulip,KingxBanana/zulip,easyfmxu/zulip,rht/zulip,tdr130/zulip,dwrpayne/zulip,eastlhu/zulip,tdr130/zulip,nicholasbs/zulip,PaulPetring/zulip,ahmadassaf/zulip,willingc/zulip,guiquanz/zulip,aakash-cr7/zulip,wangdeshui/zulip,stamhe/zulip,itnihao/zulip,atomic-labs/zulip,ashwinirudrappa/zulip,jphilipsen05/zulip,levixie/zulip,wdaher/zulip,dattatreya303/zulip,hafeez3000/zulip,hackerkid/zulip,adnanh/zulip,jrowan/zulip,EasonYi/zulip,thomasboyt/zulip,susansls/zulip,Diptanshu8/zulip,sup95/zulip,mdavid/zulip,mahim97/zulip,ApsOps/zulip,mansilladev/zulip,wavelets/zulip,themass/zulip,amallia/zulip,avastu/zulip,j831/zulip,qq1012803704/zulip,mansilladev/zulip,wweiradio/zulip,peiwei/zulip,yocome/zulip,eeshangarg/zulip,susansls/zulip,punchagan/zulip,samatdav/zulip,jeffcao/zulip,KJin99/zulip,alliejones/zulip,esander91/zulip,calvinleenyc/zulip,jonesgithub/zulip,peiwei/zulip,bluesea/zulip,paxapy/zulip,bastianh/zulip,ApsOps/zulip,vabs22/zulip,schatt/zulip,samatdav/zulip,jrowan/zulip,jeffcao/zulip,Gabriel0402/zulip,PhilSk/zulip,TigorC/zulip,praveenaki/zulip,dwrpayne/zulip,amyliu345/zulip,amallia/zulip,johnnygaddarr/zulip,shaunstanislaus/zulip,ahmadassaf/zulip,ericzhou2008/zulip,esander91/zulip,jrowan/zulip,sup95/zulip,Qgap/zulip,Frouk/zulip,themass/zulip,blaze225/zulip,avastu/zulip,bowlofstew/zulip,aps-sids/zulip,vaidap/zulip,ipernet/zulip,xuanhan863/zulip,KJin99/zulip,stamhe/zulip,yocome/zulip,TigorC/zulip,aps-sids/zulip,firstblade/zulip,hackerkid/zulip,ryanbackman/zulip,hustlzp/zulip,SmartPeople/zulip,blaze225/zulip,mahim97/zulip,willingc/zulip,developerfm/zulip,yuvipanda/zulip,samatdav/zulip,MariaFaBella85/zulip,ryansnowboarder/zulip,christi3k/zulip,brainwane/zulip,yocome/zulip,Batterfii/zulip,tiansiyuan/zulip,xuanhan863/zulip,bitemyapp/zulip,adnanh/zulip,amanharitsh123/zulip,jessedhillon/zulip,hustlzp/zulip,xuxiao/zulip,schatt/zulip,brockwhittaker/zulip,wdaher/zulip,peguin40/zulip,so0k/zulip,qq1012803704/zulip,peiwei/zulip,yuvipanda/zulip,zacps/zulip,PaulPetring/zulip,krtkmj/zulip,Qgap/zulip,aliceriot/zulip
|
6486a888cbcec7285df92020f76e3f1c5fbba0e2
|
bluebottle/test/test_runner.py
|
bluebottle/test/test_runner.py
|
from django.test.runner import DiscoverRunner
from django.db import connection
from tenant_schemas.utils import get_tenant_model
from bluebottle.test.utils import InitProjectDataMixin
class MultiTenantRunner(DiscoverRunner, InitProjectDataMixin):
def setup_databases(self, *args, **kwargs):
result = super(MultiTenantRunner, self).setup_databases(*args, **kwargs)
# Create secondary tenant
connection.set_schema_to_public()
tenant_domain = 'testserver2'
tenant2 = get_tenant_model()(
domain_url=tenant_domain,
schema_name='test2',
client_name='test2')
tenant2.save(
verbosity=self.verbosity)
# Add basic data for tenant
connection.set_tenant(tenant2)
self.init_projects()
# Create main tenant
connection.set_schema_to_public()
tenant_domain = 'testserver'
tenant = get_tenant_model()(
domain_url=tenant_domain,
schema_name='test',
client_name='test')
tenant.save(
verbosity=self.verbosity)
connection.set_tenant(tenant)
return result
|
from django.test.runner import DiscoverRunner
from django.db import connection
from django.core import management
from tenant_schemas.utils import get_tenant_model
from bluebottle.test.utils import InitProjectDataMixin
class MultiTenantRunner(DiscoverRunner, InitProjectDataMixin):
def setup_databases(self, *args, **kwargs):
result = super(MultiTenantRunner, self).setup_databases(*args, **kwargs)
# Create secondary tenant
connection.set_schema_to_public()
tenant_domain = 'testserver2'
tenant2, _created = get_tenant_model().objects.get_or_create(
domain_url=tenant_domain,
schema_name='test2',
client_name='test2')
tenant2.save(
verbosity=self.verbosity)
# Add basic data for tenant
connection.set_tenant(tenant2)
self.init_projects()
# Create main tenant
connection.set_schema_to_public()
management.call_command('loaddata', 'exchange_rates.json', verbosity=1)
tenant_domain = 'testserver'
tenant, _created = get_tenant_model().objects.get_or_create(
domain_url=tenant_domain,
schema_name='test',
client_name='test')
tenant.save(
verbosity=self.verbosity)
connection.set_tenant(tenant)
return result
|
Load exchange rates in test setup. Make it possible to use --keepdb
|
Load exchange rates in test setup. Make it possible to use --keepdb
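The switch to get_or_create is what makes --keepdb viable: on a reused test database the tenant rows already exist, so unconditionally saving a fresh instance would collide with the existing schema/domain rows, while get_or_create is idempotent across runs, e.g.:

    ./manage.py test --keepdb   # safe to repeat; tenants are fetched instead of re-created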
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
873c5e8bf85a8be5a08852134967d29353ed3009
|
examples/simple.py
|
examples/simple.py
|
from lobster import cmssw
from lobster.core import *
storage = StorageConfiguration(
output=[
"hdfs:///store/user/matze/test_shuffle_take29",
"file:///hadoop/store/user/matze/test_shuffle_take29",
"root://ndcms.crc.nd.edu//store/user/matze/test_shuffle_take29",
"srm://T3_US_NotreDame/store/user/matze/test_shuffle_take29",
]
)
processing = Category(
name='processing',
cores=1,
runtime=900,
memory=1000
)
workflows = []
single_mu = Workflow(
label='single_mu',
dataset=cmssw.Dataset(
dataset='/SingleMu/Run2012A-recover-06Aug2012-v1/AOD',
events_per_task=5000
),
category=processing,
pset='slim.py',
publish_label='test',
merge_size='3.5G',
outputs=['output.root']
)
workflows.append(single_mu)
config = Config(
label='shuffle',
workdir='/tmpscratch/users/matze/test_shuffle_take30',
plotdir='/afs/crc.nd.edu/user/m/mwolf3/www/lobster/test_shuffle_take29',
storage=storage,
workflows=workflows,
advanced=AdvancedOptions(log_level=1)
)
|
from lobster import cmssw
from lobster.core import *
storage = StorageConfiguration(
output=[
"hdfs:///store/user/matze/test_shuffle_take29",
"file:///hadoop/store/user/matze/test_shuffle_take29",
"root://T3_US_NotreDame/store/user/matze/test_shuffle_take29",
"srm://T3_US_NotreDame/store/user/matze/test_shuffle_take29",
]
)
processing = Category(
name='processing',
cores=1,
runtime=900,
memory=1000
)
workflows = []
single_mu = Workflow(
label='single_mu',
dataset=cmssw.Dataset(
dataset='/SingleMu/Run2012A-recover-06Aug2012-v1/AOD',
events_per_task=5000
),
category=processing,
pset='slim.py',
publish_label='test',
merge_size='3.5G',
outputs=['output.root']
)
workflows.append(single_mu)
config = Config(
label='shuffle',
workdir='/tmpscratch/users/matze/test_shuffle_take30',
plotdir='/afs/crc.nd.edu/user/m/mwolf3/www/lobster/test_shuffle_take29',
storage=storage,
workflows=workflows,
advanced=AdvancedOptions(log_level=1)
)
|
Swap ndcms for generic T3 string.
|
Swap ndcms for generic T3 string.
|
Python
|
mit
|
matz-e/lobster,matz-e/lobster,matz-e/lobster
|
587abec7ff5b90c03885e164d9b6b62a1fb41f76
|
grip/github_renderer.py
|
grip/github_renderer.py
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
headers = {'content-type': 'application/json'}
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/x-markdown'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
|
Fix the headers sent by the GitHub renderer.
|
Fix the headers sent by the GitHub renderer.
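For reference, a sketch of the corrected requests (unauthenticated, with placeholder text):

    import json
    import requests

    # gfm mode: JSON payload, so declare application/json
    requests.post('https://api.github.com/markdown',
                  headers={'content-type': 'application/json'},
                  data=json.dumps({'text': '# Hello', 'mode': 'gfm'}))

    # raw mode: plain markdown, declared as text/x-markdown
    requests.post('https://api.github.com/markdown/raw',
                  headers={'content-type': 'text/x-markdown'},
                  data='# Hello')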
|
Python
|
mit
|
ssundarraj/grip,mgoddard-pivotal/grip,mgoddard-pivotal/grip,joeyespo/grip,joeyespo/grip,jbarreras/grip,jbarreras/grip,ssundarraj/grip
|
71a84ecb772aa5560e35409219c11001ac168c6a
|
chmvh_website/contact/forms.py
|
chmvh_website/contact/forms.py
|
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader
class ContactForm(forms.Form):
name = forms.CharField()
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea(
attrs={'rows': 5}))
template = loader.get_template('contact/email/message.txt')
def send_email(self):
subject = '[CHMVH Website] Message from {}'.format(
self.cleaned_data['name'])
context = {
'name': self.cleaned_data['name'],
'email': self.cleaned_data['email'],
'message': self.cleaned_data['message'],
}
emails_sent = mail.send_mail(
subject,
self.template.render(context),
settings.DEFAULT_FROM_EMAIL,
['[email protected]'],
fail_silently=True)
return emails_sent == 1
|
import logging
from smtplib import SMTPException
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader
logger = logging.getLogger('chmvh_website.{0}'.format(__name__))
class ContactForm(forms.Form):
name = forms.CharField()
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea(
attrs={'rows': 5}))
template = loader.get_template('contact/email/message.txt')
def send_email(self):
subject = '[CHMVH Website] Message from {}'.format(
self.cleaned_data['name'])
context = {
'name': self.cleaned_data['name'],
'email': self.cleaned_data['email'],
'message': self.cleaned_data['message'],
}
logger.debug("Preparing to send email")
try:
emails_sent = mail.send_mail(
subject,
self.template.render(context),
settings.DEFAULT_FROM_EMAIL,
['[email protected]'])
logger.info("Succesfully sent email from {0}".format(
self.cleaned_data['email']))
except SMTPException as e:
emails_sent = 0
logger.exception("Failed to send email.", exc_info=e)
return emails_sent == 1
|
Add logging for contact form email.
|
Add logging for contact form email.
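A minimal LOGGING sketch (assumed handler and level choices) that would make these records visible; the logger name matches the 'chmvh_website.{module}' convention used above:

    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {'console': {'class': 'logging.StreamHandler'}},
        'loggers': {
            'chmvh_website': {'handlers': ['console'], 'level': 'DEBUG'},
        },
    }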
|
Python
|
mit
|
cdriehuys/chmvh-website,cdriehuys/chmvh-website,cdriehuys/chmvh-website
|
d7cfdbd2bde0cc876db8c1bce020d8a1cf0ea77b
|
mdot_rest/views.py
|
mdot_rest/views.py
|
from django.shortcuts import render
from .models import Resource
from .serializers import ResourceSerializer
from rest_framework import generics, permissions
class ResourceList(generics.ListCreateAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class ResourceDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
|
from django.shortcuts import render
from .models import Resource
from .serializers import ResourceSerializer
from rest_framework import generics, permissions
import django_filters
class ResourceFilter(django_filters.FilterSet):
class Meta:
model = Resource
fields = ('name', 'featured', 'accessible', 'responsive_web',)
class ResourceList(generics.ListCreateAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
filter_class = ResourceFilter
class ResourceDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
|
Add search filtering for name and booleans in resource API.
|
Add search filtering for name and booleans in resource API.
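Illustrative client calls (hypothetical host and values) enabled by the new filter_class:

    import requests

    requests.get('https://example.com/api/v1/resources/',
                 params={'featured': True, 'name': 'MyUW'})
    requests.get('https://example.com/api/v1/resources/',
                 params={'accessible': True, 'responsive_web': False})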
|
Python
|
apache-2.0
|
uw-it-aca/mdot-rest,uw-it-aca/mdot-rest
|
3555b002aae386220bc02d662a9b188426afc08f
|
cmsplugin_facebook/cms_plugins.py
|
cmsplugin_facebook/cms_plugins.py
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_facebook import models
class BasePlugin(CMSPluginBase):
name = None
def render(self, context, instance, placeholder):
context.update({'instance': instance,
'name': self.name,
'url': instance.pageurl or \
context['request'].build_absolute_uri()})
return context
class FacebookLikeBoxPlugin(BasePlugin):
model = models.FacebookLikeBox
name = 'Facebook Like Box'
render_template = 'cmsplugin_facebook/likebox.html'
change_form_template = 'cmsplugin_facebook/likebox_change_form.html'
class FacebookLikeButtonPlugin(BasePlugin):
model = models.FacebookLikeButton
name = 'Facebook Like Button'
render_template = 'cmsplugin_facebook/likebutton.html'
change_form_template = 'cmsplugin_facebook/likebutton_change_form.html'
plugin_pool.register_plugin(FacebookLikeBoxPlugin)
plugin_pool.register_plugin(FacebookLikeButtonPlugin)
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_facebook import models
class BasePlugin(CMSPluginBase):
name = None
def render(self, context, instance, placeholder):
context.update({'instance': instance,
'name': self.name,
'url': instance.pageurl or \
context['request'].build_absolute_uri()})
return context
class FacebookLikeBoxPlugin(BasePlugin):
model = models.FacebookLikeBox
name = 'Facebook Like Box'
module = 'Facebook'
render_template = 'cmsplugin_facebook/likebox.html'
change_form_template = 'cmsplugin_facebook/likebox_change_form.html'
class FacebookLikeButtonPlugin(BasePlugin):
model = models.FacebookLikeButton
name = 'Facebook Like Button'
module = 'Facebook'
render_template = 'cmsplugin_facebook/likebutton.html'
change_form_template = 'cmsplugin_facebook/likebutton_change_form.html'
plugin_pool.register_plugin(FacebookLikeBoxPlugin)
plugin_pool.register_plugin(FacebookLikeButtonPlugin)
|
Create a specific group for the Facebook plugins - makes it a bit neater in the list of plugins.
|
Create a specific group for the Facebook plugins - makes it a bit neater in the list of plugins.
|
Python
|
bsd-3-clause
|
chrisglass/cmsplugin_facebook
|
c2798702a1f2b1dc40c10b481b9989f9a86c71b2
|
helpers/fix_fathatan.py
|
helpers/fix_fathatan.py
|
# -*- coding: utf-8 -*-
import os
import re
import argparse
def fix_fathatan(file_path):
with open(file_path, 'r') as file:
lines = file.readlines()
new_lines = []
for line in lines:
new_lines.append(re.sub(r'اً', 'ًا', line))
file_path = file_path.split(os.sep)
file_path[-1] = 'fixed_' + file_path[-1]
file_path = os.sep.join(file_path)
with open(file_path, 'w') as file:
file.write(''.join(new_lines))
print(file_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Changes after-Alif fathatan to before-Alif fathatan')
parser.add_argument('-in', '--file-path', help='File path to fix it', required=True)
args = parser.parse_args()
fix_fathatan(args.file_path)
|
# -*- coding: utf-8 -*-
import os
import re
import argparse
def fix_fathatan(file_path):
with open(file_path, 'r') as file:
lines = file.readlines()
new_lines = []
for line in lines:
new_lines.append(re.sub(r'اً', 'ًا', line))
file_path = file_path.split(os.sep)
file_path[-1] = 'fixed_' + file_path[-1]
file_path = os.sep.join(file_path)
with open(file_path, 'w') as file:
file.write(''.join(new_lines))
print(file_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Changes after-Alif fathatan to before-Alif fathatan')
parser.add_argument('-in', '--file-path', help='File path to fix it', required=True)
args = parser.parse_args()
fix_fathatan(args.file_path)
|
Fix indentation error in some helpers
|
Fix indentation error in some helpers
|
Python
|
mit
|
AliOsm/arabic-text-diacritization
|
97a490db75f0a4976199365c3f654ba8cdb9a781
|
01_Built-in_Types/tuple.py
|
01_Built-in_Types/tuple.py
|
#!/usr/bin/env python
import sys
import pickle
# Check argument
if len(sys.argv) != 2:
print("%s filename" % sys.argv[0])
raise SystemExit(1)
# Write tuples
file = open(sys.argv[1], "wb");
line = []
while True:
print("Enter name, age, score (ex: zzz, 16, 90) or quit");
line = sys.stdin.readline()
if line == "quit\n":
break
raws = line.split(",")
name = raws[0]
age = int(raws[1])
score = int(raws[2])
record = (name, age, score)
pickle.dump(record, file)
file.close()
# Read back
file = open(sys.argv[1], "rb");
while True:
try:
record = pickle.load(file)
print record
name, age, score= record
print("name = %s" % name)
print("name = %d" % age)
print("name = %d" % score)
except (EOFError):
break
file.close()
|
#!/usr/bin/env python
import sys
import pickle
# Test zip, and format in print
names = ["xxx", "yyy", "zzz"]
ages = [18, 19, 20]
persons = zip(names, ages)
for name, age in persons:
print "{0}'s age is {1}".format(name, age)
# Check argument
if len(sys.argv) != 2:
print("%s filename" % sys.argv[0])
raise SystemExit(1)
# Write tuples
file = open(sys.argv[1], "wb");
line = []
while True:
print("Enter name, age, score (ex: zzz, 16, 90) or quit");
line = sys.stdin.readline()
if line == "quit\n":
break
raws = line.split(",")
name = raws[0]
age = int(raws[1])
score = int(raws[2])
record = (name, age, score)
pickle.dump(record, file)
file.close()
# Read back
file = open(sys.argv[1], "rb");
while True:
try:
record = pickle.load(file)
print record
name, age, score= record
print("name = %s" % name)
print("name = %d" % age)
print("name = %d" % score)
except (EOFError):
break
file.close()
|
Test zip, and print format
|
Test zip, and print format
|
Python
|
bsd-2-clause
|
zzz0072/Python_Exercises,zzz0072/Python_Exercises
|
e5963987e678926ad8cdde93e2551d0516a7686b
|
slave/skia_slave_scripts/android_bench_pictures.py
|
slave/skia_slave_scripts/android_bench_pictures.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia bench_pictures executable. """
from android_render_pictures import AndroidRenderPictures
from android_run_bench import DoBench
from bench_pictures import BenchPictures
from build_step import BuildStep
import sys
class AndroidBenchPictures(BenchPictures, AndroidRenderPictures):
def _DoBenchPictures(self, config, threads):
data_file = self._BuildDataFile(self._device_dirs.SKPPerfDir(), config,
threads)
args = self._PictureArgs(self._device_dirs.SKPDir(), config, threads)
DoBench(serial=self._serial,
executable='bench_pictures',
perf_data_dir=self._perf_data_dir,
device_perf_dir=self._device_dirs.SKPPerfDir(),
data_file=data_file,
extra_args=args)
def _Run(self):
self._PushSKPSources(self._serial)
super(AndroidBenchPictures, self)._Run()
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(AndroidBenchPictures))
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia bench_pictures executable. """
from android_render_pictures import AndroidRenderPictures
from android_run_bench import DoBench
from bench_pictures import BenchPictures
from build_step import BuildStep
import sys
class AndroidBenchPictures(BenchPictures, AndroidRenderPictures):
def __init__(self, args, attempts=1, timeout=4800):
super(AndroidBenchPictures, self).__init__(args, attempts=attempts,
timeout=timeout)
def _DoBenchPictures(self, config, threads):
data_file = self._BuildDataFile(self._device_dirs.SKPPerfDir(), config,
threads)
args = self._PictureArgs(self._device_dirs.SKPDir(), config, threads)
DoBench(serial=self._serial,
executable='bench_pictures',
perf_data_dir=self._perf_data_dir,
device_perf_dir=self._device_dirs.SKPPerfDir(),
data_file=data_file,
extra_args=args)
def _Run(self):
self._PushSKPSources(self._serial)
super(AndroidBenchPictures, self)._Run()
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(AndroidBenchPictures))
|
Increase timeout for bench_pictures on Android
|
Increase timeout for bench_pictures on Android
Unreviewed.
git-svn-id: 32fc27f4dcfb6c0385cd9719852b95fe6680452d@6290 2bbb7eff-a529-9590-31e7-b0007b416f81
|
Python
|
bsd-3-clause
|
Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot,Tiger66639/skia-buildbot,google/skia-buildbot,Tiger66639/skia-buildbot
|
b219823af7188f968d7c52c5273148c510bd7454
|
blaze/compute/air/frontend/ckernel_impls.py
|
blaze/compute/air/frontend/ckernel_impls.py
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
"""
Convert 'kernel' Op to 'ckernel'.
"""
from __future__ import absolute_import, division, print_function
from pykit.ir import transform, Op
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
# Default overload is CKERNEL, so no need to look it up again
overload = op.metadata['overload']
impl = overload.func
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
Simplify the ckernel pass a bit more
|
Simplify the ckernel pass a bit more
|
Python
|
bsd-3-clause
|
ChinaQuants/blaze,jdmcbr/blaze,xlhtc007/blaze,aterrel/blaze,FrancescAlted/blaze,mrocklin/blaze,maxalbert/blaze,alexmojaki/blaze,dwillmer/blaze,cowlicks/blaze,LiaoPan/blaze,jcrist/blaze,maxalbert/blaze,mrocklin/blaze,cowlicks/blaze,ChinaQuants/blaze,aterrel/blaze,scls19fr/blaze,mwiebe/blaze,FrancescAlted/blaze,nkhuyu/blaze,scls19fr/blaze,ContinuumIO/blaze,xlhtc007/blaze,alexmojaki/blaze,nkhuyu/blaze,caseyclements/blaze,LiaoPan/blaze,FrancescAlted/blaze,FrancescAlted/blaze,jcrist/blaze,jdmcbr/blaze,dwillmer/blaze,mwiebe/blaze,mwiebe/blaze,mwiebe/blaze,ContinuumIO/blaze,cpcloud/blaze,aterrel/blaze,caseyclements/blaze,cpcloud/blaze
|
970978b5355259fe943d5efed1b8b4ce945fdfa7
|
weather.py
|
weather.py
|
#! /usr/bin/python2
from os.path import expanduser,isfile
from sys import argv
from urllib import urlopen
location_path="~/.location"
def location_from_homedir():
if isfile(expanduser(location_path)):
with open(expanduser(location_path)) as f:
return "&".join(f.read().split("\n"))
else:
print("no location file at ", location_path)
def location_from_file(file):
try:
f = open(expanduser(file),'r')
except:
print("file ", location_file, " not found")
location_from_homedir
if len(argv) == 1:
# not given location file
data = location_from_homedir()
elif len(argv) == 2:
# given location file
data = location_from_file(argv[1])
else:
# wrong number of arguments
print("Usage: ", argv[0], " [location file]")
url="http://forecast.weather.gov/MapClick.php?"+data+"FcstType=digitalDWML"
forecast = urlopen(url).read()
|
#! /usr/bin/python2
from os.path import expanduser,isfile
import sys
from urllib import urlopen
location_path="~/.location"
def location_from_homedir():
if isfile(expanduser(location_path)):
with open(expanduser(location_path)) as f:
return "&".join(f.read().split("\n"))
else:
print("no location file at ", location_path)
sys.exit(2)
def location_from_file(location_file):
try:
f = open(expanduser(location_file),'r')
except:
print("file ", location_file, " not found\nLooking in home directory")
return location_from_homedir()
if len(sys.argv) == 1:
# not given location file
data = location_from_homedir()
elif len(sys.argv) == 2:
# given location file
data = location_from_file(sys.argv[1])
else:
# wrong number of arguments
print("Usage: ", sys.argv[0], " [location file]")
sys.exit(1)
url="http://forecast.weather.gov/MapClick.php?"+data+"FcstType=digitalDWML"
forecast = urlopen(url).read()
|
Debug control flow and exit on errors
|
Debug control flow and exit on errors
|
Python
|
mit
|
robbystk/weather
|
1caace2631f8e9c38cf0adfb1179a5260dcd3c33
|
tools/management/commands/output_all_uniprot.py
|
tools/management/commands/output_all_uniprot.py
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from django.db.models import Q
from django.template.loader import render_to_string
from protein.models import Protein
from residue.models import ResidueGenericNumber, ResidueGenericNumberEquivalent
from common import definitions
from common.selection import SelectionItem
from common.alignment_gpcr import Alignment
import xlsxwriter, xlrd
import logging, json, os
class Command(BaseCommand):
help = "Output all uniprot mappings"
logger = logging.getLogger(__name__)
def handle(self, *args, **options):
#Get the proteins
f = open('uniprot.json', 'w')
ps = Protein.objects.filter(Q(source__name='SWISSPROT') | Q(source__name='TREMBL'),web_links__web_resource__slug='uniprot').all().prefetch_related('web_links__web_resource')
print('total:',len(ps))
mapping = {}
for p in ps:
uniprot = p.web_links.get(web_resource__slug='uniprot')
mapping[p.entry_name] = uniprot.index
json.dump(mapping,f, indent=4, separators=(',', ': '))
# print("Seqs: {}\tNot matching: {}".format(num_of_sequences, num_of_non_matching_sequences))
# open("uniprot.txt", "w").write()
|
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from django.db.models import Q
from django.template.loader import render_to_string
from protein.models import Protein
from residue.models import ResidueGenericNumber, ResidueGenericNumberEquivalent
from common import definitions
from common.selection import SelectionItem
from common.alignment_gpcr import Alignment
import xlsxwriter, xlrd
import logging, json, os
class Command(BaseCommand):
help = "Output all uniprot mappings"
logger = logging.getLogger(__name__)
def handle(self, *args, **options):
#Get the proteins
f = open('uniprot.json', 'w')
ps = Protein.objects.filter(Q(source__name='SWISSPROT') | Q(source__name='TREMBL'),web_links__web_resource__slug='uniprot').all().prefetch_related('web_links__web_resource')
print('total:',len(ps))
mapping = {}
for p in ps:
uniprot = p.web_links.filter(web_resource__slug='uniprot').values_list('index', flat = True)
mapping[p.entry_name] = list(uniprot)
json.dump(mapping,f, indent=4, separators=(',', ': '))
# print("Seqs: {}\tNot matching: {}".format(num_of_sequences, num_of_non_matching_sequences))
# open("uniprot.txt", "w").write()
|
Change output_all_uniprot to allow multiple IDs for some proteins.
|
Change output_all_uniprot to allow multiple IDs for some proteins.
|
Python
|
apache-2.0
|
cmunk/protwis,fosfataza/protwis,fosfataza/protwis,fosfataza/protwis,cmunk/protwis,protwis/protwis,cmunk/protwis,cmunk/protwis,fosfataza/protwis,protwis/protwis,protwis/protwis
|
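The commit above swaps web_links.get(...) for web_links.filter(...).values_list('index', flat=True): QuerySet.get() raises MultipleObjectsReturned as soon as a protein carries a second UniProt link, while filter() tolerates any number of rows and values_list(flat=True) flattens them into plain values. A minimal sketch of the difference, assuming a protein object with the same related manager (the try/except fallback shape is an illustration, not part of the commit):

from django.core.exceptions import MultipleObjectsReturned

def uniprot_ids(protein):
    try:
        # Fails with MultipleObjectsReturned when a second matching row exists.
        link = protein.web_links.get(web_resource__slug='uniprot')
        return [link.index]
    except MultipleObjectsReturned:
        # The pattern the commit adopts: works for zero, one or many links.
        qs = protein.web_links.filter(web_resource__slug='uniprot')
        return list(qs.values_list('index', flat=True))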
1b06091101c119f30eb5eabb2d2638fab0e8f658
|
test_debug.py
|
test_debug.py
|
from bullsandcows import isdebugmode
def test_isdebugmode():
assert isdebugmode() == 0, "program is in debug mode, this should not be commited"
|
from bullsandcows import isdebug
def test_isdebug():
assert isdebug() == 0, "program is in debug mode, this should not be commited"
|
Test modified to work with renamed debug function
|
Test modified to work with renamed debug function
|
Python
|
mit
|
petrgabrlik/BullsAndCows
|
1305af162dd05591cc0e5328eb192843b63dabb1
|
kk/urls_v1.py
|
kk/urls_v1.py
|
from django.conf.urls import include, url
from kk.views import (
HearingCommentViewSet, HearingImageViewSet, HearingViewSet, SectionCommentViewSet,
SectionViewSet, UserDataViewSet
)
from rest_framework_nested import routers
router = routers.SimpleRouter()
router.register(r'hearing', HearingViewSet)
router.register(r'users', UserDataViewSet, base_name='users')
hearing_comments_router = routers.NestedSimpleRouter(router, r'hearing', lookup='comment_parent')
hearing_comments_router.register(r'comments', HearingCommentViewSet, base_name='comments')
hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')
hearing_child_router.register(r'images', HearingImageViewSet, base_name='images')
section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')
urlpatterns = [
url(r'^', include(router.urls, namespace='v1')),
url(r'^', include(hearing_comments_router.urls, namespace='v1')),
url(r'^', include(hearing_child_router.urls, namespace='v1')),
url(r'^', include(section_comments_router.urls, namespace='v1')),
]
|
from django.conf.urls import include, url
from kk.views import (
HearingCommentViewSet, HearingImageViewSet, HearingViewSet, SectionCommentViewSet,
SectionViewSet, UserDataViewSet
)
from rest_framework_nested import routers
router = routers.DefaultRouter()
router.register(r'hearing', HearingViewSet)
router.register(r'users', UserDataViewSet, base_name='users')
hearing_comments_router = routers.NestedSimpleRouter(router, r'hearing', lookup='comment_parent')
hearing_comments_router.register(r'comments', HearingCommentViewSet, base_name='comments')
hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')
hearing_child_router.register(r'images', HearingImageViewSet, base_name='images')
section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')
urlpatterns = [
url(r'^', include(router.urls, namespace='v1')),
url(r'^', include(hearing_comments_router.urls, namespace='v1')),
url(r'^', include(hearing_child_router.urls, namespace='v1')),
url(r'^', include(section_comments_router.urls, namespace='v1')),
]
|
Use DefaultRouter instead of SimpleRouter
|
Use DefaultRouter instead of SimpleRouter
|
Python
|
mit
|
City-of-Helsinki/kerrokantasi,vikoivun/kerrokantasi,stephawe/kerrokantasi,stephawe/kerrokantasi,stephawe/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,vikoivun/kerrokantasi,City-of-Helsinki/kerrokantasi,vikoivun/kerrokantasi
|
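The practical difference behind that one-line change: DefaultRouter generates everything SimpleRouter does plus an API root view at the router's base URL that lists the registered endpoints, which makes the API browsable from its root. A short sketch, assuming Django REST Framework is installed; PingViewSet is a hypothetical stand-in:

from rest_framework import routers, viewsets
from rest_framework.response import Response

class PingViewSet(viewsets.ViewSet):  # hypothetical minimal viewset
    def list(self, request):
        return Response({'ping': 'pong'})

simple = routers.SimpleRouter()
simple.register(r'ping', PingViewSet, base_name='ping')

default = routers.DefaultRouter()
default.register(r'ping', PingViewSet, base_name='ping')

# simple.urls: only the /ping/ routes.
# default.urls: the /ping/ routes plus a root view at '' that returns
# {'ping': 'http://.../ping/'}.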
abd3daed5cd0c70d76bf8fa1cfdda93efcda3e70
|
knights/compat/django.py
|
knights/compat/django.py
|
from django.core.urlresolvers import reverse
from django.utils.encoding import iri_to_uri
import datetime
from knights.library import Library
register = Library()
@register.helper
def now(fmt):
return datetime.datetime.now().strftime(fmt)
@register.helper
def url(name, *args, **kwargs):
try:
return reverse(name, args=args, kwargs=kwargs)
except:
return None
@register.helper
def static(filename):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, filename, ''))
return prefix
|
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.encoding import iri_to_uri
import datetime
from knights.library import Library
register = Library()
@register.helper
def now(fmt):
return timezone.now().strftime(fmt)
@register.helper
def url(name, *args, **kwargs):
try:
return reverse(name, args=args, kwargs=kwargs)
except:
return None
@register.helper
def static(filename):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, filename, ''))
return prefix
|
Make the `now` helper timezone aware
|
Make the `now` helper timezone aware
Thanks @tysonclugg
|
Python
|
mit
|
funkybob/knights-templater,funkybob/knights-templater
|
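Why the swap matters: datetime.datetime.now() returns a naive timestamp in the server's local zone, while django.utils.timezone.now() returns an aware UTC datetime whenever settings.USE_TZ is enabled, so the now helper stops depending on where the host happens to run. A small sketch of the distinction (requires a configured Django settings module; the second assertion assumes USE_TZ = True):

import datetime
from django.utils import timezone

naive = datetime.datetime.now()   # tzinfo is None; implicitly server-local
aware = timezone.now()            # tzinfo set (UTC) when USE_TZ = True
assert naive.tzinfo is None
assert aware.tzinfo is not None   # holds only with USE_TZ enabled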
4e9a530403dce47f322df471255a0fc40fd1071f
|
examples/tic_ql_tabular_selfplay_all.py
|
examples/tic_ql_tabular_selfplay_all.py
|
'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import QLearningSelfPlay
from capstone.rl.policies import EGreedy, RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import TabularQ
game = TicTacToe()
env = Environment(GameMDP(game))
tabularq = TabularQ(random_state=23)
egreedy = EGreedy(env.actions, tabularq, epsilon=0.5, random_state=23)
rand_policy = RandomPolicy(env.actions, random_state=23)
qlearning = QLearningSelfPlay(
env=env,
qf=tabularq,
policy=rand_policy,
learning_rate=0.1,
discount_factor=0.99,
n_episodes=3000,
verbose=0,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=23),
n_matches=2000,
period=1000,
filename='tic_ql_tabular_selfplay_all.pdf'
)
]
)
qlearning.learn()
|
'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import QLearningSelfPlay
from capstone.rl.policies import EGreedy, RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import TabularQ
game = TicTacToe()
env = Environment(GameMDP(game))
tabularq = TabularQ(random_state=23)
egreedy = EGreedy(env.actions, tabularq, epsilon=0.5, random_state=23)
rand_policy = RandomPolicy(env.actions, random_state=23)
qlearning = QLearningSelfPlay(
env=env,
qf=tabularq,
policy=rand_policy,
learning_rate=0.1,
discount_factor=0.99,
n_episodes=60000,
verbose=0,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=23),
n_matches=2000,
period=1000,
filename='tic_ql_tabular_selfplay_all.pdf'
)
]
)
qlearning.learn()
|
Change number of episodes to 60000
|
Change number of episodes to 60000
|
Python
|
mit
|
davidrobles/mlnd-capstone-code
|
52a6ea1e7dd4333b9db6a0bbd53b8ae0b39a1f6d
|
Designs/redundant.py
|
Designs/redundant.py
|
'''Due to the needs arising from completing the project on time, I have defined redundant.py
which will hold replacement modules as I migrate from a file-based application to a lists-only web application. This module
so far will offer the capabilities of registration, creating a shopping list and adding items into
a shopping list'''
global account
account=[]
def register(username,email,password):
account.append(username)
account.append(email)
account.append(password)
return account
global shopping_list_container
shopping_list_container=[]#contain shopping lists only
def create(list_name):
#list_name=[]
shopping_list_container.append(list_name)
return shopping_list_container#list of dictionaries
def list_update(nameoflist,item):
nameoflist.append(item)
shopping_list_container.append(nameoflist)
global itemsdictionary
itemsdictionary={}
def create1(slist):
itemsdictionary.update(slist)
global shared_shopping_list_container
shared_shopping_list_container=[]
def create3(list_name):
#list_name=[]
shared_shopping_list_container.append(list_name)
return shared_shopping_list_container#list of dictionaries
global shareditemsdictionary
shareditemsdictionary={}
def create2(slist):
shareditemsdictionary.update(slist)
|
'''Due to the needs arising from completing the project on time, I have defined redundant.py
which will hold replacement modules as I migrate from a file-based application to a lists-only web application. This module
so far will offer the capabilities of registration, creating a shopping list and adding items into
a shopping list'''
global account
account=[]
def register(username,email,password):
'''registration list'''
account.append(username)
account.append(email)
account.append(password)
return account
global shopping_list_container
shopping_list_container=[]#contain shopping lists only
def create(list_name):
'''container of names of shopping lists'''
#list_name=[]
shopping_list_container.append(list_name)
return shopping_list_container#list of dictionaries
def list_update(nameoflist,item):
'''adding item to a given name of list'''
nameoflist.append(item)
shopping_list_container.append(nameoflist)
global itemsdictionary
itemsdictionary={}
def create1(slist):
'''update shopping lists with key (names) and items(as dictionaris)'''
itemsdictionary.update(slist)
global shared_shopping_list_container
shared_shopping_list_container=[]
def create3(list_name):
'''container for the shared lists. In future may be integrated with facebook'''
#list_name=[]
shared_shopping_list_container.append(list_name)
return shared_shopping_list_container#list of dictionaries
global shareditemsdictionary
shareditemsdictionary={}
def create2(slist):
'''updating shared dictionary'''
shareditemsdictionary.update(slist)
|
Add __doc__ to module functions
|
[Fix] Add __doc__ to module functions
|
Python
|
mit
|
AndersonMasese/Myshop,AndersonMasese/Myshop,AndersonMasese/Myshop
|
22428bcdbb095b407a0845c35e06c8ace0653a44
|
urls.py
|
urls.py
|
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/(.*)', admin.site.root),
(r'^', include('blangoblog.blango.urls')),
)
handler500 = 'blango.views.server_error'
handler404 = 'blango.views.page_not_found'
if settings.DEBUG:
from os.path import abspath, dirname, join
PROJECT_DIR = dirname(abspath(__file__))
urlpatterns += patterns('',
(r'^site-media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(PROJECT_DIR, 'media')}),
)
|
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/(.*)', admin.site.root),
(r'^', include('blangoblog.blango.urls')),
)
handler500 = 'blango.views.server_error'
handler404 = 'blango.views.page_not_found'
if settings.DEBUG:
from os.path import abspath, dirname, join
PROJECT_DIR = dirname(abspath(__file__))
urlpatterns += patterns('',
(r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:], 'django.views.static.serve', {'document_root': join(PROJECT_DIR, 'media')}),
)
|
Use MEDIA_URL instead of a hardcoded path
|
Use MEDIA_URL instead of a hardcoded path
|
Python
|
bsd-3-clause
|
fiam/blangoblog,fiam/blangoblog,fiam/blangoblog
|
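The replacement pattern above keeps the media prefix in exactly one place: MEDIA_URL[1:] strips the leading slash because Django matches url regexes against the request path with that slash already removed. A standalone sketch of the substitution, assuming MEDIA_URL = '/site-media/' as in the old hardcoded regex:

MEDIA_URL = '/site-media/'  # assumed value, for illustration
pattern = r'^%s(?P<path>.*)$' % MEDIA_URL[1:]
print(pattern)  # ^site-media/(?P<path>.*)$ -- identical to the hardcoded version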
224700aada7e7d80b4389b123ee00b5f14e88c99
|
fluent_contents/plugins/text/content_plugins.py
|
fluent_contents/plugins/text/content_plugins.py
|
"""
Definition of the plugin.
"""
from django.utils.html import format_html
from fluent_contents.extensions import ContentPlugin, plugin_pool, ContentItemForm
from fluent_contents.plugins.text.models import TextItem
class TextItemForm(ContentItemForm):
"""
Perform extra processing for the text item
"""
def clean_text(self, html):
"""
Perform the cleanup in the form, allowing to raise a ValidationError
"""
return self.instance.apply_pre_filters(html)
@plugin_pool.register
class TextPlugin(ContentPlugin):
"""
CMS plugin for WYSIWYG text items.
"""
model = TextItem
form = TextItemForm
admin_init_template = "admin/fluent_contents/plugins/text/admin_init.html" # TODO: remove the need for this.
admin_form_template = ContentPlugin.ADMIN_TEMPLATE_WITHOUT_LABELS
search_output = True
def render(self, request, instance, **kwargs):
# Included in a DIV, so the next item will be displayed below.
# The text_final is allowed to be None, to migrate old plugins.
text = instance.text if instance.text_final is None else instance.text_final
return format_html(u'<div class="text">{0}</div>\n', text)
|
"""
Definition of the plugin.
"""
from django.utils.safestring import mark_safe
from fluent_contents.extensions import ContentPlugin, plugin_pool, ContentItemForm
from fluent_contents.plugins.text.models import TextItem
class TextItemForm(ContentItemForm):
"""
Perform extra processing for the text item
"""
def clean_text(self, html):
"""
Perform the cleanup in the form, allowing to raise a ValidationError
"""
return self.instance.apply_pre_filters(html)
@plugin_pool.register
class TextPlugin(ContentPlugin):
"""
CMS plugin for WYSIWYG text items.
"""
model = TextItem
form = TextItemForm
admin_init_template = "admin/fluent_contents/plugins/text/admin_init.html" # TODO: remove the need for this.
admin_form_template = ContentPlugin.ADMIN_TEMPLATE_WITHOUT_LABELS
search_output = True
def render(self, request, instance, **kwargs):
# Included in a DIV, so the next item will be displayed below.
# The text_final is allowed to be None, to migrate old plugins.
text = instance.text if instance.text_final is None else instance.text_final
return mark_safe(u'<div class="text">{0}</div>\n'.format(text))
|
Fix HTML escaping of TextPlugin, by feature/text-filters branch
|
Fix HTML escaping of TextPlugin, by feature/text-filters branch
|
Python
|
apache-2.0
|
django-fluent/django-fluent-contents,django-fluent/django-fluent-contents,edoburu/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,edoburu/django-fluent-contents
|
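The trade-off in that render() change: format_html escapes every argument before interpolating it, which double-escapes content that is already valid HTML, whereas mark_safe trusts the string as-is -- appropriate here because instance.text has already been through the plugin's pre-filters. A minimal illustration of the two behaviours:

from django.utils.html import format_html
from django.utils.safestring import mark_safe

html = '<b>bold</b>'
escaped = format_html(u'<div class="text">{0}</div>', html)
# '<div class="text">&lt;b&gt;bold&lt;/b&gt;</div>'  (argument escaped)
trusted = mark_safe(u'<div class="text">{0}</div>'.format(html))
# '<div class="text"><b>bold</b></div>'              (string trusted as-is)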
5fbb4ff5d3427c8f4050fc5b75d4a6a2c15351c6
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Donne Martin'
SITENAME = 'Donne Martin'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
HIDE_SIDEBAR = True
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
STATIC_PATHS = [
'images',
'extra/favicon.ico'
]
EXTRA_PATH_METADATA = {
'extra/favicon.ico': {'path': 'favicon.ico'}
}
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# THEME = 'pelican-bootstrap3'
# BOOTSTRAP_THEME = 'readable'
THEME = 'startbootstrap-agency'
BOOTSTRAP_THEME = ''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'Donne Martin'
SITENAME = 'Donne Martin'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
HIDE_SIDEBAR = True
PYGMENTS_STYLE = 'monokai'
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
STATIC_PATHS = [
'images',
'extra/favicon.ico'
]
EXTRA_PATH_METADATA = {
'extra/favicon.ico': {'path': 'favicon.ico'}
}
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# THEME = 'pelican-bootstrap3'
# BOOTSTRAP_THEME = 'readable'
THEME = 'startbootstrap-agency'
BOOTSTRAP_THEME = ''
|
Set pygments style to monokai.
|
Set pygments style to monokai.
|
Python
|
mit
|
donnemartin/outdated-donnemartin.github.io,donnemartin/outdated-donnemartin.github.io,donnemartin/outdated-donnemartin.github.io,donnemartin/outdated-donnemartin.github.io,donnemartin/outdated-donnemartin.github.io
|
5dc4641a40ff25b439541f6c3c02639a53346985
|
comics/crawlers/betty.py
|
comics/crawlers/betty.py
|
from comics.crawler.base import BaseComicsComComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Get Fuzzy'
language = 'en'
url = 'http://comics.com/betty/'
start_date = '1991-01-01'
history_capable_date = '2008-10-13'
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Delainey & Gerry Rasmussen'
class ComicCrawler(BaseComicsComComicCrawler):
def _get_url(self):
self._get_url_helper('Betty')
|
from comics.crawler.base import BaseComicsComComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Betty'
language = 'en'
url = 'http://comics.com/betty/'
start_date = '1991-01-01'
history_capable_date = '2008-10-13'
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Delainey & Gerry Rasmussen'
class ComicCrawler(BaseComicsComComicCrawler):
def _get_url(self):
self._get_url_helper('Betty')
|
Fix name of 'Betty' comic
|
Fix name of 'Betty' comic
|
Python
|
agpl-3.0
|
klette/comics,jodal/comics,jodal/comics,datagutten/comics,datagutten/comics,klette/comics,datagutten/comics,datagutten/comics,klette/comics,jodal/comics,jodal/comics
|
fbdaeff6f01ffaf0ac4f9a0d0d962a19c2865b32
|
jupyterlab/labhubapp.py
|
jupyterlab/labhubapp.py
|
import os
import warnings
from traitlets import default
from .labapp import LabApp
try:
from jupyterhub.singleuser import SingleUserNotebookApp
except ImportError:
SingleUserLabApp = None
raise ImportError('You must have jupyterhub installed for this to work.')
else:
class SingleUserLabApp(SingleUserNotebookApp, LabApp):
@default("default_url")
def _default_url(self):
"""when using jupyter-labhub, jupyterlab is default ui"""
return "/lab"
def init_webapp(self, *args, **kwargs):
warnings.warn(
"SingleUserLabApp is deprecated, use SingleUserNotebookApp and set " + \
"c.Spawner.default_url = '/lab' in jupyterhub_config.py", DeprecationWarning
)
super().init_webapp(*args, **kwargs)
def main(argv=None):
return SingleUserLabApp.launch_instance(argv)
if __name__ == "__main__":
main()
|
import os
from traitlets import default
from .labapp import LabApp
try:
from jupyterhub.singleuser import SingleUserNotebookApp
except ImportError:
SingleUserLabApp = None
raise ImportError('You must have jupyterhub installed for this to work.')
else:
class SingleUserLabApp(SingleUserNotebookApp, LabApp):
"""
    A subclass of JupyterHub's SingleUserNotebookApp which includes LabApp
as a mixin. This makes the LabApp configurables available to the spawned
jupyter server.
If you don't need to change any of the configurables from their default
values, then this class is not necessary, and you can deploy JupyterLab
by ensuring that its server extension is enabled and setting the
`Spawner.default_url` to '/lab'.
If you do need to configure JupyterLab, then use this application by
setting `Spawner.cmd = ['jupyter-labhub']`.
"""
@default("default_url")
def _default_url(self):
"""when using jupyter-labhub, jupyterlab is default ui"""
return "/lab"
def init_webapp(self, *args, **kwargs):
super().init_webapp(*args, **kwargs)
def main(argv=None):
return SingleUserLabApp.launch_instance(argv)
if __name__ == "__main__":
main()
|
Add docstring documenting the intended use of LabHubApp.
|
Add docstring documenting the intended use of LabHubApp.
|
Python
|
bsd-3-clause
|
jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab
|
de9f9c07c6f1dde8d7ad314b6a6fb58a963e1558
|
geodj/youtube.py
|
geodj/youtube.py
|
from gdata.youtube.service import YouTubeService, YouTubeVideoQuery
from django.utils.encoding import smart_str
import re
class YoutubeMusic:
def __init__(self):
self.service = YouTubeService()
def search(self, artist):
query = YouTubeVideoQuery()
query.vq = artist
query.orderby = 'relevance'
query.racy = 'exclude'
query.format = '5'
query.categories.append("/Music")
feed = self.service.YouTubeQuery(query)
results = []
for entry in feed.entry:
if not self.is_valid_entry(artist, entry):
continue
results.append({
'url': entry.media.player.url,
'title': smart_str(entry.media.title.text),
'duration': int(entry.media.duration.seconds),
})
return {'artist': artist, 'results': results}
def is_valid_entry(self, artist, entry):
duration = int(entry.media.duration.seconds)
title = smart_str(entry.media.title.text).lower()
if entry.rating is not None and float(entry.rating.average) < 3.5:
return False
if duration < (2 * 60) or duration > (9 * 60):
return False
if artist.lower() not in title:
return False
if re.search("\b(concert|cover)\b", title):
return False
return True
|
from gdata.youtube.service import YouTubeService, YouTubeVideoQuery
from django.utils.encoding import smart_str
import re
class YoutubeMusic:
def __init__(self):
self.service = YouTubeService()
def search(self, artist):
query = YouTubeVideoQuery()
query.vq = artist
query.orderby = 'relevance'
query.racy = 'exclude'
query.format = '5'
query.max_results = 50
query.categories.append("/Music")
feed = self.service.YouTubeQuery(query)
results = []
for entry in feed.entry:
if not self.is_valid_entry(artist, entry):
continue
results.append({
'url': entry.media.player.url,
'title': smart_str(entry.media.title.text),
'duration': int(entry.media.duration.seconds),
})
return {'artist': artist, 'results': results}
def is_valid_entry(self, artist, entry):
duration = int(entry.media.duration.seconds)
title = smart_str(entry.media.title.text).lower()
if entry.rating is not None and float(entry.rating.average) < 3.5:
return False
if duration < (2 * 60) or duration > (9 * 60):
return False
if artist.lower() not in title:
return False
if re.search("\b(concert|cover)\b", title):
return False
return True
|
Return as many results as possible
|
Return as many results as possible
|
Python
|
mit
|
6/GeoDJ,6/GeoDJ
|
db1653c551f71092a7eca96e6a4d1c96ef17e06a
|
lib/rapidsms/message.py
|
lib/rapidsms/message.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import copy
class Message(object):
def __init__(self, backend, caller=None, text=None):
self._backend = backend
self.caller = caller
self.text = text
# initialize some empty attributes
self.received = None
self.sent = None
self.responses = []
def __unicode__(self):
return self.text
@property
def backend(self):
# backend is read-only, since it's an
# immutable property of this object
return self._backend
def send(self):
"""Send this message via self.backend, returning
True if the message was sent successfully."""
return self.backend.router.outgoing(self)
def flush_responses (self):
for response in self.responses:
response.send()
def respond(self, text):
"""Send the given text back to the original caller of this
message on the same route that it came in on"""
if self.caller:
response = copy.copy(self)
response.text = text
self.responses.append(response)
return True
else:
return False
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import copy
class Message(object):
def __init__(self, backend, caller=None, text=None):
self._backend = backend
self.caller = caller
self.text = text
self.responses = []
def __unicode__(self):
return self.text
@property
def backend(self):
# backend is read-only, since it's an
# immutable property of this object
return self._backend
def send(self):
"""Send this message via self.backend, returning
True if the message was sent successfully."""
return self.backend.router.outgoing(self)
def flush_responses (self):
for response in self.responses:
response.send()
self.responses.remove(response)
def respond(self, text):
"""Send the given text back to the original caller of this
message on the same route that it came in on"""
if self.caller:
response = copy.copy(self)
response.text = text
self.responses.append(response)
return True
else:
return False
|
Remove unused attributes; also, empty the responses list after it is flushed.
|
Remove unused attributes; also, empty the responses list after it is flushed.
|
Python
|
bsd-3-clause
|
rapidsms/rapidsms-legacy,rapidsms/rapidsms-legacy,rapidsms/rapidsms-legacy
|
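One caveat worth noting about the new flush_responses: calling list.remove() inside a for loop over the same list skips elements (the iterator's index advances past the shifted items), so with several queued responses some can be left unsent. A safer drain pattern, sketched in plain Python -- this reworking is an illustration, not part of the commit:

def flush_responses(self):
    # Swap in a fresh list first, then send from the snapshot,
    # so no response is skipped and the queue still ends up empty.
    pending, self.responses = self.responses, []
    for response in pending:
        response.send()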
388653366ee4db58ed8ce8a9c8ab071593b9fc53
|
lancet/contrib/dploi.py
|
lancet/contrib/dploi.py
|
from shlex import quote
import click
@click.command()
@click.option('-p', '--print/--exec', 'print_cmd', default=False,
help='Print the command instead of executing it.')
@click.argument('environment')
@click.pass_obj
def ssh(lancet, print_cmd, environment):
"""
SSH into the given environment, based on the dploi configuration.
"""
namespace = {}
with open('deployment.py') as fh:
code = compile(fh.read(), 'deployment.py', 'exec')
exec(code, {}, namespace)
config = namespace['settings'][environment]
host = '{}@{}'.format(config['user'], config['hosts'][0])
cmd = ['ssh', '-p', str(config.get('port', 20)), host]
if print_cmd:
click.echo(' '.join(quote(s) for s in cmd))
else:
lancet.defer_to_shell(*cmd)
|
from shlex import quote
import click
@click.command()
@click.option('-p', '--print/--exec', 'print_cmd', default=False,
help='Print the command instead of executing it.')
@click.argument('environment')
@click.pass_obj
def ssh(lancet, print_cmd, environment):
"""
SSH into the given environment, based on the dploi configuration.
"""
namespace = {}
with open('deployment.py') as fh:
code = compile(fh.read(), 'deployment.py', 'exec')
exec(code, {}, namespace)
config = namespace['settings'][environment]
host = '{}@{}'.format(config['user'], config['hosts'][0])
cmd = ['ssh', '-p', str(config.get('port', 22)), host]
if print_cmd:
click.echo(' '.join(quote(s) for s in cmd))
else:
lancet.defer_to_shell(*cmd)
|
Use correct default SSH port
|
Use correct default SSH port
|
Python
|
mit
|
GaretJax/lancet,GaretJax/lancet
|
7d34b407a35fe917e919fc01b3a6c736a7bdc372
|
helpdesk/urls.py
|
helpdesk/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'helpdesk.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'helpdesk.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'', include(admin.site.urls)),
)
|
Remove admin prefix from url
|
Remove admin prefix from url
|
Python
|
mit
|
rosti-cz/django-emailsupport
|
52d76647b1fa50a2649335b65f22f88d7877e9d3
|
spotpy/unittests/test_fast.py
|
spotpy/unittests/test_fast.py
|
import unittest
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup
class TestFast(unittest.TestCase):
def setUp(self):
self.spot_setup = spot_setup()
        self.rep = 200  # REP must be a multiple of the number of parameters, which is 7 when using hymod
self.timeout = 10 # Given in Seconds
def test_fast(self):
sampler = spotpy.algorithms.fast(self.spot_setup, parallel="seq", dbname='test_FAST', dbformat="ram",
sim_timeout=self.timeout)
results = []
sampler.sample(self.rep)
results = sampler.getdata()
self.assertEqual(200,len(results))
if __name__ == '__main__':
unittest.main()
|
import unittest
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup
class TestFast(unittest.TestCase):
def setUp(self):
self.spot_setup = spot_setup()
        self.rep = 200  # REP must be a multiple of the number of parameters, which is 7 when using hymod
self.timeout = 10 # Given in Seconds
def test_fast(self):
sampler = spotpy.algorithms.fast(self.spot_setup, parallel="seq", dbname='test_FAST', dbformat="ram",
sim_timeout=self.timeout)
results = []
sampler.sample(self.rep)
results = sampler.getdata()
self.assertEqual(203,len(results))
if __name__ == '__main__':
unittest.main()
|
Return to old setting of repetitions for fast testing
|
Return to old setting of repetitions for fast testing
|
Python
|
mit
|
bees4ever/spotpy,bees4ever/spotpy,thouska/spotpy,thouska/spotpy,bees4ever/spotpy,thouska/spotpy
|
4b54d1472a57ad4d45293ec7bdce9a0ed9746bde
|
ideasbox/mixins.py
|
ideasbox/mixins.py
|
from django.views.generic import ListView
class ByTagListView(ListView):
def get_queryset(self):
qs = super(ByTagListView, self).get_queryset()
if 'tag' in self.kwargs:
qs = qs.filter(tags__slug__in=[self.kwargs['tag']])
return qs
def get_context_data(self, **kwargs):
context = super(ByTagListView, self).get_context_data(**kwargs)
context['tag'] = self.kwargs.get('tag')
return context
|
from django.views.generic import ListView
from taggit.models import Tag
class ByTagListView(ListView):
def get_queryset(self):
qs = super(ByTagListView, self).get_queryset()
if 'tag' in self.kwargs:
qs = qs.filter(tags__slug__in=[self.kwargs['tag']])
return qs
def get_context_data(self, **kwargs):
context = super(ByTagListView, self).get_context_data(**kwargs)
context['tag'] = Tag.objects.get(slug=self.kwargs.get('tag'))
return context
|
Use tag name not slug in tag page title
|
Use tag name not slug in tag page title
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,Lcaracol/ideasbox.lan,Lcaracol/ideasbox.lan,Lcaracol/ideasbox.lan,ideascube/ideascube
|
72539e1a83eba8db9adfdeef6099081475ef8d86
|
objectset/forms.py
|
objectset/forms.py
|
from django import forms
from .models import ObjectSet
def objectset_form_factory(Model, queryset=None):
"""Takes an ObjectSet subclass and defines a base form class.
In addition, an optional queryset can be supplied to limit the choices
for the objects.
This uses the generic `objects` field rather being named after a specific
type.
"""
# A few checks to keep things sane..
if not issubclass(Model, ObjectSet):
raise TypeError('{0} must subclass ObjectSet'.format(Model.__name__))
instance = Model()
if queryset is None:
queryset = instance._object_class._default_manager.all()
elif queryset.model is not instance._object_class:
raise TypeError('ObjectSet of type {0}, not {1}'
.format(instance._object_class.__name__,
queryset.model.__name__))
label = getattr(Model, instance._set_object_rel).field.verbose_name
class form_class(forms.ModelForm):
objects = forms.ModelMultipleChoiceField(queryset, label=label,
required=False)
def save(self, *args, **kwargs):
self.instance._pending = self.cleaned_data.get('objects')
return super(form_class, self).save(*args, **kwargs)
class Meta(object):
model = Model
exclude = (instance._set_object_rel,)
form_class.__name__ = '{0}Form'.format(Model.__name__)
return form_class
|
from django import forms
from .models import ObjectSet
def objectset_form_factory(Model, queryset=None):
"""Takes an ObjectSet subclass and defines a base form class.
In addition, an optional queryset can be supplied to limit the choices
for the objects.
This uses the generic `objects` field rather being named after a specific
type.
"""
# A few checks to keep things sane..
if not issubclass(Model, ObjectSet):
raise TypeError('{0} must subclass ObjectSet'.format(Model.__name__))
instance = Model()
if queryset is None:
queryset = instance._object_class._default_manager.all()
elif queryset.model is not instance._object_class:
raise TypeError('ObjectSet of type {0}, not {1}'
.format(instance._object_class.__name__,
queryset.model.__name__))
label = getattr(Model, instance._set_object_rel).field.verbose_name
class form_class(forms.ModelForm):
objects = forms.ModelMultipleChoiceField(queryset, label=label,
required=False)
def save(self, *args, **kwargs):
objects = self.cleaned_data.get('objects')
# Django 1.4 nuance when working with an empty list. It is not
# properly defined an empty query set
if isinstance(objects, list) and not objects:
objects = self.instance.__class__.objects.none()
self.instance._pending = objects
return super(form_class, self).save(*args, **kwargs)
class Meta(object):
model = Model
exclude = (instance._set_object_rel,)
form_class.__name__ = '{0}Form'.format(Model.__name__)
return form_class
|
Handle Django 1.4 nuance for the empty ModelMultipleChoiceField values
|
Handle Django 1.4 nuance for the empty ModelMultipleChoiceField values
|
Python
|
bsd-2-clause
|
chop-dbhi/django-objectset,chop-dbhi/django-objectset
|
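The nuance the commit handles: with an empty selection, Django 1.4's ModelMultipleChoiceField cleans to a plain empty list rather than an empty queryset, and anything downstream that calls queryset methods on _pending breaks. QuerySet.none() produces an empty queryset that still supports the whole queryset API. A short sketch, with a hypothetical model Thing standing in for the set class:

empty = Thing.objects.none()   # an EmptyQuerySet
empty.count()                  # 0, and still a queryset
empty.filter(pk=1)             # chainable, still empty
# [].filter(pk=1)              # AttributeError: a plain list is not a queryset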
6694a9e8d554c9450cdf6cd076bb56a324048b44
|
social/apps/django_app/urls.py
|
social/apps/django_app/urls.py
|
"""URLs module"""
from django.conf import settings
try:
from django.conf.urls import patterns, url
except ImportError:
# Django < 1.4
from django.conf.urls.defaults import patterns, url
from social.utils import setting_name
extra = getattr(settings, setting_name('TRAILING_SLASH'), True) and '/' or ''
urlpatterns = patterns('social.apps.django_app.views',
# authentication / association
url(r'^login/(?P<backend>[^/]+){0}$'.format(extra), 'auth',
name='begin'),
url(r'^complete/(?P<backend>[^/]+){0}$'.format(extra), 'complete',
name='complete'),
# disconnection
url(r'^disconnect/(?P<backend>[^/]+){0}$'.format(extra), 'disconnect',
name='disconnect'),
url(r'^disconnect/(?P<backend>[^/]+)/(?P<association_id>[^/]+){0}$'
.format(extra), 'disconnect', name='disconnect_individual'),
)
|
"""URLs module"""
from django import VERSION
from django.conf import settings
from django.conf.urls import url
from social.apps.django_app import views
from social.utils import setting_name
extra = getattr(settings, setting_name('TRAILING_SLASH'), True) and '/' or ''
urlpatterns = (
# authentication / association
url(r'^login/(?P<backend>[^/]+){0}$'.format(extra),
views.auth,
name='begin'),
url(r'^complete/(?P<backend>[^/]+){0}$'.format(extra),
views.complete,
name='complete'),
# disconnection
url(r'^disconnect/(?P<backend>[^/]+){0}$'.format(extra),
views.disconnect,
name='disconnect'),
url(r'^disconnect/(?P<backend>[^/]+)/(?P<association_id>[^/]+){0}$'
.format(extra),
views.disconnect,
name='disconnect_individual'),
)
|
Update the url patterns to be compliant with Django 1.9 new formatting
|
Update the url patterns to be compliant with Django 1.9 new formatting
no need to support Django < 1.8 because it is now deprecated for
security reasons
|
Python
|
bsd-3-clause
|
cmichal/python-social-auth,cmichal/python-social-auth,cmichal/python-social-auth
|
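The general shape of that migration: patterns('prefix', ...) with dotted-string view names was deprecated in Django 1.8 and removed in 1.10, so urlpatterns becomes a plain list or tuple of url() entries that reference view callables directly. A minimal sketch with a hypothetical views module:

from django.conf.urls import url
from myapp import views  # hypothetical app and module

urlpatterns = [
    url(r'^login/(?P<backend>[^/]+)/$', views.auth, name='begin'),
]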
9fe1c66b70bd396d9a698cc001ccb89644383f3b
|
logs.py
|
logs.py
|
# Copyright 2013 Russell Heilling
import logging
import sys
import args
args.add_argument('--verbose', '-v', action='count',
help='Enable verbose logging.')
ARGS = args.ARGS
LEVEL_SETTERS = set([logging.getLogger().setLevel])
def register_logger(level_setter):
LEVEL_SETTERS.add(level_setter)
def set_logging():
level = logging.WARN
if ARGS.verbose > 1:
level = logging.DEBUG
if ARGS.verbose == 1:
level = logging.INFO
for setter in LEVEL_SETTERS:
setter(level)
|
# Copyright 2013 Russell Heilling
import logging
import sys
import args
args.add_argument('--verbose', '-v', action='count',
help='Enable verbose logging.')
ARGS = args.ARGS
LEVEL_SETTERS = set([logging.getLogger().setLevel])
def register_logger(level_setter):
LEVEL_SETTERS.add(level_setter)
def set_logging():
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s:%(name)s %(message)s'))
logging.getLogger().addHandler(handler)
level = logging.WARN
if ARGS.verbose > 1:
level = logging.DEBUG
if ARGS.verbose == 1:
level = logging.INFO
for setter in LEVEL_SETTERS:
setter(level)
|
Include timestamp in log format
|
Include timestamp in log format
|
Python
|
mit
|
xchewtoyx/comicmgt,xchewtoyx/comicmgt
|
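The format string is what carries the timestamp: %(asctime)s expands to the record's creation time, alongside the level, logger name and message. A self-contained sketch of the same handler wiring:

import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '%(asctime)s %(levelname)s:%(name)s %(message)s'))
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)
logging.getLogger('demo').info('hello')
# e.g.: 2013-07-01 12:00:00,123 INFO:demo hello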
f7fc79ac56b94e2e54d14360b0f9a5ab97d28a3b
|
tests/conftest.py
|
tests/conftest.py
|
"""
Configuration, plugins and fixtures for `pytest`.
"""
from typing import Iterator
import pytest
from mock_vws import MockVWS
from mock_vws.database import VuforiaDatabase
from vws import VWS
pytest_plugins = [ # pylint: disable=invalid-name
'tests.fixtures.images',
]
@pytest.fixture()
def _mock_database() -> Iterator[VuforiaDatabase]:
with MockVWS() as mock:
database = VuforiaDatabase()
mock.add_database(database=database)
yield database
@pytest.fixture()
def client(_mock_database: VuforiaDatabase) -> Iterator[VWS]:
"""
# TODO rename this fixture
Yield a VWS client which connects to a mock.
"""
vws_client = VWS(
server_access_key=_mock_database.server_access_key.decode(),
server_secret_key=_mock_database.server_secret_key.decode(),
)
yield vws_client
@pytest.fixture()
def cloud_reco_client(_mock_database: VuforiaDatabase) -> Iterator[VWS]:
"""
# TODO rename this fixture
Yield a VWS client which connects to a mock.
"""
vws_client = VWS(
server_access_key=_mock_database.server_access_key.decode(),
server_secret_key=_mock_database.server_secret_key.decode(),
)
yield vws_client
|
"""
Configuration, plugins and fixtures for `pytest`.
"""
from typing import Iterator
import pytest
from mock_vws import MockVWS
from mock_vws.database import VuforiaDatabase
from vws import VWS
pytest_plugins = [ # pylint: disable=invalid-name
'tests.fixtures.images',
]
@pytest.fixture()
def _mock_database() -> Iterator[VuforiaDatabase]:
with MockVWS() as mock:
database = VuforiaDatabase()
mock.add_database(database=database)
yield database
@pytest.fixture()
def client(_mock_database: VuforiaDatabase) -> Iterator[VWS]:
"""
# TODO rename this fixture
Yield a VWS client which connects to a mock.
"""
vws_client = VWS(
server_access_key=_mock_database.server_access_key,
server_secret_key=_mock_database.server_secret_key,
)
yield vws_client
@pytest.fixture()
def cloud_reco_client(_mock_database: VuforiaDatabase) -> Iterator[VWS]:
"""
# TODO rename this fixture
Yield a VWS client which connects to a mock.
"""
vws_client = VWS(
server_access_key=_mock_database.server_access_key,
server_secret_key=_mock_database.server_secret_key,
)
yield vws_client
|
Update for new mock database
|
Update for new mock database
|
Python
|
mit
|
adamtheturtle/vws-python,adamtheturtle/vws-python
|
5c0282810f298762bdd00c260f40e2ceb7914eb0
|
tests/test_dna.py
|
tests/test_dna.py
|
#!/usr/bin/env python2
import pytest
from kbkdna.dna import *
def test_reverse_complement():
assert reverse_complement('ATGC') == 'GCAT'
def test_gc_content():
assert gc_content('ATGC') == 0.5
|
#!/usr/bin/env python2
import pytest
import kbkdna
def test_reverse_complement():
assert kbkdna.reverse_complement('ATGC') == 'GCAT'
def test_gc_content():
assert kbkdna.gc_content('ATGC') == 0.5
|
Simplify the imports in the tests.
|
Simplify the imports in the tests.
|
Python
|
mit
|
kalekundert/kbkdna
|
1e62c328bb42ec123e75b5c66fb29f002cd57db2
|
tests/test_nap.py
|
tests/test_nap.py
|
from nap.api import Api
from . import HttpServerTestBase
class TestNap(HttpServerTestBase):
def test_unallowed_method(self):
"""Tries to use non-existent HTTP method"""
api = Api('http://localhost:8888')
with self.assertRaises(AttributeError):
api.resource.nonexisting()
|
"""
Tests for nap module.
These tests only focus that requests is called properly.
Everything related to HTTP requests should be tested in requests' own tests.
"""
import unittest
import requests
from nap.api import Api
class TestNap(unittest.TestCase):
def test_unallowed_method(self):
"""Tries to use non-existent HTTP method"""
api = Api('')
# lambda trickery is necessary, because otherwise it would raise
# AttributeError uncontrolled
self.assertRaises(AttributeError, lambda: api.resource.nonexisting)
def test_requests_raises_error(self):
"""Test that requests properly raises its own errors
>>> requests.get('/kk')
requests.exceptions.MissingSchema: Invalid URL u'/kk':
No schema supplied. Perhaps you meant http:///kk?
"""
api = Api('')
self.assertRaises(requests.exceptions.MissingSchema, api.resource.get)
def test_resource_not_callable(self):
"""Make sure resource can't be called directly"""
api = Api('')
self.assertRaises(TypeError, api.resource)
|
Document module and add couple of tests
|
Document module and add couple of tests
|
Python
|
mit
|
kimmobrunfeldt/nap
|
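About the "lambda trickery" the new test comments on: assertRaises needs a callable, and writing api.resource.nonexisting directly would evaluate the attribute -- and raise -- before assertRaises ever runs; wrapping it in a lambda defers evaluation until assertRaises invokes it. A self-contained sketch with a stub object:

import unittest

class Box(object):
    def __getattr__(self, name):        # every attribute access raises
        raise AttributeError(name)

class TestBox(unittest.TestCase):
    def test_attribute_error(self):
        box = Box()
        # box.missing here would raise before assertRaises is entered;
        # the lambda postpones the access until assertRaises calls it.
        self.assertRaises(AttributeError, lambda: box.missing)

if __name__ == '__main__':
    unittest.main()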
5f2ffbbbf145653b853a4f67308bc28da6839dba
|
main.py
|
main.py
|
"""Usage: chronicler [-c CHRONICLE]
The Chronicler remembers…
Options:
-c, --chronicle CHRONICLE chronicle file to use [default: chronicle.txt]
"""
import docopt
import hjson
import jsonschema
import chronicle
def main():
options = docopt.docopt(__doc__)
try:
c = open(options['--chronicle'])
except FileNotFoundError:
print("No chronicle to read.")
exit(1)
try:
c = hjson.load(c)
except hjson.HjsonDecodeError as e:
print("This chronicle can't be deciphered.")
print("L%d, C%d: %s" % (e.lineno, e.colno, e.msg))
exit(1)
try:
jsonschema.validate(c, chronicle.schema)
except jsonschema.ValidationError as e:
print("This chronicle can't be deciphered.")
print("%s: %s" % (list(e.path), e.message))
exit(1)
print("Behold my story:")
played = 0
won = 0
for h in c:
for a in h['against']:
played += 1
if a['result']['victory'] == True:
won += 1
print("victories: %d/%d" % (won, played))
if __name__ == '__main__':
main()
|
"""Usage: chronicler [-c CHRONICLE]
The Chronicler remembers…
Options:
-c, --chronicle CHRONICLE chronicle file to use [default: chronicle.hjson]
"""
import docopt
import hjson
import jsonschema
import chronicle
def main():
options = docopt.docopt(__doc__)
try:
c = open(options['--chronicle'])
except FileNotFoundError:
print("No chronicle to read.")
exit(1)
try:
c = hjson.load(c)
except hjson.HjsonDecodeError as e:
print("This chronicle can't be deciphered.")
print("L%d, C%d: %s" % (e.lineno, e.colno, e.msg))
exit(1)
try:
jsonschema.validate(c, chronicle.schema)
except jsonschema.ValidationError as e:
print("This chronicle can't be deciphered.")
print("%s: %s" % (list(e.path), e.message))
exit(1)
print("Behold my story:")
played = 0
won = 0
for h in c:
for a in h['against']:
played += 1
if a['result']['victory'] == True:
won += 1
print("victories: %d/%d" % (won, played))
if __name__ == '__main__':
main()
|
Change the default value for the chronicle option
|
Change the default value for the chronicle option
|
Python
|
unlicense
|
elwinar/chronicler
|
5f72b6edc28caa7bf03720ed27a9f3aa32c8323e
|
go/billing/management/commands/go_gen_statements.py
|
go/billing/management/commands/go_gen_statements.py
|
from datetime import datetime
from optparse import make_option
from go.billing.models import Account
from go.billing.tasks import month_range, generate_monthly_statement
from go.base.command_utils import BaseGoCommand, get_user_by_email
class Command(BaseGoCommand):
help = "Generate monthly billing statements for an account."
option_list = BaseGoCommand.option_list + (
make_option(
'--email-address', dest='email_address',
help="Email address of the account to generate statements for."),
make_option(
'--month', dest='month', action='append',
help="Month to generate statements for in the form YYYY-MM, "
"e.g. 2014-01. Multiple may be specified."))
def handle(self, *args, **opts):
user = get_user_by_email(opts['email_address'])
account_number = user.get_profile().user_account
account = Account.objects.get(account_number=account_number)
self.stdout.write(
"Generating statements for account %s..."
% (opts['email_address'],))
months = [datetime.strptime(m, '%Y-%m') for m in opts['months']]
for month in months:
from_date, to_date = month_range(0, month)
generate_monthly_statement(account.id, from_date, to_date)
self.stdout.write("Generated statement for %s." % (month,))
|
from datetime import datetime
from optparse import make_option
from go.billing.models import Account
from go.billing.tasks import month_range, generate_monthly_statement
from go.base.command_utils import BaseGoCommand, get_user_by_email
class Command(BaseGoCommand):
help = "Generate monthly billing statements for an account."
option_list = BaseGoCommand.option_list + (
make_option(
'--email-address', dest='email_address',
help="Email address of the account to generate statements for."),
make_option(
'--month', dest='month', action='append',
help="Month to generate statements for in the form YYYY-MM, "
"e.g. 2014-01. Multiple may be specified."))
def handle(self, *args, **opts):
user = get_user_by_email(opts['email_address'])
account_number = user.get_profile().user_account
account = Account.objects.get(account_number=account_number)
self.stdout.write(
"Generating statements for account %s..."
% (opts['email_address'],))
months = [datetime.strptime(m, '%Y-%m') for m in opts['month']]
for month in months:
from_date, to_date = month_range(0, month)
generate_monthly_statement(account.id, from_date, to_date)
self.stdout.write(
"Generated statement for %s."
% (datetime.strftime(month, '%Y-%m'),))
|
Fix broken billing statement command tests
|
Fix broken billing statement command tests
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
1e8f9a95badc1e2b558bae7570ef9bc23f26a0df
|
pyhaystack/info.py
|
pyhaystack/info.py
|
# -*- coding: utf-8 -*-
"""
File : pyhaystackTest.py (2.x)
This module allows a connection to a haystack server
Features provided allow the user to fetch data from the server and, eventually, to post to it.
See http://www.project-haystack.org for more details
Project Haystack is an open source initiative to streamline working with data from the Internet of Things. We standardize semantic data models and web services with the goal of making it easier to unlock value from the vast quantity of data being generated by the smart devices that permeate our homes, buildings, factories, and cities. Applications include automation, control, energy, HVAC, lighting, and other environmental systems.
"""
__author__ = 'Christian Tremblay, @sjlongland, @sudo-Whateverman, Igor'
__author_email__ = '[email protected]'
__version__ = '0.71.1.8.2'
__license__ = 'LGPL'
|
# -*- coding: utf-8 -*-
"""
File : pyhaystackTest.py (2.x)
This module allows a connection to a haystack server
Features provided allow the user to fetch data from the server and, eventually, to post to it.
See http://www.project-haystack.org for more details
Project Haystack is an open source initiative to streamline working with data from the Internet of Things. We standardize semantic data models and web services with the goal of making it easier to unlock value from the vast quantity of data being generated by the smart devices that permeate our homes, buildings, factories, and cities. Applications include automation, control, energy, HVAC, lighting, and other environmental systems.
"""
__author__ = 'Christian Tremblay, Stuart J. Longland, @sudo-Whateverman, Igor'
__author_email__ = '[email protected]'
__version__ = '0.72'
__license__ = 'LGPL'
|
Modify version to 0.72 to mark change
|
Modify version to 0.72 to mark change
Signed-off-by: Christian Tremblay <[email protected]>
|
Python
|
apache-2.0
|
ChristianTremblay/pyhaystack,vrtsystems/pyhaystack,ChristianTremblay/pyhaystack
|
36ea751618287a75fc82db500d953d4fa40b373b
|
tests/containers/entrypoint/renew-demo-token.py
|
tests/containers/entrypoint/renew-demo-token.py
|
#!/usr/bin/python3
import os
import json
import time
from urllib import request
# Create the requested json
demo_json = {
"payload": {
"aud": "ANY",
"ver": "scitokens:2.0",
"scope": "condor:/READ condor:/WRITE",
"exp": int(time.time() + 3600*8),
"sub": "abh3"
}
}
# Convert the format from dictionary to json string
data = json.dumps({
'payload': json.dumps(demo_json['payload']),
"algorithm": "ES256"
}).encode()
# Headers so that heroku doesn't block us
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3)' +
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
'Content-Type': 'application/json'
}
# The actual request
req = request.Request("https://demo.scitokens.org/issue",
data=data,
headers=headers) # this will make the method "POST"
resp = request.urlopen(req).read()
# Convert the "bytes" response to text
token_path = os.environ.get('BEARER_TOKEN', '') or \
f"/tmp/bt_u{os.geteuid()}"
with open(token_path, 'w') as f:
f.write(resp.decode('utf-8'))
|
#!/usr/bin/python3
import os
import json
import time
from urllib import request
# Request payload
payload = {"aud": "ANY",
"ver": "scitokens:2.0",
"scope": "condor:/READ condor:/WRITE",
"exp": int(time.time() + 3600*8),
"sub": "abh3"
}
# Convert the format from dictionary to json string
data = json.dumps({
'payload': payload,
"algorithm": "ES256"
}).encode()
# Headers so that heroku doesn't block us
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3)' +
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
'Content-Type': 'application/json'
}
# The actual request
req = request.Request("https://demo.scitokens.org/issue",
data=data,
headers=headers) # this will make the method "POST"
resp = request.urlopen(req).read()
# Convert the "bytes" response to text
token_path = os.environ.get('BEARER_TOKEN', '') or \
f"/tmp/bt_u{os.geteuid()}"
with open(token_path, 'w') as f:
f.write(resp.decode('utf-8'))
|
Update token renewal due to demo.scitokens.org API update
|
Update token renewal due to demo.scitokens.org API update
|
Python
|
apache-2.0
|
matyasselmeci/htcondor-ce,brianhlin/htcondor-ce,matyasselmeci/htcondor-ce,brianhlin/htcondor-ce,matyasselmeci/htcondor-ce,brianhlin/htcondor-ce
|
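The essence of that fix is removing a double encoding: the old code ran json.dumps over a payload that had already been serialized to a JSON string, so the server received a quoted string where it now expects an object. A standalone illustration of the difference:

import json

payload = {"sub": "abh3"}
double = json.dumps({"payload": json.dumps(payload)})  # old behaviour
single = json.dumps({"payload": payload})              # new behaviour
print(double)  # {"payload": "{\"sub\": \"abh3\"}"} -- value is a string
print(single)  # {"payload": {"sub": "abh3"}}       -- value is an object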
1eacbac722ca949518e1a8e9d6a0a957e193ba9e
|
tests/functional/staging_and_prod/test_admin.py
|
tests/functional/staging_and_prod/test_admin.py
|
from retry.api import retry_call
from config import config
from tests.pages import UploadCsvPage
from tests.postman import (
send_notification_via_csv,
get_notification_by_id_via_api,
)
from tests.test_utils import assert_notification_body, recordtime, NotificationStatuses
@recordtime
def test_admin(driver, client, login_user):
upload_csv_page = UploadCsvPage(driver)
csv_sms_notification_id = send_notification_via_csv(upload_csv_page, 'sms')
csv_sms_notification = retry_call(
get_notification_by_id_via_api,
fargs=[client, csv_sms_notification_id, NotificationStatuses.SENT],
tries=config['notification_retry_times'],
delay=config['notification_retry_interval']
)
assert_notification_body(csv_sms_notification_id, csv_sms_notification)
csv_email_notification_id = send_notification_via_csv(upload_csv_page, 'email')
csv_email_notification = retry_call(
get_notification_by_id_via_api,
fargs=[client, csv_email_notification_id, NotificationStatuses.SENT],
tries=config['notification_retry_times'],
delay=config['notification_retry_interval']
)
assert_notification_body(csv_email_notification_id, csv_email_notification)
upload_csv_page.sign_out()
|
import pytest
from retry.api import retry_call
from config import config
from tests.pages import UploadCsvPage
from tests.postman import (
send_notification_via_csv,
get_notification_by_id_via_api,
)
from tests.test_utils import assert_notification_body, recordtime, NotificationStatuses
@pytest.mark.skip(reason="intermittent pager duty alerts due to queue backlog")
def test_admin(driver, client, login_user):
upload_csv_page = UploadCsvPage(driver)
csv_sms_notification_id = send_notification_via_csv(upload_csv_page, 'sms')
csv_sms_notification = retry_call(
get_notification_by_id_via_api,
fargs=[client, csv_sms_notification_id, NotificationStatuses.SENT],
tries=config['notification_retry_times'],
delay=config['notification_retry_interval']
)
assert_notification_body(csv_sms_notification_id, csv_sms_notification)
csv_email_notification_id = send_notification_via_csv(upload_csv_page, 'email')
csv_email_notification = retry_call(
get_notification_by_id_via_api,
fargs=[client, csv_email_notification_id, NotificationStatuses.SENT],
tries=config['notification_retry_times'],
delay=config['notification_retry_interval']
)
assert_notification_body(csv_email_notification_id, csv_email_notification)
upload_csv_page.sign_out()
|
Disable CSV upload tests temporarily
|
Disable CSV upload tests temporarily
When the database tasks queue builds up we get false pager duty alerts due to the time it takes for the test csv to get through to the front of the queue.
|
Python
|
mit
|
alphagov/notifications-functional-tests,alphagov/notifications-functional-tests
|
d28c0e25ba9d3779a77d285e0dc82a799643e1e6
|
tests/test_webapps/filestotest/rest_routing.py
|
tests/test_webapps/filestotest/rest_routing.py
|
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from pylons import config
from routes import Mapper
def make_map():
"""Create, configure and return the routes Mapper"""
map = Mapper(directory=config['pylons.paths']['controllers'],
always_scan=config['debug'])
map.minimization = False
# The ErrorController route (handles 404/500 error pages); it should
# likely stay at the top, ensuring it can always be resolved
map.connect('error/:action/:id', controller='error')
# CUSTOM ROUTES HERE
map.resource('restsample', 'restsamples')
map.connect('/:controller/index', action='index')
map.connect('/:controller/:action/')
map.connect('/:controller/:action/:id')
return map
|
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from pylons import config
from routes import Mapper
def make_map():
"""Create, configure and return the routes Mapper"""
map = Mapper(directory=config['pylons.paths']['controllers'],
always_scan=config['debug'])
map.minimization = False
# The ErrorController route (handles 404/500 error pages); it should
# likely stay at the top, ensuring it can always be resolved
map.connect('error/:action/:id', controller='error')
# CUSTOM ROUTES HERE
map.resource('restsample', 'restsamples')
map.connect('/:controller/:action')
map.connect('/:controller/:action/:id')
return map
|
Update to reflect not having trailing slashes
|
Update to reflect not having trailing slashes
--HG--
branch : trunk
|
Python
|
bsd-3-clause
|
Pylons/pylons,moreati/pylons,moreati/pylons,Pylons/pylons,moreati/pylons,Pylons/pylons
|
428889029541bb5c8f8998eb1f4cbc057a80fb87
|
s2v2.py
|
s2v2.py
|
from s2v1 import *
def number_of_records(data_sample):
return len(data_sample)
number_of_ties = number_of_records(data_from_csv)
print(number_of_ties, "ties in our data sample")
|
from s2v1 import *
def number_of_records(data_sample):
return len(data_sample)
number_of_ties = number_of_records(data_from_csv) - 1 # minus header row
print(number_of_ties, "ties in our data sample")
|
Subtract header row for accurate counting
|
Subtract header row for accurate counting
|
Python
|
mit
|
alexmilesyounger/ds_basics
|
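Subtracting one works as long as the sample always ships with a header row; an alternative that avoids the off-by-one entirely is to skip the header while reading. A small self-contained sketch with the csv module (the file name is an assumption):

import csv

with open('data.csv') as f:   # hypothetical file
    reader = csv.reader(f)
    next(reader, None)        # skip the header row
    number_of_ties = sum(1 for _ in reader)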